VirtualBox

Ticket #20055: linux-5.10-r0drv-memobj-fix-r0.patch

File linux-5.10-r0drv-memobj-fix-r0.patch, 3.2 KB (added by aeichner, 4 years ago)

Fix to get the kernel module to compile and work on Linux 5.10

  • src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c

     
    5656 * Whether we use alloc_vm_area (3.2+) for executable memory.
    5757 * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
    5858 * better W^R compliance (fExecutable flag). */
    59 #if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
     59#if RTLNX_VER_RANGE(3,2,0, 5,10,0) || defined(DOXYGEN_RUNNING)
    6060# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
    6161#endif
     62#if RTLNX_VER_MIN(5,10,0) || defined(DOXYGEN_RUNNING)
     63# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
     64#endif
    6265
    6366/*
    6467 * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
     
    502505}
    503506
    504507
     508#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
    505509/**
     510 * User data passed to the apply_to_page_range() callback.
     511 */
     512typedef struct LNXAPPLYPGRANGE
     513{
     514    /** Pointer to the memory object. */
     515    PRTR0MEMOBJLNX pMemLnx;
     516    /** The page protection flags to apply. */
     517    pgprot_t       fPg;
     518} LNXAPPLYPGRANGE;
     519/** Pointer to the user data. */
     520typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
     521/** Pointer to the const user data. */
     522typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
     523
     524/**
     525 * Callback called in apply_to_page_range().
     526 *
     527 * @returns Linux status code.
     528 * @param   pPte                Pointer to the page table entry for the given address.
     529 * @param   uAddr               The address to apply the new protection to.
     530 * @param   pvUser              The opaque user data.
     531 */
     532static DECLCALLBACK(int) rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
     533{
     534    PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
     535    PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
     536    uint32_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
     537
     538    set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
     539    return 0;
     540}
     541#endif
     542
     543
     544/**
    506545 * Maps the allocation into ring-0.
    507546 *
    508547 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
     
    584623        else
    585624# endif
    586625        {
     626#  if defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
     627            if (fExecutable)
     628                pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
     629#  endif
     630
    587631# ifdef VM_MAP
    588632            pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
    589633# else
     
    18511895        preempt_enable();
    18521896        return VINF_SUCCESS;
    18531897    }
     1898# elif defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
     1899    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
     1900    if (   pMemLnx->fExecutable
     1901        && pMemLnx->fMappedToRing0)
     1902    {
     1903        LNXAPPLYPGRANGE Args;
     1904        Args.pMemLnx = pMemLnx;
     1905        Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
     1906        int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
     1907                                        rtR0MemObjLinuxApplyPageRange, (void *)&Args);
     1908        if (rcLnx)
     1909            return VERR_NOT_SUPPORTED;
     1910
     1911        return VINF_SUCCESS;
     1912    }
    18541913# endif
    18551914
    18561915    NOREF(pMem);

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy