VirtualBox

Ticket #19644: vb-6.0.24-kernel-5.8-p4-2.patch

File vb-6.0.24-kernel-5.8-p4-2.patch, 11.3 KB (added by hwertz, 4 years ago)

6.0.24 patch 2

  • src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c

--- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
@@ -52,6 +52,14 @@
 # define PAGE_READONLY_EXEC PAGE_READONLY
 #endif
 
+/** @def IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ * Whether we use alloc_vm_area (3.2+) for executable memory.
+ * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
+ * better W^R compliance (fExecutable flag). */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) || defined(DOXYGEN_RUNNING)
+# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+#endif
+
 /*
  * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
  * track_pfn_vma_new() is apparently not defined for non-RAM pages.
@@ -72,12 +80,27 @@
 # define gfp_t  unsigned
 #endif
 
+/*
+ * Wrappers around mmap_lock/mmap_sem difference.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+# define LNX_MM_DOWN_READ(a_pMm)    down_read(&(a_pMm)->mmap_lock)
+# define LNX_MM_UP_READ(a_pMm)      up_read(&(a_pMm)->mmap_lock)
+# define LNX_MM_DOWN_WRITE(a_pMm)   down_write(&(a_pMm)->mmap_lock)
+# define LNX_MM_UP_WRITE(a_pMm)     up_write(&(a_pMm)->mmap_lock)
+#else
+# define LNX_MM_DOWN_READ(a_pMm)    down_read(&(a_pMm)->mmap_sem)
+# define LNX_MM_UP_READ(a_pMm)      up_read(&(a_pMm)->mmap_sem)
+# define LNX_MM_DOWN_WRITE(a_pMm)   down_write(&(a_pMm)->mmap_sem)
+# define LNX_MM_UP_WRITE(a_pMm)     up_write(&(a_pMm)->mmap_sem)
+#endif
+
 
 /*********************************************************************************************************************************
 *   Structures and Typedefs                                                                                                      *
 *********************************************************************************************************************************/
 /**
- * The Darwin version of the memory object structure.
+ * The Linux version of the memory object structure.
  */
 typedef struct RTR0MEMOBJLNX
 {
@@ -90,11 +113,20 @@ typedef struct RTR0MEMOBJLNX
     bool                fExecutable;
     /** Set if we've vmap'ed the memory into ring-0. */
     bool                fMappedToRing0;
+#ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+    /** Return from alloc_vm_area() that we now need to use for executable
+     *  memory. */
+    struct vm_struct   *pArea;
+    /** PTE array that goes along with pArea (must be freed). */
+    pte_t             **papPtesForArea;
+#endif
     /** The pages in the apPages array. */
     size_t              cPages;
     /** Array of struct page pointers. (variable size) */
     struct page        *apPages[1];
-} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
+} RTR0MEMOBJLNX;
+/** Pointer to the linux memory object. */
+typedef RTR0MEMOBJLNX *PRTR0MEMOBJLNX;
 
 
 static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);
@@ -182,7 +214,7 @@ static pgprot_t rtR0MemObjLinuxConvertPr
  * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativerMapUser that creates
  * an empty user space mapping.
  *
- * We acquire the mmap_sem of the task!
+ * We acquire the mmap_sem/mmap_lock of the task!
  *
  * @returns Pointer to the mapping.
  *          (void *)-1 on failure.
@@ -222,9 +254,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
         ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
 #else
-        down_write(&pTask->mm->mmap_sem);
+        LNX_MM_DOWN_WRITE(pTask->mm);
         ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
-        up_write(&pTask->mm->mmap_sem);
+        LNX_MM_UP_WRITE(pTask->mm);
 #endif
     }
     else
@@ -232,9 +264,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
         ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
 #else
-        down_write(&pTask->mm->mmap_sem);
+        LNX_MM_DOWN_WRITE(pTask->mm);
         ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
-        up_write(&pTask->mm->mmap_sem);
+        LNX_MM_UP_WRITE(pTask->mm);
 #endif
         if (    !(ulAddr & ~PAGE_MASK)
             &&  (ulAddr & (uAlignment - 1)))
@@ -257,7 +289,7 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
 * Worker that destroys a user space mapping.
 * Undoes what rtR0MemObjLinuxDoMmap did.
 *
- * We acquire the mmap_sem of the task!
+ * We acquire the mmap_sem/mmap_lock of the task!
 *
 * @param   pv          The ring-3 mapping.
 * @param   cb          The size of the mapping.
@@ -269,13 +301,13 @@ static void rtR0MemObjLinuxDoMunmap(void
     Assert(pTask == current); RT_NOREF_PV(pTask);
     vm_munmap((unsigned long)pv, cb);
 #elif defined(USE_RHEL4_MUNMAP)
-    down_write(&pTask->mm->mmap_sem);
+    LNX_MM_DOWN_WRITE(pTask->mm);
     do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
-    up_write(&pTask->mm->mmap_sem);
+    LNX_MM_UP_WRITE(pTask->mm);
 #else
-    down_write(&pTask->mm->mmap_sem);
+    LNX_MM_DOWN_WRITE(pTask->mm);
     do_munmap(pTask->mm, (unsigned long)pv, cb);
-    up_write(&pTask->mm->mmap_sem);
+    LNX_MM_UP_WRITE(pTask->mm);
 #endif
 }
 
@@ -520,15 +552,49 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
             pgprot_val(fPg) |= _PAGE_NX;
 # endif
 
+# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+        if (fExecutable)
+        {
+            pte_t **papPtes = (pte_t **)kmalloc_array(pMemLnx->cPages, sizeof(papPtes[0]), GFP_KERNEL);
+            if (papPtes)
+            {
+                pMemLnx->pArea = alloc_vm_area(pMemLnx->Core.cb, papPtes); /* Note! pArea->nr_pages is not set. */
+                if (pMemLnx->pArea)
+                {
+                    size_t i;
+                    Assert(pMemLnx->pArea->size >= pMemLnx->Core.cb);   /* Note! includes guard page. */
+                    Assert(pMemLnx->pArea->addr);
+#  ifdef _PAGE_NX
+                    pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
+#  endif
+                    pMemLnx->papPtesForArea = papPtes;
+                    for (i = 0; i < pMemLnx->cPages; i++)
+                        *papPtes[i] = mk_pte(pMemLnx->apPages[i], fPg);
+                    pMemLnx->Core.pv = pMemLnx->pArea->addr;
+                    pMemLnx->fMappedToRing0 = true;
+                }
+                else
+                {
+                    kfree(papPtes);
+                    rc = VERR_MAP_FAILED;
+                }
+            }
+            else
+                rc = VERR_MAP_FAILED;
+        }
+        else
+# endif
+        {
 # ifdef VM_MAP
-        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
+            pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
 # else
-        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
+            pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
 # endif
-        if (pMemLnx->Core.pv)
-            pMemLnx->fMappedToRing0 = true;
-        else
-            rc = VERR_MAP_FAILED;
+            if (pMemLnx->Core.pv)
+                pMemLnx->fMappedToRing0 = true;
+            else
+                rc = VERR_MAP_FAILED;
+        }
 #else   /* < 2.4.22 */
         rc = VERR_NOT_SUPPORTED;
 #endif
@@ -554,6 +620,22 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
 static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
 {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+    if (pMemLnx->pArea)
+    {
+#  if 0
+        pte_t **papPtes = pMemLnx->papPtesForArea;
+        size_t  i;
+        for (i = 0; i < pMemLnx->cPages; i++)
+            *papPtes[i] = 0;
+#  endif
+        free_vm_area(pMemLnx->pArea);
+        kfree(pMemLnx->papPtesForArea);
+        pMemLnx->pArea = NULL;
+        pMemLnx->papPtesForArea = NULL;
+    }
+    else
+# endif
     if (pMemLnx->fMappedToRing0)
     {
         Assert(pMemLnx->Core.pv);
@@ -593,7 +675,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR
                 size_t              iPage;
                 Assert(pTask);
                 if (pTask && pTask->mm)
-                    down_read(&pTask->mm->mmap_sem);
+                    LNX_MM_DOWN_READ(pTask->mm);
 
                 iPage = pMemLnx->cPages;
                 while (iPage-- > 0)
@@ -608,7 +690,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR
                 }
 
                 if (pTask && pTask->mm)
-                    up_read(&pTask->mm->mmap_sem);
+                    LNX_MM_UP_READ(pTask->mm);
             }
             /* else: kernel memory - nothing to do here. */
             break;
@@ -1076,7 +1158,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
     papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
     if (papVMAs)
     {
-        down_read(&pTask->mm->mmap_sem);
+        LNX_MM_DOWN_READ(pTask->mm);
 
         /*
         * Get user pages.
@@ -1162,7 +1244,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
                 papVMAs[rc]->vm_flags |= VM_DONTCOPY | VM_LOCKED;
             }
 
-            up_read(&pTask->mm->mmap_sem);
+            LNX_MM_UP_READ(pTask->mm);
 
             RTMemFree(papVMAs);
 
@@ -1189,7 +1271,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
 #endif
         }
 
-        up_read(&pTask->mm->mmap_sem);
+        LNX_MM_UP_READ(pTask->mm);
 
         RTMemFree(papVMAs);
         rc = VERR_LOCK_FAILED;
@@ -1422,6 +1504,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapKerne
              * Use vmap - 2.4.22 and later.
              */
             pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
+            /** @todo We don't really care too much for EXEC here... 5.8 always adds NX. */
             Assert(((offSub + cbSub) >> PAGE_SHIFT) <= pMemLnxToMap->cPages);
 # ifdef VM_MAP
             pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[offSub >> PAGE_SHIFT], cbSub >> PAGE_SHIFT, VM_MAP, fPg);
@@ -1604,7 +1687,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
             const size_t    cPages    = (offSub + cbSub) >> PAGE_SHIFT;
             size_t          iPage;
 
-            down_write(&pTask->mm->mmap_sem);
+            LNX_MM_DOWN_WRITE(pTask->mm);
 
             rc = VINF_SUCCESS;
             if (pMemLnxToMap->cPages)
@@ -1721,7 +1804,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
             }
 #endif /* CONFIG_NUMA_BALANCING */
 
-            up_write(&pTask->mm->mmap_sem);
+            LNX_MM_UP_WRITE(pTask->mm);
 
             if (RT_SUCCESS(rc))
             {
@@ -1753,6 +1836,29 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
 
 DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
 {
+# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+    /*
+     * Currently only supported when we've got addresses PTEs from the kernel.
+     */
+    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
+    if (pMemLnx->pArea && pMemLnx->papPtesForArea)
+    {
+        pgprot_t const  fPg     = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
+        size_t const    cPages  = (offSub + cbSub) >> PAGE_SHIFT;
+        pte_t         **papPtes = pMemLnx->papPtesForArea;
+        size_t          i;
+
+        for (i = offSub >> PAGE_SHIFT; i < cPages; i++)
+        {
+            set_pte(papPtes[i], mk_pte(pMemLnx->apPages[i], fPg));
+        }
+        preempt_disable();
+        __flush_tlb_all();
+        preempt_enable();
+        return VINF_SUCCESS;
+    }
+# endif
+
     NOREF(pMem);
     NOREF(offSub);
     NOREF(cbSub);
