Ticket #19644: vb-6.0.24-kernel-5.8-p4-2.patch
File vb-6.0.24-kernel-5.8-p4-2.patch, 11.3 KB (added 4 years ago)
src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
--- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
@@ -52,6 +52,14 @@
 # define PAGE_READONLY_EXEC PAGE_READONLY
 #endif
 
+/** @def IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ * Whether we use alloc_vm_area (3.2+) for executable memory.
+ * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
+ * better W^R compliance (fExecutable flag). */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) || defined(DOXYGEN_RUNNING)
+# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+#endif
+
 /*
  * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
  * track_pfn_vma_new() is apparently not defined for non-RAM pages.
@@ -72,12 +80,27 @@
 # define gfp_t  unsigned
 #endif
 
+/*
+ * Wrappers around mmap_lock/mmap_sem difference.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+# define LNX_MM_DOWN_READ(a_pMm)    down_read(&(a_pMm)->mmap_lock)
+# define LNX_MM_UP_READ(a_pMm)      up_read(&(a_pMm)->mmap_lock)
+# define LNX_MM_DOWN_WRITE(a_pMm)   down_write(&(a_pMm)->mmap_lock)
+# define LNX_MM_UP_WRITE(a_pMm)     up_write(&(a_pMm)->mmap_lock)
+#else
+# define LNX_MM_DOWN_READ(a_pMm)    down_read(&(a_pMm)->mmap_sem)
+# define LNX_MM_UP_READ(a_pMm)      up_read(&(a_pMm)->mmap_sem)
+# define LNX_MM_DOWN_WRITE(a_pMm)   down_write(&(a_pMm)->mmap_sem)
+# define LNX_MM_UP_WRITE(a_pMm)     up_write(&(a_pMm)->mmap_sem)
+#endif
+
 
 /*********************************************************************************************************************************
 *   Structures and Typedefs                                                                                                      *
 *********************************************************************************************************************************/
 /**
- * The Darwin version of the memory object structure.
+ * The Linux version of the memory object structure.
  */
 typedef struct RTR0MEMOBJLNX
 {
@@ -90,11 +113,20 @@ typedef struct RTR0MEMOBJLNX
     bool                fExecutable;
     /** Set if we've vmap'ed the memory into ring-0. */
     bool                fMappedToRing0;
+#ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+    /** Return from alloc_vm_area() that we now need to use for executable
+     *  memory. */
+    struct vm_struct   *pArea;
+    /** PTE array that goes along with pArea (must be freed). */
+    pte_t             **papPtesForArea;
+#endif
     /** The pages in the apPages array. */
     size_t              cPages;
     /** Array of struct page pointers. (variable size) */
     struct page        *apPages[1];
-} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
+} RTR0MEMOBJLNX;
+/** Pointer to the linux memory object. */
+typedef RTR0MEMOBJLNX *PRTR0MEMOBJLNX;
 
 
 static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);
@@ -182,7 +214,7 @@ static pgprot_t rtR0MemObjLinuxConvertPr
  * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativerMapUser that creates
  * an empty user space mapping.
  *
- * We acquire the mmap_sem of the task!
+ * We acquire the mmap_sem/mmap_lock of the task!
  *
  * @returns Pointer to the mapping.
  *          (void *)-1 on failure.
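
Kernel 5.8 renamed mm_struct::mmap_sem to mmap_lock (it is still an rw_semaphore underneath), so every down_read(&mm->mmap_sem)-style call site in this file stops building on 5.8+. The hunks above hide that rename behind the LNX_MM_* wrappers instead of adding a version check at every caller. A minimal sketch of what a call site looks like with the wrappers in place; the lnxExampleVmaFlags() helper is purely illustrative and not part of the patch:

    #include <linux/version.h>
    #include <linux/mm.h>

    /* Same shape as the wrappers introduced by the patch (read side only). */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
    # define LNX_MM_DOWN_READ(a_pMm)  down_read(&(a_pMm)->mmap_lock)
    # define LNX_MM_UP_READ(a_pMm)    up_read(&(a_pMm)->mmap_lock)
    #else
    # define LNX_MM_DOWN_READ(a_pMm)  down_read(&(a_pMm)->mmap_sem)
    # define LNX_MM_UP_READ(a_pMm)    up_read(&(a_pMm)->mmap_sem)
    #endif

    /* Illustrative only: fetch the vm_flags of the VMA covering uAddr while
     * holding the mmap lock for reading, on either kernel generation. */
    static unsigned long lnxExampleVmaFlags(struct mm_struct *pMm, unsigned long uAddr)
    {
        struct vm_area_struct *pVma;
        unsigned long          fFlags = 0;

        LNX_MM_DOWN_READ(pMm);
        pVma = find_vma(pMm, uAddr);
        if (pVma && uAddr >= pVma->vm_start)
            fFlags = pVma->vm_flags;
        LNX_MM_UP_READ(pMm);
        return fFlags;
    }
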
@@ -222,9 +254,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
         ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
 #else
-        down_write(&pTask->mm->mmap_sem);
+        LNX_MM_DOWN_WRITE(pTask->mm);
         ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
-        up_write(&pTask->mm->mmap_sem);
+        LNX_MM_UP_WRITE(pTask->mm);
 #endif
     }
     else
@@ -232,9 +264,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
         ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
 #else
-        down_write(&pTask->mm->mmap_sem);
+        LNX_MM_DOWN_WRITE(pTask->mm);
         ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
-        up_write(&pTask->mm->mmap_sem);
+        LNX_MM_UP_WRITE(pTask->mm);
 #endif
         if (   !(ulAddr & ~PAGE_MASK)
             && (ulAddr & (uAlignment - 1)))
@@ -257,7 +289,7 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
  * Worker that destroys a user space mapping.
  * Undoes what rtR0MemObjLinuxDoMmap did.
  *
- * We acquire the mmap_sem of the task!
+ * We acquire the mmap_sem/mmap_lock of the task!
  *
  * @param   pv      The ring-3 mapping.
  * @param   cb      The size of the mapping.
@@ -269,13 +301,13 @@ static void rtR0MemObjLinuxDoMunmap(void
     Assert(pTask == current); RT_NOREF_PV(pTask);
     vm_munmap((unsigned long)pv, cb);
 #elif defined(USE_RHEL4_MUNMAP)
-    down_write(&pTask->mm->mmap_sem);
+    LNX_MM_DOWN_WRITE(pTask->mm);
     do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
-    up_write(&pTask->mm->mmap_sem);
+    LNX_MM_UP_WRITE(pTask->mm);
 #else
-    down_write(&pTask->mm->mmap_sem);
+    LNX_MM_DOWN_WRITE(pTask->mm);
     do_munmap(pTask->mm, (unsigned long)pv, cb);
-    up_write(&pTask->mm->mmap_sem);
+    LNX_MM_UP_WRITE(pTask->mm);
 #endif
 }
 
@@ -520,15 +552,49 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
         pgprot_val(fPg) |= _PAGE_NX;
 # endif
 
+# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+    if (fExecutable)
+    {
+        pte_t **papPtes = (pte_t **)kmalloc_array(pMemLnx->cPages, sizeof(papPtes[0]), GFP_KERNEL);
+        if (papPtes)
+        {
+            pMemLnx->pArea = alloc_vm_area(pMemLnx->Core.cb, papPtes); /* Note! pArea->nr_pages is not set. */
+            if (pMemLnx->pArea)
+            {
+                size_t i;
+                Assert(pMemLnx->pArea->size >= pMemLnx->Core.cb);   /* Note! includes guard page. */
+                Assert(pMemLnx->pArea->addr);
+# ifdef _PAGE_NX
+                pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
+# endif
+                pMemLnx->papPtesForArea = papPtes;
+                for (i = 0; i < pMemLnx->cPages; i++)
+                    *papPtes[i] = mk_pte(pMemLnx->apPages[i], fPg);
+                pMemLnx->Core.pv = pMemLnx->pArea->addr;
+                pMemLnx->fMappedToRing0 = true;
+            }
+            else
+            {
+                kfree(papPtes);
+                rc = VERR_MAP_FAILED;
+            }
+        }
+        else
+            rc = VERR_MAP_FAILED;
+    }
+    else
+# endif
+    {
 # ifdef VM_MAP
-    pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
+        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
 # else
-    pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
+        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
 # endif
-    if (pMemLnx->Core.pv)
-        pMemLnx->fMappedToRing0 = true;
-    else
-        rc = VERR_MAP_FAILED;
+        if (pMemLnx->Core.pv)
+            pMemLnx->fMappedToRing0 = true;
+        else
+            rc = VERR_MAP_FAILED;
+    }
 #else   /* < 2.4.22 */
     rc = VERR_NOT_SUPPORTED;
 #endif
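
The rtR0MemObjLinuxVMap() hunk above is the heart of the executable-memory change: instead of relying on vmap()/__vmalloc() to hand back an executable ring-0 mapping (5.8 dropped the pgprot argument from __vmalloc(), and the patch's own @todo further down notes that "5.8 always adds NX" on the plain vmap path), it reserves a kernel virtual range with alloc_vm_area() and fills in the PTEs itself, initially with NX set, so the protection can be flipped later in W^X fashion. A stripped-down sketch of that technique, assuming the caller already has an array of struct page pointers; example_map_pages_nx() and its error handling are illustrative, not the patch's code:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <linux/slab.h>

    /* Illustrative sketch of the alloc_vm_area() technique used above: reserve a
     * kernel virtual range, then populate its PTEs by hand so the page protection
     * stays under the driver's control (NX now, executable later via a protection
     * change).  alloc_vm_area() exists from 3.2 until its removal around 5.11. */
    static void *example_map_pages_nx(struct page **papPages, size_t cPages)
    {
        pte_t           **papPtes;
        struct vm_struct *pArea;
        size_t            i;

        papPtes = kmalloc_array(cPages, sizeof(papPtes[0]), GFP_KERNEL);
        if (!papPtes)
            return NULL;

        pArea = alloc_vm_area(cPages << PAGE_SHIFT, papPtes); /* one PTE pointer per page */
        if (!pArea)
        {
            kfree(papPtes);
            return NULL;
        }

        for (i = 0; i < cPages; i++)
            set_pte(papPtes[i], mk_pte(papPages[i], PAGE_KERNEL)); /* PAGE_KERNEL = RW + NX on x86 */

        kfree(papPtes); /* the real patch keeps this array so the PTEs can be rewritten later */
        return pArea->addr;
    }
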
@@ -554,6 +620,22 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
 static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
 {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+    if (pMemLnx->pArea)
+    {
+#  if 0
+        pte_t **papPtes = pMemLnx->papPtesForArea;
+        size_t  i;
+        for (i = 0; i < pMemLnx->cPages; i++)
+            *papPtes[i] = 0;
+#  endif
+        free_vm_area(pMemLnx->pArea);
+        kfree(pMemLnx->papPtesForArea);
+        pMemLnx->pArea = NULL;
+        pMemLnx->papPtesForArea = NULL;
+    }
+    else
+# endif
     if (pMemLnx->fMappedToRing0)
     {
         Assert(pMemLnx->Core.pv);
@@ -593,7 +675,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR
             size_t iPage;
             Assert(pTask);
             if (pTask && pTask->mm)
-                down_read(&pTask->mm->mmap_sem);
+                LNX_MM_DOWN_READ(pTask->mm);
 
             iPage = pMemLnx->cPages;
             while (iPage-- > 0)
@@ -608,7 +690,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR
             }
 
             if (pTask && pTask->mm)
-                up_read(&pTask->mm->mmap_sem);
+                LNX_MM_UP_READ(pTask->mm);
         }
         /* else: kernel memory - nothing to do here. */
         break;
@@ -1076,7 +1158,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
     papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
     if (papVMAs)
     {
-        down_read(&pTask->mm->mmap_sem);
+        LNX_MM_DOWN_READ(pTask->mm);
 
         /*
          * Get user pages.
@@ -1162,7 +1244,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
                 papVMAs[rc]->vm_flags |= VM_DONTCOPY | VM_LOCKED;
             }
 
-            up_read(&pTask->mm->mmap_sem);
+            LNX_MM_UP_READ(pTask->mm);
 
             RTMemFree(papVMAs);
 
@@ -1189,7 +1271,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
 #endif
             }
 
-            up_read(&pTask->mm->mmap_sem);
+            LNX_MM_UP_READ(pTask->mm);
 
             RTMemFree(papVMAs);
             rc = VERR_LOCK_FAILED;
@@ -1422,6 +1504,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapKerne
          * Use vmap - 2.4.22 and later.
          */
         pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
+        /** @todo We don't really care too much for EXEC here... 5.8 always adds NX. */
         Assert(((offSub + cbSub) >> PAGE_SHIFT) <= pMemLnxToMap->cPages);
 # ifdef VM_MAP
         pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[offSub >> PAGE_SHIFT], cbSub >> PAGE_SHIFT, VM_MAP, fPg);
@@ -1604,7 +1687,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
         const size_t    cPages = (offSub + cbSub) >> PAGE_SHIFT;
         size_t          iPage;
 
-        down_write(&pTask->mm->mmap_sem);
+        LNX_MM_DOWN_WRITE(pTask->mm);
 
         rc = VINF_SUCCESS;
         if (pMemLnxToMap->cPages)
@@ -1721,7 +1804,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
         }
 #endif /* CONFIG_NUMA_BALANCING */
 
-        up_write(&pTask->mm->mmap_sem);
+        LNX_MM_UP_WRITE(pTask->mm);
 
         if (RT_SUCCESS(rc))
         {
@@ -1753,6 +1836,29 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
 
 DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
 {
+# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+    /*
+     * Currently only supported when we've got addresses PTEs from the kernel.
+     */
+    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
+    if (pMemLnx->pArea && pMemLnx->papPtesForArea)
+    {
+        pgprot_t const  fPg     = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
+        size_t const    cPages  = (offSub + cbSub) >> PAGE_SHIFT;
+        pte_t         **papPtes = pMemLnx->papPtesForArea;
+        size_t          i;
+
+        for (i = offSub >> PAGE_SHIFT; i < cPages; i++)
+        {
+            set_pte(papPtes[i], mk_pte(pMemLnx->apPages[i], fPg));
+        }
+        preempt_disable();
+        __flush_tlb_all();
+        preempt_enable();
+        return VINF_SUCCESS;
+    }
+# endif
+
     NOREF(pMem);
     NOREF(offSub);
     NOREF(cbSub);
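
The new rtR0MemObjNativeProtect() body closes the W^X loop: the executable allocation comes back writable but non-executable, the caller writes its code, and the protection change rewrites the saved PTEs with the new pgprot and flushes the TLB. From the point of view of IPRT's public memobj API the flow then looks roughly like the following; exampleAllocExecPage() is a hypothetical caller, not part of the patch, and it assumes cbCode fits in one page:

    #include <iprt/memobj.h>
    #include <iprt/param.h>
    #include <iprt/string.h>
    #include <iprt/errcore.h>

    /* Hypothetical ring-0 caller: allocate a page of code memory in W^X fashion.
     * With the patch the pages start out RW + NX; RTR0MemObjProtect() then lands in
     * the set_pte()/__flush_tlb_all() path above and makes them read + execute. */
    static int exampleAllocExecPage(const void *pvCode, size_t cbCode, PRTR0MEMOBJ phMemObj)
    {
        RTR0MEMOBJ hMemObj;
        int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, true /*fExecutable*/);
        if (RT_SUCCESS(rc))
        {
            memcpy(RTR0MemObjAddress(hMemObj), pvCode, cbCode);  /* write while still writable */
            rc = RTR0MemObjProtect(hMemObj, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_EXEC);
            if (RT_SUCCESS(rc))
            {
                *phMemObj = hMemObj;
                return VINF_SUCCESS;
            }
            RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
        }
        return rc;
    }
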

