#! /bin/sh /usr/share/dpatch/dpatch-run
## u04-fix-hypervisor-mapping-write-conflicts.dpatch by Arand Nash <ienorand@gmail.com>
##
## All lines beginning with `## DP:' are a description of the patch.
## DP: Check early in the #PF handler for write conflicts with the hypervisor
## DP: mapping; on a conflict, force a CR3 sync and emulate the instruction
## DP: instead of letting the guest run into a #PF storm.

@DPATCH@
diff -urNad virtualbox-ose-3.0.8-dfsg~/src/VBox/VMM/VMMAll/PGMAllBth.h virtualbox-ose-3.0.8-dfsg/src/VBox/VMM/VMMAll/PGMAllBth.h
--- virtualbox-ose-3.0.8-dfsg~/src/VBox/VMM/VMMAll/PGMAllBth.h 2009-10-13 12:51:11.000000000 +0100
+++ virtualbox-ose-3.0.8-dfsg/src/VBox/VMM/VMMAll/PGMAllBth.h 2010-04-13 10:36:28.262176862 +0100
@@ -149,6 +149,47 @@
     const unsigned  iPDSrc = 0;
 # endif /* !PGM_WITH_PAGING */
 
+# if !defined(PGM_WITHOUT_MAPPINGS) && ((PGM_GST_TYPE == PGM_TYPE_32BIT) || (PGM_GST_TYPE == PGM_TYPE_PAE))
+    /*
+     * Check for write conflicts with our hypervisor mapping early on. If the guest happens to access a non-present page,
+     * where our hypervisor is currently mapped, then we'll create a #PF storm in the guest.
+     */
+    if ((uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW))
+    {
+        pgmLock(pVM);
+#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
+        const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
+        PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
+#  else  /* PGM_SHW_TYPE == PGM_TYPE_PAE */
+        const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
+
+        PX86PDPAE       pPDDst;
+#   if PGM_GST_TYPE != PGM_TYPE_PAE
+        X86PDPE         PdpeSrc;
+
+        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
+        PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
+#   endif
+        int rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
+        if (rc != VINF_SUCCESS)
+        {
+            pgmUnlock(pVM);
+            AssertRC(rc);
+            return rc;
+        }
+        Assert(pPDDst);
+#  endif
+        if (pPDDst->a[iPDDst].u & PGM_PDFLAGS_MAPPING)
+        {
+            pgmUnlock(pVM);
+            /* Force a CR3 sync to check for conflicts and emulate the instruction. */
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+            return VINF_EM_RAW_EMULATE_INSTR;
+        }
+        pgmUnlock(pVM);
+    }
+# endif
+
     /* Fetch the guest PDE */
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];