Ticket #17316: vmx_singlestep_001.diff
File vmx_singlestep_001.diff, 13.8 KB (added 6 years ago)
src/VBox/VMM/VMMR0/HMVMXR0.cpp
```diff
@@ -3496 +3496 @@
 
 
 /**
- * Exports the guest's interruptibility-state into the guest-state area in the
- * VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   fIntrState  The interruptibility-state to set.
- */
-static int hmR0VmxExportGuestIntrState(PVMCPU pVCpu, uint32_t fIntrState)
-{
-    NOREF(pVCpu);
-    AssertMsg(!(fIntrState & 0xfffffff0), ("%#x\n", fIntrState));   /* Bits 31:4 MBZ. */
-    Assert((fIntrState & 0x3) != 0x3);    /* Block-by-STI and MOV SS cannot be simultaneously set. */
-    return VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
-}
-
-
-/**
  * Exports the exception intercepts required for guest execution in the VMCS.
  *
  * @returns VBox status code.
@@ -3637 +3620 @@
         int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
         AssertRCReturn(rc, rc);
 
+        /*
+         * Setup pending debug exceptions if the guest is single-stepping using EFLAGS.TF.
+         *
+         * We must avoid setting any automatic debug exceptions delivery when single-stepping
+         * through the hypervisor debugger using EFLAGS.TF.
+         */
+        if (   !pVCpu->hm.s.fSingleInstruction
+            && fEFlags.Bits.u1TF)
+        {
+            rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
+            AssertRCReturn(rc, rc);
+        }
+
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
         Log4Func(("EFlags=%#RX32\n", fEFlags.u32));
     }
@@ -4196 +4192 @@
     int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
     AssertRCReturn(rc, rc);
 
+    /*
+     * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
+     * we need to clear interrupt inhibition if any as otherwise it causes a VM-entry failure.
+     *
+     * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
+     */
+    if (fSteppingDB)
+    {
+        Assert(pVCpu->hm.s.fSingleInstruction);
+        Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
+
+        uint32_t fIntrState = 0;
+        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
+        AssertRCReturn(rc, rc);
+
+        if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
+        {
+            fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
+            rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
+            AssertRCReturn(rc, rc);
+        }
+    }
+
     return VINF_SUCCESS;
 }
```
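Note on the hunk above: the VM-entry rule it works around can be stated compactly. The following is a minimal sketch, not VirtualBox code; the constants mirror the Intel SDM bit encodings, the helper name is made up, and it assumes IA32_DEBUGCTL.BTF is 0 (as it is in this path).

```c
#include <stdbool.h>
#include <stdint.h>

/* VMCS interruptibility-state bits and the pending-debug-exceptions BS bit,
   using the encodings from the Intel SDM. */
#define INT_STATE_BLOCK_STI    UINT32_C(0x00000001)  /* Bit 0: blocking by STI. */
#define INT_STATE_BLOCK_MOVSS  UINT32_C(0x00000002)  /* Bit 1: blocking by MOV SS / POP SS. */
#define PENDING_DBG_XCPT_BS    UINT32_C(0x00004000)  /* Bit 14: single-step (BS). */

/*
 * Intel SDM 26.3.1.5: if VM entry happens with blocking-by-STI or
 * blocking-by-MOV-SS set while RFLAGS.TF = 1 (and IA32_DEBUGCTL.BTF = 0),
 * the BS bit must be set in the pending debug exceptions field, or the
 * entry fails.  Clearing the blocking bits, as the hunk does when the
 * hypervisor debugger forces TF, keeps the state consistent without
 * arming a #DB for the guest.
 */
static bool wouldFailVmEntry(bool fGuestTf, uint32_t fIntrState, uint32_t fPendingDbgXcpts)
{
    return fGuestTf
        && (fIntrState & (INT_STATE_BLOCK_STI | INT_STATE_BLOCK_MOVSS))
        && !(fPendingDbgXcpts & PENDING_DBG_XCPT_BS);
}
```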
```diff
@@ -6489 +6508 @@
     uint32_t u32Val;
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val);
-    if (RT_SUCCESS(rc))
-    {
-        /*
-         * We additionally have a requirement to import RIP, RFLAGS depending on whether we
-         * might need them in hmR0VmxEvaluatePendingEvent().
-         */
-        if (!u32Val)
-        {
-            if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-            {
-                rc  = hmR0VmxImportGuestRip(pVCpu);
-                rc |= hmR0VmxImportGuestRFlags(pVCpu);
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-            }
-
-            if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-        else
-        {
-            rc  = hmR0VmxImportGuestRip(pVCpu);
-            rc |= hmR0VmxImportGuestRFlags(pVCpu);
-
-            if (u32Val & (  VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
-                          | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
-            {
-                EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
-            }
-            else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-
-            if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
-            {
-                if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
-                    VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-            }
-            else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-    }
-    return rc;
+    AssertRCReturn(rc, rc);
+
+    /*
+     * We additionally have a requirement to import RIP, RFLAGS depending on whether we
+     * might need them in hmR0VmxEvaluatePendingEvent().
+     */
+    if (!u32Val)
+    {
+        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+        {
+            rc  = hmR0VmxImportGuestRip(pVCpu);
+            rc |= hmR0VmxImportGuestRFlags(pVCpu);
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+        }
+
+        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    }
+    else
+    {
+        rc  = hmR0VmxImportGuestRip(pVCpu);
+        rc |= hmR0VmxImportGuestRFlags(pVCpu);
+
+        if (u32Val & (  VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
+                      | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
+        {
+            EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
+        }
+        else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+
+        if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
+        {
+            if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+        }
+        else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    }
+
+    return VINF_SUCCESS;
 }
@@ -7087 +7106 @@
         TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
     }
 
-    /* Clear any pending events from the VMCS. */
+    /* Clear the events from the VMCS. */
     VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
-    VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0);
 
     /* We're now done converting the pending event. */
     pVCpu->hm.s.Event.fPending = false;
@@ -7589 +7607 @@
 
 
 /**
- * Sets a pending-debug exception to be delivered to the guest if the guest is
- * single-stepping in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu)
-{
-    Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
-    RT_NOREF(pVCpu);
-    return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
-}
-
-
-/**
  * Injects any pending events into the guest if the guest is in a state to
  * receive them.
```
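Background for the import hunk above: EMSetInhibitInterruptsPC() records the RIP at which the STI/MOV SS interrupt shadow was observed, so the rest of the VMM can tell when the one-instruction shadow has expired. A simplified sketch of the underlying idea (the helper below is hypothetical, not the EM API):

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * STI and MOV SS/POP SS inhibit interrupt delivery only for the single
 * instruction that follows them.  A VMM can model this shadow by saving
 * the RIP at which it was observed: while execution is still at that RIP
 * the shadow is active; once RIP has moved on, it has expired.
 */
static bool isInterruptShadowActive(uint64_t ripAtInhibit, uint64_t ripNow)
{
    return ripNow == ripAtInhibit;
}
```

This is also why the hunk imports RIP before recording the inhibition: the comparison point has to be the guest's current instruction.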
```diff
@@ -7619 +7622 @@
     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
 
-    bool fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
-    bool fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
+    bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
+    bool const fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
 
     Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
@@ -7657 +7660 @@
 #endif
         Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
               uIntType));
+
+        /*
+         * Inject the event and get any changes to the guest-interruptibility state.
+         *
+         * The guest-interruptibility state may need to be updated if we inject the event
+         * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
+         */
         rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
                                           pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
                                           &fIntrState);
         AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
 
-        /* Update the interruptibility-state as it could have been changed by
-           hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
-        fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
-        fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
-
         if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
         else
             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
     }
 
-    /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
-    if (   fBlockSti
-        || fBlockMovSS)
-    {
-        if (!pVCpu->hm.s.fSingleInstruction)
-        {
-            /*
-             * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
-             * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
-             * See Intel spec. 27.3.4 "Saving Non-Register State".
-             */
-            Assert(!DBGFIsStepping(pVCpu));
-            int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
-            AssertRCReturn(rc, rc);
-            if (pCtx->eflags.Bits.u1TF)
-            {
-                int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                AssertRCReturn(rc2, rc2);
-            }
-        }
-        else if (pCtx->eflags.Bits.u1TF)
-        {
-            /*
-             * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
-             * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
-             */
-            Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
-            fIntrState = 0;
-        }
-    }
+    /*
+     * Update the guest-interruptibility state.
+     *
+     * This is required for the real-on-v86 software interrupt injection case above, as well as
+     * updates to the guest state from ring-3 or IEM/REM.
+     */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
+    AssertRCReturn(rc, rc);
 
     /*
-     * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
-     * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
+     * There's no need to clear the VM-entry interruption-information field here if we're not
+     * injecting anything. VT-x clears the valid bit on every VM-exit.
+     *
+     * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
      */
-    int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState);
-    AssertRCReturn(rc3, rc3);
 
     Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
     NOREF(fBlockMovSS); NOREF(fBlockSti);
```
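For reference, the event injected above travels through the VM-entry interruption-information field, whose layout the Intel SDM defines (bit 31 valid, bits 10:8 type, bits 7:0 vector). A small sketch of building such a value; the helper name is invented:

```c
#include <stdint.h>

/* VM-entry interruption-information layout per the Intel SDM. */
#define ENTRY_INT_INFO_VALID         UINT32_C(0x80000000)  /* Bit 31: valid. */
#define ENTRY_INT_INFO_TYPE_SHIFT    8                     /* Bits 10:8: event type. */
#define ENTRY_INT_INFO_TYPE_EXT_INT  UINT32_C(0)           /* External interrupt. */
#define ENTRY_INT_INFO_TYPE_NMI      UINT32_C(2)           /* NMI. */
#define ENTRY_INT_INFO_TYPE_HW_XCPT  UINT32_C(3)           /* Hardware exception. */

/*
 * Builds an interruption-information value for injecting an event on the
 * next VM entry.  The CPU clears the valid bit on every VM exit, which is
 * why the hunk above can drop the explicit clearing of this field when
 * nothing is being injected.
 */
static uint32_t makeEntryIntInfo(uint8_t uVector, uint32_t uType)
{
    return ENTRY_INT_INFO_VALID | (uType << ENTRY_INT_INFO_TYPE_SHIFT) | uVector;
}
```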
7686 */ 7687 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState); 7688 AssertRCReturn(rc, rc); 7706 7689 7707 7690 /* 7708 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything. 7709 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". 7691 * There's no need to clear the VM-entry interruption-information field here if we're not 7692 * injecting anything. VT-x clears the valid bit on every VM-exit. 7693 * 7694 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". 7710 7695 */ 7711 int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState);7712 AssertRCReturn(rc3, rc3);7713 7696 7714 7697 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping)); 7715 7698 NOREF(fBlockMovSS); NOREF(fBlockSti); … … 8293 8276 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu); 8294 8277 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8295 8278 8296 /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is8297 not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */8298 8279 rc = hmR0VmxExportGuestRip(pVCpu); 8299 8280 rc |= hmR0VmxExportGuestRsp(pVCpu); 8300 8281 rc |= hmR0VmxExportGuestRflags(pVCpu); … … 10491 10472 AssertRCReturn(rc, rc); 10492 10473 10493 10474 hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbInstr); 10494 10495 /*10496 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the10497 * pending debug exception field as it takes care of priority of events.10498 *10499 * See Intel spec. 32.2.1 "Debug Exceptions".10500 */10501 if ( !pVCpu->hm.s.fSingleInstruction10502 && pVCpu->cpum.GstCtx.eflags.Bits.u1TF)10503 {10504 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);10505 AssertRCReturn(rc, rc);10506 }10507 10508 10475 return VINF_SUCCESS; 10509 10476 } 10510 10477 … … 11303 11270 uint32_t fIntrState = 0; 11304 11271 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState); 11305 11272 AssertRCReturn(rc, rc); 11273 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)); 11274 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 11275 { 11276 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 11277 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 11306 11278 11307 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI); 11308 if ( fBlockSti 11309 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 11310 { 11311 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 11279 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; 11280 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState); 11281 AssertRCReturn(rc, rc); 11312 11282 } 11313 11283 11314 11284 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */ … … 12493 12463 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ. 12494 12464 */ 12495 12465 if (fIOString) 12496 {12497 /** @todo Single-step for INS/OUTS with REP prefix? */12498 12466 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS); 12499 }12500 else if ( !fDbgStepping12501 && fGstStepping)12502 {12503 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);12504 AssertRCReturn(rc, rc);12505 }12506 12467 12507 12468 /* 12508 12469 * If any I/O breakpoints are armed, we need to check if one triggered

