Date:      Thu, 15 Feb 2018 17:10:42 +0000 (UTC)
From:      Andriy Gapon <avg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r329321 - stable/10/sys/amd64/vmm/amd
Message-ID:  <201802151710.w1FHAgAq049006@repo.freebsd.org>

Author: avg
Date: Thu Feb 15 17:10:42 2018
New Revision: 329321
URL: https://svnweb.freebsd.org/changeset/base/329321

Log:
  MFC r328622: vmm/svm: post LAPIC interrupts using event injection
  
  PR:		215972

Modified:
  stable/10/sys/amd64/vmm/amd/svm.c
Directory Properties:
  stable/10/   (props changed)
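
For readers skimming the diff below: after this change, svm_inj_interrupts() delivers both local APIC and legacy PIC vectors through the same VMCB event-injection path, instead of posting APIC vectors via the V_IRQ/V_INTR_VECTOR offload. The fragment below is only a paraphrase of the new flow, stitched together from the hunks in this commit, not a drop-in piece of code; sc, vcpu, vlapic and vector are the function's existing parameters and locals, and the interrupt-shadow and RFLAGS.IF checks present in the real code are omitted.

	extint_pending = vm_extint_pending(sc->vm, vcpu);
	if (!extint_pending) {
		/* Vector pending in the emulated local APIC, if any. */
		if (!vlapic_pending_intr(vlapic, &vector))
			goto done;
	} else {
		/* Vector from the emulated legacy PIC. */
		vatpic_pending_intr(sc->vm, &vector);
	}

	/* Both interrupt sources now use VMCB event injection. */
	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		/* Mark the vector as accepted in the vlapic. */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(sc->vm, vcpu);
		vatpic_intr_accepted(sc->vm, vector);
	}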

Modified: stable/10/sys/amd64/vmm/amd/svm.c
==============================================================================
--- stable/10/sys/amd64/vmm/amd/svm.c	Thu Feb 15 17:09:48 2018	(r329320)
+++ stable/10/sys/amd64/vmm/amd/svm.c	Thu Feb 15 17:10:42 2018	(r329321)
@@ -929,7 +929,6 @@ svm_update_virqinfo(struct svm_softc *sc, int vcpu)
 	struct vm *vm;
 	struct vlapic *vlapic;
 	struct vmcb_ctrl *ctrl;
-	int pending;
 
 	vm = sc->vm;
 	vlapic = vm_lapic(vm, vcpu);
@@ -938,20 +937,9 @@ svm_update_virqinfo(struct svm_softc *sc, int vcpu)
 	/* Update %cr8 in the emulated vlapic */
 	vlapic_set_cr8(vlapic, ctrl->v_tpr);
 
-	/*
-	 * If V_IRQ indicates that the interrupt injection attempted on the
-	 * last VMRUN was successful, then update the vlapic accordingly.
-	 */
-	if (ctrl->v_intr_vector != 0) {
-		pending = ctrl->v_irq;
-		KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
-		    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
-		KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
-		VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
-		    pending ? "pending" : "accepted");
-		if (!pending)
-			vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
-	}
+	/* Virtual interrupt injection is not used. */
+	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
+	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
 }
 
 static void
@@ -1020,12 +1008,7 @@ disable_intr_window_exiting(struct svm_softc *sc, int 
 		return;
 	}
 
-#ifdef KTR
-	if (ctrl->v_intr_vector == 0)
-		VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
-	else
-		VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
-#endif
+	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
 	ctrl->v_irq = 0;
 	ctrl->v_intr_vector = 0;
 	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
@@ -1570,14 +1553,14 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, str
 	struct vmcb_state *state;
 	struct svm_vcpu *vcpustate;
 	uint8_t v_tpr;
-	int vector, need_intr_window, pending_apic_vector;
+	int vector, need_intr_window;
+	int extint_pending;
 
 	state = svm_get_vmcb_state(sc, vcpu);
 	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);
 	vcpustate = svm_get_vcpu(sc, vcpu);
 
 	need_intr_window = 0;
-	pending_apic_vector = 0;
 
 	if (vcpustate->nextrip != state->rip) {
 		ctrl->intr_shadow = 0;
@@ -1647,40 +1630,19 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, str
 		}
 	}
 
-	if (!vm_extint_pending(sc->vm, vcpu)) {
-		/*
-		 * APIC interrupts are delivered using the V_IRQ offload.
-		 *
-		 * The primary benefit is that the hypervisor doesn't need to
-		 * deal with the various conditions that inhibit interrupts.
-		 * It also means that TPR changes via CR8 will be handled
-		 * without any hypervisor involvement.
-		 *
-		 * Note that the APIC vector must remain pending in the vIRR
-		 * until it is confirmed that it was delivered to the guest.
-		 * This can be confirmed based on the value of V_IRQ at the
-		 * next #VMEXIT (1 = pending, 0 = delivered).
-		 *
-		 * Also note that it is possible that another higher priority
-		 * vector can become pending before this vector is delivered
-		 * to the guest. This is alright because vcpu_notify_event()
-		 * will send an IPI and force the vcpu to trap back into the
-		 * hypervisor. The higher priority vector will be injected on
-		 * the next VMRUN.
-		 */
-		if (vlapic_pending_intr(vlapic, &vector)) {
-			KASSERT(vector >= 16 && vector <= 255,
-			    ("invalid vector %d from local APIC", vector));
-			pending_apic_vector = vector;
-		}
-		goto done;
+	extint_pending = vm_extint_pending(sc->vm, vcpu);
+	if (!extint_pending) {
+		if (!vlapic_pending_intr(vlapic, &vector))
+			goto done;
+		KASSERT(vector >= 16 && vector <= 255,
+		    ("invalid vector %d from local APIC", vector));
+	} else {
+		/* Ask the legacy pic for a vector to inject */
+		vatpic_pending_intr(sc->vm, &vector);
+		KASSERT(vector >= 0 && vector <= 255,
+		    ("invalid vector %d from INTR", vector));
 	}
 
-	/* Ask the legacy pic for a vector to inject */
-	vatpic_pending_intr(sc->vm, &vector);
-	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR",
-	    vector));
-
 	/*
 	 * If the guest has disabled interrupts or is in an interrupt shadow
 	 * then we cannot inject the pending interrupt.
@@ -1706,14 +1668,14 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, str
 		goto done;
 	}
 
-	/*
-	 * Legacy PIC interrupts are delivered via the event injection
-	 * mechanism.
-	 */
 	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
 
-	vm_extint_clear(sc->vm, vcpu);
-	vatpic_intr_accepted(sc->vm, vector);
+	if (!extint_pending) {
+		vlapic_intr_accepted(vlapic, vector);
+	} else {
+		vm_extint_clear(sc->vm, vcpu);
+		vatpic_intr_accepted(sc->vm, vector);
+	}
 
 	/*
 	 * Force a VM-exit as soon as the vcpu is ready to accept another
@@ -1743,21 +1705,7 @@ done:
 		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
 	}
 
-	if (pending_apic_vector) {
-		/*
-		 * If an APIC vector is being injected then interrupt window
-		 * exiting is not possible on this VMRUN.
-		 */
-		KASSERT(!need_intr_window, ("intr_window exiting impossible"));
-		VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ",
-		    pending_apic_vector);
-
-		ctrl->v_irq = 1;
-		ctrl->v_ign_tpr = 0;
-		ctrl->v_intr_vector = pending_apic_vector;
-		ctrl->v_intr_prio = pending_apic_vector >> 4;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
-	} else if (need_intr_window) {
+	if (need_intr_window) {
 		/*
 		 * We use V_IRQ in conjunction with the VINTR intercept to
 		 * trap into the hypervisor as soon as a virtual interrupt
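
For illustration only (not part of this commit): svm_eventinject(), defined elsewhere in svm.c, delivers a vector by programming the VMCB EVENTINJ field, whose layout is documented in the AMD APM vol. 2 under "Event Injection". The standalone helper below is a minimal sketch of that encoding under those assumptions; the constant and function names are invented for the example and are not the names used in FreeBSD.

#include <stdbool.h>
#include <stdint.h>

/* VMCB EVENTINJ field layout (AMD APM vol. 2). */
#define	EVENTINJ_TYPE_INTR	0u		/* external/virtual interrupt */
#define	EVENTINJ_EC_VALID	(1u << 11)	/* an error code is pushed */
#define	EVENTINJ_VALID		(1u << 31)	/* inject on the next VMRUN */

static uint64_t
eventinj_encode(unsigned int type, unsigned int vector, uint32_t errcode,
    bool ec_valid)
{
	uint64_t ev;

	/* Bits 7:0 vector, 10:8 type, 11 EV, 31 V, 63:32 error code. */
	ev = (vector & 0xff) | ((uint64_t)(type & 0x7) << 8) | EVENTINJ_VALID;
	if (ec_valid)
		ev |= EVENTINJ_EC_VALID | ((uint64_t)errcode << 32);
	return (ev);
}

By contrast, the V_IRQ path that this commit stops using for APIC vectors sets ctrl->v_irq and ctrl->v_intr_vector in the VMCB's virtual-interrupt control area, as can be seen in the code removed above.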


