Date:      Thu, 26 Jan 2023 22:11:55 GMT
From:      John Baldwin <jhb@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-branches@FreeBSD.org
Subject:   git: 60ba8879c326 - stable/13 - vmm: Pass vcpu instead of vm and vcpuid to APIs used from CPU backends.
Message-ID:  <202301262211.30QMBtRG021972@gitrepo.freebsd.org>

The branch stable/13 has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=60ba8879c3262ca236d7c1c98e42ca36ac2ad360

commit 60ba8879c3262ca236d7c1c98e42ca36ac2ad360
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2022-11-18 18:03:05 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2023-01-26 22:00:36 +0000

    vmm: Pass vcpu instead of vm and vcpuid to APIs used from CPU backends.
    
    Reviewed by:    corvink, markj
    Differential Revision:  https://reviews.freebsd.org/D37162
    
    (cherry picked from commit 80cb5d845b8f4b7dc25b5dc7f4a9a653b98b0cc6)
---
 sys/amd64/include/vmm.h       |  58 +++++++++---------
 sys/amd64/vmm/amd/svm.c       |  94 +++++++++++++----------------
 sys/amd64/vmm/amd/svm.h       |   3 +-
 sys/amd64/vmm/amd/svm_msr.c   |  12 ++--
 sys/amd64/vmm/amd/svm_msr.h   |  10 ++--
 sys/amd64/vmm/intel/vmx.c     | 108 +++++++++++++++++-----------------
 sys/amd64/vmm/intel/vmx.h     |   3 +-
 sys/amd64/vmm/intel/vmx_msr.c |  12 ++--
 sys/amd64/vmm/intel/vmx_msr.h |  10 ++--
 sys/amd64/vmm/io/vlapic.c     |   4 +-
 sys/amd64/vmm/vmm.c           | 133 +++++++++++++-----------------------------
 sys/amd64/vmm/vmm_dev.c       |   2 +-
 sys/amd64/vmm/vmm_lapic.c     |   8 +--
 sys/amd64/vmm/vmm_lapic.h     |   7 +--
 sys/amd64/vmm/x86.c           |   8 ++-
 sys/amd64/vmm/x86.h           |   4 +-
 16 files changed, 197 insertions(+), 279 deletions(-)
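
    To illustrate the API change named in the subject, here is a minimal
    sketch (not part of the patch) of how a CPU-backend call site changes:
    backends stop passing a struct vm pointer plus a vcpuid index and
    instead pass the struct vcpu pointer they already track.  The
    identifiers vm_nmi_pending(), vm_nmi_clear(), sc->vm, vcpu->vcpu and
    vcpu->vcpuid are taken from the diff below; the wrapper functions
    example_inject_nmi_old()/example_inject_nmi_new() are hypothetical
    names used only for this illustration.

	/*
	 * Minimal sketch, not part of the patch: the surrounding function
	 * is a simplified illustration; only the call signatures reflect
	 * the diff below.
	 */

	/* Before this commit: backends pass the VM handle plus a vCPU index. */
	static void
	example_inject_nmi_old(struct svm_softc *sc, struct svm_vcpu *vcpu)
	{
		if (vm_nmi_pending(sc->vm, vcpu->vcpuid)) {
			/* ... event injection elided ... */
			vm_nmi_clear(sc->vm, vcpu->vcpuid);
		}
	}

	/* After this commit: backends pass the struct vcpu pointer directly. */
	static void
	example_inject_nmi_new(struct svm_vcpu *vcpu)
	{
		if (vm_nmi_pending(vcpu->vcpu)) {
			/* ... event injection elided ... */
			vm_nmi_clear(vcpu->vcpu);
		}
	}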

diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 0224304f16a2..7c9393128df0 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -258,7 +258,7 @@ void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
 void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
     int prot, void **cookie);
 void vm_gpa_release(void *cookie);
-bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
+bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
 
 int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
 int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
@@ -269,11 +269,11 @@ int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
 int vm_run(struct vm *vm, struct vm_run *vmrun);
 int vm_suspend(struct vm *vm, enum vm_suspend_how how);
 int vm_inject_nmi(struct vm *vm, int vcpu);
-int vm_nmi_pending(struct vm *vm, int vcpuid);
-void vm_nmi_clear(struct vm *vm, int vcpuid);
+int vm_nmi_pending(struct vcpu *vcpu);
+void vm_nmi_clear(struct vcpu *vcpu);
 int vm_inject_extint(struct vm *vm, int vcpu);
-int vm_extint_pending(struct vm *vm, int vcpuid);
-void vm_extint_clear(struct vm *vm, int vcpuid);
+int vm_extint_pending(struct vcpu *vcpu);
+void vm_extint_clear(struct vcpu *vcpu);
 int vcpu_vcpuid(struct vcpu *vcpu);
 struct vm *vcpu_vm(struct vcpu *vcpu);
 struct vcpu *vm_vcpu(struct vm *vm, int cpu);
@@ -289,12 +289,12 @@ int vm_activate_cpu(struct vm *vm, int vcpu);
 int vm_suspend_cpu(struct vm *vm, int vcpu);
 int vm_resume_cpu(struct vm *vm, int vcpu);
 int vm_restart_instruction(struct vcpu *vcpu);
-struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
-void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
+struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
+void vm_exit_suspended(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_debug(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_astpending(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip);
 int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta);
 int vm_restore_time(struct vm *vm);
 
@@ -342,7 +342,7 @@ vcpu_reqidle(struct vm_eventinfo *info)
 	return (*info->iptr);
 }
 
-int vcpu_debugged(struct vm *vm, int vcpuid);
+int vcpu_debugged(struct vcpu *vcpu);
 
 /*
  * Return true if device indicated by bus/slot/func is supposed to be a
@@ -366,14 +366,14 @@ int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
 enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
 
 static int __inline
-vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
+vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
 {
-	return (vcpu_get_state(vm_vcpu(vm, vcpu), hostcpu) == VCPU_RUNNING);
+	return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
 }
 
 #ifdef _SYS_PROC_H_
 static int __inline
-vcpu_should_yield(struct vm *vm, int vcpu)
+vcpu_should_yield(struct vcpu *vcpu)
 {
 
 	if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
@@ -420,7 +420,7 @@ int vm_inject_exception(struct vcpu *vcpu, int vector, int err_valid,
  *
  * Return value is 0 on success and non-zero on failure.
  */
-int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);
+int vm_exit_intinfo(struct vcpu *vcpu, uint64_t intinfo);
 
 /*
  * This function is called before every VM-entry to retrieve a pending
@@ -430,7 +430,7 @@ int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);
  * Returns 0 if there are no events that need to be injected into the guest
  * and non-zero otherwise.
  */
-int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
+int vm_entry_intinfo(struct vcpu *vcpu, uint64_t *info);
 
 int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
 
@@ -438,10 +438,8 @@ int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
  * Function used to keep track of the guest's TSC offset. The
  * offset is used by the virutalization extensions to provide a consistent
  * value for the Time Stamp Counter to the guest.
- *
- * Return value is 0 on success and non-zero on failure.
  */
-int vm_set_tsc_offset(struct vm *vm, int vcpu_id, uint64_t offset);
+void vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset);
 
 enum vm_reg_name vm_segment_name(int seg_encoding);
 
@@ -473,8 +471,8 @@ void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo);
 void vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len);
 void vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len);
 
-int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
-int vcpu_trap_wbinvd(struct vm *vm, int vcpuid);
+int vcpu_trace_exceptions(struct vcpu *vcpu);
+int vcpu_trap_wbinvd(struct vcpu *vcpu);
 #endif	/* KERNEL */
 
 #ifdef _KERNEL
@@ -793,27 +791,27 @@ void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
     int errcode);
 
 static __inline void
-vm_inject_ud(void *vm, int vcpuid)
+vm_inject_ud(struct vcpu *vcpu)
 {
-	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
+	vm_inject_fault(vcpu, IDT_UD, 0, 0);
 }
 
 static __inline void
-vm_inject_gp(void *vm, int vcpuid)
+vm_inject_gp(struct vcpu *vcpu)
 {
-	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
+	vm_inject_fault(vcpu, IDT_GP, 1, 0);
 }
 
 static __inline void
-vm_inject_ac(void *vm, int vcpuid, int errcode)
+vm_inject_ac(struct vcpu *vcpu, int errcode)
 {
-	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
+	vm_inject_fault(vcpu, IDT_AC, 1, errcode);
 }
 
 static __inline void
-vm_inject_ss(void *vm, int vcpuid, int errcode)
+vm_inject_ss(struct vcpu *vcpu, int errcode)
 {
-	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
+	vm_inject_fault(vcpu, IDT_SS, 1, errcode);
 }
 
 void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 48c7b53604c1..2448501401e3 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -282,10 +282,9 @@ svm_modresume(void)
 }		
 
 #ifdef BHYVE_SNAPSHOT
-int
-svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t offset)
+void
+svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
 {
-	int error;
 	struct vmcb_ctrl *ctrl;
 
 	ctrl = svm_get_vmcb_ctrl(vcpu);
@@ -294,9 +293,7 @@ svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t offset)
 	svm_set_dirty(vcpu, VMCB_CACHE_I);
 	SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset);
 
-	error = vm_set_tsc_offset(sc->vm, vcpu->vcpuid, offset);
-
-	return (error);
+	vm_set_tsc_offset(vcpu->vcpu, offset);
 }
 #endif
 
@@ -464,7 +461,7 @@ vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
 	 * Intercept everything when tracing guest exceptions otherwise
 	 * just intercept machine check exception.
 	 */
-	if (vcpu_trace_exceptions(sc->vm, vcpu->vcpuid)) {
+	if (vcpu_trace_exceptions(vcpu->vcpu)) {
 		for (n = 0; n < 32; n++) {
 			/*
 			 * Skip unimplemented vectors in the exception bitmap.
@@ -504,7 +501,7 @@ vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
 	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
 	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
 	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
-	if (vcpu_trap_wbinvd(sc->vm, vcpu->vcpuid)) {
+	if (vcpu_trap_wbinvd(vcpu->vcpu)) {
 		svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
 		    VMCB_INTCPT_WBINVD);
 	}
@@ -992,9 +989,7 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 {
 	struct vmcb_ctrl *ctrl;
 	uint64_t intinfo;
-	int vcpuid;
 
-	vcpuid = vcpu->vcpuid;
 	ctrl = svm_get_vmcb_ctrl(vcpu);
 	intinfo = ctrl->exitintinfo;	
 	if (!VMCB_EXITINTINFO_VALID(intinfo))
@@ -1009,7 +1004,7 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 	SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
 	    VMCB_EXITINTINFO_VECTOR(intinfo));
 	vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
-	vm_exit_intinfo(svm_sc->vm, vcpuid, intinfo);
+	vm_exit_intinfo(vcpu->vcpu, intinfo);
 }
 
 #ifdef INVARIANTS
@@ -1149,10 +1144,9 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
 	struct vm_exit *vme;
 	struct vmcb_state *state;
 	uint64_t changed, lma, oldval;
-	int error __diagused, vcpuid;
+	int error __diagused;
 
 	state = svm_get_vmcb_state(vcpu);
-	vcpuid = vcpu->vcpuid;
 
 	oldval = state->efer;
 	SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
@@ -1179,7 +1173,7 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
 		goto gpf;
 
 	if (newval & EFER_NXE) {
-		if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_NO_EXECUTE))
+		if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE))
 			goto gpf;
 	}
 
@@ -1188,19 +1182,19 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
 	 * this is fixed flag guest attempt to set EFER_LMSLE as an error.
 	 */
 	if (newval & EFER_LMSLE) {
-		vme = vm_exitinfo(sc->vm, vcpuid);
+		vme = vm_exitinfo(vcpu->vcpu);
 		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
 		*retu = true;
 		return (0);
 	}
 
 	if (newval & EFER_FFXSR) {
-		if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_FFXSR))
+		if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR))
 			goto gpf;
 	}
 
 	if (newval & EFER_TCE) {
-		if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_TCE))
+		if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE))
 			goto gpf;
 	}
 
@@ -1219,18 +1213,17 @@ emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
 	int error;
 
 	if (lapic_msr(num))
-		error = lapic_wrmsr(sc->vm, vcpu->vcpuid, num, val, retu);
+		error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
 	else if (num == MSR_EFER)
 		error = svm_write_efer(sc, vcpu, val, retu);
 	else
-		error = svm_wrmsr(sc, vcpu, num, val, retu);
+		error = svm_wrmsr(vcpu, num, val, retu);
 
 	return (error);
 }
 
 static int
-emulate_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    bool *retu)
+emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu)
 {
 	struct vmcb_state *state;
 	struct svm_regctx *ctx;
@@ -1238,9 +1231,9 @@ emulate_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
 	int error;
 
 	if (lapic_msr(num))
-		error = lapic_rdmsr(sc->vm, vcpu->vcpuid, num, &result, retu);
+		error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
 	else
-		error = svm_rdmsr(sc, vcpu, num, &result, retu);
+		error = svm_rdmsr(vcpu, num, &result, retu);
 
 	if (error == 0) {
 		state = svm_get_vmcb_state(vcpu);
@@ -1335,14 +1328,12 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 	uint64_t code, info1, info2, val;
 	uint32_t eax, ecx, edx;
 	int error __diagused, errcode_valid, handled, idtvec, reflect;
-	int vcpuid;
 	bool retu;
 
 	ctx = svm_get_guest_regctx(vcpu);
 	vmcb = svm_get_vmcb(vcpu);
 	state = &vmcb->state;
 	ctrl = &vmcb->ctrl;
-	vcpuid = vcpu->vcpuid;
 
 	handled = 0;
 	code = ctrl->exitcode;
@@ -1487,7 +1478,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 		} else {
 			SVM_CTR1(vcpu, "rdmsr %#x", ecx);
 			vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
-			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
+			if (emulate_rdmsr(vcpu, ecx, &retu)) {
 				vmexit->exitcode = VM_EXITCODE_RDMSR;
 				vmexit->u.msr.code = ecx;
 			} else if (!retu) {
@@ -1504,8 +1495,9 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 		break;
 	case VMCB_EXIT_CPUID:
 		vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
-		handled = x86_emulate_cpuid(svm_sc->vm, vcpuid, &state->rax,
-		    &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
+		handled = x86_emulate_cpuid(vcpu->vcpu,
+		    &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx,
+		    &ctx->sctx_rdx);
 		break;
 	case VMCB_EXIT_HLT:
 		vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
@@ -1522,7 +1514,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 			SVM_CTR2(vcpu, "nested page fault with "
 			    "reserved bits set: info1(%#lx) info2(%#lx)",
 			    info1, info2);
-		} else if (vm_mem_allocated(svm_sc->vm, vcpuid, info2)) {
+		} else if (vm_mem_allocated(vcpu->vcpu, info2)) {
 			vmexit->exitcode = VM_EXITCODE_PAGING;
 			vmexit->u.paging.gpa = info2;
 			vmexit->u.paging.fault_type = npf_fault_type(info1);
@@ -1596,9 +1588,8 @@ static void
 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 {
 	uint64_t intinfo;
-	int vcpuid = vcpu->vcpuid;
 
-	if (!vm_entry_intinfo(svm_sc->vm, vcpuid, &intinfo))
+	if (!vm_entry_intinfo(vcpu->vcpu, &intinfo))
 		return;
 
 	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
@@ -1624,7 +1615,6 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
 	uint8_t v_tpr;
 	int vector, need_intr_window;
 	int extint_pending;
-	int vcpuid = vcpu->vcpuid;
 
 	state = svm_get_vmcb_state(vcpu);
 	ctrl  = svm_get_vmcb_ctrl(vcpu);
@@ -1650,7 +1640,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
 	svm_inj_intinfo(sc, vcpu);
 
 	/* NMI event has priority over interrupts. */
-	if (vm_nmi_pending(sc->vm, vcpuid)) {
+	if (vm_nmi_pending(vcpu->vcpu)) {
 		if (nmi_blocked(vcpu)) {
 			/*
 			 * Can't inject another NMI if the guest has not
@@ -1686,7 +1676,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
 			 */
 			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
 		} else {
-			vm_nmi_clear(sc->vm, vcpuid);
+			vm_nmi_clear(vcpu->vcpu);
 
 			/* Inject NMI, vector number is not used */
 			svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
@@ -1699,7 +1689,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
 		}
 	}
 
-	extint_pending = vm_extint_pending(sc->vm, vcpuid);
+	extint_pending = vm_extint_pending(vcpu->vcpu);
 	if (!extint_pending) {
 		if (!vlapic_pending_intr(vlapic, &vector))
 			goto done;
@@ -1742,7 +1732,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
 	if (!extint_pending) {
 		vlapic_intr_accepted(vlapic, vector);
 	} else {
-		vm_extint_clear(sc->vm, vcpuid);
+		vm_extint_clear(vcpu->vcpu);
 		vatpic_intr_accepted(sc->vm, vector);
 	}
 
@@ -2003,18 +1993,15 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 	struct vmcb_ctrl *ctrl;
 	struct vm_exit *vmexit;
 	struct vlapic *vlapic;
-	struct vm *vm;
 	uint64_t vmcb_pa;
-	int handled, vcpuid;
+	int handled;
 	uint16_t ldt_sel;
 
 	vcpu = vcpui;
-	vcpuid = vcpu->vcpuid;
 	svm_sc = vcpu->sc;
-	vm = svm_sc->vm;
 	state = svm_get_vmcb_state(vcpu);
 	ctrl = svm_get_vmcb_ctrl(vcpu);
-	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit = vm_exitinfo(vcpu->vcpu);
 	vlapic = vm_lapic(vcpu->vcpu);
 
 	gctx = svm_get_guest_regctx(vcpu);
@@ -2045,7 +2032,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 		vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
 	}
 
-	svm_msr_guest_enter(svm_sc, vcpu);
+	svm_msr_guest_enter(vcpu);
 
 	/* Update Guest RIP */
 	state->rip = rip;
@@ -2062,32 +2049,32 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 
 		if (vcpu_suspended(evinfo)) {
 			enable_gintr();
-			vm_exit_suspended(vm, vcpuid, state->rip);
+			vm_exit_suspended(vcpu->vcpu, state->rip);
 			break;
 		}
 
 		if (vcpu_rendezvous_pending(evinfo)) {
 			enable_gintr();
-			vm_exit_rendezvous(vm, vcpuid, state->rip);
+			vm_exit_rendezvous(vcpu->vcpu, state->rip);
 			break;
 		}
 
 		if (vcpu_reqidle(evinfo)) {
 			enable_gintr();
-			vm_exit_reqidle(vm, vcpuid, state->rip);
+			vm_exit_reqidle(vcpu->vcpu, state->rip);
 			break;
 		}
 
 		/* We are asked to give the cpu by scheduler. */
-		if (vcpu_should_yield(vm, vcpuid)) {
+		if (vcpu_should_yield(vcpu->vcpu)) {
 			enable_gintr();
-			vm_exit_astpending(vm, vcpuid, state->rip);
+			vm_exit_astpending(vcpu->vcpu, state->rip);
 			break;
 		}
 
-		if (vcpu_debugged(vm, vcpuid)) {
+		if (vcpu_debugged(vcpu->vcpu)) {
 			enable_gintr();
-			vm_exit_debug(vm, vcpuid, state->rip);
+			vm_exit_debug(vcpu->vcpu, state->rip);
 			break;
 		}
 
@@ -2140,7 +2127,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 		handled = svm_vmexit(svm_sc, vcpu, vmexit);
 	} while (handled);
 
-	svm_msr_guest_exit(svm_sc, vcpu);
+	svm_msr_guest_exit(vcpu);
 
 	return (0);
 }
@@ -2446,7 +2433,7 @@ svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
 	vcpu = vcpui;
 	err = 0;
 
-	running = vcpu_is_running(vcpu->sc->vm, vcpu->vcpuid, &hostcpu);
+	running = vcpu_is_running(vcpu->vcpu, &hostcpu);
 	if (running && hostcpu != curcpu) {
 		printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
 		    vcpu->vcpuid);
@@ -2642,11 +2629,10 @@ static int
 svm_restore_tsc(void *vcpui, uint64_t offset)
 {
 	struct svm_vcpu *vcpu = vcpui;
-	int err;
 
-	err = svm_set_tsc_offset(vcpu->sc, vcpu, offset);
+	svm_set_tsc_offset(vcpu, offset);
 
-	return (err);
+	return (0);
 }
 #endif
 
diff --git a/sys/amd64/vmm/amd/svm.h b/sys/amd64/vmm/amd/svm.h
index 26f4809203d7..6ad25b3cb7ce 100644
--- a/sys/amd64/vmm/amd/svm.h
+++ b/sys/amd64/vmm/amd/svm.h
@@ -69,8 +69,7 @@ struct svm_regctx {
 
 void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);
 #ifdef BHYVE_SNAPSHOT
-int  svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu,
-    uint64_t offset);
+void svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset);
 #endif
 
 #endif /* _SVM_H_ */
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
index 52ff6a29f336..82d4087c32ef 100644
--- a/sys/amd64/vmm/amd/svm_msr.c
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -86,7 +86,7 @@ svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu)
 }
 
 void
-svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu)
+svm_msr_guest_enter(struct svm_vcpu *vcpu)
 {
 	/*
 	 * Save host MSRs (if any) and restore guest MSRs (if any).
@@ -94,7 +94,7 @@ svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu)
 }
 
 void
-svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu)
+svm_msr_guest_exit(struct svm_vcpu *vcpu)
 {
 	/*
 	 * Save guest MSRs (if any) and restore host MSRs.
@@ -108,8 +108,7 @@ svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu)
 }
 
 int
-svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    uint64_t *result, bool *retu)
+svm_rdmsr(struct svm_vcpu *vcpu, u_int num, uint64_t *result, bool *retu)
 {
 	int error = 0;
 
@@ -142,8 +141,7 @@ svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
 }
 
 int
-svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val,
-    bool *retu)
+svm_wrmsr(struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
 {
 	int error = 0;
 
@@ -175,7 +173,7 @@ svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val,
 		break;
 #ifdef BHYVE_SNAPSHOT
 	case MSR_TSC:
-		error = svm_set_tsc_offset(sc, vcpu, val - rdtsc());
+		svm_set_tsc_offset(vcpu, val - rdtsc());
 		break;
 #endif
 	case MSR_EXTFEATURES:
diff --git a/sys/amd64/vmm/amd/svm_msr.h b/sys/amd64/vmm/amd/svm_msr.h
index 9e78b7f15ae8..7b3cab6e31a3 100644
--- a/sys/amd64/vmm/amd/svm_msr.h
+++ b/sys/amd64/vmm/amd/svm_msr.h
@@ -36,12 +36,10 @@ struct svm_vcpu;
 
 void svm_msr_init(void);
 void svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu);
-void svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu);
-void svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu);
+void svm_msr_guest_enter(struct svm_vcpu *vcpu);
+void svm_msr_guest_exit(struct svm_vcpu *vcpu);
 
-int svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    uint64_t val, bool *retu);
-int svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    uint64_t *result, bool *retu);
+int svm_wrmsr(struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu);
+int svm_rdmsr(struct svm_vcpu *vcpu, u_int num, uint64_t *result, bool *retu);
 
 #endif	/* _SVM_MSR_H_ */
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 57cc73633b7f..9db638fd858e 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -1148,7 +1148,7 @@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
 	error += vmwrite(VMCS_EPTP, vmx->eptp);
 	error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
 	error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
-	if (vcpu_trap_wbinvd(vmx->vm, vcpuid)) {
+	if (vcpu_trap_wbinvd(vcpu->vcpu)) {
 		KASSERT(cap_wbinvd_exit, ("WBINVD trap not available"));
 		procbased_ctls2 |= PROCBASED2_WBINVD_EXITING;
 	}
@@ -1168,7 +1168,7 @@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
 	}
 
 	/* exception bitmap */
-	if (vcpu_trace_exceptions(vmx->vm, vcpuid))
+	if (vcpu_trace_exceptions(vcpu->vcpu))
 		exc_bitmap = 0xffffffff;
 	else
 		exc_bitmap = 1 << IDT_MC;
@@ -1226,11 +1226,11 @@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
 }
 
 static int
-vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
+vmx_handle_cpuid(struct vmx_vcpu *vcpu, struct vmxctx *vmxctx)
 {
 	int handled;
 
-	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
+	handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax,
 	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
 	    (uint64_t *)&vmxctx->guest_rdx);
 	return (handled);
@@ -1395,7 +1395,7 @@ vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu)
 }
 
 int
-vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset)
+vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset)
 {
 	int error;
 
@@ -1408,7 +1408,7 @@ vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset)
 	error = vmwrite(VMCS_TSC_OFFSET, offset);
 #ifdef BHYVE_SNAPSHOT
 	if (error == 0)
-		error = vm_set_tsc_offset(vmx->vm, vcpu->vcpuid, offset);
+		vm_set_tsc_offset(vcpu->vcpu, offset);
 #endif
 	return (error);
 }
@@ -1419,7 +1419,7 @@ vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset)
 			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
 
 static void
-vmx_inject_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_inject_nmi(struct vmx_vcpu *vcpu)
 {
 	uint32_t gi __diagused, info;
 
@@ -1441,12 +1441,12 @@ vmx_inject_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu)
 	VMX_CTR0(vcpu, "Injecting vNMI");
 
 	/* Clear the request */
-	vm_nmi_clear(vmx->vm, vcpu->vcpuid);
+	vm_nmi_clear(vcpu->vcpu);
 }
 
 static void
-vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
-    struct vlapic *vlapic, uint64_t guestrip)
+vmx_inject_interrupts(struct vmx_vcpu *vcpu, struct vlapic *vlapic,
+    uint64_t guestrip)
 {
 	int vector, need_nmi_exiting, extint_pending;
 	uint64_t rflags, entryinfo;
@@ -1463,7 +1463,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
 		}
 	}
 
-	if (vm_entry_intinfo(vmx->vm, vcpu->vcpuid, &entryinfo)) {
+	if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) {
 		KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
 		    "intinfo is not valid: %#lx", __func__, entryinfo));
 
@@ -1488,7 +1488,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
 		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
 	}
 
-	if (vm_nmi_pending(vmx->vm, vcpu->vcpuid)) {
+	if (vm_nmi_pending(vcpu->vcpu)) {
 		/*
 		 * If there are no conditions blocking NMI injection then
 		 * inject it directly here otherwise enable "NMI window
@@ -1505,7 +1505,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
 		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
 			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
 			if ((info & VMCS_INTR_VALID) == 0) {
-				vmx_inject_nmi(vmx, vcpu);
+				vmx_inject_nmi(vcpu);
 				need_nmi_exiting = 0;
 			} else {
 				VMX_CTR1(vcpu, "Cannot inject NMI "
@@ -1520,7 +1520,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
 			vmx_set_nmi_window_exiting(vcpu);
 	}
 
-	extint_pending = vm_extint_pending(vmx->vm, vcpu->vcpuid);
+	extint_pending = vm_extint_pending(vcpu->vcpu);
 
 	if (!extint_pending && virtual_interrupt_delivery) {
 		vmx_inject_pir(vlapic);
@@ -1553,7 +1553,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
 		    ("invalid vector %d from local APIC", vector));
 	} else {
 		/* Ask the legacy pic for a vector to inject */
-		vatpic_pending_intr(vmx->vm, &vector);
+		vatpic_pending_intr(vcpu->vmx->vm, &vector);
 
 		/*
 		 * From the Intel SDM, Volume 3, Section "Maskable
@@ -1603,8 +1603,8 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
 		/* Update the Local APIC ISR */
 		vlapic_intr_accepted(vlapic, vector);
 	} else {
-		vm_extint_clear(vmx->vm, vcpu->vcpuid);
-		vatpic_intr_accepted(vmx->vm, vector);
+		vm_extint_clear(vcpu->vcpu);
+		vatpic_intr_accepted(vcpu->vmx->vm, vector);
 
 		/*
 		 * After we accepted the current ExtINT the PIC may
@@ -2319,21 +2319,20 @@ vmx_task_switch_reason(uint64_t qual)
 }
 
 static int
-emulate_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
-    bool *retu)
+emulate_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
 {
 	int error;
 
 	if (lapic_msr(num))
-		error = lapic_wrmsr(vmx->vm, vcpu->vcpuid, num, val, retu);
+		error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
 	else
-		error = vmx_wrmsr(vmx, vcpu, num, val, retu);
+		error = vmx_wrmsr(vcpu, num, val, retu);
 
 	return (error);
 }
 
 static int
-emulate_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, bool *retu)
+emulate_rdmsr(struct vmx_vcpu *vcpu, u_int num, bool *retu)
 {
 	struct vmxctx *vmxctx;
 	uint64_t result;
@@ -2341,9 +2340,9 @@ emulate_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, bool *retu)
 	int error;
 
 	if (lapic_msr(num))
-		error = lapic_rdmsr(vmx->vm, vcpu->vcpuid, num, &result, retu);
+		error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
 	else
-		error = vmx_rdmsr(vmx, vcpu, num, &result, retu);
+		error = vmx_rdmsr(vcpu, num, &result, retu);
 
 	if (error == 0) {
 		eax = result;
@@ -2415,7 +2414,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 			idtvec_err = vmcs_idt_vectoring_err();
 			exitintinfo |= (uint64_t)idtvec_err << 32;
 		}
-		error = vm_exit_intinfo(vmx->vm, vcpuid, exitintinfo);
+		error = vm_exit_intinfo(vcpu->vcpu, exitintinfo);
 		KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
 		    __func__, error));
 
@@ -2515,7 +2514,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		ecx = vmxctx->guest_rcx;
 		VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx);
 		SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpuid, vmexit, ecx);
-		error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
+		error = emulate_rdmsr(vcpu, ecx, &retu);
 		if (error) {
 			vmexit->exitcode = VM_EXITCODE_RDMSR;
 			vmexit->u.msr.code = ecx;
@@ -2537,8 +2536,8 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		    ecx, (uint64_t)edx << 32 | eax);
 		SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpuid, ecx,
 		    (uint64_t)edx << 32 | eax);
-		error = emulate_wrmsr(vmx, vcpu, ecx,
-		    (uint64_t)edx << 32 | eax, &retu);
+		error = emulate_wrmsr(vcpu, ecx, (uint64_t)edx << 32 | eax,
+		    &retu);
 		if (error) {
 			vmexit->exitcode = VM_EXITCODE_WRMSR;
 			vmexit->u.msr.code = ecx;
@@ -2612,8 +2611,8 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 	case EXIT_REASON_NMI_WINDOW:
 		SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit);
 		/* Exit to allow the pending virtual NMI to be injected */
-		if (vm_nmi_pending(vmx->vm, vcpuid))
-			vmx_inject_nmi(vmx, vcpu);
+		if (vm_nmi_pending(vcpu->vcpu))
+			vmx_inject_nmi(vcpu);
 		vmx_clear_nmi_window_exiting(vcpu);
 		vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1);
 		return (1);
@@ -2643,7 +2642,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 	case EXIT_REASON_CPUID:
 		vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
 		SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit);
-		handled = vmx_handle_cpuid(vmx->vm, vcpuid, vmxctx);
+		handled = vmx_handle_cpuid(vcpu, vmxctx);
 		break;
 	case EXIT_REASON_EXCEPTION:
 		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
@@ -2734,7 +2733,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		 * this must be an instruction that accesses MMIO space.
 		 */
 		gpa = vmcs_gpa();
-		if (vm_mem_allocated(vmx->vm, vcpuid, gpa) ||
+		if (vm_mem_allocated(vcpu->vcpu, gpa) ||
 		    apic_access_fault(vcpu, gpa)) {
 			vmexit->exitcode = VM_EXITCODE_PAGING;
 			vmexit->inst_length = 0;
@@ -3012,10 +3011,9 @@ vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap)
 static int
 vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 {
-	int rc, handled, launched, vcpuid;
+	int rc, handled, launched;
 	struct vmx *vmx;
 	struct vmx_vcpu *vcpu;
-	struct vm *vm;
 	struct vmxctx *vmxctx;
 	struct vmcs *vmcs;
 	struct vm_exit *vmexit;
@@ -3026,18 +3024,16 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 
 	vcpu = vcpui;
 	vmx = vcpu->vmx;
-	vm = vmx->vm;
-	vcpuid = vcpu->vcpuid;
 	vmcs = vcpu->vmcs;
 	vmxctx = &vcpu->ctx;
 	vlapic = vm_lapic(vcpu->vcpu);
-	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit = vm_exitinfo(vcpu->vcpu);
 	launched = 0;
 
 	KASSERT(vmxctx->pmap == pmap,
 	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
 
-	vmx_msr_guest_enter(vmx, vcpu);
+	vmx_msr_guest_enter(vcpu);
 
 	VMPTRLD(vmcs);
 
@@ -3077,7 +3073,7 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 		 * pmap_invalidate_ept().
 		 */
 		disable_intr();
-		vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
+		vmx_inject_interrupts(vcpu, vlapic, rip);
 
 		/*
 		 * Check for vcpu suspension after injecting events because
@@ -3086,33 +3082,33 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 		 */
 		if (vcpu_suspended(evinfo)) {
 			enable_intr();
-			vm_exit_suspended(vmx->vm, vcpuid, rip);
+			vm_exit_suspended(vcpu->vcpu, rip);
 			break;
 		}
 
 		if (vcpu_rendezvous_pending(evinfo)) {
 			enable_intr();
-			vm_exit_rendezvous(vmx->vm, vcpuid, rip);
+			vm_exit_rendezvous(vcpu->vcpu, rip);
 			break;
 		}
 
 		if (vcpu_reqidle(evinfo)) {
 			enable_intr();
-			vm_exit_reqidle(vmx->vm, vcpuid, rip);
+			vm_exit_reqidle(vcpu->vcpu, rip);
 			break;
 		}
 
-		if (vcpu_should_yield(vm, vcpuid)) {
+		if (vcpu_should_yield(vcpu->vcpu)) {
 			enable_intr();
-			vm_exit_astpending(vmx->vm, vcpuid, rip);
+			vm_exit_astpending(vcpu->vcpu, rip);
 			vmx_astpending_trace(vcpu, rip);
 			handled = HANDLED;
 			break;
 		}
 
-		if (vcpu_debugged(vm, vcpuid)) {
+		if (vcpu_debugged(vcpu->vcpu)) {
 			enable_intr();
-			vm_exit_debug(vmx->vm, vcpuid, rip);
+			vm_exit_debug(vcpu->vcpu, rip);
 			break;
 		}
 
@@ -3214,7 +3210,7 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 	    vmexit->exitcode);
 
 	VMCLEAR(vmcs);
-	vmx_msr_guest_exit(vmx, vcpu);
+	vmx_msr_guest_exit(vcpu);
 
 	return (0);
 }
@@ -3390,7 +3386,7 @@ vmx_getreg(void *vcpui, int reg, uint64_t *retval)
 	struct vmx_vcpu *vcpu = vcpui;
 	struct vmx *vmx = vcpu->vmx;
 
-	running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+	running = vcpu_is_running(vcpu->vcpu, &hostcpu);
 	if (running && hostcpu != curcpu)
 		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm),
 		    vcpu->vcpuid);
@@ -3413,7 +3409,7 @@ vmx_setreg(void *vcpui, int reg, uint64_t val)
 	struct vmx_vcpu *vcpu = vcpui;
 	struct vmx *vmx = vcpu->vmx;
 
-	running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+	running = vcpu_is_running(vcpu->vcpu, &hostcpu);
 	if (running && hostcpu != curcpu)
 		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm),
 		    vcpu->vcpuid);
@@ -3480,7 +3476,7 @@ vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc)
 	struct vmx_vcpu *vcpu = vcpui;
 	struct vmx *vmx = vcpu->vmx;
 
-	running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+	running = vcpu_is_running(vcpu->vcpu, &hostcpu);
 	if (running && hostcpu != curcpu)
 		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm),
 		    vcpu->vcpuid);
@@ -3495,7 +3491,7 @@ vmx_setdesc(void *vcpui, int reg, struct seg_desc *desc)
 	struct vmx_vcpu *vcpu = vcpui;
 	struct vmx *vmx = vcpu->vmx;
 
-	running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+	running = vcpu_is_running(vcpu->vcpu, &hostcpu);
 	if (running && hostcpu != curcpu)
 		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm),
 		    vcpu->vcpuid);
@@ -3806,7 +3802,7 @@ vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
 		struct vm_exit *vmexit;
 		uint8_t rvi, ppr;
 
-		vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
+		vmexit = vm_exitinfo(vlapic->vcpu);
 		KASSERT(vmexit->exitcode == VM_EXITCODE_HLT,
 		    ("vmx_pending_intr: exitcode not 'HLT'"));
 		rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT;
@@ -3875,7 +3871,7 @@ vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
*** 608 LINES SKIPPED ***


