Date:      Tue, 15 Sep 2020 21:43:41 +0000 (UTC)
From:      Gordon Tetlow <gordon@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-releng@freebsd.org
Subject:   svn commit: r365779 - in releng: 11.3/sys/amd64/vmm/amd 11.3/sys/amd64/vmm/intel 11.4/sys/amd64/vmm/amd 11.4/sys/amd64/vmm/intel 12.1/sys/amd64/vmm/amd 12.1/sys/amd64/vmm/intel 12.2/sys/amd64/vmm/a...
Message-ID:  <202009152143.08FLhfOm046275@repo.freebsd.org>

Author: gordon
Date: Tue Sep 15 21:43:41 2020
New Revision: 365779
URL: https://svnweb.freebsd.org/changeset/base/365779

Log:
  Fix bhyve privilege escalation via VMCS access.
  
  Approved by:	so
  Approved by:	re (implicit for releng/12.2)
  Security:	FreeBSD-SA-20:28.bhyve_vmcs
  Security:	CVE-2020-24718
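
  The advisory referenced above (FreeBSD-SA-20:28.bhyve_vmcs) describes the
  underlying problem: vmm(4)'s register accessors accept not only the plain
  VM_REG_* indices but also identifiers that name a raw VMCS field (Intel)
  or VMCB offset (AMD) by setting a flag in the high bit, and vmx_setreg()
  and svm_setreg() previously forwarded such identifiers from the userland
  VM_SET_REGISTER path straight to vmcs_setreg() and vmcb_write().  A
  process with access to the VM device could therefore overwrite
  hypervisor-controlled fields and escalate privileges.  A minimal sketch
  of that identifier convention, using an illustrative flag name rather
  than the actual vmm(4) macros:

    #include <stdio.h>

    /*
     * Assumption: plain register indices are small non-negative values,
     * while raw VMCS-field/VMCB-offset access is requested by setting
     * the high bit.  Both guards in the diffs below reduce to testing
     * that bit: "reg < 0" on the Intel side, VMCB_ACCESS_OK() on AMD.
     */
    #define RAW_ACCESS_FLAG	0x80000000u

    int
    main(void)
    {
        int plain = 3;	/* e.g. a VM_REG_GUEST_* index */
        /* 0x6c00 is the SDM encoding of HOST_CR0, shown for illustration. */
        int raw = (int)(RAW_ACCESS_FLAG | 0x6c00);

        printf("plain index: %s\n", plain < 0 ? "rejected" : "allowed");
        printf("raw access:  %s\n", raw < 0 ? "rejected" : "allowed");
        return (0);
    }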

Modified:
  releng/11.3/sys/amd64/vmm/amd/svm.c
  releng/11.3/sys/amd64/vmm/intel/vmx.c
  releng/11.4/sys/amd64/vmm/amd/svm.c
  releng/11.4/sys/amd64/vmm/intel/vmx.c
  releng/12.1/sys/amd64/vmm/amd/svm.c
  releng/12.1/sys/amd64/vmm/intel/vmx.c
  releng/12.2/sys/amd64/vmm/amd/svm.c
  releng/12.2/sys/amd64/vmm/intel/vmx.c

Modified: releng/11.3/sys/amd64/vmm/amd/svm.c
==============================================================================
--- releng/11.3/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/11.3/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -469,11 +469,24 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iop
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
 	    VMCB_INTCPT_FERR_FREEZE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
 
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
 
 	/*
+	 * Intercept SVM instructions since AMD enables them in guests otherwise.
+	 * Non-intercepted VMMCALL causes #UD, skip it.
+	 */
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
+
+	/*
 	 * From section "Canonicalization and Consistency Checks" in APMv2
 	 * the VMRUN intercept bit must be set to pass the consistency check.
 	 */
@@ -1217,43 +1230,45 @@ emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int nu
 static const char *
 exit_reason_to_str(uint64_t reason)
 {
+	int i;
 	static char reasonbuf[32];
+	static const struct {
+		int reason;
+		const char *str;
+	} reasons[] = {
+		{ .reason = VMCB_EXIT_INVALID,	.str = "invalvmcb" },
+		{ .reason = VMCB_EXIT_SHUTDOWN,	.str = "shutdown" },
+		{ .reason = VMCB_EXIT_NPF, 	.str = "nptfault" },
+		{ .reason = VMCB_EXIT_PAUSE,	.str = "pause" },
+		{ .reason = VMCB_EXIT_HLT,	.str = "hlt" },
+		{ .reason = VMCB_EXIT_CPUID,	.str = "cpuid" },
+		{ .reason = VMCB_EXIT_IO,	.str = "inout" },
+		{ .reason = VMCB_EXIT_MC,	.str = "mchk" },
+		{ .reason = VMCB_EXIT_INTR,	.str = "extintr" },
+		{ .reason = VMCB_EXIT_NMI,	.str = "nmi" },
+		{ .reason = VMCB_EXIT_VINTR,	.str = "vintr" },
+		{ .reason = VMCB_EXIT_MSR,	.str = "msr" },
+		{ .reason = VMCB_EXIT_IRET,	.str = "iret" },
+		{ .reason = VMCB_EXIT_MONITOR,	.str = "monitor" },
+		{ .reason = VMCB_EXIT_MWAIT,	.str = "mwait" },
+		{ .reason = VMCB_EXIT_VMRUN,	.str = "vmrun" },
+		{ .reason = VMCB_EXIT_VMMCALL,	.str = "vmmcall" },
+		{ .reason = VMCB_EXIT_VMLOAD,	.str = "vmload" },
+		{ .reason = VMCB_EXIT_VMSAVE,	.str = "vmsave" },
+		{ .reason = VMCB_EXIT_STGI,	.str = "stgi" },
+		{ .reason = VMCB_EXIT_CLGI,	.str = "clgi" },
+		{ .reason = VMCB_EXIT_SKINIT,	.str = "skinit" },
+		{ .reason = VMCB_EXIT_ICEBP,	.str = "icebp" },
+		{ .reason = VMCB_EXIT_INVD,	.str = "invd" },
+		{ .reason = VMCB_EXIT_INVLPGA,	.str = "invlpga" },
+	};
 
-	switch (reason) {
-	case VMCB_EXIT_INVALID:
-		return ("invalvmcb");
-	case VMCB_EXIT_SHUTDOWN:
-		return ("shutdown");
-	case VMCB_EXIT_NPF:
-		return ("nptfault");
-	case VMCB_EXIT_PAUSE:
-		return ("pause");
-	case VMCB_EXIT_HLT:
-		return ("hlt");
-	case VMCB_EXIT_CPUID:
-		return ("cpuid");
-	case VMCB_EXIT_IO:
-		return ("inout");
-	case VMCB_EXIT_MC:
-		return ("mchk");
-	case VMCB_EXIT_INTR:
-		return ("extintr");
-	case VMCB_EXIT_NMI:
-		return ("nmi");
-	case VMCB_EXIT_VINTR:
-		return ("vintr");
-	case VMCB_EXIT_MSR:
-		return ("msr");
-	case VMCB_EXIT_IRET:
-		return ("iret");
-	case VMCB_EXIT_MONITOR:
-		return ("monitor");
-	case VMCB_EXIT_MWAIT:
-		return ("mwait");
-	default:
-		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
-		return (reasonbuf);
+	for (i = 0; i < nitems(reasons); i++) {
+		if (reasons[i].reason == reason)
+			return (reasons[i].str);
 	}
+	snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
+	return (reasonbuf);
 }
 #endif	/* KTR */
 
@@ -1505,6 +1520,20 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct 
 	case VMCB_EXIT_MWAIT:
 		vmexit->exitcode = VM_EXITCODE_MWAIT;
 		break;
+	case VMCB_EXIT_SHUTDOWN:
+	case VMCB_EXIT_VMRUN:
+	case VMCB_EXIT_VMMCALL:
+	case VMCB_EXIT_VMLOAD:
+	case VMCB_EXIT_VMSAVE:
+	case VMCB_EXIT_STGI:
+	case VMCB_EXIT_CLGI:
+	case VMCB_EXIT_SKINIT:
+	case VMCB_EXIT_ICEBP:
+	case VMCB_EXIT_INVD:
+	case VMCB_EXIT_INVLPGA:
+		vm_inject_ud(svm_sc->vm, vcpu);
+		handled = 1;
+		break;
 	default:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
 		break;
@@ -2173,8 +2202,11 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t va
 		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
 	}
 
-	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
-		return (0);
+	/* Do not permit user write access to VMCB fields by offset. */
+	if (!VMCB_ACCESS_OK(ident)) {
+		if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
+			return (0);
+		}
 	}
 
 	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
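
  Two notes on the svm.c changes above.  The refactored
  exit_reason_to_str() relies on the stock nitems() macro from
  <sys/param.h> for the table walk, and the svm_setreg() hunk gates
  vmcb_write() on the identifier not being offset-encoded; identifiers
  that are offset-encoded fall through to the software-context lookup and
  fail.  A minimal sketch of that gate, assuming the high-bit encoding
  convention (the macro and stub below are illustrative stand-ins, not
  the vmcb.h definitions):

    #include <stdio.h>

    /* Assumed shape of the VMCB access test: the high bit of an
     * identifier marks a raw-offset access. */
    #define VMCB_ACCESS_OK(v)	(((unsigned int)(v)) & 0x80000000u)

    static int vmcb_write_stub(int ident) { (void)ident; return (0); }

    static int
    svm_setreg_sketch(int ident)
    {
        /* Do not permit user write access to VMCB fields by offset. */
        if (!VMCB_ACCESS_OK(ident)) {
            if (vmcb_write_stub(ident) == 0)
                return (0);
        }
        /* Offset-encoded identifiers skip vmcb_write() and, failing
         * the software-context lookup, end in an error. */
        return (-1);
    }

    int
    main(void)
    {
        printf("plain identifier: %d\n", svm_setreg_sketch(3));
        printf("offset-encoded:   %d\n",
            svm_setreg_sketch((int)0xc0000058u));
        return (0);
    }

  Separately, the svm_vmexit() hunk reflects the newly intercepted SVM
  instructions (VMRUN, VMMCALL, VMLOAD, VMSAVE, STGI, CLGI, SKINIT,
  ICEBP, INVD, INVLPGA) back into the guest as #UD via vm_inject_ud().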

Modified: releng/11.3/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- releng/11.3/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/11.3/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -3185,6 +3185,10 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 
+	/* Do not permit user write access to VMCS fields by offset. */
+	if (reg < 0)
+		return (EINVAL);
+
 	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
 
 	if (error == 0) {
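
  On the Intel side the ordering in vmx_setreg() does the work: by this
  point vmxctx_setreg() has already handled the software-maintained guest
  registers, so any remaining identifier that carries the raw-field flag
  (and is therefore negative as a signed int) can be rejected before
  vmcs_setreg() sees it.  A hedged sketch of that check, with VMCS_IDENT()
  as an assumed stand-in for the vmcs.h encoding macro:

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative stand-in; the real VMCS_IDENT() lives in vmcs.h and
     * tags raw VMCS field encodings with the high bit. */
    #define VMCS_IDENT(encoding)	((int)((encoding) | 0x80000000u))

    static int
    vmx_setreg_sketch(int reg)
    {
        /* Do not permit user write access to VMCS fields by offset. */
        if (reg < 0)
            return (EINVAL);
        /* ... the real function would continue into vmcs_setreg() ... */
        return (0);
    }

    int
    main(void)
    {
        printf("guest register index 3 -> %d\n", vmx_setreg_sketch(3));
        printf("VMCS_IDENT(0x6c00)     -> %d (EINVAL)\n",
            vmx_setreg_sketch(VMCS_IDENT(0x6c00)));
        return (0);
    }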

Modified: releng/11.4/sys/amd64/vmm/amd/svm.c
==============================================================================
--- releng/11.4/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/11.4/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -469,11 +469,24 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iop
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
 	    VMCB_INTCPT_FERR_FREEZE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
 
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
 
 	/*
+	 * Intercept SVM instructions since AMD enables them in guests otherwise.
+	 * Non-intercepted VMMCALL causes #UD, skip it.
+	 */
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
+
+	/*
 	 * From section "Canonicalization and Consistency Checks" in APMv2
 	 * the VMRUN intercept bit must be set to pass the consistency check.
 	 */
@@ -1217,43 +1230,45 @@ emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int nu
 static const char *
 exit_reason_to_str(uint64_t reason)
 {
+	int i;
 	static char reasonbuf[32];
+	static const struct {
+		int reason;
+		const char *str;
+	} reasons[] = {
+		{ .reason = VMCB_EXIT_INVALID,	.str = "invalvmcb" },
+		{ .reason = VMCB_EXIT_SHUTDOWN,	.str = "shutdown" },
+		{ .reason = VMCB_EXIT_NPF, 	.str = "nptfault" },
+		{ .reason = VMCB_EXIT_PAUSE,	.str = "pause" },
+		{ .reason = VMCB_EXIT_HLT,	.str = "hlt" },
+		{ .reason = VMCB_EXIT_CPUID,	.str = "cpuid" },
+		{ .reason = VMCB_EXIT_IO,	.str = "inout" },
+		{ .reason = VMCB_EXIT_MC,	.str = "mchk" },
+		{ .reason = VMCB_EXIT_INTR,	.str = "extintr" },
+		{ .reason = VMCB_EXIT_NMI,	.str = "nmi" },
+		{ .reason = VMCB_EXIT_VINTR,	.str = "vintr" },
+		{ .reason = VMCB_EXIT_MSR,	.str = "msr" },
+		{ .reason = VMCB_EXIT_IRET,	.str = "iret" },
+		{ .reason = VMCB_EXIT_MONITOR,	.str = "monitor" },
+		{ .reason = VMCB_EXIT_MWAIT,	.str = "mwait" },
+		{ .reason = VMCB_EXIT_VMRUN,	.str = "vmrun" },
+		{ .reason = VMCB_EXIT_VMMCALL,	.str = "vmmcall" },
+		{ .reason = VMCB_EXIT_VMLOAD,	.str = "vmload" },
+		{ .reason = VMCB_EXIT_VMSAVE,	.str = "vmsave" },
+		{ .reason = VMCB_EXIT_STGI,	.str = "stgi" },
+		{ .reason = VMCB_EXIT_CLGI,	.str = "clgi" },
+		{ .reason = VMCB_EXIT_SKINIT,	.str = "skinit" },
+		{ .reason = VMCB_EXIT_ICEBP,	.str = "icebp" },
+		{ .reason = VMCB_EXIT_INVD,	.str = "invd" },
+		{ .reason = VMCB_EXIT_INVLPGA,	.str = "invlpga" },
+	};
 
-	switch (reason) {
-	case VMCB_EXIT_INVALID:
-		return ("invalvmcb");
-	case VMCB_EXIT_SHUTDOWN:
-		return ("shutdown");
-	case VMCB_EXIT_NPF:
-		return ("nptfault");
-	case VMCB_EXIT_PAUSE:
-		return ("pause");
-	case VMCB_EXIT_HLT:
-		return ("hlt");
-	case VMCB_EXIT_CPUID:
-		return ("cpuid");
-	case VMCB_EXIT_IO:
-		return ("inout");
-	case VMCB_EXIT_MC:
-		return ("mchk");
-	case VMCB_EXIT_INTR:
-		return ("extintr");
-	case VMCB_EXIT_NMI:
-		return ("nmi");
-	case VMCB_EXIT_VINTR:
-		return ("vintr");
-	case VMCB_EXIT_MSR:
-		return ("msr");
-	case VMCB_EXIT_IRET:
-		return ("iret");
-	case VMCB_EXIT_MONITOR:
-		return ("monitor");
-	case VMCB_EXIT_MWAIT:
-		return ("mwait");
-	default:
-		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
-		return (reasonbuf);
+	for (i = 0; i < nitems(reasons); i++) {
+		if (reasons[i].reason == reason)
+			return (reasons[i].str);
 	}
+	snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
+	return (reasonbuf);
 }
 #endif	/* KTR */
 
@@ -1505,6 +1520,20 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct 
 	case VMCB_EXIT_MWAIT:
 		vmexit->exitcode = VM_EXITCODE_MWAIT;
 		break;
+	case VMCB_EXIT_SHUTDOWN:
+	case VMCB_EXIT_VMRUN:
+	case VMCB_EXIT_VMMCALL:
+	case VMCB_EXIT_VMLOAD:
+	case VMCB_EXIT_VMSAVE:
+	case VMCB_EXIT_STGI:
+	case VMCB_EXIT_CLGI:
+	case VMCB_EXIT_SKINIT:
+	case VMCB_EXIT_ICEBP:
+	case VMCB_EXIT_INVD:
+	case VMCB_EXIT_INVLPGA:
+		vm_inject_ud(svm_sc->vm, vcpu);
+		handled = 1;
+		break;
 	default:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
 		break;
@@ -2173,8 +2202,11 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t va
 		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
 	}
 
-	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
-		return (0);
+	/* Do not permit user write access to VMCB fields by offset. */
+	if (!VMCB_ACCESS_OK(ident)) {
+		if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
+			return (0);
+		}
 	}
 
 	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

Modified: releng/11.4/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- releng/11.4/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/11.4/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -3215,6 +3215,10 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 
+	/* Do not permit user write access to VMCS fields by offset. */
+	if (reg < 0)
+		return (EINVAL);
+
 	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
 
 	if (error == 0) {

Modified: releng/12.1/sys/amd64/vmm/amd/svm.c
==============================================================================
--- releng/12.1/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/12.1/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -469,11 +469,24 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iop
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
 	    VMCB_INTCPT_FERR_FREEZE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
 
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
 
 	/*
+	 * Intercept SVM instructions since AMD enables them in guests otherwise.
+	 * Non-intercepted VMMCALL causes #UD, skip it.
+	 */
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
+
+	/*
 	 * From section "Canonicalization and Consistency Checks" in APMv2
 	 * the VMRUN intercept bit must be set to pass the consistency check.
 	 */
@@ -1217,43 +1230,45 @@ emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int nu
 static const char *
 exit_reason_to_str(uint64_t reason)
 {
+	int i;
 	static char reasonbuf[32];
+	static const struct {
+		int reason;
+		const char *str;
+	} reasons[] = {
+		{ .reason = VMCB_EXIT_INVALID,	.str = "invalvmcb" },
+		{ .reason = VMCB_EXIT_SHUTDOWN,	.str = "shutdown" },
+		{ .reason = VMCB_EXIT_NPF, 	.str = "nptfault" },
+		{ .reason = VMCB_EXIT_PAUSE,	.str = "pause" },
+		{ .reason = VMCB_EXIT_HLT,	.str = "hlt" },
+		{ .reason = VMCB_EXIT_CPUID,	.str = "cpuid" },
+		{ .reason = VMCB_EXIT_IO,	.str = "inout" },
+		{ .reason = VMCB_EXIT_MC,	.str = "mchk" },
+		{ .reason = VMCB_EXIT_INTR,	.str = "extintr" },
+		{ .reason = VMCB_EXIT_NMI,	.str = "nmi" },
+		{ .reason = VMCB_EXIT_VINTR,	.str = "vintr" },
+		{ .reason = VMCB_EXIT_MSR,	.str = "msr" },
+		{ .reason = VMCB_EXIT_IRET,	.str = "iret" },
+		{ .reason = VMCB_EXIT_MONITOR,	.str = "monitor" },
+		{ .reason = VMCB_EXIT_MWAIT,	.str = "mwait" },
+		{ .reason = VMCB_EXIT_VMRUN,	.str = "vmrun" },
+		{ .reason = VMCB_EXIT_VMMCALL,	.str = "vmmcall" },
+		{ .reason = VMCB_EXIT_VMLOAD,	.str = "vmload" },
+		{ .reason = VMCB_EXIT_VMSAVE,	.str = "vmsave" },
+		{ .reason = VMCB_EXIT_STGI,	.str = "stgi" },
+		{ .reason = VMCB_EXIT_CLGI,	.str = "clgi" },
+		{ .reason = VMCB_EXIT_SKINIT,	.str = "skinit" },
+		{ .reason = VMCB_EXIT_ICEBP,	.str = "icebp" },
+		{ .reason = VMCB_EXIT_INVD,	.str = "invd" },
+		{ .reason = VMCB_EXIT_INVLPGA,	.str = "invlpga" },
+	};
 
-	switch (reason) {
-	case VMCB_EXIT_INVALID:
-		return ("invalvmcb");
-	case VMCB_EXIT_SHUTDOWN:
-		return ("shutdown");
-	case VMCB_EXIT_NPF:
-		return ("nptfault");
-	case VMCB_EXIT_PAUSE:
-		return ("pause");
-	case VMCB_EXIT_HLT:
-		return ("hlt");
-	case VMCB_EXIT_CPUID:
-		return ("cpuid");
-	case VMCB_EXIT_IO:
-		return ("inout");
-	case VMCB_EXIT_MC:
-		return ("mchk");
-	case VMCB_EXIT_INTR:
-		return ("extintr");
-	case VMCB_EXIT_NMI:
-		return ("nmi");
-	case VMCB_EXIT_VINTR:
-		return ("vintr");
-	case VMCB_EXIT_MSR:
-		return ("msr");
-	case VMCB_EXIT_IRET:
-		return ("iret");
-	case VMCB_EXIT_MONITOR:
-		return ("monitor");
-	case VMCB_EXIT_MWAIT:
-		return ("mwait");
-	default:
-		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
-		return (reasonbuf);
+	for (i = 0; i < nitems(reasons); i++) {
+		if (reasons[i].reason == reason)
+			return (reasons[i].str);
 	}
+	snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
+	return (reasonbuf);
 }
 #endif	/* KTR */
 
@@ -1505,6 +1520,20 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct 
 	case VMCB_EXIT_MWAIT:
 		vmexit->exitcode = VM_EXITCODE_MWAIT;
 		break;
+	case VMCB_EXIT_SHUTDOWN:
+	case VMCB_EXIT_VMRUN:
+	case VMCB_EXIT_VMMCALL:
+	case VMCB_EXIT_VMLOAD:
+	case VMCB_EXIT_VMSAVE:
+	case VMCB_EXIT_STGI:
+	case VMCB_EXIT_CLGI:
+	case VMCB_EXIT_SKINIT:
+	case VMCB_EXIT_ICEBP:
+	case VMCB_EXIT_INVD:
+	case VMCB_EXIT_INVLPGA:
+		vm_inject_ud(svm_sc->vm, vcpu);
+		handled = 1;
+		break;
 	default:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
 		break;
@@ -2179,8 +2208,11 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t va
 		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
 	}
 
-	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
-		return (0);
+	/* Do not permit user write access to VMCB fields by offset. */
+	if (!VMCB_ACCESS_OK(ident)) {
+		if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
+			return (0);
+		}
 	}
 
 	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

Modified: releng/12.1/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- releng/12.1/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/12.1/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -3193,6 +3193,10 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 
+	/* Do not permit user write access to VMCS fields by offset. */
+	if (reg < 0)
+		return (EINVAL);
+
 	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
 
 	if (error == 0) {

Modified: releng/12.2/sys/amd64/vmm/amd/svm.c
==============================================================================
--- releng/12.2/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/12.2/sys/amd64/vmm/amd/svm.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -466,11 +466,24 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iop
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
 	    VMCB_INTCPT_FERR_FREEZE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
 
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
 
 	/*
+	 * Intercept SVM instructions since AMD enables them in guests otherwise.
+	 * Non-intercepted VMMCALL causes #UD, skip it.
+	 */
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
+
+	/*
 	 * From section "Canonicalization and Consistency Checks" in APMv2
 	 * the VMRUN intercept bit must be set to pass the consistency check.
 	 */
@@ -1214,43 +1227,45 @@ emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int nu
 static const char *
 exit_reason_to_str(uint64_t reason)
 {
+	int i;
 	static char reasonbuf[32];
+	static const struct {
+		int reason;
+		const char *str;
+	} reasons[] = {
+		{ .reason = VMCB_EXIT_INVALID,	.str = "invalvmcb" },
+		{ .reason = VMCB_EXIT_SHUTDOWN,	.str = "shutdown" },
+		{ .reason = VMCB_EXIT_NPF, 	.str = "nptfault" },
+		{ .reason = VMCB_EXIT_PAUSE,	.str = "pause" },
+		{ .reason = VMCB_EXIT_HLT,	.str = "hlt" },
+		{ .reason = VMCB_EXIT_CPUID,	.str = "cpuid" },
+		{ .reason = VMCB_EXIT_IO,	.str = "inout" },
+		{ .reason = VMCB_EXIT_MC,	.str = "mchk" },
+		{ .reason = VMCB_EXIT_INTR,	.str = "extintr" },
+		{ .reason = VMCB_EXIT_NMI,	.str = "nmi" },
+		{ .reason = VMCB_EXIT_VINTR,	.str = "vintr" },
+		{ .reason = VMCB_EXIT_MSR,	.str = "msr" },
+		{ .reason = VMCB_EXIT_IRET,	.str = "iret" },
+		{ .reason = VMCB_EXIT_MONITOR,	.str = "monitor" },
+		{ .reason = VMCB_EXIT_MWAIT,	.str = "mwait" },
+		{ .reason = VMCB_EXIT_VMRUN,	.str = "vmrun" },
+		{ .reason = VMCB_EXIT_VMMCALL,	.str = "vmmcall" },
+		{ .reason = VMCB_EXIT_VMLOAD,	.str = "vmload" },
+		{ .reason = VMCB_EXIT_VMSAVE,	.str = "vmsave" },
+		{ .reason = VMCB_EXIT_STGI,	.str = "stgi" },
+		{ .reason = VMCB_EXIT_CLGI,	.str = "clgi" },
+		{ .reason = VMCB_EXIT_SKINIT,	.str = "skinit" },
+		{ .reason = VMCB_EXIT_ICEBP,	.str = "icebp" },
+		{ .reason = VMCB_EXIT_INVD,	.str = "invd" },
+		{ .reason = VMCB_EXIT_INVLPGA,	.str = "invlpga" },
+	};
 
-	switch (reason) {
-	case VMCB_EXIT_INVALID:
-		return ("invalvmcb");
-	case VMCB_EXIT_SHUTDOWN:
-		return ("shutdown");
-	case VMCB_EXIT_NPF:
-		return ("nptfault");
-	case VMCB_EXIT_PAUSE:
-		return ("pause");
-	case VMCB_EXIT_HLT:
-		return ("hlt");
-	case VMCB_EXIT_CPUID:
-		return ("cpuid");
-	case VMCB_EXIT_IO:
-		return ("inout");
-	case VMCB_EXIT_MC:
-		return ("mchk");
-	case VMCB_EXIT_INTR:
-		return ("extintr");
-	case VMCB_EXIT_NMI:
-		return ("nmi");
-	case VMCB_EXIT_VINTR:
-		return ("vintr");
-	case VMCB_EXIT_MSR:
-		return ("msr");
-	case VMCB_EXIT_IRET:
-		return ("iret");
-	case VMCB_EXIT_MONITOR:
-		return ("monitor");
-	case VMCB_EXIT_MWAIT:
-		return ("mwait");
-	default:
-		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
-		return (reasonbuf);
+	for (i = 0; i < nitems(reasons); i++) {
+		if (reasons[i].reason == reason)
+			return (reasons[i].str);
 	}
+	snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
+	return (reasonbuf);
 }
 #endif	/* KTR */
 
@@ -1502,6 +1517,20 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct 
 	case VMCB_EXIT_MWAIT:
 		vmexit->exitcode = VM_EXITCODE_MWAIT;
 		break;
+	case VMCB_EXIT_SHUTDOWN:
+	case VMCB_EXIT_VMRUN:
+	case VMCB_EXIT_VMMCALL:
+	case VMCB_EXIT_VMLOAD:
+	case VMCB_EXIT_VMSAVE:
+	case VMCB_EXIT_STGI:
+	case VMCB_EXIT_CLGI:
+	case VMCB_EXIT_SKINIT:
+	case VMCB_EXIT_ICEBP:
+	case VMCB_EXIT_INVD:
+	case VMCB_EXIT_INVLPGA:
+		vm_inject_ud(svm_sc->vm, vcpu);
+		handled = 1;
+		break;
 	default:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
 		break;
@@ -2176,8 +2205,11 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t va
 		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
 	}
 
-	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
-		return (0);
+	/* Do not permit user write access to VMCB fields by offset. */
+	if (!VMCB_ACCESS_OK(ident)) {
+		if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
+			return (0);
+		}
 	}
 
 	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

Modified: releng/12.2/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- releng/12.2/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:42:05 2020	(r365778)
+++ releng/12.2/sys/amd64/vmm/intel/vmx.c	Tue Sep 15 21:43:41 2020	(r365779)
@@ -3236,6 +3236,10 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 
+	/* Do not permit user write access to VMCS fields by offset. */
+	if (reg < 0)
+		return (EINVAL);
+
 	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
 
 	if (error == 0) {


