From: Tycho Nightingale <tychon@FreeBSD.org>
Date: Fri, 9 Mar 2018 19:39:08 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject: svn commit: r330704 - in stable/11/sys/amd64/vmm: amd intel
Message-Id: <201803091939.w29Jd8lV069300@repo.freebsd.org>

Author: tychon
Date: Fri Mar  9 19:39:08 2018
New Revision: 330704
URL: https://svnweb.freebsd.org/changeset/base/330704

Log:
  MFC r328011,329162

  r328011:
  Provide some mitigation against CVE-2017-5715 by clearing, upon
  returning from the guest, those registers that aren't immediately
  clobbered by the host.  This eradicates any remaining guest contents,
  limiting their usefulness in an exploit gadget.

  r329162:
  Provide further mitigation against CVE-2017-5715 by flushing the
  return stack buffer (RSB) upon returning from the guest.
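The RSB-stuffing loop added below relies on every 'call' pushing a return
address into the CPU's return stack buffer: 16 iterations of two calls plant
32 benign entries, enough to overwrite the RSB on contemporary parts, and the
'pause; call' pairs act as landing pads that trap any speculation consuming a
planted entry. Here is a minimal sketch of the same idiom as a standalone C
routine; 'rsb_fill' is a hypothetical name, not part of this commit, and the
sketch assumes kernel-style compilation with -mno-red-zone, since the calls
transiently write to the stack beneath %rsp:

/*
 * Illustrative rendering of the RSB-fill loop from this commit.
 */
static inline void
rsb_fill(void)
{
	__asm__ __volatile__(
	    "	mov	$16, %%ecx	\n"	/* 16 iterations, two calls per loop */
	    "	mov	%%rsp, %%rax	\n"	/* the calls never 'ret'; remember %rsp */
	    "0:	call	2f		\n"	/* plant an RSB entry */
	    "1:	pause			\n"	/* speculation consuming that entry */
	    "	call	1b		\n"	/* spins harmlessly here */
	    "2:	call	2f		\n"	/* plant a second RSB entry */
	    "1:	pause			\n"
	    "	call	1b		\n"
	    "2:	sub	$1, %%ecx	\n"
	    "	jnz	0b		\n"
	    "	mov	%%rax, %%rsp	\n"	/* discard the 32 pushed return addresses */
	    : : : "rax", "rcx", "cc", "memory");
}

In the committed code the same loop runs inline on the vmexit path, so the
host executes no 'ret' that could consume a guest-primed RSB entry before the
buffer has been overwritten.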
Modified:
  stable/11/sys/amd64/vmm/amd/svm_support.S
  stable/11/sys/amd64/vmm/intel/vmcs.c
  stable/11/sys/amd64/vmm/intel/vmx.h
  stable/11/sys/amd64/vmm/intel/vmx_support.S
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/vmm/amd/svm_support.S
==============================================================================
--- stable/11/sys/amd64/vmm/amd/svm_support.S	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/amd/svm_support.S	Fri Mar  9 19:39:08 2018	(r330704)
@@ -113,6 +113,23 @@ ENTRY(svm_launch)
 	movq %rdi, SCTX_RDI(%rax)
 	movq %rsi, SCTX_RSI(%rax)
 
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov $16, %ecx	/* 16 iterations, two calls per loop */
+	mov %rsp, %rax
+0:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	sub $1, %ecx
+	jnz 0b
+	mov %rax, %rsp
+
 	/* Restore host state */
 	pop %r15
 	pop %r14
@@ -124,8 +141,20 @@ ENTRY(svm_launch)
 	pop %rdx
 	mov %edx, %eax
 	shr $32, %rdx
-	mov $MSR_GSBASE, %ecx
+	mov $MSR_GSBASE, %rcx
 	wrmsr
+
+	/*
+	 * Clobber the remaining registers that hold guest contents so
+	 * they can't be misused.
+	 */
+	xor %rbp, %rbp
+	xor %rdi, %rdi
+	xor %rsi, %rsi
+	xor %r8, %r8
+	xor %r9, %r9
+	xor %r10, %r10
+	xor %r11, %r11
 
 	VLEAVE
 	ret

Modified: stable/11/sys/amd64/vmm/intel/vmcs.c
==============================================================================
--- stable/11/sys/amd64/vmm/intel/vmcs.c	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/intel/vmcs.c	Fri Mar  9 19:39:08 2018	(r330704)
@@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/pcpu.h>
 
@@ -50,6 +51,12 @@ __FBSDID("$FreeBSD$");
 #include <ddb/ddb.h>
 #endif
 
+SYSCTL_DECL(_hw_vmm_vmx);
+
+static int no_flush_rsb;
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW,
+    &no_flush_rsb, 0, "Do not flush RSB upon vmexit");
+
 static uint64_t
 vmcs_fix_regval(uint32_t encoding, uint64_t val)
 {
@@ -401,8 +408,15 @@ vmcs_init(struct vmcs *vmcs)
 		goto done;
 
 	/* instruction pointer */
-	if ((error = vmwrite(VMCS_HOST_RIP, (u_long)vmx_exit_guest)) != 0)
-		goto done;
+	if (no_flush_rsb) {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest)) != 0)
+			goto done;
+	} else {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest_flush_rsb)) != 0)
+			goto done;
+	}
 
 	/* link pointer */
 	if ((error = vmwrite(VMCS_LINK_POINTER, ~0)) != 0)

Modified: stable/11/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- stable/11/sys/amd64/vmm/intel/vmx.h	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/intel/vmx.h	Fri Mar  9 19:39:08 2018	(r330704)
@@ -148,5 +148,6 @@ u_long	vmx_fix_cr4(u_long cr4);
 int	vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
 
 extern char	vmx_exit_guest[];
+extern char	vmx_exit_guest_flush_rsb[];
 
 #endif

Modified: stable/11/sys/amd64/vmm/intel/vmx_support.S
==============================================================================
--- stable/11/sys/amd64/vmm/intel/vmx_support.S	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/intel/vmx_support.S	Fri Mar  9 19:39:08 2018	(r330704)
@@ -42,6 +42,29 @@
 #define	VLEAVE	pop %rbp
 
 /*
+ * Save the guest context.
+ */
+#define	VMX_GUEST_SAVE						\
+	movq %rdi,VMXCTX_GUEST_RDI(%rsp);			\
+	movq %rsi,VMXCTX_GUEST_RSI(%rsp);			\
+	movq %rdx,VMXCTX_GUEST_RDX(%rsp);			\
+	movq %rcx,VMXCTX_GUEST_RCX(%rsp);			\
+	movq %r8,VMXCTX_GUEST_R8(%rsp);				\
+	movq %r9,VMXCTX_GUEST_R9(%rsp);				\
+	movq %rax,VMXCTX_GUEST_RAX(%rsp);			\
+	movq %rbx,VMXCTX_GUEST_RBX(%rsp);			\
+	movq %rbp,VMXCTX_GUEST_RBP(%rsp);			\
+	movq %r10,VMXCTX_GUEST_R10(%rsp);			\
+	movq %r11,VMXCTX_GUEST_R11(%rsp);			\
+	movq %r12,VMXCTX_GUEST_R12(%rsp);			\
+	movq %r13,VMXCTX_GUEST_R13(%rsp);			\
+	movq %r14,VMXCTX_GUEST_R14(%rsp);			\
+	movq %r15,VMXCTX_GUEST_R15(%rsp);			\
+	movq %cr2,%rdi;						\
+	movq %rdi,VMXCTX_GUEST_CR2(%rsp);			\
+	movq %rsp,%rdi;
+
+/*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
  * On "return" all registers are updated to reflect guest state. The two
@@ -72,6 +95,20 @@
 	movq	VMXCTX_GUEST_RDI(%rdi),%rdi;	/* restore rdi the last */
 
 /*
+ * Clobber the remaining registers that hold guest contents so they
+ * can't be misused.
+ */
+#define	VMX_GUEST_CLOBBER					\
+	xor %rax, %rax;						\
+	xor %rcx, %rcx;						\
+	xor %rdx, %rdx;						\
+	xor %rsi, %rsi;						\
+	xor %r8, %r8;						\
+	xor %r9, %r9;						\
+	xor %r10, %r10;						\
+	xor %r11, %r11;
+
+/*
  * Save and restore the host context.
  *
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
@@ -197,33 +234,57 @@ inst_error:
 	 * The VMCS-restored %rsp points to the struct vmxctx
 	 */
 	ALIGN_TEXT
-	.globl	vmx_exit_guest
-vmx_exit_guest:
+	.globl	vmx_exit_guest_flush_rsb
+vmx_exit_guest_flush_rsb:
 	/*
 	 * Save guest state that is not automatically saved in the vmcs.
 	 */
-	movq %rdi,VMXCTX_GUEST_RDI(%rsp)
-	movq %rsi,VMXCTX_GUEST_RSI(%rsp)
-	movq %rdx,VMXCTX_GUEST_RDX(%rsp)
-	movq %rcx,VMXCTX_GUEST_RCX(%rsp)
-	movq %r8,VMXCTX_GUEST_R8(%rsp)
-	movq %r9,VMXCTX_GUEST_R9(%rsp)
-	movq %rax,VMXCTX_GUEST_RAX(%rsp)
-	movq %rbx,VMXCTX_GUEST_RBX(%rsp)
-	movq %rbp,VMXCTX_GUEST_RBP(%rsp)
-	movq %r10,VMXCTX_GUEST_R10(%rsp)
-	movq %r11,VMXCTX_GUEST_R11(%rsp)
-	movq %r12,VMXCTX_GUEST_R12(%rsp)
-	movq %r13,VMXCTX_GUEST_R13(%rsp)
-	movq %r14,VMXCTX_GUEST_R14(%rsp)
-	movq %r15,VMXCTX_GUEST_R15(%rsp)
+	VMX_GUEST_SAVE
 
-	movq %cr2,%rdi
-	movq %rdi,VMXCTX_GUEST_CR2(%rsp)
+	/*
+	 * Deactivate guest pmap from this cpu.
+	 */
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %r10d
+	LK btrl	%r10d, PM_ACTIVE(%r11)
 
-	movq %rsp,%rdi
+	VMX_HOST_RESTORE
 
+	VMX_GUEST_CLOBBER
+
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov $16, %ecx	/* 16 iterations, two calls per loop */
+	mov %rsp, %rax
+0:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	sub $1, %ecx
+	jnz 0b
+	mov %rax, %rsp
+
+	/*
+	 * This will return to the caller of 'vmx_enter_guest()' with a return
+	 * value of VMX_GUEST_VMEXIT.
+	 */
+	movl	$VMX_GUEST_VMEXIT, %eax
+	VLEAVE
+	ret
+
+	.globl	vmx_exit_guest
+vmx_exit_guest:
+	/*
+	 * Save guest state that is not automatically saved in the vmcs.
+	 */
+	VMX_GUEST_SAVE
+
 	/*
 	 * Deactivate guest pmap from this cpu.
 	 */
 	movq	VMXCTX_PMAP(%rdi), %r11
@@ -231,6 +292,8 @@ vmx_exit_guest:
 	LK btrl	%r10d, PM_ACTIVE(%r11)
 
 	VMX_HOST_RESTORE
+
+	VMX_GUEST_CLOBBER
 
 	/*
 	 * This will return to the caller of 'vmx_enter_guest()' with a return
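As a usage note, the vmcs.c change above enables the RSB flush by default
(no_flush_rsb is zero-initialized) and adds a read-write knob to opt out.
Going by the SYSCTL_DECL(_hw_vmm_vmx) and SYSCTL_INT(..., OID_AUTO,
no_flush_rsb, ...) declarations, the OID should surface as
hw.vmm.vmx.no_flush_rsb once vmm.ko is loaded; the exact path is inferred
from those macros, not stated in the commit. A minimal sketch of reading and
setting it from userland with sysctlbyname(3) follows; writing requires root,
and the program is illustrative only:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	int val;
	size_t len = sizeof(val);

	/* Read the current setting: 0 means the RSB is flushed on vmexit. */
	if (sysctlbyname("hw.vmm.vmx.no_flush_rsb", &val, &len, NULL, 0) != 0)
		err(1, "sysctlbyname");
	printf("no_flush_rsb: %d\n", val);

	/*
	 * Opt out of the flush: subsequent vmcs_init() calls will install
	 * the original vmx_exit_guest entry point as the host %rip.
	 */
	val = 1;
	if (sysctlbyname("hw.vmm.vmx.no_flush_rsb", NULL, NULL, &val,
	    sizeof(val)) != 0)
		err(1, "sysctlbyname");
	return (0);
}

Since the knob is CTLFLAG_RW, it only affects VMCSes initialized after it is
changed; already-running vCPUs keep whichever host %rip was written at
vmcs_init() time.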