From owner-svn-src-all@freebsd.org  Mon Feb 12 14:45:28 2018
From: Tycho Nightingale <tychon@FreeBSD.org>
Date: Mon, 12 Feb 2018 14:45:27 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r329162 - in head/sys/amd64/vmm: amd intel
Message-Id: <201802121445.w1CEjR3n082516@repo.freebsd.org>
X-SVN-Group: head
X-SVN-Commit-Author: tychon
X-SVN-Commit-Paths: in head/sys/amd64/vmm: amd intel
X-SVN-Commit-Revision: 329162
X-SVN-Commit-Repository: base

Author: tychon
Date: Mon Feb 12 14:45:27 2018
New Revision: 329162
URL: https://svnweb.freebsd.org/changeset/base/329162

Log:
  Provide further mitigation against CVE-2017-5715 by flushing the
  return stack buffer (RSB) upon returning from the guest.

  This was inspired by this linux commit:
  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/arch/x86/kvm?id=117cc7a908c83697b0b737d15ae1eb5943afe35b

  Reviewed by:    grehan
  Sponsored by:   Dell EMC Isilon
  Differential Revision:  https://reviews.freebsd.org/D14272

Modified:
  head/sys/amd64/vmm/amd/svm_support.S
  head/sys/amd64/vmm/intel/vmcs.c
  head/sys/amd64/vmm/intel/vmx.h
  head/sys/amd64/vmm/intel/vmx_support.S
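Editor's note (not part of the commit message): the loop added to both the SVM
and VMX exit paths below is the common RSB-stuffing idiom.  Each call pushes a
return address onto the CPU's return stack buffer, so 16 iterations with two
calls each overwrite up to 32 entries; the depth of 32 is an assumption about
typical hardware, not something the commit states.  A commented sketch of the
same idiom follows, written as a standalone routine for readability; the name
rsb_stuff is hypothetical, and the patch inlines the sequence rather than
calling out to a function:

        /*
         * Each 'call' pushes a return address onto both the in-memory
         * stack and the return stack buffer.  The pushed address points
         * at a 'pause; call' trap, so a later host 'ret' whose prediction
         * comes from a stale, guest-planted RSB entry speculates
         * harmlessly into the trap instead of into attacker-chosen code.
         * Clobbers %rax and %rcx (both caller-saved in the SysV ABI).
         */
        .text
        .globl  rsb_stuff
rsb_stuff:
        mov     $16, %ecx               /* 16 iterations, two calls per loop */
        mov     %rsp, %rax              /* the calls below also push onto the stack */
0:      call    2f                      /* create an RSB entry */
1:      pause
        call    1b                      /* capture rogue speculation */
2:      call    2f                      /* create an RSB entry */
1:      pause
        call    1b                      /* capture rogue speculation */
2:      sub     $1, %ecx
        jnz     0b
        mov     %rax, %rsp              /* drop the 32 pushed return addresses */
        ret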
Modified: head/sys/amd64/vmm/amd/svm_support.S
==============================================================================
--- head/sys/amd64/vmm/amd/svm_support.S	Mon Feb 12 14:44:21 2018	(r329161)
+++ head/sys/amd64/vmm/amd/svm_support.S	Mon Feb 12 14:45:27 2018	(r329162)
@@ -113,6 +113,23 @@ ENTRY(svm_launch)
 	movq %rdi, SCTX_RDI(%rax)
 	movq %rsi, SCTX_RSI(%rax)
 
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov $16, %ecx	/* 16 iterations, two calls per loop */
+	mov %rsp, %rax
+0:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	sub $1, %ecx
+	jnz 0b
+	mov %rax, %rsp
+
 	/* Restore host state */
 	pop %r15
 	pop %r14

Modified: head/sys/amd64/vmm/intel/vmcs.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmcs.c	Mon Feb 12 14:44:21 2018	(r329161)
+++ head/sys/amd64/vmm/intel/vmcs.c	Mon Feb 12 14:45:27 2018	(r329162)
@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/pcpu.h>
 
@@ -52,6 +53,12 @@ __FBSDID("$FreeBSD$");
 #include <ddb/ddb.h>
 #endif
 
+SYSCTL_DECL(_hw_vmm_vmx);
+
+static int no_flush_rsb;
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW,
+    &no_flush_rsb, 0, "Do not flush RSB upon vmexit");
+
 static uint64_t
 vmcs_fix_regval(uint32_t encoding, uint64_t val)
 {
@@ -403,8 +410,15 @@ vmcs_init(struct vmcs *vmcs)
 		goto done;
 
 	/* instruction pointer */
-	if ((error = vmwrite(VMCS_HOST_RIP, (u_long)vmx_exit_guest)) != 0)
-		goto done;
+	if (no_flush_rsb) {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest)) != 0)
+			goto done;
+	} else {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest_flush_rsb)) != 0)
+			goto done;
+	}
 
 	/* link pointer */
 	if ((error = vmwrite(VMCS_LINK_POINTER, ~0)) != 0)
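Editor's note (not part of the commit message): the SYSCTL_INT added above
should surface as a hw.vmm.vmx.no_flush_rsb knob; the node name is inferred
from SYSCTL_DECL(_hw_vmm_vmx) and the OID_AUTO name, it is not spelled out in
the commit.  Setting it to a non-zero value, e.g.

	sysctl hw.vmm.vmx.no_flush_rsb=1

makes the VMX code use the original vmx_exit_guest entry point instead of
vmx_exit_guest_flush_rsb.  Because VMCS_HOST_RIP is written in vmcs_init(),
toggling the knob only affects vCPUs initialized afterwards; already-running
guests keep whichever exit path they were created with.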
Modified: head/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.h	Mon Feb 12 14:44:21 2018	(r329161)
+++ head/sys/amd64/vmm/intel/vmx.h	Mon Feb 12 14:45:27 2018	(r329162)
@@ -150,5 +150,6 @@ u_long vmx_fix_cr4(u_long cr4);
 int	vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
 
 extern char	vmx_exit_guest[];
+extern char	vmx_exit_guest_flush_rsb[];
 
 #endif

Modified: head/sys/amd64/vmm/intel/vmx_support.S
==============================================================================
--- head/sys/amd64/vmm/intel/vmx_support.S	Mon Feb 12 14:44:21 2018	(r329161)
+++ head/sys/amd64/vmm/intel/vmx_support.S	Mon Feb 12 14:45:27 2018	(r329162)
@@ -42,6 +42,29 @@
 #define	VLEAVE	pop %rbp
 
 /*
+ * Save the guest context.
+ */
+#define	VMX_GUEST_SAVE						\
+	movq %rdi,VMXCTX_GUEST_RDI(%rsp);			\
+	movq %rsi,VMXCTX_GUEST_RSI(%rsp);			\
+	movq %rdx,VMXCTX_GUEST_RDX(%rsp);			\
+	movq %rcx,VMXCTX_GUEST_RCX(%rsp);			\
+	movq %r8,VMXCTX_GUEST_R8(%rsp);				\
+	movq %r9,VMXCTX_GUEST_R9(%rsp);				\
+	movq %rax,VMXCTX_GUEST_RAX(%rsp);			\
+	movq %rbx,VMXCTX_GUEST_RBX(%rsp);			\
+	movq %rbp,VMXCTX_GUEST_RBP(%rsp);			\
+	movq %r10,VMXCTX_GUEST_R10(%rsp);			\
+	movq %r11,VMXCTX_GUEST_R11(%rsp);			\
+	movq %r12,VMXCTX_GUEST_R12(%rsp);			\
+	movq %r13,VMXCTX_GUEST_R13(%rsp);			\
+	movq %r14,VMXCTX_GUEST_R14(%rsp);			\
+	movq %r15,VMXCTX_GUEST_R15(%rsp);			\
+	movq %cr2,%rdi;						\
+	movq %rdi,VMXCTX_GUEST_CR2(%rsp);			\
+	movq %rsp,%rdi;
+
+/*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
  * On "return" all registers are updated to reflect guest state. The two
@@ -211,31 +234,55 @@ inst_error:
  * The VMCS-restored %rsp points to the struct vmxctx
  */
 	ALIGN_TEXT
-	.globl	vmx_exit_guest
-vmx_exit_guest:
+	.globl	vmx_exit_guest_flush_rsb
+vmx_exit_guest_flush_rsb:
 	/*
 	 * Save guest state that is not automatically saved in the vmcs.
 	 */
-	movq %rdi,VMXCTX_GUEST_RDI(%rsp)
-	movq %rsi,VMXCTX_GUEST_RSI(%rsp)
-	movq %rdx,VMXCTX_GUEST_RDX(%rsp)
-	movq %rcx,VMXCTX_GUEST_RCX(%rsp)
-	movq %r8,VMXCTX_GUEST_R8(%rsp)
-	movq %r9,VMXCTX_GUEST_R9(%rsp)
-	movq %rax,VMXCTX_GUEST_RAX(%rsp)
-	movq %rbx,VMXCTX_GUEST_RBX(%rsp)
-	movq %rbp,VMXCTX_GUEST_RBP(%rsp)
-	movq %r10,VMXCTX_GUEST_R10(%rsp)
-	movq %r11,VMXCTX_GUEST_R11(%rsp)
-	movq %r12,VMXCTX_GUEST_R12(%rsp)
-	movq %r13,VMXCTX_GUEST_R13(%rsp)
-	movq %r14,VMXCTX_GUEST_R14(%rsp)
-	movq %r15,VMXCTX_GUEST_R15(%rsp)
+	VMX_GUEST_SAVE
 
-	movq %cr2,%rdi
-	movq %rdi,VMXCTX_GUEST_CR2(%rsp)
+	/*
+	 * Deactivate guest pmap from this cpu.
+	 */
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %r10d
+	LK btrl	%r10d, PM_ACTIVE(%r11)
 
-	movq %rsp,%rdi
+	VMX_HOST_RESTORE
+
+	VMX_GUEST_CLOBBER
+
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov $16, %ecx	/* 16 iterations, two calls per loop */
+	mov %rsp, %rax
+0:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	sub $1, %ecx
+	jnz 0b
+	mov %rax, %rsp
+
+	/*
+	 * This will return to the caller of 'vmx_enter_guest()' with a return
+	 * value of VMX_GUEST_VMEXIT.
+	 */
+	movl	$VMX_GUEST_VMEXIT, %eax
+	VLEAVE
+	ret
+
+	.globl	vmx_exit_guest
+vmx_exit_guest:
+	/*
+	 * Save guest state that is not automatically saved in the vmcs.
+	 */
+	VMX_GUEST_SAVE
 
 	/*
 	 * Deactivate guest pmap from this cpu.