Date:      Wed, 20 May 2020 22:00:31 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r361302 - in head: share/man/man7 sys/amd64/amd64 sys/i386/i386 sys/x86/include sys/x86/x86
Message-ID:  <202005202200.04KM0VbJ014990@repo.freebsd.org>

Author: kib
Date: Wed May 20 22:00:31 2020
New Revision: 361302
URL: https://svnweb.freebsd.org/changeset/base/361302

Log:
  amd64: Add a knob to flush RSB on context switches if machine has SMEP.
  
  The flush is needed to prevent cross-process ret2spec, which is not
  mitigated on kernel entry when SMEP is present, even if IBPB is
  enabled.
  While there, add an i386 RSB flush.
  
  Reported by:	Anthony Steinhauser <asteinhauser@google.com>
  Reviewed by:	markj, Anthony Steinhauser
  Discussed with:	philip
  admbugs:	961
  Sponsored by:	The FreeBSD Foundation
  MFC after:	1 week
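
[Illustration, not part of the commit message: the knob is a normal
read-write sysctl, so it can be inspected and toggled at runtime.  The
values shown are hypothetical; the actual default depends on SMEP
support and the IBRS setting of the machine.]

	# sysctl machdep.mitigations.flush_rsb_ctxsw
	machdep.mitigations.flush_rsb_ctxsw: 1
	# sysctl machdep.mitigations.flush_rsb_ctxsw=0
	machdep.mitigations.flush_rsb_ctxsw: 1 -> 0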

Modified:
  head/share/man/man7/security.7
  head/sys/amd64/amd64/cpu_switch.S
  head/sys/amd64/amd64/initcpu.c
  head/sys/amd64/amd64/support.S
  head/sys/i386/i386/support.s
  head/sys/x86/include/x86_var.h
  head/sys/x86/x86/cpu_machdep.c

Modified: head/share/man/man7/security.7
==============================================================================
--- head/share/man/man7/security.7	Wed May 20 21:41:36 2020	(r361301)
+++ head/share/man/man7/security.7	Wed May 20 22:00:31 2020	(r361302)
@@ -28,7 +28,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd February 4, 2020
+.Dd May 16, 2020
 .Dt SECURITY 7
 .Os
 .Sh NAME
@@ -992,6 +992,13 @@ See also
 .Xr proccontrol 1
 mode
 .Dv kpti .
+.It Dv machdep.mitigations.flush_rsb_ctxsw
+amd64.
+Controls flushing of the Return Stack Buffer on context switch, to
+prevent cross-process ret2spec attacks.
+The flush is only needed, and only enabled by default, if the machine
+supports SMEP; otherwise the IBRS handling already performs the
+necessary flush on kernel entry.
 .It Dv hw.mds_disable
 amd64 and i386.
 Controls Microarchitectural Data Sampling hardware information leak
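
[As an illustration, the same knob can be preset from the loader, in
which case the default decision in initcpu.c below is overridden at
boot.  A hypothetical /boot/loader.conf line:

	machdep.mitigations.flush_rsb_ctxsw=1
]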

Modified: head/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- head/sys/amd64/amd64/cpu_switch.S	Wed May 20 21:41:36 2020	(r361301)
+++ head/sys/amd64/amd64/cpu_switch.S	Wed May 20 22:00:31 2020	(r361302)
@@ -221,6 +221,8 @@ done_load_dr:
 	movq	%rax,(%rsp)
 	movq	PCPU(CURTHREAD),%rdi
 	call	fpu_activate_sw
+	cmpb	$0,cpu_flush_rsb_ctxsw(%rip)
+	jne	rsb_flush
 	ret
 
 	/*

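[Note on the control flow above: the jne is a tail jump, not a call.
When the knob is non-zero, control transfers to rsb_flush (added to
support.S below), and the ret at the end of rsb_flush returns directly
to cpu_switch's caller, with the RSB freshly refilled.  Schematically:

	cpu_switch:
		...
		cmpb	$0,cpu_flush_rsb_ctxsw(%rip)
		jne	rsb_flush	/* taken: rsb_flush's ret returns for us */
		ret			/* not taken: return normally */
]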
Modified: head/sys/amd64/amd64/initcpu.c
==============================================================================
--- head/sys/amd64/amd64/initcpu.c	Wed May 20 21:41:36 2020	(r361301)
+++ head/sys/amd64/amd64/initcpu.c	Wed May 20 22:00:31 2020	(r361302)
@@ -238,12 +238,24 @@ initializecpu(void)
 		cr4 |= CR4_PKE;
 
 	/*
+	 * If SMEP is present, we only need to flush RSB (by default)
+	 * on context switches, to prevent cross-process ret2spec
+	 * attacks.  Do it automatically if ibrs_disable is set, to
+	 * complete the mitigation.
+	 *
 	 * Postpone enabling the SMEP on the boot CPU until the page
 	 * tables are switched from the boot loader identity mapping
 	 * to the kernel tables.  The boot loader enables the U bit in
 	 * its tables.
 	 */
-	if (!IS_BSP()) {
+	if (IS_BSP()) {
+		if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
+		    !TUNABLE_INT_FETCH(
+	    "machdep.mitigations.flush_rsb_ctxsw",
+		    &cpu_flush_rsb_ctxsw) &&
+		    hw_ibrs_disable)
+			cpu_flush_rsb_ctxsw = 1;
+	} else {
 		if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
 			cr4 |= CR4_SMEP;
 		if (cpu_stdext_feature & CPUID_STDEXT_SMAP)

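[A compact restatement of the default policy above, as a sketch; the
helper name and its parameters are hypothetical, not part of the
commit:

	/*
	 * An explicit loader tunable always wins.  Otherwise default to
	 * flushing only when SMEP is present and IBRS is disabled; without
	 * SMEP, the IBRS entry handler already flushes the RSB.
	 */
	static int
	want_flush_rsb_ctxsw(bool smep, bool tunable_set, int tunable_val,
	    bool ibrs_disabled)
	{
		if (tunable_set)
			return (tunable_val != 0);
		return (smep && ibrs_disabled);
	}
]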
Modified: head/sys/amd64/amd64/support.S
==============================================================================
--- head/sys/amd64/amd64/support.S	Wed May 20 21:41:36 2020	(r361301)
+++ head/sys/amd64/amd64/support.S	Wed May 20 22:00:31 2020	(r361302)
@@ -1613,23 +1613,27 @@ ENTRY(pmap_pti_pcid_invlrng)
 	retq
 
 	.altmacro
-	.macro	ibrs_seq_label l
-handle_ibrs_\l:
+	.macro	rsb_seq_label l
+rsb_seq_\l:
 	.endm
-	.macro	ibrs_call_label l
-	call	handle_ibrs_\l
+	.macro	rsb_call_label l
+	call	rsb_seq_\l
 	.endm
-	.macro	ibrs_seq count
+	.macro	rsb_seq count
 	ll=1
 	.rept	\count
-	ibrs_call_label	%(ll)
+	rsb_call_label	%(ll)
 	nop
-	ibrs_seq_label %(ll)
+	rsb_seq_label %(ll)
 	addq	$8,%rsp
 	ll=ll+1
 	.endr
 	.endm
 
+ENTRY(rsb_flush)
+	rsb_seq	32
+	ret
+
 /* all callers already saved %rax, %rdx, and %rcx */
 ENTRY(handle_ibrs_entry)
 	cmpb	$0,hw_ibrs_ibpb_active(%rip)
@@ -1641,8 +1645,7 @@ ENTRY(handle_ibrs_entry)
 	wrmsr
 	movb	$1,PCPU(IBPB_SET)
 	testl	$CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
-	jne	1f
-	ibrs_seq 32
+	je	rsb_flush
 1:	ret
 END(handle_ibrs_entry)
 

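[For reference, rsb_seq 32 expands to 32 repetitions of the pattern
below (expansion sketch, first iteration shown).  Each call pushes a
benign return target into the Return Stack Buffer; the addq then
discards the architectural return address, so the stack stays balanced
while the RSB ends up filled with addresses of harmless nops:

	call	rsb_seq_1	/* pushes a return address into the RSB */
	nop			/* landing pad for a mispredicted ret */
rsb_seq_1:
	addq	$8,%rsp		/* drop the architectural return address */
]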
Modified: head/sys/i386/i386/support.s
==============================================================================
--- head/sys/i386/i386/support.s	Wed May 20 21:41:36 2020	(r361301)
+++ head/sys/i386/i386/support.s	Wed May 20 22:00:31 2020	(r361302)
@@ -445,6 +445,28 @@ msr_onfault:
 	movl	$EFAULT,%eax
 	ret
 
+	.altmacro
+	.macro	rsb_seq_label l
+rsb_seq_\l:
+	.endm
+	.macro	rsb_call_label l
+	call	rsb_seq_\l
+	.endm
+	.macro	rsb_seq count
+	ll=1
+	.rept	\count
+	rsb_call_label	%(ll)
+	nop
+	rsb_seq_label %(ll)
+	addl	$4,%esp
+	ll=ll+1
+	.endr
+	.endm
+
+ENTRY(rsb_flush)
+	rsb_seq	32
+	ret
+
 ENTRY(handle_ibrs_entry)
 	cmpb	$0,hw_ibrs_ibpb_active
 	je	1f
@@ -455,10 +477,9 @@ ENTRY(handle_ibrs_entry)
 	wrmsr
 	movb	$1,PCPU(IBPB_SET)
 	/*
-	 * i386 does not implement SMEP, but the 4/4 split makes this not
-	 * that important.
+	 * i386 does not implement SMEP.
 	 */
-1:	ret
+1:	jmp	rsb_flush
 END(handle_ibrs_entry)
 
 ENTRY(handle_ibrs_exit)

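[A semantic difference from amd64 worth noting: because i386 lacks
SMEP, both paths through handle_ibrs_entry now reach the 1: label and
tail-jump to rsb_flush, i.e. the i386 kernel entry always refills the
RSB:

	hw_ibrs_ibpb_active == 0:  je 1f            ->  1: jmp rsb_flush
	otherwise:                 IBPB via wrmsr   ->  1: jmp rsb_flush
]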
Modified: head/sys/x86/include/x86_var.h
==============================================================================
--- head/sys/x86/include/x86_var.h	Wed May 20 21:41:36 2020	(r361301)
+++ head/sys/x86/include/x86_var.h	Wed May 20 22:00:31 2020	(r361302)
@@ -94,6 +94,7 @@ extern	int	hw_ibrs_ibpb_active;
 extern	int	hw_mds_disable;
 extern	int	hw_ssb_active;
 extern	int	x86_taa_enable;
+extern	int	cpu_flush_rsb_ctxsw;
 
 struct	pcb;
 struct	thread;

Modified: head/sys/x86/x86/cpu_machdep.c
==============================================================================
--- head/sys/x86/x86/cpu_machdep.c	Wed May 20 21:41:36 2020	(r361301)
+++ head/sys/x86/x86/cpu_machdep.c	Wed May 20 22:00:31 2020	(r361302)
@@ -1397,6 +1397,11 @@ SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
     sysctl_taa_state_handler, "A",
     "TAA Mitigation state");
 
+int __read_frequently cpu_flush_rsb_ctxsw;
+SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
+    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
+    "Flush Return Stack Buffer on context switch");
+
 /*
  * Enable and restore kernel text write permissions.
  * Callers must ensure that disable_wp()/restore_wp() are executed

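[A note on CTLFLAG_NOFETCH above: it suppresses the automatic fetch of
the like-named loader tunable, because initcpu.c fetches it by hand so
the code can distinguish "tunable absent" (apply the SMEP/IBRS default)
from "tunable explicitly set to 0".  The pattern, sketched with names
from the hunks above (smep_present is a hypothetical stand-in for the
CPUID_STDEXT_SMEP test):

	int found;

	/* TUNABLE_INT_FETCH() returns non-zero iff the tunable exists. */
	found = TUNABLE_INT_FETCH("machdep.mitigations.flush_rsb_ctxsw",
	    &cpu_flush_rsb_ctxsw);
	if (!found && smep_present && hw_ibrs_disable)
		cpu_flush_rsb_ctxsw = 1;	/* default to on */
]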