Date: Thu, 28 May 2020 16:34:53 +0000 (UTC)
From: Konstantin Belousov <kib@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-releng@freebsd.org
Subject: svn commit: r361588 - in releng/11.4/sys: amd64/amd64 i386/i386 x86/include x86/x86
Message-ID: <202005281634.04SGYrs9087395@repo.freebsd.org>
Author: kib
Date: Thu May 28 16:34:53 2020
New Revision: 361588
URL: https://svnweb.freebsd.org/changeset/base/361588

Log:
  MFC r361299, r361302:
  MFstable/11 r361558, r361302:

  Do not consider CAP_RDCL_NO as an indicator for all MDS
  vulnerabilities handled by hardware.

  amd64: Add a knob to flush RSB on context switches if machine has SMEP.

  Approved by:	re (gjb)

Modified:
  releng/11.4/sys/amd64/amd64/cpu_switch.S
  releng/11.4/sys/amd64/amd64/initcpu.c
  releng/11.4/sys/amd64/amd64/support.S
  releng/11.4/sys/i386/i386/support.s
  releng/11.4/sys/x86/include/x86_var.h
  releng/11.4/sys/x86/x86/cpu_machdep.c
Directory Properties:
  releng/11.4/   (props changed)

Modified: releng/11.4/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- releng/11.4/sys/amd64/amd64/cpu_switch.S	Thu May 28 14:56:11 2020	(r361587)
+++ releng/11.4/sys/amd64/amd64/cpu_switch.S	Thu May 28 16:34:53 2020	(r361588)
@@ -235,6 +235,8 @@ done_load_dr:
 	movq	%rax,(%rsp)
 	movq	PCPU(CURTHREAD),%rdi
 	call	fpu_activate_sw
+	cmpb	$0,cpu_flush_rsb_ctxsw(%rip)
+	jne	rsb_flush
 	ret
 
 /*

Modified: releng/11.4/sys/amd64/amd64/initcpu.c
==============================================================================
--- releng/11.4/sys/amd64/amd64/initcpu.c	Thu May 28 14:56:11 2020	(r361587)
+++ releng/11.4/sys/amd64/amd64/initcpu.c	Thu May 28 16:34:53 2020	(r361588)
@@ -232,13 +232,27 @@ initializecpu(void)
 		cr4 |= CR4_FSGSBASE;
 
 	/*
+	 * If SMEP is present, we only need to flush RSB (by default)
+	 * on context switches, to prevent cross-process ret2spec
+	 * attacks.  Do it automatically if ibrs_disable is set, to
+	 * complete the mitigation.
+	 *
 	 * Postpone enabling the SMEP on the boot CPU until the page
 	 * tables are switched from the boot loader identity mapping
 	 * to the kernel tables.  The boot loader enables the U bit in
 	 * its tables.
 	 */
-	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
-		cr4 |= CR4_SMEP;
+	if (IS_BSP()) {
+		if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
+		    !TUNABLE_INT_FETCH(
+		    "machdep.mitigations.cpu_flush_rsb_ctxsw",
+		    &cpu_flush_rsb_ctxsw) &&
+		    hw_ibrs_disable)
+			cpu_flush_rsb_ctxsw = 1;
+	} else {
+		if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
+			cr4 |= CR4_SMEP;
+	}
 	load_cr4(cr4);
 	if ((amd_feature & AMDID_NX) != 0) {
 		msr = rdmsr(MSR_EFER) | EFER_NXE;
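The IS_BSP() branch above reduces to a small decision rule.  A
hypothetical standalone C sketch follows (function and parameter
names are illustrative, not kernel code; TUNABLE_INT_FETCH() returns
non-zero when the loader tunable exists, storing its value into the
target variable):

#include <stdbool.h>

/*
 * Mirror of the auto-enable logic: without SMEP the knob is never
 * consulted; an explicit loader tunable wins; otherwise the flush is
 * enabled only when IBRS is disabled, to complete the mitigation.
 */
static int
flush_rsb_ctxsw_wanted(bool has_smep, bool tunable_found,
    int tunable_value, bool ibrs_disabled)
{
	if (!has_smep)
		return (0);
	if (tunable_found)
		return (tunable_value);
	return (ibrs_disabled ? 1 : 0);
}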
Modified: releng/11.4/sys/amd64/amd64/support.S
==============================================================================
--- releng/11.4/sys/amd64/amd64/support.S	Thu May 28 14:56:11 2020	(r361587)
+++ releng/11.4/sys/amd64/amd64/support.S	Thu May 28 16:34:53 2020	(r361588)
@@ -832,23 +832,27 @@ ENTRY(pmap_pti_pcid_invlrng)
 	retq
 
 	.altmacro
-	.macro	ibrs_seq_label l
-handle_ibrs_\l:
+	.macro	rsb_seq_label l
+rsb_seq_\l:
 	.endm
-	.macro	ibrs_call_label l
-	call	handle_ibrs_\l
+	.macro	rsb_call_label l
+	call	rsb_seq_\l
 	.endm
-	.macro	ibrs_seq count
+	.macro	rsb_seq count
 	ll=1
 	.rept	\count
-	ibrs_call_label	%(ll)
+	rsb_call_label	%(ll)
 	nop
-	ibrs_seq_label %(ll)
+	rsb_seq_label %(ll)
 	addq	$8,%rsp
 	ll=ll+1
 	.endr
 	.endm
 
+ENTRY(rsb_flush)
+	rsb_seq	32
+	ret
+
 /* all callers already saved %rax, %rdx, and %rcx */
 ENTRY(handle_ibrs_entry)
 	cmpb	$0,hw_ibrs_ibpb_active(%rip)
@@ -860,8 +864,7 @@ ENTRY(handle_ibrs_entry)
 	wrmsr
 	movb	$1,PCPU(IBPB_SET)
 	testl	$CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
-	jne	1f
-	ibrs_seq	32
+	je	rsb_flush
 1:	ret
 END(handle_ibrs_entry)

Modified: releng/11.4/sys/i386/i386/support.s
==============================================================================
--- releng/11.4/sys/i386/i386/support.s	Thu May 28 14:56:11 2020	(r361587)
+++ releng/11.4/sys/i386/i386/support.s	Thu May 28 16:34:53 2020	(r361588)
@@ -819,8 +819,30 @@ msr_onfault:
 	movl	$EFAULT,%eax
 	ret
 
-ENTRY(handle_ibrs_entry)
+	.altmacro
+	.macro	rsb_seq_label l
+rsb_seq_\l:
+	.endm
+	.macro	rsb_call_label l
+	call	rsb_seq_\l
+	.endm
+	.macro	rsb_seq count
+	ll=1
+	.rept	\count
+	rsb_call_label	%(ll)
+	nop
+	rsb_seq_label %(ll)
+	addl	$4,%esp
+	ll=ll+1
+	.endr
+	.endm
+
+ENTRY(rsb_flush)
+	rsb_seq	32
 	ret
+
+ENTRY(handle_ibrs_entry)
+	jmp	rsb_flush
 END(handle_ibrs_entry)
 
 ENTRY(handle_ibrs_exit)

Modified: releng/11.4/sys/x86/include/x86_var.h
==============================================================================
--- releng/11.4/sys/x86/include/x86_var.h	Thu May 28 14:56:11 2020	(r361587)
+++ releng/11.4/sys/x86/include/x86_var.h	Thu May 28 16:34:53 2020	(r361588)
@@ -86,6 +86,7 @@ extern	int	hw_ibrs_ibpb_active;
 extern	int	hw_mds_disable;
 extern	int	hw_ssb_active;
 extern	int	x86_taa_enable;
+extern	int	cpu_flush_rsb_ctxsw;
 
 struct	pcb;
 struct	thread;
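The rsb_seq macro above stuffs the Return Stack Buffer: each call
pushes a prediction entry, and the addq $8,%rsp (addl $4,%esp on
i386) discards the architectural return address without a matching
ret, so after 32 iterations the RSB holds only benign entries.  A
rough two-iteration illustration as GCC extended inline asm follows
(amd64, illustrative only; the kernel expands the assembler macro
directly, and in userland a call in inline asm can clobber the red
zone):

/* Sketch of what "rsb_seq 2" expands to, as a C inline. */
static inline void
rsb_fill_two(void)
{
	__asm__ __volatile__(
	    "call	1f\n\t"		/* RSB entry now points at the nop */
	    "nop\n"
	    "1:	addq	$8,%%rsp\n\t"	/* drop return address, no ret */
	    "call	2f\n\t"
	    "nop\n"
	    "2:	addq	$8,%%rsp"	/* stack balanced, RSB stuffed */
	    : : : "memory");
}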
Modified: releng/11.4/sys/x86/x86/cpu_machdep.c
==============================================================================
--- releng/11.4/sys/x86/x86/cpu_machdep.c	Thu May 28 14:56:11 2020	(r361587)
+++ releng/11.4/sys/x86/x86/cpu_machdep.c	Thu May 28 16:34:53 2020	(r361588)
@@ -1049,11 +1049,11 @@ hw_mds_recalculate(void)
 	 * reported.  For instance, hypervisor might unknowingly
 	 * filter the cap out.
 	 * For the similar reasons, and for testing, allow to enable
-	 * mitigation even for RDCL_NO or MDS_NO caps.
+	 * mitigation even when MDS_NO cap is set.
 	 */
 	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
-	    ((cpu_ia32_arch_caps & (IA32_ARCH_CAP_RDCL_NO |
-	    IA32_ARCH_CAP_MDS_NO)) != 0 && hw_mds_disable == 3)) {
+	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
+	    hw_mds_disable == 3)) {
 		mds_handler = mds_handler_void;
 	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
 	    hw_mds_disable == 3) || hw_mds_disable == 1) {
@@ -1360,3 +1360,7 @@ SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
     sysctl_taa_state_handler, "A",
     "TAA Mitigation state");
 
+int __read_frequently cpu_flush_rsb_ctxsw;
+SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
+    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
+    "Flush Return Stack Buffer on context switch");
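Since the new sysctl is CTLFLAG_RW it can also be flipped at runtime,
while CTLFLAG_NOFETCH means the corresponding loader tunable is
fetched by hand in initializecpu() (see the initcpu.c hunk) rather
than by the sysctl machinery.  A minimal userland sketch querying the
knob with sysctlbyname(3), illustrative only:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int val;
	size_t len = sizeof(val);

	/* Read the mitigation knob defined by the diff above. */
	if (sysctlbyname("machdep.mitigations.flush_rsb_ctxsw",
	    &val, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (EXIT_FAILURE);
	}
	printf("flush_rsb_ctxsw: %d\n", val);
	return (EXIT_SUCCESS);
}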