Date:      Mon, 25 Nov 2019 09:53:50 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject:   svn commit: r355087 - in stable/12/sys: dev/hwpmc x86/include x86/x86
Message-ID:  <201911250953.xAP9ron1014729@repo.freebsd.org>

Author: kib
Date: Mon Nov 25 09:53:49 2019
New Revision: 355087
URL: https://svnweb.freebsd.org/changeset/base/355087

Log:
  MFC r354828:
  Add x86 msr tweak KPI.
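
  The new x86_msr_op() KPI folds the three MSR update patterns touched by
  this change into one entry point: and-not, or, or a plain write of a
  value, executed either on the local CPU, on every CPU by rebinding the
  calling thread, or on every CPU at once via smp_rendezvous().  A
  minimal caller sketch, mirroring the hwpmc conversion below:

	/* Write 1 to MSR_TSX_FORCE_ABORT on all CPUs via rendezvous. */
	x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS | MSR_OP_WRITE, 1);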

Modified:
  stable/12/sys/dev/hwpmc/hwpmc_core.c
  stable/12/sys/x86/include/x86_var.h
  stable/12/sys/x86/x86/cpu_machdep.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/dev/hwpmc/hwpmc_core.c
==============================================================================
--- stable/12/sys/dev/hwpmc/hwpmc_core.c	Mon Nov 25 09:43:36 2019	(r355086)
+++ stable/12/sys/dev/hwpmc/hwpmc_core.c	Mon Nov 25 09:53:49 2019	(r355087)
@@ -220,15 +220,6 @@ iaf_reload_count_to_perfctr_value(pmc_value_t rlc)
 	return (1ULL << core_iaf_width) - rlc;
 }
 
-static void
-tweak_tsx_force_abort(void *arg)
-{
-	u_int val;
-
-	val = (uintptr_t)arg;
-	wrmsr(MSR_TSX_FORCE_ABORT, val);
-}
-
 static int
 iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
     const struct pmc_op_pmcallocate *a)
@@ -270,7 +261,8 @@ iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_TSXFA) != 0 &&
 	    !pmc_tsx_force_abort_set) {
 		pmc_tsx_force_abort_set = true;
-		smp_rendezvous(NULL, tweak_tsx_force_abort, NULL, (void *)1);
+		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS |
+		    MSR_OP_WRITE, 1);
 	}
 
 	flags = 0;
@@ -411,7 +403,8 @@ iaf_release_pmc(int cpu, int ri, struct pmc *pmc)
 	MPASS(pmc_alloc_refs > 0);
 	if (pmc_alloc_refs-- == 1 && pmc_tsx_force_abort_set) {
 		pmc_tsx_force_abort_set = false;
-		smp_rendezvous(NULL, tweak_tsx_force_abort, NULL, (void *)0);
+		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS |
+		    MSR_OP_WRITE, 0);
 	}
 
 	return (0);

Modified: stable/12/sys/x86/include/x86_var.h
==============================================================================
--- stable/12/sys/x86/include/x86_var.h	Mon Nov 25 09:43:36 2019	(r355086)
+++ stable/12/sys/x86/include/x86_var.h	Mon Nov 25 09:53:49 2019	(r355087)
@@ -155,4 +155,12 @@ int	user_dbreg_trap(register_t dr6);
 int	minidumpsys(struct dumperinfo *);
 struct pcb *get_pcb_td(struct thread *td);
 
+#define	MSR_OP_ANDNOT		0x00000001
+#define	MSR_OP_OR		0x00000002
+#define	MSR_OP_WRITE		0x00000003
+#define	MSR_OP_LOCAL		0x10000000
+#define	MSR_OP_SCHED		0x20000000
+#define	MSR_OP_RENDEZVOUS	0x30000000
+void x86_msr_op(u_int msr, u_int op, uint64_t arg1);
+
 #endif
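
The op word packs two independent selectors: the low byte (masked by
MSR_OP_OP_MASK in cpu_machdep.c below) picks the operation, and the high
nibble (MSR_OP_EXMODE_MASK) picks the execution mode, so a caller ors
together exactly one of each.  An illustrative composition, patterned on
the hw_ssb_set() conversion further down:

	/* Clear the SSBD bit in IA32_SPEC_CTRL on the local CPU only. */
	x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_LOCAL | MSR_OP_ANDNOT,
	    IA32_SPEC_CTRL_SSBD);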

Modified: stable/12/sys/x86/x86/cpu_machdep.c
==============================================================================
--- stable/12/sys/x86/x86/cpu_machdep.c	Mon Nov 25 09:43:36 2019	(r355086)
+++ stable/12/sys/x86/x86/cpu_machdep.c	Mon Nov 25 09:53:49 2019	(r355087)
@@ -111,7 +111,80 @@ static u_int	cpu_reset_proxyid;
 static volatile u_int	cpu_reset_proxy_active;
 #endif
 
+struct msr_op_arg {
+	u_int msr;
+	int op;
+	uint64_t arg1;
+};
 
+static void
+x86_msr_op_one(void *argp)
+{
+	struct msr_op_arg *a;
+	uint64_t v;
+
+	a = argp;
+	switch (a->op) {
+	case MSR_OP_ANDNOT:
+		v = rdmsr(a->msr);
+		v &= ~a->arg1;
+		wrmsr(a->msr, v);
+		break;
+	case MSR_OP_OR:
+		v = rdmsr(a->msr);
+		v |= a->arg1;
+		wrmsr(a->msr, v);
+		break;
+	case MSR_OP_WRITE:
+		wrmsr(a->msr, a->arg1);
+		break;
+	}
+}
+
+#define	MSR_OP_EXMODE_MASK	0xf0000000
+#define	MSR_OP_OP_MASK		0x000000ff
+
+void
+x86_msr_op(u_int msr, u_int op, uint64_t arg1)
+{
+	struct thread *td;
+	struct msr_op_arg a;
+	u_int exmode;
+	int bound_cpu, i, is_bound;
+
+	a.op = op & MSR_OP_OP_MASK;
+	MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
+	    a.op == MSR_OP_WRITE);
+	exmode = op & MSR_OP_EXMODE_MASK;
+	MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED ||
+	    exmode == MSR_OP_RENDEZVOUS);
+	a.msr = msr;
+	a.arg1 = arg1;
+	switch (exmode) {
+	case MSR_OP_LOCAL:
+		x86_msr_op_one(&a);
+		break;
+	case MSR_OP_SCHED:
+		td = curthread;
+		thread_lock(td);
+		is_bound = sched_is_bound(td);
+		bound_cpu = td->td_oncpu;
+		CPU_FOREACH(i) {
+			sched_bind(td, i);
+			x86_msr_op_one(&a);
+		}
+		if (is_bound)
+			sched_bind(td, bound_cpu);
+		else
+			sched_unbind(td);
+		thread_unlock(td);
+		break;
+	case MSR_OP_RENDEZVOUS:
+		smp_rendezvous(NULL, x86_msr_op_one, NULL, &a);
+		break;
+	}
+}
+
 /*
  * Machine dependent boot() routine
  *
@@ -791,18 +864,10 @@ SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw
 void
 hw_ibrs_recalculate(void)
 {
-	uint64_t v;
-
 	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
-		if (hw_ibrs_disable) {
-			v = rdmsr(MSR_IA32_SPEC_CTRL);
-			v &= ~(uint64_t)IA32_SPEC_CTRL_IBRS;
-			wrmsr(MSR_IA32_SPEC_CTRL, v);
-		} else {
-			v = rdmsr(MSR_IA32_SPEC_CTRL);
-			v |= IA32_SPEC_CTRL_IBRS;
-			wrmsr(MSR_IA32_SPEC_CTRL, v);
-		}
+		x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_LOCAL |
+		    (hw_ibrs_disable ? MSR_OP_ANDNOT : MSR_OP_OR),
+		    IA32_SPEC_CTRL_IBRS);
 		return;
 	}
 	hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 &&
@@ -834,46 +899,17 @@ SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_ac
     "Speculative Store Bypass Disable active");
 
 static void
-hw_ssb_set_one(bool enable)
-{
-	uint64_t v;
-
-	v = rdmsr(MSR_IA32_SPEC_CTRL);
-	if (enable)
-		v |= (uint64_t)IA32_SPEC_CTRL_SSBD;
-	else
-		v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD;
-	wrmsr(MSR_IA32_SPEC_CTRL, v);
-}
-
-static void
 hw_ssb_set(bool enable, bool for_all_cpus)
 {
-	struct thread *td;
-	int bound_cpu, i, is_bound;
 
 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
 		hw_ssb_active = 0;
 		return;
 	}
 	hw_ssb_active = enable;
-	if (for_all_cpus) {
-		td = curthread;
-		thread_lock(td);
-		is_bound = sched_is_bound(td);
-		bound_cpu = td->td_oncpu;
-		CPU_FOREACH(i) {
-			sched_bind(td, i);
-			hw_ssb_set_one(enable);
-		}
-		if (is_bound)
-			sched_bind(td, bound_cpu);
-		else
-			sched_unbind(td);
-		thread_unlock(td);
-	} else {
-		hw_ssb_set_one(enable);
-	}
+	x86_msr_op(MSR_IA32_SPEC_CTRL,
+	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
+	    (for_all_cpus ? MSR_OP_SCHED : MSR_OP_LOCAL), IA32_SPEC_CTRL_SSBD);
 }
 
 void
@@ -1136,43 +1172,13 @@ enum {
 };
 
 static void
-taa_set_one(bool enable)
-{
-	uint64_t v;
-
-	v = rdmsr(MSR_IA32_TSX_CTRL);
-	if (enable)
-		v |= (uint64_t)(IA32_TSX_CTRL_RTM_DISABLE |
-		    IA32_TSX_CTRL_TSX_CPUID_CLEAR);
-	else
-		v &= ~(uint64_t)(IA32_TSX_CTRL_RTM_DISABLE |
-		    IA32_TSX_CTRL_TSX_CPUID_CLEAR);
-
-	wrmsr(MSR_IA32_TSX_CTRL, v);
-}
-
-static void
 taa_set(bool enable, bool all)
 {
-	struct thread *td;
-	int bound_cpu, i, is_bound;
 
-	if (all) {
-		td = curthread;
-		thread_lock(td);
-		is_bound = sched_is_bound(td);
-		bound_cpu = td->td_oncpu;
-		CPU_FOREACH(i) {
-			sched_bind(td, i);
-			taa_set_one(enable);
-		}
-		if (is_bound)
-			sched_bind(td, bound_cpu);
-		else
-			sched_unbind(td);
-		thread_unlock(td);
-	} else
-		taa_set_one(enable);
+	x86_msr_op(MSR_IA32_TSX_CTRL,
+	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
+	    (all ? MSR_OP_RENDEZVOUS : MSR_OP_LOCAL),
+	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR);
 }
 
 void
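
The two all-CPU modes differ in mechanism: MSR_OP_SCHED visits the CPUs
one at a time by sched_bind()ing the calling thread (as the removed
hw_ssb_set() loop did), while MSR_OP_RENDEZVOUS updates every CPU
simultaneously from smp_rendezvous() handlers (as taa_set() and hwpmc
now do).  A hypothetical caller sketch, not taken from this commit,
using the sched mode:

	/* Set IA32_SPEC_CTRL_IBRS on each CPU in turn (hypothetical use). */
	x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_SCHED | MSR_OP_OR,
	    IA32_SPEC_CTRL_IBRS);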


