Skip site navigation (1)Skip section navigation (2)
Date:      Fri, 27 Feb 2026 22:08:45 +0000
From:      Warner Losh <imp@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Cc:        Ali Mashtizadeh <mashti@uwaterloo.ca>
Subject:   git: 00c0a1f0bf6c - main - hwpmc: Fix PMC flags for AMD Zen cores
Message-ID:  <69a215ed.342b2.2e815130@gitrepo.freebsd.org>

index | next in thread | raw e-mail

The branch main has been updated by imp:

URL: https://cgit.FreeBSD.org/src/commit/?id=00c0a1f0bf6c07e63384a389060dfc10924c0ed6

commit 00c0a1f0bf6c07e63384a389060dfc10924c0ed6
Author:     Ali Mashtizadeh <mashti@uwaterloo.ca>
AuthorDate: 2026-02-21 19:07:26 +0000
Commit:     Warner Losh <imp@FreeBSD.org>
CommitDate: 2026-02-27 21:19:40 +0000

    hwpmc: Fix PMC flags for AMD Zen cores
    
    The PMC flags available for DF and L3 counters were not all implemented.
    More importantly, the field encodings for the L3 counters changed in an
    incompatible way between Family 17h and Family 19h.  Similarly, the
    field encodings for the DF counters changed between Family 19h and 1Ah.
    I also added the precise retire flag for the 3rd core counter.
    
    Lastly, I added a warning in the jevents parser because silently ignoring
    unknown fields results in counters being incorrectly programmed.  We
    should not just ignore that.
    
    Sponsored by: Netflix
    
    Reviewed by: imp
    Pull Request: https://github.com/freebsd/freebsd-src/pull/2040
---
 lib/libpmc/libpmc_pmu_util.c    | 71 ++++++++++++++++++++++++++++---
 lib/libpmc/pmu-events/jevents.c | 40 +++++++++++++++++-
 lib/libpmc/pmu-events/json.c    | 14 +++++++
 lib/libpmc/pmu-events/json.h    |  1 +
 sys/dev/hwpmc/hwpmc_amd.c       | 19 ++++++++-
 sys/dev/hwpmc/hwpmc_amd.h       | 93 +++++++++++++++++++++++++++++------------
 6 files changed, 204 insertions(+), 34 deletions(-)

diff --git a/lib/libpmc/libpmc_pmu_util.c b/lib/libpmc/libpmc_pmu_util.c
index de642aa71a18..0832ab32e2f1 100644
--- a/lib/libpmc/libpmc_pmu_util.c
+++ b/lib/libpmc/libpmc_pmu_util.c
@@ -121,6 +121,24 @@ pmu_events_mfr(void)
 	return (mfr);
 }
 
+static int
+pmu_events_x86_family(void)
+{
+	char buf[PMC_CPUID_LEN];
+	size_t s = sizeof(buf);
+	char *cpuid, *family;
+
+	if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
+	    (void *)NULL, 0) == -1)
+		return (-1);
+	cpuid = &buf[0];
+
+	strsep(&cpuid, "-");
+	family = strsep(&cpuid, "-");
+
+	return (strtol(family, NULL, 10));
+}
+
 /*
  *  The Intel fixed mode counters are:
  *	"inst_retired.any",
@@ -208,6 +226,10 @@ struct pmu_event_desc {
 	uint64_t ped_offcore_rsp;
 	uint64_t ped_l3_thread;
 	uint64_t ped_l3_slice;
+	uint32_t ped_sourceid;
+	uint32_t ped_coreid;
+	uint32_t ped_allsources;
+	uint32_t ped_allcores;
 	uint32_t ped_event;
 	uint32_t ped_frontend;
 	uint32_t ped_ldlat;
@@ -347,6 +369,14 @@ pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
 			ped->ped_l3_thread = strtol(value, NULL, 16);
 		else if (strcmp(key, "l3_slice_mask") == 0)
 			ped->ped_l3_slice = strtol(value, NULL, 16);
+		else if (strcmp(key, "sourceid") == 0)
+			ped->ped_sourceid = strtol(value, NULL, 16);
+		else if (strcmp(key, "coreid") == 0)
+			ped->ped_coreid = strtol(value, NULL, 16);
+		else if (strcmp(key, "allcores") == 0)
+			ped->ped_allcores = strtol(value, NULL, 10);
+		else if (strcmp(key, "allsources") == 0)
+			ped->ped_allsources = strtol(value, NULL, 10);
 		else {
 			debug = getenv("PMUDEBUG");
 			if (debug != NULL && strcmp(debug, "true") == 0 && value != NULL)
@@ -486,20 +516,23 @@ static int
 pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
 	struct pmu_event_desc *ped)
 {
+	int cpu_family;
 	struct pmc_md_amd_op_pmcallocate *amd;
 	const struct pmu_event *pe;
 	int idx = -1;
 
+	cpu_family = pmu_events_x86_family();
+
 	amd = &pm->pm_md.pm_amd;
 	if (ped->ped_umask > 0) {
 		pm->pm_caps |= PMC_CAP_QUALIFIER;
-		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
 	}
 	pm->pm_class = PMC_CLASS_K8;
 	pe = pmu_event_get(NULL, event_name, &idx);
 
 	if (pe->pmu == NULL) {
 		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
+		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
 		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
 		if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
 			(pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
@@ -515,14 +548,42 @@ pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
 			amd->pm_amd_config |= AMD_PMC_INVERT;
 		if (pm->pm_caps & PMC_CAP_INTERRUPT)
 			amd->pm_amd_config |= AMD_PMC_INT;
+		if (pm->pm_caps & PMC_CAP_PRECISE)
+			amd->pm_amd_config |= AMD_PMC_PRECISERETIRE;
 	} else if (strcmp("amd_l3", pe->pmu) == 0) {
-		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
+		amd->pm_amd_config |= AMD_PMC_L3_TO_EVENTMASK(ped->ped_event);
+		amd->pm_amd_config |= AMD_PMC_L3_TO_UNITMASK(ped->ped_umask);
 		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
-		amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
-		amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
+		if (cpu_family <= 0x17) {
+			amd->pm_amd_config |=
+			    AMD_PMC_L31_TO_SLICE(ped->ped_l3_slice);
+			amd->pm_amd_config |=
+			    AMD_PMC_L31_TO_CORE(ped->ped_l3_thread);
+		} else {
+			amd->pm_amd_config |=
+			    AMD_PMC_L32_TO_THREAD(ped->ped_l3_thread);
+			amd->pm_amd_config |=
+			    AMD_PMC_L32_TO_SOURCEID(ped->ped_sourceid);
+			amd->pm_amd_config |=
+			    AMD_PMC_L32_TO_COREID(ped->ped_coreid);
+			if (ped->ped_allcores)
+				amd->pm_amd_config |= AMD_PMC_L32_ALLCORES;
+			if (ped->ped_allsources)
+				amd->pm_amd_config |= AMD_PMC_L32_ALLSOURCES;
+		}
 	} else if (strcmp("amd_df", pe->pmu) == 0) {
-		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
 		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
+		if (cpu_family <= 19) {
+			amd->pm_amd_config |=
+			    AMD_PMC_DF1_TO_EVENTMASK(ped->ped_event);
+			amd->pm_amd_config |=
+			    AMD_PMC_DF1_TO_UNITMASK(ped->ped_umask);
+		} else {
+			amd->pm_amd_config |=
+			    AMD_PMC_DF2_TO_EVENTMASK(ped->ped_event);
+			amd->pm_amd_config |=
+			    AMD_PMC_DF2_TO_UNITMASK(ped->ped_umask);
+		}
 	} else {
 		printf("PMC pmu '%s' is not supported!\n", pe->pmu);
 		return (EOPNOTSUPP);
diff --git a/lib/libpmc/pmu-events/jevents.c b/lib/libpmc/pmu-events/jevents.c
index 628ed26c6f9d..4e4f53a0c0d0 100644
--- a/lib/libpmc/pmu-events/jevents.c
+++ b/lib/libpmc/pmu-events/jevents.c
@@ -560,6 +560,10 @@ static int json_events(const char *fn,
 		jsmntok_t *obj = tok++;
 		bool configcode_present = false;
 		char *umask = NULL;
+		char *allcores = NULL;
+		char *allslices = NULL;
+		char *sliceid = NULL;
+		char *threadmask = NULL;
 		char *cmask = NULL;
 		char *inv = NULL;
 		char *any = NULL;
@@ -585,6 +589,22 @@ static int json_events(const char *fn,
 			/* match_field */
 			if (json_streq(map, field, "UMask") && nz) {
 				addfield(map, &umask, "", "umask=", val);
+			} else if (json_streq(map, field, "EnAllCores") && nz) {
+				addfield(map, &allcores, "", "allcores=", val);
+			} else if (json_streq(map, field, "EnAllSlices") && nz) {
+				addfield(map, &allslices, "", "allslices=", val);
+			} else if (json_streq(map, field, "SliceId") && nz) {
+				/*
+				 * We use sourceid because there's a
+				 * discrepancy: the JSON from Linux calls
+				 * it SliceId, which is not the name used by
+				 * AMD in the PPRs.  For Family 19h and
+				 * below the PPRs call it slicemask; see
+				 * the references in hwpmc_amd.h.
+				 */
+				addfield(map, &sliceid, "", "sourceid=", val);
+			} else if (json_streq(map, field, "ThreadMask") && nz) {
+				addfield(map, &threadmask, "", "l3_thread_mask=", val);
 			} else if (json_streq(map, field, "CounterMask") && nz) {
 				addfield(map, &cmask, "", "cmask=", val);
 			} else if (json_streq(map, field, "Invert") && nz) {
@@ -675,8 +695,14 @@ static int json_events(const char *fn,
 				addfield(map, &arch_std, "", "", val);
 				for (s = arch_std; *s; s++)
 					*s = tolower(*s);
+			} else {
+				/*
+				 * We shouldn't ignore unknown fields that
+				 * makes the counter invalid!
+				 */
+				json_copystr(map, field, buf, sizeof(buf));
+				fprintf(stderr, "Unknown field '%s'!\n", buf);
 			}
-			/* ignore unknown fields */
 		}
 		if (precise && je.desc && !strstr(je.desc, "(Precise Event)")) {
 			if (json_streq(map, precise, "2"))
@@ -707,6 +733,14 @@ static int json_events(const char *fn,
 			addfield(map, &event, ",", period, NULL);
 		if (umask)
 			addfield(map, &event, ",", umask, NULL);
+		if (allcores)
+			addfield(map, &event, ",", allcores, NULL);
+		if (allslices)
+			addfield(map, &event, ",", allslices, NULL);
+		if (sliceid)
+			addfield(map, &event, ",", sliceid, NULL);
+		if (threadmask)
+			addfield(map, &event, ",", threadmask, NULL);
 
 		if (je.desc && extra_desc)
 			addfield(map, &je.desc, " ", extra_desc, NULL);
@@ -737,6 +771,10 @@ static int json_events(const char *fn,
 		err = func(data, &je);
 free_strings:
 		free(umask);
+		free(allcores);
+		free(allslices);
+		free(sliceid);
+		free(threadmask);
 		free(cmask);
 		free(inv);
 		free(any);
diff --git a/lib/libpmc/pmu-events/json.c b/lib/libpmc/pmu-events/json.c
index 89cafbc04fb6..66eaf0a74ba6 100644
--- a/lib/libpmc/pmu-events/json.c
+++ b/lib/libpmc/pmu-events/json.c
@@ -160,3 +160,17 @@ int json_streq(char *map, jsmntok_t *t, const char *s)
 	unsigned len = json_len(t);
 	return len == strlen(s) && !strncasecmp(map + t->start, s, len);
 }
+
+int json_copystr(char *map, jsmntok_t *t, char *s, int len)
+{
+	int jlen;
+
+	jlen = json_len(t);
+	if (jlen > len)
+		jlen = len - 1;
+
+	memcpy(s, map + t->start, jlen);
+	s[jlen] = '\0';
+
+	return (jlen);
+}
diff --git a/lib/libpmc/pmu-events/json.h b/lib/libpmc/pmu-events/json.h
index 278ebd32cfb6..89b9c2fba617 100644
--- a/lib/libpmc/pmu-events/json.h
+++ b/lib/libpmc/pmu-events/json.h
@@ -9,6 +9,7 @@ int json_line(char *map, jsmntok_t *t);
 const char *json_name(jsmntok_t *t);
 int json_streq(char *map, jsmntok_t *t, const char *s);
 int json_len(jsmntok_t *t);
+int json_copystr(char *map, jsmntok_t *t, char *s, int len);
 
 extern int verbose;
 
diff --git a/sys/dev/hwpmc/hwpmc_amd.c b/sys/dev/hwpmc/hwpmc_amd.c
index 801b75b39595..b34cbffcffa8 100644
--- a/sys/dev/hwpmc/hwpmc_amd.c
+++ b/sys/dev/hwpmc/hwpmc_amd.c
@@ -347,6 +347,10 @@ amd_allocate_pmc(int cpu __unused, int ri, struct pmc *pm,
 
 	caps = pm->pm_caps;
 
+	if (((caps & PMC_CAP_PRECISE) != 0) &&
+	    ((pd->pd_caps & PMC_CAP_PRECISE) == 0))
+		return (EINVAL);
+
 	PMCDBG2(MDP, ALL, 1,"amd-allocate ri=%d caps=0x%x", ri, caps);
 
 	/* Validate sub-class. */
@@ -360,6 +364,9 @@ amd_allocate_pmc(int cpu __unused, int ri, struct pmc *pm,
 		return (0);
 	}
 
+	/*
+	 * Everything below this is for supporting older processors.
+	 */
 	pe = a->pm_ev;
 
 	/* map ev to the correct event mask code */
@@ -817,6 +824,14 @@ pmc_amd_initialize(void)
 		    "K8-%d", i);
 		d->pm_descr.pd_class = PMC_CLASS_K8;
 		d->pm_descr.pd_caps = AMD_PMC_CAPS;
+		/*
+		 * Zen 5 can precisely count retire events.
+		 *
+		 * Refer to PPR Vol 1 for AMD Family 1Ah Model 02h C1 57238
+		 * Rev. 0.24 September 29, 2024.
+		 */
+		if ((family >= 0x1a) && (i == 2))
+			d->pm_descr.pd_caps |= PMC_CAP_PRECISE;
 		d->pm_descr.pd_width = 48;
 		if ((amd_feature2 & AMDID2_PCXC) != 0) {
 			d->pm_evsel = AMD_PMC_CORE_BASE + 2 * i;
@@ -836,7 +851,7 @@ pmc_amd_initialize(void)
 			snprintf(d->pm_descr.pd_name, PMC_NAME_MAX,
 			    "K8-L3-%d", i);
 			d->pm_descr.pd_class = PMC_CLASS_K8;
-			d->pm_descr.pd_caps = AMD_PMC_CAPS;
+			d->pm_descr.pd_caps = AMD_PMC_L3_CAPS;
 			d->pm_descr.pd_width = 48;
 			d->pm_evsel = AMD_PMC_L3_BASE + 2 * i;
 			d->pm_perfctr = AMD_PMC_L3_BASE + 2 * i + 1;
@@ -852,7 +867,7 @@ pmc_amd_initialize(void)
 			snprintf(d->pm_descr.pd_name, PMC_NAME_MAX,
 			    "K8-DF-%d", i);
 			d->pm_descr.pd_class = PMC_CLASS_K8;
-			d->pm_descr.pd_caps = AMD_PMC_CAPS;
+			d->pm_descr.pd_caps = AMD_PMC_DF_CAPS;
 			d->pm_descr.pd_width = 48;
 			d->pm_evsel = AMD_PMC_DF_BASE + 2 * i;
 			d->pm_perfctr = AMD_PMC_DF_BASE + 2 * i + 1;
diff --git a/sys/dev/hwpmc/hwpmc_amd.h b/sys/dev/hwpmc/hwpmc_amd.h
index be484a1111a2..6d8ab8203942 100644
--- a/sys/dev/hwpmc/hwpmc_amd.h
+++ b/sys/dev/hwpmc/hwpmc_amd.h
@@ -62,21 +62,10 @@
 #define	AMD_PMC_CORE_DEFAULT	6
 #define	AMD_PMC_CORE_MAX	16
 
-/* L3 */
-#define	AMD_PMC_L3_BASE		0xC0010230
-#define	AMD_PMC_L3_DEFAULT	6
-#define	AMD_PMC_L3_MAX		6
-
-/* DF */
-#define	AMD_PMC_DF_BASE		0xC0010240
-#define	AMD_PMC_DF_DEFAULT	4
-#define	AMD_PMC_DF_MAX		64
-
-#define	AMD_NPMCS_K8		4
-#define AMD_NPMCS_MAX		(AMD_PMC_CORE_MAX + AMD_PMC_L3_MAX + \
-				 AMD_PMC_DF_MAX)
-
 #define	AMD_PMC_COUNTERMASK	0xFF000000
+#define AMD_PMC_PRECISERETIRE	(1ULL << 43) /* Only valid for PERF_CTL2 */
+#define	AMD_PMC_HOST		(1ULL << 41)
+#define	AMD_PMC_GUEST		(1ULL << 40)
 #define	AMD_PMC_TO_COUNTER(x)	(((x) << 24) & AMD_PMC_COUNTERMASK)
 #define	AMD_PMC_INVERT		(1 << 23)
 #define	AMD_PMC_ENABLE		(1 << 22)
@@ -85,24 +74,13 @@
 #define	AMD_PMC_EDGE		(1 << 18)
 #define	AMD_PMC_OS		(1 << 17)
 #define	AMD_PMC_USR		(1 << 16)
-#define	AMD_PMC_L3SLICEMASK	(0x000F000000000000)
-#define	AMD_PMC_L3COREMASK	(0xFF00000000000000)
-#define	AMD_PMC_TO_L3SLICE(x)	(((x) << 48) & AMD_PMC_L3SLICEMASK)
-#define	AMD_PMC_TO_L3CORE(x)	(((x) << 56) & AMD_PMC_L3COREMASK)
-
-#define	AMD_PMC_UNITMASK_M	0x10
-#define	AMD_PMC_UNITMASK_O	0x08
-#define	AMD_PMC_UNITMASK_E	0x04
-#define	AMD_PMC_UNITMASK_S	0x02
-#define	AMD_PMC_UNITMASK_I	0x01
-#define	AMD_PMC_UNITMASK_MOESI	0x1F
 
 #define	AMD_PMC_UNITMASK	0xFF00
 #define	AMD_PMC_EVENTMASK 	0xF000000FF
 
 #define	AMD_PMC_TO_UNITMASK(x)	(((x) << 8) & AMD_PMC_UNITMASK)
 #define	AMD_PMC_TO_EVENTMASK(x)	(((x) & 0xFF) | (((uint64_t)(x) & 0xF00) << 24))
-#define	AMD_PMC_TO_EVENTMASK_DF(x)	(((x) & 0xFF) | (((uint64_t)(x) & 0x0F00) << 24)) | (((uint64_t)(x) & 0x3000) << 47)
+
 #define	AMD_VALID_BITS		(AMD_PMC_COUNTERMASK | AMD_PMC_INVERT |	\
 	AMD_PMC_ENABLE | AMD_PMC_INT | AMD_PMC_PC | AMD_PMC_EDGE | 	\
 	AMD_PMC_OS | AMD_PMC_USR | AMD_PMC_UNITMASK | AMD_PMC_EVENTMASK)
@@ -111,6 +89,69 @@
 	PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | 		\
 	PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
 
+/* L3 */
+#define	AMD_PMC_L3_BASE		0xC0010230
+#define	AMD_PMC_L3_DEFAULT	6
+#define	AMD_PMC_L3_MAX		6
+
+/*
+ * L3 counters change their encoding slightly between Family 17h and Family 19h
+ * processors.
+ *
+ * Refer to the following documents for the L3 fields:
+ * PPR for AMD Family 17h Model 20h A1 55772-A1 Rev. 3.08 April 14, 2021
+ * PPR for AMD Family 19h Model 51h A1 56569-A1 Rev. 3.03 September 21, 2021
+ * PPR for AMD Family 1Ah Model 02h C1 57238 Rev. 0.24 September 29, 2024
+ */
+#define	AMD_PMC_L31_SLICEMASK	(0x000F000000000000ULL)
+#define	AMD_PMC_L31_COREMASK	(0xFF00000000000000ULL)
+
+#define	AMD_PMC_L31_TO_SLICE(x)	(((uint64_t)(x) << 48) & AMD_PMC_L31_SLICEMASK)
+#define	AMD_PMC_L31_TO_CORE(x)	(((uint64_t)(x) << 56) & AMD_PMC_L31_COREMASK)
+
+#define	AMD_PMC_L32_THREADMASK	(0x0F00000000000000ULL)
+#define	AMD_PMC_L32_SOURCEMASK	(0x0007000000000000ULL)
+#define	AMD_PMC_L32_ALLCORES	(1ULL << 47)
+#define	AMD_PMC_L32_ALLSOURCES	(1ULL << 46)
+#define	AMD_PMC_L32_COREMASK	(0x00001C0000000000ULL)
+
+#define	AMD_PMC_L32_TO_THREAD(x) (((uint64_t)(x) << 56) & AMD_PMC_L32_THREADMASK)
+#define	AMD_PMC_L32_TO_SOURCEID(x) (((uint64_t)(x) << 48) & AMD_PMC_L32_SOURCEMASK)
+#define	AMD_PMC_L32_TO_COREID(x) (((uint64_t)(x) << 42) & AMD_PMC_L32_COREMASK)
+
+#define	AMD_PMC_L3_TO_UNITMASK(x)	(((x) << 8) & AMD_PMC_UNITMASK)
+#define	AMD_PMC_L3_TO_EVENTMASK(x)	((x) & 0xFF)
+
+#define AMD_PMC_L3_CAPS		(PMC_CAP_READ | PMC_CAP_WRITE | \
+	PMC_CAP_QUALIFIER | PMC_CAP_DOMWIDE)
+
+/* DF */
+#define	AMD_PMC_DF_BASE		0xC0010240
+#define	AMD_PMC_DF_DEFAULT	4
+#define	AMD_PMC_DF_MAX		64
+
+#define AMD_PMC_DF_CAPS		(PMC_CAP_READ | PMC_CAP_WRITE | \
+	PMC_CAP_QUALIFIER | PMC_CAP_DOMWIDE)
+
+/*
+ * DF counters change their encoding between Family 19h and Family 1Ah
+ * processors.
+ *
+ * Refer to the same documents as the L3 counters.
+ */
+#define	AMD_PMC_DF1_TO_EVENTMASK(x)	(((x) & 0xFF) | \
+	(((uint64_t)(x) & 0x0F00) << 24) | (((uint64_t)(x) & 0x3000) << 47))
+#define AMD_PMC_DF1_TO_UNITMASK(x)	(((x) & 0xFF) << 8)
+
+#define	AMD_PMC_DF2_TO_EVENTMASK(x)	(((x) & 0xFF) | \
+	(((uint64_t)(x) & 0x7F00) << 24))
+#define AMD_PMC_DF2_TO_UNITMASK(x)	((((x) & 0xFF) << 8) | \
+	(((uint64_t)(x) & 0x0F00) << 16))
+
+#define	AMD_NPMCS_K8		4
+#define AMD_NPMCS_MAX		(AMD_PMC_CORE_MAX + AMD_PMC_L3_MAX + \
+				 AMD_PMC_DF_MAX)
+
 #define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
 #define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
 


home | help

Want to link to this message? Use this
URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69a215ed.342b2.2e815130>