Date: Sun, 23 Sep 2018 19:00:06 +0000 (UTC)
From: Mateusz Guzik <mjg@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r338899 - in head/sys: kern sys
Message-ID: <201809231900.w8NJ06QV024682@repo.freebsd.org>
Author: mjg
Date: Sun Sep 23 19:00:06 2018
New Revision: 338899
URL: https://svnweb.freebsd.org/changeset/base/338899

Log:
  Eliminate false sharing in malloc due to statistic collection

  Currently stats are collected in a MAXCPU-sized array which is not
  aligned and suffers enormous false sharing. Fix the problem by
  utilizing per-cpu allocation.

  The counter(9) API is not used here as it is too incomplete and does
  not provide a win over a per-cpu zone sized for the malloc stats
  struct. In particular, stats are reported for each cpu separately by
  just copying what is supposed to be an array element for the given
  cpu.

  This eliminates significant false sharing during malloc-heavy tests,
  e.g. on Skylake. See the review for details.

  Reviewed by:	markj
  Approved by:	re (kib)
  Differential Revision:	https://reviews.freebsd.org/D17289

Modified:
  head/sys/kern/kern_malloc.c
  head/sys/sys/malloc.h

Modified: head/sys/kern/kern_malloc.c
==============================================================================
--- head/sys/kern/kern_malloc.c	Sun Sep 23 16:37:32 2018	(r338898)
+++ head/sys/kern/kern_malloc.c	Sun Sep 23 19:00:06 2018	(r338899)
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/vmmeter.h>
 #include <sys/proc.h>
 #include <sys/sbuf.h>
+#include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/time.h>
 #include <sys/vmem.h>
@@ -173,6 +174,7 @@ struct {
  * declare malloc types.
  */
 static uma_zone_t mt_zone;
+static uma_zone_t mt_stats_zone;
 
 u_long vm_kmem_size;
 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
@@ -368,7 +370,7 @@ malloc_type_zone_allocated(struct malloc_type *mtp, un
 
 	critical_enter();
 	mtip = mtp->ks_handle;
-	mtsp = &mtip->mti_stats[curcpu];
+	mtsp = zpcpu_get(mtip->mti_stats);
 	if (size > 0) {
 		mtsp->mts_memalloced += size;
 		mtsp->mts_numallocs++;
@@ -411,7 +413,7 @@ malloc_type_freed(struct malloc_type *mtp, unsigned lo
 
 	critical_enter();
 	mtip = mtp->ks_handle;
-	mtsp = &mtip->mti_stats[curcpu];
+	mtsp = zpcpu_get(mtip->mti_stats);
 	mtsp->mts_memfreed += size;
 	mtsp->mts_numfrees++;
 
@@ -953,6 +955,9 @@ mallocinit(void *dummy)
 	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
 		kmem_zmax = KMEM_ZMAX;
 
+	mt_stats_zone = uma_zcreate("mt_stats_zone",
+	    sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
 #ifdef INVARIANTS
 	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
@@ -995,6 +1000,7 @@ malloc_init(void *data)
 		panic("malloc_init: bad malloc type magic");
 
 	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
+	mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO);
 	mtp->ks_handle = mtip;
 	mtp_set_subzone(mtp);
 
@@ -1042,8 +1048,8 @@ malloc_uninit(void *data)
 	 * Look for memory leaks.
 	 */
 	temp_allocs = temp_bytes = 0;
-	for (i = 0; i < MAXCPU; i++) {
-		mtsp = &mtip->mti_stats[i];
+	for (i = 0; i <= mp_maxid; i++) {
+		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
 		temp_allocs += mtsp->mts_numallocs;
 		temp_allocs -= mtsp->mts_numfrees;
 		temp_bytes += mtsp->mts_memalloced;
@@ -1056,6 +1062,7 @@ malloc_uninit(void *data)
 	}
 
 	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
+	uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats);
 	uma_zfree_arg(mt_zone, mtip, slab);
 }
 
@@ -1077,6 +1084,7 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
 {
 	struct malloc_type_stream_header mtsh;
 	struct malloc_type_internal *mtip;
+	struct malloc_type_stats *mtsp, zeromts;
 	struct malloc_type_header mth;
 	struct malloc_type *mtp;
 	int error, i;
@@ -1089,6 +1097,8 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
 	mtx_lock(&malloc_mtx);
 
+	bzero(&zeromts, sizeof(zeromts));
+
 	/*
 	 * Insert stream header.
 	 */
@@ -1114,10 +1124,17 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
 		/*
 		 * Insert type statistics for each CPU.
 		 */
-		for (i = 0; i < MAXCPU; i++) {
-			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
-			    sizeof(mtip->mti_stats[i]));
+		for (i = 0; i <= mp_maxid; i++) {
+			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
+			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
 		}
+		/*
+		 * Fill in the missing CPUs.
+		 */
+		for (; i < MAXCPU; i++) {
+			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
+		}
+
 	}
 	mtx_unlock(&malloc_mtx);
 	error = sbuf_finish(&sbuf);
@@ -1170,6 +1187,7 @@ restart:
 DB_SHOW_COMMAND(malloc, db_show_malloc)
 {
 	struct malloc_type_internal *mtip;
+	struct malloc_type_internal *mtsp;
 	struct malloc_type *mtp;
 	uint64_t allocs, frees;
 	uint64_t alloced, freed;
@@ -1183,7 +1201,8 @@ DB_SHOW_COMMAND(malloc, db_show_malloc)
 		frees = 0;
 		alloced = 0;
 		freed = 0;
-		for (i = 0; i < MAXCPU; i++) {
+		for (i = 0; i <= mp_maxid; i++) {
+			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
 			allocs += mtip->mti_stats[i].mts_numallocs;
 			frees += mtip->mti_stats[i].mts_numfrees;
 			alloced += mtip->mti_stats[i].mts_memalloced;

Modified: head/sys/sys/malloc.h
==============================================================================
--- head/sys/sys/malloc.h	Sun Sep 23 16:37:32 2018	(r338898)
+++ head/sys/sys/malloc.h	Sun Sep 23 19:00:06 2018	(r338899)
@@ -101,7 +101,7 @@ struct malloc_type_internal {
 	uint32_t	mti_probes[DTMALLOC_PROBE_MAX];
 					/* DTrace probe ID array. */
 	u_char		mti_zone;
-	struct malloc_type_stats	mti_stats[MAXCPU];
+	struct malloc_type_stats	*mti_stats;
 };
 
 /*
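
For readers unfamiliar with the idiom the commit adopts, the sketch below shows the
general per-cpu UMA zone pattern: a UMA_ZONE_PCPU zone hands out one properly spaced
slot per CPU, the hot path updates only the current CPU's slot inside a critical
section via zpcpu_get(), and readers walk CPU ids 0..mp_maxid with zpcpu_get_cpu().
This is a minimal illustrative sketch, not code from the commit; the names
foo_stats_zone, struct foo_stats and the foo_* functions are hypothetical and do not
exist in the tree.

/*
 * Hypothetical per-cpu statistics, patterned after the mt_stats_zone change
 * in kern_malloc.c. Names are illustrative only.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <vm/uma.h>

struct foo_stats {
	uint64_t	fs_allocs;
	uint64_t	fs_bytes;
};

static uma_zone_t foo_stats_zone;
static struct foo_stats *foo_stats;	/* per-cpu; access via zpcpu_*() */

static void
foo_stats_init(void)
{

	/* UMA_ZONE_PCPU gives each CPU its own slot, avoiding false sharing. */
	foo_stats_zone = uma_zcreate("foo_stats_zone",
	    sizeof(struct foo_stats), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	foo_stats = uma_zalloc_pcpu(foo_stats_zone, M_WAITOK | M_ZERO);
}

static void
foo_stats_record(size_t size)
{
	struct foo_stats *fsp;

	/* Prevent migration so the update lands in this CPU's slot. */
	critical_enter();
	fsp = zpcpu_get(foo_stats);
	fsp->fs_allocs++;
	fsp->fs_bytes += size;
	critical_exit();
}

static uint64_t
foo_stats_total_allocs(void)
{
	struct foo_stats *fsp;
	uint64_t total;
	int i;

	/* Readers sum the slots of all possible CPUs, 0..mp_maxid. */
	total = 0;
	for (i = 0; i <= mp_maxid; i++) {
		fsp = zpcpu_get_cpu(foo_stats, i);
		total += fsp->fs_allocs;
	}
	return (total);
}

As the log notes, counter(9) could also provide lock-free per-cpu counters, but it
does not lend itself to exporting a whole stats structure per CPU the way the sysctl
handler does here, which is why a pcpu zone sized to the stats struct was chosen.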