Date: Wed, 28 Apr 2021 14:09:51 GMT
From: Mark Johnston <markj@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-branches@FreeBSD.org
Subject: git: 6eddb6822c9a - stable/13 - uma: Split bucket_cache_drain() to permit per-domain reclamation
Message-ID: <202104281409.13SE9pCi025307@gitrepo.freebsd.org>
The branch stable/13 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=6eddb6822c9abe8a96fb1ad764aee231951ee87f

commit 6eddb6822c9abe8a96fb1ad764aee231951ee87f
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2021-04-09 13:47:09 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2021-04-28 14:00:41 +0000

    uma: Split bucket_cache_drain() to permit per-domain reclamation

    Note that the per-domain variant does not shrink the target bucket size.

    No functional change intended.

    Sponsored by:	The FreeBSD Foundation

    (cherry picked from commit 54f421f9e84234c4313f2d636e4ebd74009a74d6)
---
 sys/vm/uma_core.c | 78 ++++++++++++++++++++++++++++++-------------------------
 1 file changed, 42 insertions(+), 36 deletions(-)

diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index b1762500c147..a030be0dad13 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1307,11 +1307,50 @@ pcpu_cache_drain_safe(uma_zone_t zone)
  * estimated working set size.
  */
 static void
-bucket_cache_reclaim(uma_zone_t zone, bool drain)
+bucket_cache_reclaim_domain(uma_zone_t zone, bool drain, int domain)
 {
 	uma_zone_domain_t zdom;
 	uma_bucket_t bucket;
 	long target;
+
+	/*
+	 * The cross bucket is partially filled and not part of
+	 * the item count.  Reclaim it individually here.
+	 */
+	zdom = ZDOM_GET(zone, domain);
+	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 || drain) {
+		ZONE_CROSS_LOCK(zone);
+		bucket = zdom->uzd_cross;
+		zdom->uzd_cross = NULL;
+		ZONE_CROSS_UNLOCK(zone);
+		if (bucket != NULL)
+			bucket_free(zone, bucket, NULL);
+	}
+
+	/*
+	 * If we were asked to drain the zone, we are done only once
+	 * this bucket cache is empty.  Otherwise, we reclaim items in
+	 * excess of the zone's estimated working set size.  If the
+	 * difference nitems - imin is larger than the WSS estimate,
+	 * then the estimate will grow at the end of this interval and
+	 * we ignore the historical average.
+	 */
+	ZDOM_LOCK(zdom);
+	target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
+	    zdom->uzd_imin);
+	while (zdom->uzd_nitems > target) {
+		bucket = zone_fetch_bucket(zone, zdom, true);
+		if (bucket == NULL)
+			break;
+		bucket_free(zone, bucket, NULL);
+		ZDOM_LOCK(zdom);
+	}
+	ZDOM_UNLOCK(zdom);
+}
+
+static void
+bucket_cache_reclaim(uma_zone_t zone, bool drain)
+{
 	int i;
 
 	/*
@@ -1321,41 +1360,8 @@ bucket_cache_reclaim(uma_zone_t zone, bool drain)
 	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
 		zone->uz_bucket_size--;
 
-	for (i = 0; i < vm_ndomains; i++) {
-		/*
-		 * The cross bucket is partially filled and not part of
-		 * the item count.  Reclaim it individually here.
-		 */
-		zdom = ZDOM_GET(zone, i);
-		if ((zone->uz_flags & UMA_ZONE_SMR) == 0 || drain) {
-			ZONE_CROSS_LOCK(zone);
-			bucket = zdom->uzd_cross;
-			zdom->uzd_cross = NULL;
-			ZONE_CROSS_UNLOCK(zone);
-			if (bucket != NULL)
-				bucket_free(zone, bucket, NULL);
-		}
-
-		/*
-		 * If we were asked to drain the zone, we are done only once
-		 * this bucket cache is empty.  Otherwise, we reclaim items in
-		 * excess of the zone's estimated working set size.  If the
-		 * difference nitems - imin is larger than the WSS estimate,
-		 * then the estimate will grow at the end of this interval and
-		 * we ignore the historical average.
-		 */
-		ZDOM_LOCK(zdom);
-		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
-		    zdom->uzd_imin);
-		while (zdom->uzd_nitems > target) {
-			bucket = zone_fetch_bucket(zone, zdom, true);
-			if (bucket == NULL)
-				break;
-			bucket_free(zone, bucket, NULL);
-			ZDOM_LOCK(zdom);
-		}
-		ZDOM_UNLOCK(zdom);
-	}
+	for (i = 0; i < vm_ndomains; i++)
+		bucket_cache_reclaim_domain(zone, drain, i);
 }
 
 static void
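For readers following the trimming logic in the diff, the snippet below is a small stand-alone sketch of the reclaim-target computation only.  It is illustrative: the domain_stats structure, its field values, and main() are invented for this example; only the "drain ? 0 : lmax(wss, nitems - imin)" expression mirrors the committed code, and none of this is part of the UMA source.

#include <stdio.h>

/*
 * Illustrative stand-ins for the per-domain statistics that uma_core.c
 * keeps in struct uma_zone_domain (uzd_nitems, uzd_imin, uzd_wss).  This
 * struct is hypothetical and exists only for the example.
 */
struct domain_stats {
	long	nitems;		/* items currently held in the domain's bucket cache */
	long	imin;		/* minimum item count observed this interval */
	long	wss;		/* estimated working set size */
};

static long
lmax(long a, long b)
{
	return (a > b ? a : b);
}

/*
 * Compute how many cached items a domain may keep.  A drain releases
 * everything.  Otherwise keep the larger of the WSS estimate and
 * (nitems - imin): if the cache grew by more than the historical WSS
 * during this interval, the estimate will be raised at the end of the
 * interval anyway, so the historical average is ignored.
 */
static long
reclaim_target(const struct domain_stats *st, int drain)
{
	return (drain ? 0 : lmax(st->wss, st->nitems - st->imin));
}

int
main(void)
{
	struct domain_stats st = { .nitems = 512, .imin = 128, .wss = 256 };

	printf("trim target:  %ld\n", reclaim_target(&st, 0));	/* 384 */
	printf("drain target: %ld\n", reclaim_target(&st, 1));	/* 0 */
	return (0);
}

With these example numbers, nitems - imin (384) exceeds the WSS estimate (256), so a non-draining reclaim keeps 384 items and frees the rest, while a drain frees the entire bucket cache.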