Date:      Wed, 26 Aug 2020 14:31:36 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r364819 - head/sys/kern
Message-ID:  <202008261431.07QEVals090752@repo.freebsd.org>

Author: markj
Date: Wed Aug 26 14:31:35 2020
New Revision: 364819
URL: https://svnweb.freebsd.org/changeset/base/364819

Log:
  vmem: Avoid allocating span tags when segments are never released.
  
  vmem uses span tags to delimit imported segments, so that they can be
  released if they become free in the future.  However, the
  per-domain kernel KVA arenas never release resources, so the span tags
  between imported ranges are unused when the ranges are contiguous.
  Furthermore, such span tags prevent coalescing of free segments across
  KVA_QUANTUM boundaries, resulting in internal fragmentation which
  inhibits superpage promotion in the kernel map.
  
  Stop allocating span tags in arenas that never release resources.  This
  saves a small amount of memory and allows free segments to coalesce
  across import boundaries.  This manifests as improved kernel superpage
  usage during poudriere runs, which also helps to reduce physical memory
  fragmentation by reducing the number of broken partially populated
  reservations.
  
  Tested by:	pho
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D24548
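
To make the log's first paragraph concrete, here is an illustrative
sketch (not part of the commit) of the boundary-tag list for two
contiguous imports of size Q into an arena with no release function;
the layout notation is ours, and only the BT_TYPE_* names come from
subr_vmem.c:

	/*
	 * Before this change, each import adds a span tag ahead of its
	 * free tag, so the two free segments cannot merge across the
	 * import boundary at addr + Q:
	 *
	 *   [SPAN addr,Q][FREE addr,Q][SPAN addr+Q,Q][FREE addr+Q,Q]
	 *
	 * After this change, with vm_releasefn == NULL and contiguous
	 * imports, no span tags are created and the free segments
	 * coalesce into one, letting allocations span the old boundary:
	 *
	 *   [FREE addr,2Q]
	 */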

Modified:
  head/sys/kern/subr_vmem.c

Modified: head/sys/kern/subr_vmem.c
==============================================================================
--- head/sys/kern/subr_vmem.c	Wed Aug 26 14:02:38 2020	(r364818)
+++ head/sys/kern/subr_vmem.c	Wed Aug 26 14:31:35 2020	(r364819)
@@ -249,6 +249,18 @@ static struct vmem memguard_arena_storage;
 vmem_t *memguard_arena = &memguard_arena_storage;
 #endif
 
+static bool
+bt_isbusy(bt_t *bt)
+{
+	return (bt->bt_type == BT_TYPE_BUSY);
+}
+
+static bool
+bt_isfree(bt_t *bt)
+{
+	return (bt->bt_type == BT_TYPE_FREE);
+}
+
 /*
  * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
  * allocation will not fail once bt_fill() passes.  To do so we cache
@@ -795,25 +807,49 @@ SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_star
 static void
 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
 {
-	bt_t *btspan;
-	bt_t *btfree;
+	bt_t *btfree, *btprev, *btspan;
 
+	VMEM_ASSERT_LOCKED(vm);
 	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
 	MPASS((size & vm->vm_quantum_mask) == 0);
 
-	btspan = bt_alloc(vm);
-	btspan->bt_type = type;
-	btspan->bt_start = addr;
-	btspan->bt_size = size;
-	bt_insseg_tail(vm, btspan);
+	if (vm->vm_releasefn == NULL) {
+		/*
+		 * The new segment will never be released, so see if it is
+		 * contiguous with respect to an existing segment.  In this case
+		 * a span tag is not needed, and it may be possible now or in
+		 * the future to coalesce the new segment with an existing free
+		 * segment.
+		 */
+		btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
+		if ((!bt_isbusy(btprev) && !bt_isfree(btprev)) ||
+		    btprev->bt_start + btprev->bt_size != addr)
+			btprev = NULL;
+	} else {
+		btprev = NULL;
+	}
 
-	btfree = bt_alloc(vm);
-	btfree->bt_type = BT_TYPE_FREE;
-	btfree->bt_start = addr;
-	btfree->bt_size = size;
-	bt_insseg(vm, btfree, btspan);
-	bt_insfree(vm, btfree);
+	if (btprev == NULL || bt_isbusy(btprev)) {
+		if (btprev == NULL) {
+			btspan = bt_alloc(vm);
+			btspan->bt_type = type;
+			btspan->bt_start = addr;
+			btspan->bt_size = size;
+			bt_insseg_tail(vm, btspan);
+		}
 
+		btfree = bt_alloc(vm);
+		btfree->bt_type = BT_TYPE_FREE;
+		btfree->bt_start = addr;
+		btfree->bt_size = size;
+		bt_insseg_tail(vm, btfree);
+		bt_insfree(vm, btfree);
+	} else {
+		bt_remfree(vm, btprev);
+		btprev->bt_size += size;
+		bt_insfree(vm, btprev);
+	}
+
 	vm->vm_size += size;
 }
 
@@ -1147,6 +1183,7 @@ vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
 {
 
 	VMEM_LOCK(vm);
+	KASSERT(vm->vm_size == 0, ("%s: arena is non-empty", __func__));
 	vm->vm_importfn = importfn;
 	vm->vm_releasefn = releasefn;
 	vm->vm_arg = arg;


