Date:      Tue, 6 Feb 2018 04:16:00 +0000 (UTC)
From:      Gleb Smirnoff <glebius@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r328916 - in head/sys: kern vm
Message-ID:  <201802060416.w164G0va096970@repo.freebsd.org>

Author: glebius
Date: Tue Feb  6 04:16:00 2018
New Revision: 328916
URL: https://svnweb.freebsd.org/changeset/base/328916

Log:
  Follow-up on r302393 by cperciva, improving the calculation of the boot pages
  required for UMA startup.
  
  o Introduce another stage of UMA startup, which is entered after
    vm_page_startup() finishes. After this stage we don't yet enable buckets,
    but we can ask the VM for pages. Rename the stages to meaningful names
    while here. The new list of stages is: BOOT_COLD, BOOT_STRAPPED,
    BOOT_PAGEALLOC, BOOT_BUCKETS, BOOT_RUNNING (the ordering is sketched
    right after this list).
    Enabling page allocation earlier allows us to dramatically reduce the
    number of boot pages required. More importantly, the number of zones
    becomes consistent across different machines, since no MD allocations
    are done before the BOOT_PAGEALLOC stage. For now only the UMA internal
    zones actually need to use startup_alloc(); however, that may change,
    so vm_page_startup() passes its need for early zones as an argument.
  o Introduce the uma_startup_count() function to avoid code duplication.
    The function calculates the sizes of the zone of zones and the zone of
    kegs, and from them how many pages UMA will need to bootstrap. It
    accounts not only for zone structures, but also for kegs, slabs and
    hashes (a worked sketch of the arithmetic follows the log message).
  o Hide the uma_startup_foo() declarations from the public header file.
  o Provide several DIAGNOSTIC printfs on boot_pages usage.
  o Bugfix: when calculating the size of the zone of zones, use
    (mp_maxid + 1) instead of mp_ncpus. Use the resulting number not only
    in the size argument to zone_ctor(), but also as args.size.
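
  As a quick orientation (a standalone userland sketch, not part of the
  committed change), the stage ordering and the boot events that advance it
  can be modelled as below; the gating comparisons in the loop mirror the
  "booted < BOOT_PAGEALLOC" check in keg_ctor()/startup_alloc() and the
  "booted < BOOT_BUCKETS" check in uma_zcreate(), and the default post-boot
  allocator is assumed to be page_alloc():

#include <stdio.h>

/* Same stage names and order as the new enum in uma_core.c. */
enum uma_boot_stage {
	BOOT_COLD = 0,		/* nothing set up yet */
	BOOT_STRAPPED,		/* uma_startup(): carve from boot_pages only */
	BOOT_PAGEALLOC,		/* uma_startup1(), at the end of
				   vm_page_startup(): VM can hand out pages */
	BOOT_BUCKETS,		/* uma_startup2(), from kmeminit(): drain lock
				   initialized, buckets enabled */
	BOOT_RUNNING		/* uma_startup3(): timers running, fully up */
};

static const char *stage_names[] = {
	"BOOT_COLD", "BOOT_STRAPPED", "BOOT_PAGEALLOC", "BOOT_BUCKETS",
	"BOOT_RUNNING"
};

int
main(void)
{
	int booted;

	for (booted = BOOT_COLD; booted <= BOOT_RUNNING; booted++)
		printf("%-15s allocf=%-13s zcreate locking=%s\n",
		    stage_names[booted],
		    booted < BOOT_PAGEALLOC ? "startup_alloc" : "page_alloc",
		    booted < BOOT_BUCKETS ? "skipped" : "uma_drain_lock");
	return (0);
}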
  
  Reviewed by:		imp, gallatin (earlier version)
  Differential Revision:	https://reviews.freebsd.org/D14054
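
A minimal userland sketch of the page accounting that uma_startup_count() now
performs (the formulas follow the uma_core.c hunk below; PAGE_SIZE,
CACHE_LINE_SIZE, UMA_HASH_SIZE_INIT and the structure sizes passed from main()
are illustrative placeholders, not the real kernel values, which scale with
mp_maxid + 1 and vm_ndomains):

#include <stdio.h>

#define	PAGE_SIZE		4096		/* placeholder */
#define	UMA_SLAB_SIZE		PAGE_SIZE	/* one page per slab */
#define	CACHE_LINE_SIZE		64		/* placeholder */
#define	UMA_BOOT_ZONES		11		/* UMA Slabs, UMA Hash, 9 bucket zones */
#define	UMA_HASH_SIZE_INIT	32		/* placeholder */

#define	howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define	roundup(x, y)	(howmany(x, y) * (y))

static int
startup_count(size_t zsize, size_t ksize, size_t hashent, int zones)
{
	int pages;

	/* Two zone structures ("UMA Kegs", "UMA Zones") plus the master keg. */
	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);

	zones += UMA_BOOT_ZONES;

	/* The startup zones themselves ... */
	if (zsize > UMA_SLAB_SIZE)
		pages += zones * howmany(zsize, UMA_SLAB_SIZE);
	else
		pages += howmany(zones, UMA_SLAB_SIZE / zsize);

	/* ... their kegs ... */
	pages += howmany(ksize * zones, UMA_SLAB_SIZE);

	/* ... and, conservatively, an initial hash for every zone. */
	pages += howmany(hashent * UMA_HASH_SIZE_INIT * zones, UMA_SLAB_SIZE);

	return (pages);
}

int
main(void)
{

	/* Made-up zsize/ksize; vm_page_startup() passes 0 extra VM zones. */
	printf("boot pages needed: %d\n",
	    startup_count(1536, 512, sizeof(void *), 0));
	return (0);
}

Since CTLFLAG_RDTUN tunables cannot be fetched that early in boot, the
computed value can still be overridden via TUNABLE_INT_FETCH("vm.boot_pages",
&boot_pages), as the vm_page.c hunk below shows.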

Modified:
  head/sys/kern/kern_malloc.c
  head/sys/vm/uma.h
  head/sys/vm/uma_core.c
  head/sys/vm/uma_int.h
  head/sys/vm/vm_page.c

Modified: head/sys/kern/kern_malloc.c
==============================================================================
--- head/sys/kern/kern_malloc.c	Tue Feb  6 02:13:44 2018	(r328915)
+++ head/sys/kern/kern_malloc.c	Tue Feb  6 04:16:00 2018	(r328916)
@@ -96,6 +96,8 @@ __FBSDID("$FreeBSD$");
 dtrace_malloc_probe_func_t	dtrace_malloc_probe;
 #endif
 
+extern void	uma_startup2(void);
+
 #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
     defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
 #define	MALLOC_DEBUG	1

Modified: head/sys/vm/uma.h
==============================================================================
--- head/sys/vm/uma.h	Tue Feb  6 02:13:44 2018	(r328915)
+++ head/sys/vm/uma.h	Tue Feb  6 04:16:00 2018	(r328916)
@@ -431,40 +431,6 @@ typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t 
 typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag);
 
 /*
- * Sets up the uma allocator. (Called by vm_mem_init)
- *
- * Arguments:
- *	bootmem  A pointer to memory used to bootstrap the system.
- *
- * Returns:
- *	Nothing
- *
- * Discussion:
- *	This memory is used for zones which allocate things before the
- *	backend page supplier can give us pages.  It should be
- *	UMA_SLAB_SIZE * boot_pages bytes. (see uma_int.h)
- *
- */
-
-void uma_startup(void *bootmem, int boot_pages);
-
-/*
- * Finishes starting up the allocator.  This should
- * be called when kva is ready for normal allocs.
- *
- * Arguments:
- *	None
- *
- * Returns:
- *	Nothing
- *
- * Discussion:
- *	uma_startup2 is called by kmeminit() to enable us of uma for malloc.
- */
-
-void uma_startup2(void);
-
-/*
  * Reclaims unused memory for all zones
  *
  * Arguments:

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c	Tue Feb  6 02:13:44 2018	(r328915)
+++ head/sys/vm/uma_core.c	Tue Feb  6 04:16:00 2018	(r328916)
@@ -149,9 +149,8 @@ static unsigned long uma_kmem_limit = LONG_MAX;
 static volatile unsigned long uma_kmem_total;
 
 /* Is the VM done starting up? */
-static int booted = 0;
-#define	UMA_STARTUP	1
-#define	UMA_STARTUP2	2
+static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
+    BOOT_RUNNING } booted = BOOT_COLD;
 
 /*
  * This is the handle used to schedule events that need to happen
@@ -226,6 +225,11 @@ enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI }
 
 /* Prototypes.. */
 
+int	uma_startup_count(int);
+void	uma_startup(void *, int);
+void	uma_startup1(void);
+void	uma_startup2(void);
+
 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
@@ -1084,6 +1088,11 @@ startup_alloc(uma_zone_t zone, vm_size_t bytes, int do
 	 * Check our small startup cache to see if it has pages remaining.
 	 */
 	mtx_lock(&uma_boot_pages_mtx);
+#ifdef DIAGNOSTIC
+	if (booted < BOOT_PAGEALLOC)
+		printf("%s from \"%s\", %d boot pages left\n", __func__,
+		    zone->uz_name, boot_pages);
+#endif
 	if (pages <= boot_pages) {
 		mem = bootmem;
 		boot_pages -= pages;
@@ -1093,7 +1102,7 @@ startup_alloc(uma_zone_t zone, vm_size_t bytes, int do
 		return (mem);
 	}
 	mtx_unlock(&uma_boot_pages_mtx);
-	if (booted < UMA_STARTUP2)
+	if (booted < BOOT_PAGEALLOC)
 		panic("UMA: Increase vm.boot_pages");
 	/*
 	 * Now that we've booted reset these users to their real allocator.
@@ -1472,7 +1481,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 	 * If we haven't booted yet we need allocations to go through the
 	 * startup cache until the vm is ready.
 	 */
-	if (booted < UMA_STARTUP2)
+	if (booted < BOOT_PAGEALLOC)
 		keg->uk_allocf = startup_alloc;
 #ifdef UMA_MD_SMALL_ALLOC
 	else if (keg->uk_ppera == 1)
@@ -1770,25 +1779,63 @@ zone_foreach(void (*zfunc)(uma_zone_t))
 	rw_runlock(&uma_rwlock);
 }
 
-/* Public functions */
-/* See uma.h */
+/*
+ * Count how many pages do we need to bootstrap.  VM supplies
+ * its need in early zones in the argument, we add up our zones,
+ * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The
+ * zone of zones and zone of kegs are accounted separately.
+ */
+#define	UMA_BOOT_ZONES	11
+static int zsize, ksize;
+int
+uma_startup_count(int zones)
+{
+	int pages;
+
+	ksize = sizeof(struct uma_keg) +
+	    (sizeof(struct uma_domain) * vm_ndomains);
+	zsize = sizeof(struct uma_zone) +
+	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
+	    (sizeof(struct uma_zone_domain) * vm_ndomains);
+
+	/* Memory for the zone of zones and zone of kegs. */
+	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
+	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
+
+	zones += UMA_BOOT_ZONES;
+
+	/* Memory for startup zones, UMA and VM, ... */
+	if (zsize > UMA_SLAB_SIZE)
+		pages += zones * howmany(zsize, UMA_SLAB_SIZE);
+	else
+		pages += howmany(zones, UMA_SLAB_SIZE / zsize);
+
+	/* ... and their kegs. */
+	pages += howmany(ksize * zones, UMA_SLAB_SIZE);
+
+	/*
+	 * Take conservative approach that every zone
+	 * is going to allocate hash.
+	 */
+	pages += howmany(sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT *
+	    zones, UMA_SLAB_SIZE);
+
+	return (pages);
+}
+
 void
 uma_startup(void *mem, int npages)
 {
 	struct uma_zctor_args args;
 	uma_keg_t masterkeg;
 	uintptr_t m;
-	int zsize;
-	int ksize;
 
+#ifdef DIAGNOSTIC
+	printf("Entering %s with %d boot pages configured\n", __func__, npages);
+#endif
+
 	rw_init(&uma_rwlock, "UMA lock");
 
-	ksize = sizeof(struct uma_keg) +
-	    (sizeof(struct uma_domain) * vm_ndomains);
-	zsize = sizeof(struct uma_zone) +
-	    (sizeof(struct uma_cache) * mp_ncpus) +
-	    (sizeof(struct uma_zone_domain) * vm_ndomains);
-
 	/* Use bootpages memory for the zone of zones and zone of kegs. */
 	m = (uintptr_t)mem;
 	zones = (uma_zone_t)m;
@@ -1819,9 +1866,7 @@ uma_startup(void *mem, int npages)
 	boot_pages = npages;
 
 	args.name = "UMA Zones";
-	args.size = sizeof(struct uma_zone) +
-	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
-	    (sizeof(struct uma_zone_domain) * vm_ndomains);
+	args.size = zsize;
 	args.ctor = zone_ctor;
 	args.dtor = zone_dtor;
 	args.uminit = zero_init;
@@ -1844,27 +1889,37 @@ uma_startup(void *mem, int npages)
 
 	bucket_init();
 
-	booted = UMA_STARTUP;
+	booted = BOOT_STRAPPED;
 }
 
-/* see uma.h */
 void
+uma_startup1(void)
+{
+
+#ifdef DIAGNOSTIC
+	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
+#endif
+	booted = BOOT_PAGEALLOC;
+}
+
+void
 uma_startup2(void)
 {
-	booted = UMA_STARTUP2;
-	bucket_enable();
+
+	booted = BOOT_BUCKETS;
 	sx_init(&uma_drain_lock, "umadrain");
+	bucket_enable();
 }
 
 /*
  * Initialize our callout handle
  *
  */
-
 static void
 uma_startup3(void)
 {
 
+	booted = BOOT_RUNNING;
 	callout_init(&uma_callout, 1);
 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 }
@@ -1884,6 +1939,7 @@ uma_kcreate(uma_zone_t zone, size_t size, uma_init umi
 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
 }
 
+/* Public functions */
 /* See uma.h */
 void
 uma_set_align(int align)
@@ -1932,7 +1988,7 @@ uma_zcreate(const char *name, size_t size, uma_ctor ct
 	args.flags = flags;
 	args.keg = NULL;
 
-	if (booted < UMA_STARTUP2) {
+	if (booted < BOOT_BUCKETS) {
 		locked = false;
 	} else {
 		sx_slock(&uma_drain_lock);
@@ -1966,7 +2022,7 @@ uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor
 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
 	args.keg = keg;
 
-	if (booted < UMA_STARTUP2) {
+	if (booted < BOOT_BUCKETS) {
 		locked = false;
 	} else {
 		sx_slock(&uma_drain_lock);

Modified: head/sys/vm/uma_int.h
==============================================================================
--- head/sys/vm/uma_int.h	Tue Feb  6 02:13:44 2018	(r328915)
+++ head/sys/vm/uma_int.h	Tue Feb  6 04:16:00 2018	(r328916)
@@ -134,10 +134,6 @@
 #define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
 #define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits PAGE_MASK */
 
-#define UMA_BOOT_PAGES		64	/* Pages allocated for startup */
-#define UMA_BOOT_PAGES_ZONES	32	/* Multiplier for pages to reserve */
-					/* if uma_zone > PAGE_SIZE */
-
 /* Max waste percentage before going to off page slab management */
 #define UMA_MAX_WASTE	10
 

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Tue Feb  6 02:13:44 2018	(r328915)
+++ head/sys/vm/vm_page.c	Tue Feb  6 04:16:00 2018	(r328916)
@@ -125,6 +125,10 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/md_var.h>
 
+extern int	uma_startup_count(int);
+extern void	uma_startup(void *, int);
+extern void	uma_startup1(void);
+
 /*
  *	Associated with page of user-allocatable memory is a
  *	page structure.
@@ -145,7 +149,7 @@ vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;
 
-static int boot_pages = UMA_BOOT_PAGES;
+static int boot_pages;
 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &boot_pages, 0,
     "number of pages allocated for bootstrapping the VM system");
@@ -466,7 +470,7 @@ vm_page_startup(vm_offset_t vaddr)
 	vm_paddr_t end, high_avail, low_avail, new_end, page_range, size;
 	vm_paddr_t biggestsize, last_pa, pa;
 	u_long pagecount;
-	int biggestone, i, pages_per_zone, segind;
+	int biggestone, i, segind;
 
 	biggestsize = 0;
 	biggestone = 0;
@@ -496,26 +500,13 @@ vm_page_startup(vm_offset_t vaddr)
 		vm_page_domain_init(&vm_dom[i]);
 
 	/*
-	 * Almost all of the pages needed for bootstrapping UMA are used
-	 * for zone structures, so if the number of CPUs results in those
-	 * structures taking more than one page each, we set aside more pages
-	 * in proportion to the zone structure size.
-	 */
-	pages_per_zone = howmany(sizeof(struct uma_zone) +
-	    sizeof(struct uma_cache) * (mp_maxid + 1) +
-	    roundup2(sizeof(struct uma_slab), sizeof(void *)), UMA_SLAB_SIZE);
-	if (pages_per_zone > 1) {
-		/* Reserve more pages so that we don't run out. */
-		boot_pages = UMA_BOOT_PAGES_ZONES * pages_per_zone;
-	}
-
-	/*
 	 * Allocate memory for use when boot strapping the kernel memory
 	 * allocator.
 	 *
 	 * CTFLAG_RDTUN doesn't work during the early boot process, so we must
 	 * manually fetch the value.
 	 */
+	boot_pages = uma_startup_count(0);
 	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
 	new_end = end - (boot_pages * UMA_SLAB_SIZE);
 	new_end = trunc_page(new_end);
@@ -748,6 +739,9 @@ vm_page_startup(vm_offset_t vaddr)
 	 * can work.
 	 */
 	domainset_zero();
+
+	/* Announce page availability to UMA. */
+	uma_startup1();
 
 	return (vaddr);
 }


