Date: Tue, 6 Feb 2018 16:42:13 -0800
From: Gleb Smirnoff <glebius@FreeBSD.org>
To: Peter Holm <peter@holm.cc>
Cc: svn-src-head@freebsd.org, svn-src-all@freebsd.org, src-committers@freebsd.org
Subject: Re: svn commit: r328916 - in head/sys: kern vm
Message-ID: <20180207004213.GG1063@FreeBSD.org>
In-Reply-To: <20180206232521.GA41396@x2.osted.lan>
References: <201802060416.w164G0va096970@repo.freebsd.org> <20180206113017.GA25428@x2.osted.lan> <20180206182839.GB1063@FreeBSD.org> <20180206193430.GA36054@x2.osted.lan> <20180206221555.GC1063@FreeBSD.org> <20180206225244.GA40529@x2.osted.lan> <20180206230635.GE1063@FreeBSD.org> <20180206232521.GA41396@x2.osted.lan>

Hi Peter,
can you please try this patch? In either case, success
or failure, please provide me with the dmesg output. Thanks a lot!
--
Gleb Smirnoff
[-- Attachment: boot_pages.diff (text/x-diff) --]

Index: uma_core.c
===================================================================
--- uma_core.c	(revision 328955)
+++ uma_core.c	(working copy)
@@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$");
 #ifdef DEBUG_MEMGUARD
 #include <vm/memguard.h>
 #endif
+#define DIAGNOSTIC

 /*
  * This is the zone and keg from which all zones are spawned.
@@ -1800,6 +1801,7 @@ uma_startup_count(int zones)
 	/* Memory for the zone of zones and zone of kegs. */
 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
+	printf("boot_pages master %d\n", pages);

 	zones += UMA_BOOT_ZONES;

@@ -1807,17 +1809,20 @@ uma_startup_count(int zones)
 	if (zsize > UMA_SLAB_SIZE)
 		pages += zones * howmany(zsize, UMA_SLAB_SIZE);
 	else
-		pages += howmany(zones, UMA_SLAB_SIZE / zsize);
+		pages += howmany(zones, UMA_SLAB_SPACE / zsize);
+	printf("boot_pages zones %d\n", pages);

 	/* ... and their kegs. */
-	pages += howmany(zones, UMA_SLAB_SIZE / ksize);
+	pages += howmany(zones, UMA_SLAB_SPACE / ksize);
+	printf("boot_pages kegs %d\n", pages);

 	/*
 	 * Take conservative approach that every zone
 	 * is going to allocate hash.
 	 */
-	pages += howmany(zones, UMA_SLAB_SIZE /
+	pages += howmany(zones, UMA_SLAB_SPACE /
 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
+	printf("boot_pages hash %d\n", pages);

 	return (pages);
 }
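
For illustration only, here is a minimal userland sketch of the estimate above, not part of the patch; howmany() is the sys/param.h macro, while the slab header size, keg size and zone count are assumed placeholders, not the real kernel values. It shows why dividing by the full UMA_SLAB_SIZE undercounts pages once the in-page slab header is accounted for:

/*
 * Illustrative sketch: all sizes below are assumed placeholders.
 */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))	/* as in sys/param.h */

#define UMA_SLAB_SIZE	4096	/* assumed: one 4K page per slab */
#define SLAB_HDR	128	/* assumed sizeof(struct uma_slab) */
#define UMA_SLAB_SPACE	(UMA_SLAB_SIZE - SLAB_HDR)

int
main(void)
{
	int zones = 200;	/* assumed number of startup zones */
	int ksize = 256;	/* assumed keg size */

	/* Old divisor: pretends the whole slab is available for items. */
	printf("kegs, old estimate: %d pages\n",
	    howmany(zones, UMA_SLAB_SIZE / ksize));
	/* New divisor: only the space left after the slab header. */
	printf("kegs, new estimate: %d pages\n",
	    howmany(zones, UMA_SLAB_SPACE / ksize));
	return (0);
}

With these assumed numbers the old divisor gives 16 items per slab (13 pages) while the new one gives 15 (14 pages); that per-term shortfall is the kind of underestimate the boot_pages reservation can suffer from.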
Index: uma_int.h
===================================================================
--- uma_int.h	(revision 328955)
+++ uma_int.h	(working copy)
@@ -138,6 +138,11 @@
 #define UMA_MAX_WASTE	10

 /*
+ * Size of memory in a not offpage slab available for actual items.
+ */
+#define	UMA_SLAB_SPACE	(UMA_SLAB_SIZE - sizeof(struct uma_slab))
+
+/*
  * I doubt there will be many cases where this is exceeded. This is the initial
  * size of the hash table for uma_slabs that are managed off page. This hash
  * does expand by powers of two. Currently it doesn't get smaller.
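
As a worked example with the same assumed numbers as the sketch above (the real sizeof(struct uma_slab) differs): with UMA_SLAB_SIZE = 4096 and a 128-byte in-page slab header, UMA_SLAB_SPACE = 3968, so a 256-byte item fits 3968 / 256 = 15 times per non-offpage slab rather than the 4096 / 256 = 16 the old divisor assumed, which means more slabs, and therefore more boot pages, are needed.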
Index: vm_page.c
===================================================================
--- vm_page.c	(revision 328955)
+++ vm_page.c	(working copy)
@@ -518,8 +518,11 @@ vm_page_startup(vm_offset_t vaddr)

 	/* vmem_startup() calls uma_prealloc(). */
 	boot_pages += vmem_startup_count();
+	printf("boot_pages vmem %d\n", boot_pages);
 	/* vm_map_startup() calls uma_prealloc(). */
-	boot_pages += howmany(MAX_KMAP, UMA_SLAB_SIZE / sizeof(struct vm_map));
+	boot_pages += howmany(MAX_KMAP,
+	    UMA_SLAB_SPACE / sizeof(struct vm_map));
+	printf("boot_pages kmap %d\n", boot_pages);

 	/*
 	 * Before going fully functional kmem_init() does allocation
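
The printf()s are staged so that the dmesg output shows the running total after each term, which makes it possible to see which term pushes the estimate past the reserved boot pages. A minimal userland sketch of the same pattern, not part of the patch, with assumed placeholder values for the uma_startup_count() total, the vmem_startup_count() result, MAX_KMAP, sizeof(struct vm_map) and the slab header size:

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))	/* as in sys/param.h */

#define UMA_SLAB_SIZE	4096			/* assumed: one 4K page per slab */
#define UMA_SLAB_SPACE	(UMA_SLAB_SIZE - 128)	/* assumed 128-byte slab header */
#define MAX_KMAP	10			/* assumed */

int
main(void)
{
	int vm_map_size = 240;	/* assumed sizeof(struct vm_map) */
	int boot_pages = 42;	/* assumed uma_startup_count() total */

	boot_pages += 7;	/* assumed vmem_startup_count() result */
	printf("boot_pages vmem %d\n", boot_pages);	/* prints 49 */

	boot_pages += howmany(MAX_KMAP, UMA_SLAB_SPACE / vm_map_size);
	printf("boot_pages kmap %d\n", boot_pages);	/* prints 50 */
	return (0);
}

Each printed value is cumulative, so the difference between consecutive lines is the contribution of that term.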
