Date: Sun, 15 Jul 2012 20:29:48 +0000 (UTC)
From: Matthew D Fleming <mdf@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r238502 - in head/sys: kern vm
Message-ID: <201207152029.q6FKTmfW071319@svn.freebsd.org>
Author: mdf
Date: Sun Jul 15 20:29:48 2012
New Revision: 238502
URL: http://svn.freebsd.org/changeset/base/238502

Log:
  Fix a bug with memguard(9) on 32-bit architectures without a
  VM_KMEM_MAX_SIZE.  The code was not taking into account the size of
  the kernel_map, which the kmem_map is allocated from, so it could
  produce a sub-map size too large to fit.  The simplest solution is to
  ignore VM_KMEM_MAX entirely and base the memguard map's size off the
  kernel_map's size, since this is always relevant and always smaller.

  Found by:	Justin Hibbits

Modified:
  head/sys/kern/kern_malloc.c
  head/sys/vm/memguard.c
  head/sys/vm/memguard.h
  head/sys/vm/vm_map.h

Modified: head/sys/kern/kern_malloc.c
==============================================================================
--- head/sys/kern/kern_malloc.c	Sun Jul 15 20:16:17 2012	(r238501)
+++ head/sys/kern/kern_malloc.c	Sun Jul 15 20:29:48 2012	(r238502)
@@ -744,7 +744,7 @@ kmeminit(void *dummy)
 		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
 
 #ifdef DEBUG_MEMGUARD
-	tmp = memguard_fudge(vm_kmem_size, vm_kmem_size_max);
+	tmp = memguard_fudge(vm_kmem_size, kernel_map);
 #else
 	tmp = vm_kmem_size;
 #endif

Modified: head/sys/vm/memguard.c
==============================================================================
--- head/sys/vm/memguard.c	Sun Jul 15 20:16:17 2012	(r238501)
+++ head/sys/vm/memguard.c	Sun Jul 15 20:29:48 2012	(r238502)
@@ -159,16 +159,18 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, fre
  * the kmem_map.  The memguard memory will be a submap.
  */
 unsigned long
-memguard_fudge(unsigned long km_size, unsigned long km_max)
+memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
 {
-	u_long mem_pgs = cnt.v_page_count;
+	u_long mem_pgs, parent_size;
 
 	vm_memguard_divisor = 10;
 	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
 
+	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
+	    PAGE_SIZE;
 	/* Pick a conservative value if provided value sucks. */
 	if ((vm_memguard_divisor <= 0) ||
-	    ((km_size / vm_memguard_divisor) == 0))
+	    ((parent_size / vm_memguard_divisor) == 0))
 		vm_memguard_divisor = 10;
 	/*
 	 * Limit consumption of physical pages to
@@ -177,21 +179,19 @@ memguard_fudge(unsigned long km_size, un
 	 * This prevents memguard's page promotions from completely
 	 * using up memory, since most malloc(9) calls are sub-page.
 	 */
+	mem_pgs = cnt.v_page_count;
 	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
 	/*
 	 * We want as much KVA as we can take safely.  Use at most our
-	 * allotted fraction of kmem_max.  Limit this to twice the
-	 * physical memory to avoid using too much memory as pagetable
-	 * pages.
-	 */
-	memguard_mapsize = km_max / vm_memguard_divisor;
-	/* size must be multiple of PAGE_SIZE */
-	memguard_mapsize = round_page(memguard_mapsize);
-	if (memguard_mapsize == 0 ||
-	    memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
+	 * allotted fraction of the parent map's size.  Limit this to
+	 * twice the physical memory to avoid using too much memory as
+	 * pagetable pages (size must be multiple of PAGE_SIZE).
+	 */
+	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
+	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
 		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
-	if (km_max > 0 && km_size + memguard_mapsize > km_max)
-		return (km_max);
+	if (km_size + memguard_mapsize > parent_size)
+		memguard_mapsize = 0;
 
 	return (km_size + memguard_mapsize);
 }

Modified: head/sys/vm/memguard.h
==============================================================================
--- head/sys/vm/memguard.h	Sun Jul 15 20:16:17 2012	(r238501)
+++ head/sys/vm/memguard.h	Sun Jul 15 20:29:48 2012	(r238502)
@@ -35,7 +35,7 @@ struct malloc_type;
 struct vm_map;
 
 #ifdef DEBUG_MEMGUARD
-unsigned long	memguard_fudge(unsigned long, unsigned long);
+unsigned long	memguard_fudge(unsigned long, const struct vm_map *);
 void	memguard_init(struct vm_map *);
 void	*memguard_alloc(unsigned long, int);
 void	*memguard_realloc(void *, unsigned long, struct malloc_type *, int);

Modified: head/sys/vm/vm_map.h
==============================================================================
--- head/sys/vm/vm_map.h	Sun Jul 15 20:16:17 2012	(r238501)
+++ head/sys/vm/vm_map.h	Sun Jul 15 20:29:48 2012	(r238502)
@@ -200,13 +200,13 @@ struct vm_map {
 
 #ifdef	_KERNEL
 static __inline vm_offset_t
-vm_map_max(vm_map_t map)
+vm_map_max(const struct vm_map *map)
 {
 	return (map->max_offset);
 }
 
 static __inline vm_offset_t
-vm_map_min(vm_map_t map)
+vm_map_min(const struct vm_map *map)
 {
 	return (map->min_offset);
 }
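[Editor's note, not part of the commit mail: for readers following the sizing change, below is a minimal standalone sketch of the new calculation. The fudge() helper, the macros, and the 32-bit figures (1 GB kernel_map, 2 GB of RAM) are hypothetical; only the arithmetic mirrors the updated memguard_fudge() in the diff above.]

/*
 * Standalone sketch of the sizing logic introduced above.  The numbers
 * are hypothetical (a 32-bit machine with a 1 GB kernel_map and 2 GB of
 * RAM); only the arithmetic mirrors the committed memguard_fudge().
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long
fudge(unsigned long km_size, unsigned long parent_size,
    unsigned long mem_pgs, unsigned long divisor)
{
	unsigned long mapsize;

	/* Take our allotted fraction of the parent map's size... */
	mapsize = round_page(parent_size / divisor);
	/* ...but no more than twice physical memory. */
	if (mapsize / (2 * PAGE_SIZE) > mem_pgs)
		mapsize = mem_pgs * 2 * PAGE_SIZE;
	/* If the submap would not fit in the parent map, drop it entirely. */
	if (km_size + mapsize > parent_size)
		mapsize = 0;
	return (km_size + mapsize);
}

int
main(void)
{
	unsigned long parent_size = 1UL << 30;		 /* 1 GB kernel_map */
	unsigned long mem_pgs = (2UL << 30) / PAGE_SIZE; /* 2 GB of RAM */

	/* Fits: a 512 MB kmem request leaves room for the ~102 MB submap. */
	printf("512 MB kmem -> %lu MB\n",
	    fudge(512UL << 20, parent_size, mem_pgs, 10) >> 20);
	/* Would overflow the parent map: the submap is skipped instead. */
	printf("1000 MB kmem -> %lu MB\n",
	    fudge(1000UL << 20, parent_size, mem_pgs, 10) >> 20);
	return (0);
}

[The second call illustrates the case the commit fixes: before this change the submap size was derived only from km_max, so on 32-bit machines it could exceed the space actually available in the kernel_map.]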