From: Jeff Roberson <jeff@FreeBSD.org>
Date: Mon, 13 Nov 2017 03:41:50 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r325754 - in user/jeff/numa/sys: kern sys vm

Author: jeff
Date: Mon Nov 13 03:41:50 2017
New Revision: 325754
URL: https://svnweb.freebsd.org/changeset/base/325754

Log:
  Eliminate kmem_arena to simplify the kmem_ api for forthcoming NUMA support

Modified:
  user/jeff/numa/sys/kern/kern_malloc.c
  user/jeff/numa/sys/kern/subr_vmem.c
  user/jeff/numa/sys/sys/vmem.h
  user/jeff/numa/sys/vm/memguard.c
  user/jeff/numa/sys/vm/uma.h
  user/jeff/numa/sys/vm/uma_core.c
  user/jeff/numa/sys/vm/vm_kern.c
  user/jeff/numa/sys/vm/vm_map.c
  user/jeff/numa/sys/vm/vm_object.c
  user/jeff/numa/sys/vm/vm_object.h

Modified: user/jeff/numa/sys/kern/kern_malloc.c
==============================================================================
--- user/jeff/numa/sys/kern/kern_malloc.c	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/kern/kern_malloc.c	Mon Nov 13 03:41:50 2017	(r325754)
@@ -237,7 +237,7 @@ sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
 {
 	u_long size;
 
-	size = vmem_size(kmem_arena, VMEM_ALLOC);
+	size = vmem_size(kernel_arena, VMEM_ALLOC);
 	return (sysctl_handle_long(oidp, &size, 0, req));
 }
 
@@ -246,7 +246,7 @@ sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
 {
 	u_long size;
 
-	size = vmem_size(kmem_arena, VMEM_FREE);
+	size = vmem_size(kernel_arena, VMEM_FREE);
 	return (sysctl_handle_long(oidp, &size, 0, req));
 }
 
@@ -757,9 +757,8 @@ kmeminit(void)
 #else
 	tmp = vm_kmem_size;
 #endif
-	vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
-	    0, 0);
-	vmem_set_reclaim(kmem_arena, kmem_reclaim);
+	vmem_set_limit(kernel_arena, tmp);
+	vmem_set_reclaim(kernel_arena, kmem_reclaim);
 
 #ifdef DEBUG_MEMGUARD
 	/*
@@ -767,7 +766,7 @@ kmeminit(void)
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.
	 * It is only used for debugging.
	 */
-	memguard_init(kmem_arena);
+	memguard_init(kernel_arena);
 #endif
 }

Modified: user/jeff/numa/sys/kern/subr_vmem.c
==============================================================================
--- user/jeff/numa/sys/kern/subr_vmem.c	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/kern/subr_vmem.c	Mon Nov 13 03:41:50 2017	(r325754)
@@ -135,6 +135,7 @@ struct vmem {
 	int			vm_nbusytag;
 	vmem_size_t		vm_inuse;
 	vmem_size_t		vm_size;
+	vmem_size_t		vm_limit;
 
 	/* Used on import. */
 	vmem_import_t		*vm_importfn;
@@ -226,11 +227,11 @@ static uma_zone_t vmem_bt_zone;
 
 /* boot time arena storage. */
 static struct vmem kernel_arena_storage;
-static struct vmem kmem_arena_storage;
 static struct vmem buffer_arena_storage;
 static struct vmem transient_arena_storage;
+/* kernel and kmem arenas are aliased for backwards KPI compat. */
 vmem_t *kernel_arena = &kernel_arena_storage;
-vmem_t *kmem_arena = &kmem_arena_storage;
+vmem_t *kmem_arena = &kernel_arena_storage;
 vmem_t *buffer_arena = &buffer_arena_storage;
 vmem_t *transient_arena = &transient_arena_storage;
@@ -252,11 +253,11 @@ bt_fill(vmem_t *vm, int flags)
 	VMEM_ASSERT_LOCKED(vm);
 
 	/*
-	 * Only allow the kmem arena to dip into reserve tags. It is the
+	 * Only allow the kernel arena to dip into reserve tags. It is the
 	 * vmem where new tags come from.
 	 */
 	flags &= BT_FLAGS;
-	if (vm != kmem_arena)
+	if (vm != kernel_arena)
 		flags &= ~M_USE_RESERVE;
 
 	/*
@@ -613,22 +614,22 @@ vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_
 {
 	vmem_addr_t addr;
 
-	*pflag = UMA_SLAB_KMEM;
+	*pflag = UMA_SLAB_KERNEL;
 
 	/*
 	 * Single thread boundary tag allocation so that the address space
 	 * and memory are added in one atomic operation.
 	 */
 	mtx_lock(&vmem_bt_lock);
-	if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
+	if (vmem_xalloc(kernel_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
 	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
 	    &addr) == 0) {
-		if (kmem_back(kmem_object, addr, bytes,
+		if (kmem_back(kernel_object, addr, bytes,
 		    M_NOWAIT | M_USE_RESERVE) == 0) {
 			mtx_unlock(&vmem_bt_lock);
 			return ((void *)addr);
 		}
-		vmem_xfree(kmem_arena, addr, bytes);
+		vmem_xfree(kernel_arena, addr, bytes);
 		mtx_unlock(&vmem_bt_lock);
 		/*
 		 * Out of memory, not address space. This may not even be
@@ -832,6 +833,9 @@ vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t
 	vmem_addr_t addr;
 	int error;
 
+	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
+		return ENOMEM;
+
 	if (vm->vm_importfn == NULL)
 		return EINVAL;
 
@@ -976,6 +980,15 @@ vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
 }
 
 void
+vmem_set_limit(vmem_t *vm, vmem_size_t limit)
+{
+
+	VMEM_LOCK(vm);
+	vm->vm_limit = limit;
+	VMEM_UNLOCK(vm);
+}
+
+void
 vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
 {
 
@@ -1007,6 +1020,7 @@ vmem_init(vmem_t *vm, const char *name, vmem_addr_t ba
 	vm->vm_quantum_shift = flsl(quantum) - 1;
 	vm->vm_nbusytag = 0;
 	vm->vm_size = 0;
+	vm->vm_limit = 0;
 	vm->vm_inuse = 0;
 	qc_init(vm, qcache_max);

Modified: user/jeff/numa/sys/sys/vmem.h
==============================================================================
--- user/jeff/numa/sys/sys/vmem.h	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/sys/vmem.h	Mon Nov 13 03:41:50 2017	(r325754)
@@ -74,6 +74,12 @@ void vmem_set_import(vmem_t *vm, vmem_import_t *import
     vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum);
 
 /*
+ * Set a limit on the total size of a vmem.
+ */
+
+void vmem_set_limit(vmem_t *vm, vmem_size_t limit);
+
+/*
  * Set a callback for reclaiming memory when space is exhausted:
  */
 void vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn);
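For illustration only (this sketch is not part of the commit): the new
vmem_set_limit() KPI caps the total size a vmem may grow to through its
import function; once vm_size plus the requested size would exceed
vm_limit, vmem_import() now fails with ENOMEM instead of importing more
space.  A hypothetical consumer arena -- the names foo_arena, foo_import,
foo_release and foo_max are invented for this example -- could be capped
the same way kmeminit() now caps kernel_arena:

	static vmem_t *foo_arena;
	static vmem_import_t foo_import;	/* hypothetical KVA import fn */
	static vmem_release_t foo_release;	/* hypothetical KVA release fn */

	static void
	foo_arena_init(vmem_size_t foo_max)
	{

		/* Empty arena; all space arrives through the import fn. */
		foo_arena = vmem_create("foo arena", 0, 0, PAGE_SIZE, 0,
		    M_WAITOK);
		vmem_set_import(foo_arena, foo_import, foo_release, NULL,
		    PAGE_SIZE);
		/* New KPI: imports stop once foo_max bytes are reached. */
		vmem_set_limit(foo_arena, foo_max);
	}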
Modified: user/jeff/numa/sys/vm/memguard.c
==============================================================================
--- user/jeff/numa/sys/vm/memguard.c	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/vm/memguard.c	Mon Nov 13 03:41:50 2017	(r325754)
@@ -64,7 +64,7 @@ __FBSDID("$FreeBSD$");
 static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL,
     "MemGuard data");
 /*
- * The vm_memguard_divisor variable controls how much of kmem_map should be
+ * The vm_memguard_divisor variable controls how much of kernel_arena should be
  * reserved for MemGuard.
  */
 static u_int vm_memguard_divisor;
@@ -155,7 +155,7 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, C
 
 /*
  * Return a fudged value to be used for vm_kmem_size for allocating
- * the kmem_map. The memguard memory will be a submap.
+ * the kernel_arena. The memguard memory will be a submap.
 */
 unsigned long
 memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
@@ -346,7 +346,7 @@ memguard_alloc(unsigned long req_size, int flags)
 	addr = origaddr;
 	if (do_guard)
 		addr += PAGE_SIZE;
-	rv = kmem_back(kmem_object, addr, size_p, flags);
+	rv = kmem_back(kernel_object, addr, size_p, flags);
 	if (rv != KERN_SUCCESS) {
 		vmem_xfree(memguard_arena, origaddr, size_v);
 		memguard_fail_pgs++;
@@ -416,7 +416,7 @@ memguard_free(void *ptr)
 	 * vm_map lock to serialize updates to memguard_wasted, since
 	 * we had the lock at increment.
 	 */
-	kmem_unback(kmem_object, addr, size);
+	kmem_unback(kernel_object, addr, size);
 	if (sizev > size)
 		addr -= PAGE_SIZE;
 	vmem_xfree(memguard_arena, addr, sizev);

Modified: user/jeff/numa/sys/vm/uma.h
==============================================================================
--- user/jeff/numa/sys/vm/uma.h	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/vm/uma.h	Mon Nov 13 03:41:50 2017	(r325754)
@@ -607,12 +607,11 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free free
  * These flags are setable in the allocf and visible in the freef.
 */
 #define UMA_SLAB_BOOT	0x01		/* Slab alloced from boot pages */
-#define UMA_SLAB_KMEM	0x02		/* Slab alloced from kmem_map */
 #define UMA_SLAB_KERNEL	0x04		/* Slab alloced from kernel_map */
 #define UMA_SLAB_PRIV	0x08		/* Slab alloced from priv allocator */
 #define UMA_SLAB_OFFP	0x10		/* Slab is managed separately */
 #define UMA_SLAB_MALLOC	0x20		/* Slab is a large malloc slab */
-/* 0x40 and 0x80 are available */
+/* 0x02, 0x40 and 0x80 are available */
 
 /*
  * Used to pre-fill a zone with some number of items
Modified: user/jeff/numa/sys/vm/uma_core.c
==============================================================================
--- user/jeff/numa/sys/vm/uma_core.c	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/vm/uma_core.c	Mon Nov 13 03:41:50 2017	(r325754)
@@ -1077,8 +1077,8 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *
 {
 	void *p;	/* Returned page */
 
-	*pflag = UMA_SLAB_KMEM;
-	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
+	*pflag = UMA_SLAB_KERNEL;
+	p = (void *) kmem_malloc(kernel_arena, bytes, wait);
 
 	return (p);
 }
@@ -1159,9 +1159,7 @@ page_free(void *mem, vm_size_t size, uint8_t flags)
 {
 	struct vmem *vmem;
 
-	if (flags & UMA_SLAB_KMEM)
-		vmem = kmem_arena;
-	else if (flags & UMA_SLAB_KERNEL)
+	if (flags & UMA_SLAB_KERNEL)
 		vmem = kernel_arena;
 	else
 		panic("UMA: page_free used with invalid flags %x", flags);

Modified: user/jeff/numa/sys/vm/vm_kern.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_kern.c	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/vm/vm_kern.c	Mon Nov 13 03:41:50 2017	(r325754)
@@ -162,11 +162,13 @@ vm_offset_t
 kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
     vm_paddr_t high, vm_memattr_t memattr)
 {
-	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
+	vm_object_t object = kernel_object;
 	vm_offset_t addr, i, offset;
 	vm_page_t m;
 	int pflags, tries;
 
+	KASSERT(vmem == kernel_arena,
+	    ("kmem_alloc_attr: Only kernel_arena is supported."));
 	size = round_page(size);
 	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
 		return (0);
@@ -218,12 +220,14 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, i
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
-	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
+	vm_object_t object = kernel_object;
 	vm_offset_t addr, offset, tmp;
 	vm_page_t end_m, m;
 	u_long npages;
 	int pflags, tries;
 
+	KASSERT(vmem == kernel_arena,
+	    ("kmem_alloc_contig: Only kernel_arena is supported."));
 	size = round_page(size);
 	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
 		return (0);
@@ -312,12 +316,13 @@ kmem_malloc(struct vmem *vmem, vm_size_t size, int fla
 	vm_offset_t addr;
 	int rv;
 
+	KASSERT(vmem == kernel_arena,
+	    ("kmem_malloc: Only kernel_arena is supported."));
 	size = round_page(size);
 	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
 		return (0);
 
-	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
-	    addr, size, flags);
+	rv = kmem_back(kernel_object, addr, size, flags);
 	if (rv != KERN_SUCCESS) {
 		vmem_free(vmem, addr, size);
 		return (0);
@@ -337,8 +342,8 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_siz
 	vm_page_t m, mpred;
 	int pflags;
 
-	KASSERT(object == kmem_object || object == kernel_object,
-	    ("kmem_back: only supports kernel objects."));
+	KASSERT(object == kernel_object,
+	    ("kmem_back: only supports kernel object."));
 
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
@@ -394,8 +399,8 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_s
 	vm_page_t m, next;
 	vm_offset_t end, offset;
 
-	KASSERT(object == kmem_object || object == kernel_object,
-	    ("kmem_unback: only supports kernel objects."));
+	KASSERT(object == kernel_object,
+	    ("kmem_unback: only supports kernel object."));
 
 	pmap_remove(kernel_pmap, addr, addr + size);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
@@ -420,9 +425,10 @@ void
 kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
 {
 
+	KASSERT(vmem == kernel_arena,
+	    ("kmem_free: Only kernel_arena is supported."));
 	size = round_page(size);
-	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
-	    addr, size);
+	kmem_unback(kernel_object, addr, size);
 	vmem_free(vmem, addr, size);
 }
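For illustration only (not part of the commit): with kmem_arena collapsed
into kernel_arena, the simplified kmem_ API is always called with
kernel_arena and kernel_object, which is what the new KASSERTs enforce; the
legacy kmem_arena name still satisfies them because it now aliases the same
storage.  A typical wired-memory consumer looks like:

	vm_offset_t va;

	/* Allocate, and later free, one wired, zeroed page of kernel memory. */
	va = kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
	if (va != 0)
		kmem_free(kernel_arena, va, PAGE_SIZE);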
Modified: user/jeff/numa/sys/vm/vm_map.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_map.c	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/vm/vm_map.c	Mon Nov 13 03:41:50 2017	(r325754)
@@ -1187,9 +1187,9 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_oof
 	vm_inherit_t inheritance;
 
 	VM_MAP_ASSERT_LOCKED(map);
-	KASSERT((object != kmem_object && object != kernel_object) ||
+	KASSERT(object != kernel_object ||
 	    (cow & MAP_COPY_ON_WRITE) == 0,
-	    ("vm_map_insert: kmem or kernel object and COW"));
+	    ("vm_map_insert: kernel object and COW"));
 	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
 	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
 	KASSERT((prot & ~max) == 0,
@@ -2988,7 +2988,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry
 		VM_OBJECT_WLOCK(object);
 		if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
 		    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
-		    object == kernel_object || object == kmem_object)) {
+		    object == kernel_object)) {
 			vm_object_collapse(object);
 
 			/*

Modified: user/jeff/numa/sys/vm/vm_object.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_object.c	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/vm/vm_object.c	Mon Nov 13 03:41:50 2017	(r325754)
@@ -142,7 +142,6 @@ struct object_q vm_object_list;
 struct mtx vm_object_list_mtx;	/* lock for object list and count */
 
 struct vm_object kernel_object_store;
-struct vm_object kmem_object_store;
 
 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
     "VM object stats");
@@ -290,14 +289,6 @@ vm_object_init(void)
 #if VM_NRESERVLEVEL > 0
 	kernel_object->flags |= OBJ_COLORED;
 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
-#endif
-
-	rw_init(&kmem_object->lock, "kmem vm object");
-	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
-	    VM_MIN_KERNEL_ADDRESS), kmem_object);
-#if VM_NRESERVLEVEL > 0
-	kmem_object->flags |= OBJ_COLORED;
-	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
 #endif
 
 	/*
Modified: user/jeff/numa/sys/vm/vm_object.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_object.h	Mon Nov 13 03:34:55 2017	(r325753)
+++ user/jeff/numa/sys/vm/vm_object.h	Mon Nov 13 03:41:50 2017	(r325754)
@@ -225,10 +225,10 @@ extern struct object_q vm_object_list;	/* list of allo
 extern struct mtx vm_object_list_mtx;	/* lock for object list and count */
 
 extern struct vm_object kernel_object_store;
-extern struct vm_object kmem_object_store;
 
+/* kernel and kmem are aliased for backwards KPI compat. */
 #define	kernel_object	(&kernel_object_store)
-#define	kmem_object	(&kmem_object_store)
+#define	kmem_object	(&kernel_object_store)
 
 #define	VM_OBJECT_ASSERT_LOCKED(object)					\
 	rw_assert(&(object)->lock, RA_LOCKED)
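For illustration only (not part of the commit): because kmem_arena and
kmem_object are now plain aliases of kernel_arena and kernel_object,
existing consumers keep compiling unchanged and the following assertions
hold after this change:

	/* Both names resolve to the same storage. */
	KASSERT(kmem_arena == kernel_arena, ("kmem_arena aliases kernel_arena"));
	KASSERT(kmem_object == kernel_object, ("kmem_object aliases kernel_object"));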