From: Jeff Roberson <jeff@FreeBSD.org>
Date: Mon, 13 Nov 2017 23:33:08 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r325784 - in user/jeff/numa/sys: kern vm
Message-Id: <201711132333.vADNX8XN004142@repo.freebsd.org>

Author: jeff
Date: Mon Nov 13 23:33:07 2017
New Revision: 325784
URL: https://svnweb.freebsd.org/changeset/base/325784

Log:
  Use a soft limit for kmem implemented within uma.  Part of r325754

Modified:
  user/jeff/numa/sys/kern/kern_malloc.c
  user/jeff/numa/sys/kern/subr_vmem.c
  user/jeff/numa/sys/vm/uma_core.c
  user/jeff/numa/sys/vm/uma_int.h

Modified: user/jeff/numa/sys/kern/kern_malloc.c
==============================================================================
--- user/jeff/numa/sys/kern/kern_malloc.c	Mon Nov 13 23:21:17 2017	(r325783)
+++ user/jeff/numa/sys/kern/kern_malloc.c	Mon Nov 13 23:33:07 2017	(r325784)
@@ -237,16 +237,22 @@ sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
 {
 	u_long size;
 
-	size = vmem_size(kernel_arena, VMEM_ALLOC);
+	size = uma_size();
 	return (sysctl_handle_long(oidp, &size, 0, req));
 }
 
 static int
 sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
 {
-	u_long size;
+	u_long size, limit;
 
-	size = vmem_size(kernel_arena, VMEM_FREE);
+	/* The sysctl is unsigned, implement as a saturation value. */
+	size = uma_size();
+	limit = uma_limit();
+	if (size > limit)
+		size = 0;
+	else
+		size = limit - size;
 	return (sysctl_handle_long(oidp, &size, 0, req));
 }
 
@@ -667,19 +673,6 @@ reallocf(void *addr, unsigned long size, struct malloc
 	return (mem);
 }
 
-/*
- * Wake the uma reclamation pagedaemon thread when we exhaust KVA.  It
- * will call the lowmem handler and uma_reclaim() callbacks in a
- * context that is safe.
- */
-static void
-kmem_reclaim(vmem_t *vm, int flags)
-{
-
-	uma_reclaim_wakeup();
-	pagedaemon_wakeup();
-}
-
 #ifndef __sparc64__
 CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
 #endif
@@ -757,8 +750,7 @@ kmeminit(void)
 #else
 	tmp = vm_kmem_size;
 #endif
-	vmem_set_limit(kernel_arena, tmp);
-	vmem_set_reclaim(kernel_arena, kmem_reclaim);
+	uma_set_limit(tmp);
 
 #ifdef DEBUG_MEMGUARD
 	/*
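For context on the kern_malloc.c change above: vm.kmem_map_size now reports
UMA's own byte count, and vm.kmem_map_free is computed as limit - size,
clamped at zero so the unsigned sysctl cannot underflow once usage passes the
soft limit. A minimal userland sketch that reads both values through the
standard sysctlbyname(3) interface (the program is illustrative only, not
part of the commit):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	u_long size, freebytes;
	size_t len;

	len = sizeof(size);
	if (sysctlbyname("vm.kmem_map_size", &size, &len, NULL, 0) == -1)
		return (1);
	len = sizeof(freebytes);
	if (sysctlbyname("vm.kmem_map_free", &freebytes, &len, NULL, 0) == -1)
		return (1);
	/* The kernel clamps the free value to 0 once size exceeds the limit. */
	printf("kmem used: %lu bytes, free: %lu bytes\n", size, freebytes);
	return (0);
}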
Modified: user/jeff/numa/sys/kern/subr_vmem.c
==============================================================================
--- user/jeff/numa/sys/kern/subr_vmem.c	Mon Nov 13 23:21:17 2017	(r325783)
+++ user/jeff/numa/sys/kern/subr_vmem.c	Mon Nov 13 23:33:07 2017	(r325784)
@@ -833,9 +833,6 @@ vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t
 	vmem_addr_t addr;
 	int error;
 
-	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
-		return ENOMEM;
-
 	if (vm->vm_importfn == NULL)
 		return EINVAL;
 
@@ -846,6 +843,9 @@ vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t
 	if (align != vm->vm_quantum_mask + 1)
 		size = (align * 2) + size;
 	size = roundup(size, vm->vm_import_quantum);
+
+	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
+		return ENOMEM;
 
 	/*
 	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
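The vmem_import() hunks above are an ordering fix: the limit check now runs
after size has been padded for alignment and rounded up to vm_import_quantum,
so the bytes charged against vm_limit are the bytes actually imported. A
standalone sketch of why the order matters; roundup() is the sys/param.h
macro, while the function names and numbers are hypothetical:

#include <stdio.h>

#define	roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

/* Old order: check the raw request, round afterwards. */
static int
admits_early(unsigned long limit, unsigned long cur, unsigned long size,
    unsigned long quantum)
{
	if (limit != 0 && limit < cur + size)
		return (0);
	size = roundup(size, quantum);	/* real import may now overshoot */
	return (1);
}

/* New order: round first, then check the real import size. */
static int
admits_late(unsigned long limit, unsigned long cur, unsigned long size,
    unsigned long quantum)
{
	size = roundup(size, quantum);
	return (!(limit != 0 && limit < cur + size));
}

int
main(void)
{
	/* 4 KB request, 64 KB import quantum, 60 KB of headroom. */
	printf("early: %d late: %d\n",
	    admits_early(1UL << 20, (1UL << 20) - 61440, 4096, 65536),
	    admits_late(1UL << 20, (1UL << 20) - 61440, 4096, 65536));
	return (0);
}

The early check admits the 4 KB request even though the rounded-up import is
64 KB, pushing past the limit; the late check sees the real size and rejects.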
Modified: user/jeff/numa/sys/vm/uma_core.c
==============================================================================
--- user/jeff/numa/sys/vm/uma_core.c	Mon Nov 13 23:21:17 2017	(r325783)
+++ user/jeff/numa/sys/vm/uma_core.c	Mon Nov 13 23:33:07 2017	(r325784)
@@ -145,6 +145,10 @@ static struct mtx uma_boot_pages_mtx;
 
 static struct sx uma_drain_lock;
 
+/* kmem soft limit. */
+static unsigned long uma_kmem_limit;
+static volatile unsigned long uma_kmem_total;
+
 /* Is the VM done starting up? */
 static int booted = 0;
 #define	UMA_STARTUP	1
@@ -283,6 +287,22 @@ static int zone_warnings = 1;
 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
     "Warn when UMA zones becomes full");
 
+/* Adjust bytes under management by UMA. */
+static inline void
+uma_total_dec(unsigned long size)
+{
+
+	atomic_subtract_long(&uma_kmem_total, size);
+}
+
+static inline void
+uma_total_inc(unsigned long size)
+{
+
+	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
+		uma_reclaim_wakeup();
+}
+
 /*
  * This routine checks to see whether or not it's safe to enable buckets.
  */
@@ -829,6 +849,7 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int star
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
+	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
 }
 
 /*
@@ -933,6 +954,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wai
 {
 	uma_alloc allocf;
 	uma_slab_t slab;
+	unsigned long size;
 	uint8_t *mem;
 	uint8_t flags;
 	int i;
@@ -943,6 +965,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wai
 	allocf = keg->uk_allocf;
 	KEG_UNLOCK(keg);
 
+	size = keg->uk_ppera * PAGE_SIZE;
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
@@ -966,13 +989,14 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wai
 		wait |= M_NODUMP;
 
 	/* zone is passed for legacy reasons. */
-	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
+	mem = allocf(zone, size, &flags, wait);
 	if (mem == NULL) {
 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
 		slab = NULL;
 		goto out;
 	}
+	uma_total_inc(size);
 
 	/* Point the slab into the allocated memory */
 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
@@ -3128,14 +3152,14 @@ uma_reclaim(void)
 	sx_xunlock(&uma_drain_lock);
 }
 
-static int uma_reclaim_needed;
+static volatile int uma_reclaim_needed;
 
 void
 uma_reclaim_wakeup(void)
 {
 
-	uma_reclaim_needed = 1;
-	wakeup(&uma_reclaim_needed);
+	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
+		wakeup(uma_reclaim);
 }
 
 void
@@ -3144,14 +3168,13 @@ uma_reclaim_worker(void *arg __unused)
 
 	sx_xlock(&uma_drain_lock);
 	for (;;) {
-		sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
-		    "umarcl", 0);
+		sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl", 0);
 		if (uma_reclaim_needed) {
-			uma_reclaim_needed = 0;
 			sx_xunlock(&uma_drain_lock);
 			EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
 			sx_xlock(&uma_drain_lock);
 			uma_reclaim_locked(true);
+			atomic_set_int(&uma_reclaim_needed, 0);
 		}
 	}
 }
@@ -3215,6 +3238,27 @@ uma_zero_item(void *item, uma_zone_t zone)
 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
 	} else
 		bzero(item, zone->uz_size);
+}
+
+unsigned long
+uma_limit(void)
+{
+
+	return uma_kmem_limit;
+}
+
+void
+uma_set_limit(unsigned long limit)
+{
+	uma_kmem_limit = limit;
+}
+
+
+unsigned long
+uma_size(void)
+{
+
+	return uma_kmem_total;
 }
 
 void

Modified: user/jeff/numa/sys/vm/uma_int.h
==============================================================================
--- user/jeff/numa/sys/vm/uma_int.h	Mon Nov 13 23:21:17 2017	(r325783)
+++ user/jeff/numa/sys/vm/uma_int.h	Mon Nov 13 23:33:07 2017	(r325784)
@@ -423,6 +423,13 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
 
 void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
     int wait);
 void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
+
+/* Set a global soft limit on UMA managed memory. */
+void uma_set_limit(unsigned long limit);
+unsigned long uma_limit(void);
+
+/* Return the amount of memory managed by UMA. */
+unsigned long uma_size(void);
 #endif /* _KERNEL */
 #endif /* VM_UMA_INT_H */
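Taken together, the commit replaces the vmem hard limit with a lock-free soft
limit inside UMA: every slab allocation adds its size to a global counter,
every free subtracts it, and crossing the limit wakes the reclaim worker
rather than failing the allocation. A self-contained userland sketch of the
same accounting idiom, with C11 atomics standing in for the kernel's
atomic_fetchadd_long/atomic_subtract_long and printf standing in for the real
wakeup path:

#include <stdatomic.h>
#include <stdio.h>

static unsigned long soft_limit = 1024;
static _Atomic unsigned long total;
static _Atomic int reclaim_needed;

static void
reclaim_wakeup(void)
{
	/* Only the transition from 0 to 1 issues the wakeup. */
	if (atomic_fetch_add(&reclaim_needed, 1) == 0)
		printf("wake reclaim worker\n");
}

static void
total_inc(unsigned long size)
{
	/* fetch-add returns the old value, like atomic_fetchadd_long. */
	if (atomic_fetch_add(&total, size) > soft_limit)
		reclaim_wakeup();
}

static void
total_dec(unsigned long size)
{
	atomic_fetch_sub(&total, size);
}

int
main(void)
{
	total_inc(512);		/* old value 0: no wakeup */
	total_inc(768);		/* old value 512, still <= limit: no wakeup */
	total_inc(64);		/* old value 1280 > limit: wakeup fires */
	total_inc(64);		/* over again, but a wakeup is already pending */
	total_dec(1408);	/* frees bring the counter back down */
	return (0);
}

Note that the test is against the value before the add, so the allocation
that crosses the limit still succeeds; enforcement is deferred to the reclaim
worker, which is what makes the limit soft.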