Date: Mon, 22 Oct 2018 16:16:42 +0000 (UTC) From: Mark Johnston <markj@FreeBSD.org> To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r339599 - in head/sys: kern sys Message-ID: <201810221616.w9MGGgOe065009@repo.freebsd.org>
next in thread | raw e-mail | index | archive | help
Author: markj Date: Mon Oct 22 16:16:42 2018 New Revision: 339599 URL: https://svnweb.freebsd.org/changeset/base/339599 Log: Don't import 0 into vmem quantum caches. vmem uses UMA cache zones to implement the quantum cache. Since uma_zalloc() returns 0 (NULL) to signal an allocation failure, UMA should not be used to cache resource 0. Fix this by ensuring that 0 is never cached in UMA in the first place, and by modifying vmem_alloc() to fall back to a search of the free lists if the cache is depleted, rather than blocking in qc_import(). Reported by and discussed with: Brett Gutstein <bgutstein@rice.edu> Reviewed by: alc MFC after: 2 weeks Differential Revision: https://reviews.freebsd.org/D17483 Modified: head/sys/kern/subr_vmem.c head/sys/sys/vmem.h Modified: head/sys/kern/subr_vmem.c ============================================================================== --- head/sys/kern/subr_vmem.c Mon Oct 22 16:09:01 2018 (r339598) +++ head/sys/kern/subr_vmem.c Mon Oct 22 16:16:42 2018 (r339599) @@ -504,6 +504,9 @@ bt_insfree(vmem_t *vm, bt_t *bt) /* * Import from the arena into the quantum cache in UMA. + * + * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate + * failure, so UMA can't be used to cache a resource with value 0. */ static int qc_import(void *arg, void **store, int cnt, int domain, int flags) @@ -512,19 +515,16 @@ qc_import(void *arg, void **store, int cnt, int domain vmem_addr_t addr; int i; + KASSERT((flags & M_WAITOK) == 0, ("blocking allocation")); + qc = arg; - if ((flags & VMEM_FITMASK) == 0) - flags |= M_BESTFIT; for (i = 0; i < cnt; i++) { if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0, - VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0) + VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0) break; store[i] = (void *)addr; - /* Only guarantee one allocation. */
- flags &= ~M_WAITOK; - flags |= M_NOWAIT; } - return i; + return (i); } /* @@ -1123,15 +1123,20 @@ vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vm WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc"); if (size <= vm->vm_qcache_max) { + /* + * Resource 0 cannot be cached, so avoid a blocking allocation + * in qc_import() and give the vmem_xalloc() call below a chance + * to return 0. + */ qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift]; - *addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags); - if (*addrp == 0) - return (ENOMEM); - return (0); + *addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, + (flags & ~M_WAITOK) | M_NOWAIT); + if (__predict_true(*addrp != 0)) + return (0); } - return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, - flags, addrp); + return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, + flags, addrp)); } int @@ -1263,7 +1268,8 @@ vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t si qcache_t *qc; MPASS(size > 0); - if (size <= vm->vm_qcache_max) { + if (size <= vm->vm_qcache_max && + __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) { qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift]; uma_zfree(qc->qc_cache, (void *)addr); } else Modified: head/sys/sys/vmem.h ============================================================================== --- head/sys/sys/vmem.h Mon Oct 22 16:09:01 2018 (r339598) +++ head/sys/sys/vmem.h Mon Oct 22 16:16:42 2018 (r339599) @@ -41,8 +41,9 @@ typedef struct vmem vmem_t; typedef uintptr_t vmem_addr_t; typedef size_t vmem_size_t; -#define VMEM_ADDR_MIN 0 -#define VMEM_ADDR_MAX (~(vmem_addr_t)0) +#define VMEM_ADDR_MIN 0 +#define VMEM_ADDR_QCACHE_MIN 1 +#define VMEM_ADDR_MAX (~(vmem_addr_t)0) typedef int (vmem_import_t)(void *, vmem_size_t, int, vmem_addr_t *); typedef void (vmem_release_t)(void *, vmem_addr_t, vmem_size_t);
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201810221616.w9MGGgOe065009>