Date: Tue, 13 Apr 2021 21:42:37 GMT
From: Mark Johnston <markj@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject: git: 2b914b85ddf4 - main - kmem: Add KASAN state transitions
Message-ID: <202104132142.13DLgb3C099211@gitrepo.freebsd.org>
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=2b914b85ddf4c25d112b2639bbbb7618641872b4

commit 2b914b85ddf4c25d112b2639bbbb7618641872b4
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2021-04-13 21:40:01 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2021-04-13 21:42:21 +0000

    kmem: Add KASAN state transitions

    Memory allocated with kmem_* is unmapped upon free, so KASAN doesn't
    provide a lot of benefit, but since allocations are always a multiple
    of the page size we can create a redzone when the allocation request
    size is not a multiple of the page size.

    MFC after:      2 weeks
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D29458
---
 sys/vm/vm_kern.c | 36 ++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)

diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 77d9a4105862..a69493d1323f 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -71,12 +71,13 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/kernel.h>		/* for ticks and hz */
+#include <sys/asan.h>
 #include <sys/domainset.h>
 #include <sys/eventhandler.h>
+#include <sys/kernel.h>
 #include <sys/lock.h>
-#include <sys/proc.h>
 #include <sys/malloc.h>
+#include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sysctl.h>
 #include <sys/vmem.h>
@@ -220,25 +221,26 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	vm_object_t object;
 	vm_offset_t addr, i, offset;
 	vm_page_t m;
+	vm_size_t asize;
 	int pflags;
 	vm_prot_t prot;
 
 	object = kernel_object;
-	size = round_page(size);
+	asize = round_page(size);
 	vmem = vm_dom[domain].vmd_kernel_arena;
-	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
+	if (vmem_alloc(vmem, asize, M_BESTFIT | flags, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
 	VM_OBJECT_WLOCK(object);
-	for (i = 0; i < size; i += PAGE_SIZE) {
+	for (i = 0; i < asize; i += PAGE_SIZE) {
 		m = kmem_alloc_contig_pages(object, atop(offset + i),
 		    domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
 		if (m == NULL) {
 			VM_OBJECT_WUNLOCK(object);
 			kmem_unback(object, addr, i);
-			vmem_free(vmem, addr, size);
+			vmem_free(vmem, addr, asize);
 			return (0);
 		}
 		KASSERT(vm_page_domain(m) == domain,
@@ -251,6 +253,7 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
 	VM_OBJECT_WUNLOCK(object);
+	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
 	return (addr);
 }
 
@@ -299,23 +302,24 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	vm_object_t object;
 	vm_offset_t addr, offset, tmp;
 	vm_page_t end_m, m;
+	vm_size_t asize;
 	u_long npages;
 	int pflags;
 
 	object = kernel_object;
-	size = round_page(size);
+	asize = round_page(size);
 	vmem = vm_dom[domain].vmd_kernel_arena;
-	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
+	if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
-	npages = atop(size);
+	npages = atop(asize);
 	VM_OBJECT_WLOCK(object);
 	m = kmem_alloc_contig_pages(object, atop(offset), domain, pflags,
 	    npages, low, high, alignment, boundary, memattr);
 	if (m == NULL) {
 		VM_OBJECT_WUNLOCK(object);
-		vmem_free(vmem, addr, size);
+		vmem_free(vmem, addr, asize);
 		return (0);
 	}
 	KASSERT(vm_page_domain(m) == domain,
@@ -332,6 +336,7 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 		tmp += PAGE_SIZE;
 	}
 	VM_OBJECT_WUNLOCK(object);
+	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
 	return (addr);
 }
 
@@ -407,21 +412,23 @@ kmem_malloc_domain(int domain, vm_size_t size, int flags)
 {
 	vmem_t *arena;
 	vm_offset_t addr;
+	vm_size_t asize;
 	int rv;
 
 	if (__predict_true((flags & M_EXEC) == 0))
 		arena = vm_dom[domain].vmd_kernel_arena;
 	else
 		arena = vm_dom[domain].vmd_kernel_rwx_arena;
-	size = round_page(size);
-	if (vmem_alloc(arena, size, flags | M_BESTFIT, &addr))
+	asize = round_page(size);
+	if (vmem_alloc(arena, asize, flags | M_BESTFIT, &addr))
 		return (0);
 
-	rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
+	rv = kmem_back_domain(domain, kernel_object, addr, asize, flags);
 	if (rv != KERN_SUCCESS) {
-		vmem_free(arena, addr, size);
+		vmem_free(arena, addr, asize);
 		return (0);
 	}
+	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
 	return (addr);
 }
 
@@ -613,6 +620,7 @@ kmem_free(vm_offset_t addr, vm_size_t size)
 	struct vmem *arena;
 
 	size = round_page(size);
+	kasan_mark((void *)addr, size, size, 0);
 	arena = _kmem_unback(kernel_object, addr, size);
 	if (arena != NULL)
 		vmem_free(arena, addr, size);
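
To make the layout behind these kasan_mark() calls concrete, here is a minimal
userspace sketch (not kernel code: PAGE_SIZE, round_up_to_page(), and the
5000-byte request below are illustrative assumptions, not FreeBSD kernel APIs).
The point is that the kmem_* functions map asize = round_page(size) bytes but
only the first size bytes belong to the caller, so the tail of the last page
can be poisoned as a redzone:

#include <stdio.h>

#define	PAGE_SIZE	4096UL	/* illustrative; the real value is per-architecture */

/* Round a request up to a page boundary, like round_page() in the patch. */
static unsigned long
round_up_to_page(unsigned long size)
{
	return ((size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
}

int
main(void)
{
	unsigned long size = 5000;	/* hypothetical sub-page-multiple request */
	unsigned long asize = round_up_to_page(size);

	/*
	 * On allocation the patch marks [addr, addr + size) accessible and
	 * [addr + size, addr + asize) as a redzone; kmem_free() marks the
	 * whole rounded region accessible again before unmapping it.
	 */
	printf("requested %lu bytes, mapped %lu bytes\n", size, asize);
	printf("accessible: offsets [0, %lu)\n", size);
	printf("redzone:    offsets [%lu, %lu), %lu poisoned bytes\n",
	    size, asize, asize - size);
	return (0);
}

When the request is already a multiple of the page size, asize equals size and
there is no redzone, which is why the commit message describes the benefit as
limited: use-after-free of kmem_* memory is still caught mainly by the
unmapping itself rather than by KASAN.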