From: Alan Cox
Date: Sat, 14 Jul 2012 18:10:44 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r238452 - head/sys/vm
Message-Id: <201207141810.q6EIAipC099899@svn.freebsd.org>
List-Id: SVN commit messages for the src tree for head/-current

Author: alc
Date: Sat Jul 14 18:10:44 2012
New Revision: 238452
URL: http://svn.freebsd.org/changeset/base/238452

Log:
  Move kmem_alloc_{attr,contig}() to vm/vm_kern.c, where similarly named
  functions reside.  Correct the comment describing kmem_alloc_contig().

Modified:
  head/sys/vm/vm_contig.c
  head/sys/vm/vm_kern.c

Modified: head/sys/vm/vm_contig.c
==============================================================================
--- head/sys/vm/vm_contig.c	Sat Jul 14 16:34:17 2012	(r238451)
+++ head/sys/vm/vm_contig.c	Sat Jul 14 18:10:44 2012	(r238452)
@@ -75,14 +75,11 @@ __FBSDID("$FreeBSD$");
 
 #include
 #include
-#include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
 
 static int
 vm_contig_launder_page(vm_page_t m, vm_page_t *next)
@@ -208,144 +205,3 @@ again:
 	}
 	vm_page_unlock_queues();
 }
-
-/*
- * Allocates a region from the kernel address map and pages within the
- * specified physical address range to the kernel object, creates a wired
- * mapping from the region to these pages, and returns the region's starting
- * virtual address.  The allocated pages are not necessarily physically
- * contiguous.  If M_ZERO is specified through the given flags, then the pages
- * are zeroed before they are mapped.
- */
-vm_offset_t
-kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
-    vm_paddr_t high, vm_memattr_t memattr)
-{
-	vm_object_t object = kernel_object;
-	vm_offset_t addr;
-	vm_ooffset_t end_offset, offset;
-	vm_page_t m;
-	int pflags, tries;
-
-	size = round_page(size);
-	vm_map_lock(map);
-	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
-		vm_map_unlock(map);
-		return (0);
-	}
-	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	vm_object_reference(object);
-	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
-	    VM_PROT_ALL, 0);
-	if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
-		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
-	else
-		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
-	if (flags & M_ZERO)
-		pflags |= VM_ALLOC_ZERO;
-	VM_OBJECT_LOCK(object);
-	end_offset = offset + size;
-	for (; offset < end_offset; offset += PAGE_SIZE) {
-		tries = 0;
-retry:
-		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
-		    low, high, PAGE_SIZE, 0, memattr);
-		if (m == NULL) {
-			VM_OBJECT_UNLOCK(object);
-			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
-				vm_map_unlock(map);
-				vm_contig_grow_cache(tries, low, high);
-				vm_map_lock(map);
-				VM_OBJECT_LOCK(object);
-				tries++;
-				goto retry;
-			}
-			/*
-			 * Since the pages that were allocated by any previous
-			 * iterations of this loop are not busy, they can be
-			 * freed by vm_object_page_remove(), which is called
-			 * by vm_map_delete().
-			 */
-			vm_map_delete(map, addr, addr + size);
-			vm_map_unlock(map);
-			return (0);
-		}
-		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
-	}
-	VM_OBJECT_UNLOCK(object);
-	vm_map_unlock(map);
-	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
-	    VM_MAP_WIRE_NOHOLES);
-	return (addr);
-}
-
-/*
- * Allocates a region from the kernel address map, inserts the
- * given physically contiguous pages into the kernel object,
- * creates a wired mapping from the region to the pages, and
- * returns the region's starting virtual address.  If M_ZERO is
- * specified through the given flags, then the pages are zeroed
- * before they are mapped.
- */
-vm_offset_t
-kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
-    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
-    vm_memattr_t memattr)
-{
-	vm_object_t object = kernel_object;
-	vm_offset_t addr;
-	vm_ooffset_t offset;
-	vm_page_t end_m, m;
-	int pflags, tries;
-
-	size = round_page(size);
-	vm_map_lock(map);
-	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
-		vm_map_unlock(map);
-		return (0);
-	}
-	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	vm_object_reference(object);
-	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
-	    VM_PROT_ALL, 0);
-	if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
-		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
-	else
-		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
-	if (flags & M_ZERO)
-		pflags |= VM_ALLOC_ZERO;
-	if (flags & M_NODUMP)
-		pflags |= VM_ALLOC_NODUMP;
-	VM_OBJECT_LOCK(object);
-	tries = 0;
-retry:
-	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
-	    atop(size), low, high, alignment, boundary, memattr);
-	if (m == NULL) {
-		VM_OBJECT_UNLOCK(object);
-		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
-			vm_map_unlock(map);
-			vm_contig_grow_cache(tries, low, high);
-			vm_map_lock(map);
-			VM_OBJECT_LOCK(object);
-			tries++;
-			goto retry;
-		}
-		vm_map_delete(map, addr, addr + size);
-		vm_map_unlock(map);
-		return (0);
-	}
-	end_m = m + atop(size);
-	for (; m < end_m; m++) {
-		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
-	}
-	VM_OBJECT_UNLOCK(object);
-	vm_map_unlock(map);
-	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
-	    VM_MAP_WIRE_NOHOLES);
-	return (addr);
-}

Modified: head/sys/vm/vm_kern.c
==============================================================================
--- head/sys/vm/vm_kern.c	Sat Jul 14 16:34:17 2012	(r238451)
+++ head/sys/vm/vm_kern.c	Sat Jul 14 18:10:44 2012	(r238452)
@@ -195,6 +195,148 @@ kmem_alloc(map, size)
 }
 
 /*
+ * Allocates a region from the kernel address map and physical pages
+ * within the specified address range to the kernel object.  Creates a
+ * wired mapping from this region to these pages, and returns the
+ * region's starting virtual address.  The allocated pages are not
+ * necessarily physically contiguous.  If M_ZERO is specified through the
+ * given flags, then the pages are zeroed before they are mapped.
+ */
+vm_offset_t
+kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+    vm_paddr_t high, vm_memattr_t memattr)
+{
+	vm_object_t object = kernel_object;
+	vm_offset_t addr;
+	vm_ooffset_t end_offset, offset;
+	vm_page_t m;
+	int pflags, tries;
+
+	size = round_page(size);
+	vm_map_lock(map);
+	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
+		vm_map_unlock(map);
+		return (0);
+	}
+	offset = addr - VM_MIN_KERNEL_ADDRESS;
+	vm_object_reference(object);
+	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
+	    VM_PROT_ALL, 0);
+	if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
+		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
+	else
+		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
+	if (flags & M_ZERO)
+		pflags |= VM_ALLOC_ZERO;
+	VM_OBJECT_LOCK(object);
+	end_offset = offset + size;
+	for (; offset < end_offset; offset += PAGE_SIZE) {
+		tries = 0;
+retry:
+		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
+		    low, high, PAGE_SIZE, 0, memattr);
+		if (m == NULL) {
+			VM_OBJECT_UNLOCK(object);
+			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
+				vm_map_unlock(map);
+				vm_contig_grow_cache(tries, low, high);
+				vm_map_lock(map);
+				VM_OBJECT_LOCK(object);
+				tries++;
+				goto retry;
+			}
+
+			/*
+			 * Since the pages that were allocated by any previous
+			 * iterations of this loop are not busy, they can be
+			 * freed by vm_object_page_remove(), which is called
+			 * by vm_map_delete().
+			 */
+			vm_map_delete(map, addr, addr + size);
+			vm_map_unlock(map);
+			return (0);
+		}
+		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
+			pmap_zero_page(m);
+		m->valid = VM_PAGE_BITS_ALL;
+	}
+	VM_OBJECT_UNLOCK(object);
+	vm_map_unlock(map);
+	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
+	    VM_MAP_WIRE_NOHOLES);
+	return (addr);
+}
+
+/*
+ * Allocates a region from the kernel address map and physically
+ * contiguous pages within the specified address range to the kernel
+ * object.  Creates a wired mapping from this region to these pages, and
+ * returns the region's starting virtual address.  If M_ZERO is specified
+ * through the given flags, then the pages are zeroed before they are
+ * mapped.
+ */
+vm_offset_t
+kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+    vm_memattr_t memattr)
+{
+	vm_object_t object = kernel_object;
+	vm_offset_t addr;
+	vm_ooffset_t offset;
+	vm_page_t end_m, m;
+	int pflags, tries;
+
+	size = round_page(size);
+	vm_map_lock(map);
+	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
+		vm_map_unlock(map);
+		return (0);
+	}
+	offset = addr - VM_MIN_KERNEL_ADDRESS;
+	vm_object_reference(object);
+	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
+	    VM_PROT_ALL, 0);
+	if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
+		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
+	else
+		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
+	if (flags & M_ZERO)
+		pflags |= VM_ALLOC_ZERO;
+	if (flags & M_NODUMP)
+		pflags |= VM_ALLOC_NODUMP;
+	VM_OBJECT_LOCK(object);
+	tries = 0;
+retry:
+	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
+	    atop(size), low, high, alignment, boundary, memattr);
+	if (m == NULL) {
+		VM_OBJECT_UNLOCK(object);
+		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
+			vm_map_unlock(map);
+			vm_contig_grow_cache(tries, low, high);
+			vm_map_lock(map);
+			VM_OBJECT_LOCK(object);
+			tries++;
+			goto retry;
+		}
+		vm_map_delete(map, addr, addr + size);
+		vm_map_unlock(map);
+		return (0);
+	}
+	end_m = m + atop(size);
+	for (; m < end_m; m++) {
+		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
+			pmap_zero_page(m);
+		m->valid = VM_PAGE_BITS_ALL;
+	}
+	VM_OBJECT_UNLOCK(object);
+	vm_map_unlock(map);
+	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
+	    VM_MAP_WIRE_NOHOLES);
+	return (addr);
+}
+
+/*
  *	kmem_free:
  *
  *	Release a region of kernel virtual memory allocated
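
For reference, a minimal usage sketch of the interfaces being moved, written against the prototypes shown in this diff, where kmem_alloc_contig() and kmem_free() both take an explicit vm_map_t.  This sketch is not part of the commit; the names example_dma_alloc(), example_dma_free(), and EXAMPLE_DMA_SIZE are illustrative only and do not exist in the tree.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#define	EXAMPLE_DMA_SIZE	(64 * 1024)	/* hypothetical buffer size */

/*
 * Allocate a wired, zeroed, physically contiguous buffer from the low
 * 4 GB of physical memory, aligned to PAGE_SIZE, with no boundary
 * restriction and the default memory attribute.  Returns the kernel
 * virtual address, or 0 if the request could not be satisfied.
 */
static vm_offset_t
example_dma_alloc(void)
{

	return (kmem_alloc_contig(kernel_map, EXAMPLE_DMA_SIZE,
	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0,
	    VM_MEMATTR_DEFAULT));
}

/* Release a buffer obtained from example_dma_alloc(). */
static void
example_dma_free(vm_offset_t addr)
{

	kmem_free(kernel_map, addr, EXAMPLE_DMA_SIZE);
}

kmem_alloc_attr() is called the same way, minus the alignment and boundary arguments, when the pages only need to fall within a physical address range and need not be physically contiguous.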