From: Jeff Roberson <jeff@FreeBSD.org>
Date: Thu, 29 Jul 2010 02:12:23 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r210586 - projects/ofed/head/sys/ofed/include/linux
Message-Id: <201007290212.o6T2CN20099343@svn.freebsd.org>

Author: jeff
Date: Thu Jul 29 02:12:22 2010
New Revision: 210586

URL: http://svn.freebsd.org/changeset/base/210586

Log:
   - Refine various page allocation methods.  They all essentially override
     the page object pointer to cache the virtual address after allocation.
     This same trick is used in UMA to store the slab address, so it should
     be safe.

  Sponsored by:	Isilon Systems, iX Systems, and Panasas.
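The pattern reduces to the sketch below (illustrative only: _sketch_get_page()
and _sketch_free_page() are made-up names, and the real versions are in the
diff that follows).  While one of these wrappers owns a page, its object
pointer has no other user, so it can double as storage for the page's kernel
virtual address, with PG_KVA marking pages that carry one:

static inline unsigned long
_sketch_get_page(gfp_t mask)
{
	vm_page_t m;
	vm_offset_t p;

	p = kmem_malloc(kmem_map, PAGE_SIZE, mask);
	if (p) {
		m = virt_to_page(p);	/* KVA -> vm_page lookup */
		m->flags |= PG_KVA;	/* mark: object now holds a KVA */
		m->object = (void *)p;	/* cache the virtual address */
	}
	return (p);
}

static inline void
_sketch_free_page(vm_page_t m)
{
	vm_offset_t p;

	p = (vm_offset_t)m->object;	/* recover the cached KVA */
	m->flags &= ~PG_KVA;
	m->object = kmem_object;	/* restore the owning object */
	kmem_free(kmem_map, p, PAGE_SIZE);
}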
Modified:
  projects/ofed/head/sys/ofed/include/linux/gfp.h

Modified: projects/ofed/head/sys/ofed/include/linux/gfp.h
==============================================================================
--- projects/ofed/head/sys/ofed/include/linux/gfp.h	Thu Jul 29 02:11:31 2010	(r210585)
+++ projects/ofed/head/sys/ofed/include/linux/gfp.h	Thu Jul 29 02:12:22 2010	(r210586)
@@ -51,12 +51,12 @@
 #define	GFP_IOFS	M_NOWAIT
 
 static inline unsigned long
-get_zeroed_page(gfp_t mask)
+_get_page(gfp_t mask)
 {
 	vm_page_t m;
 	vm_offset_t p;
 
-	p = kmem_malloc(kernel_map, PAGE_SIZE, mask | M_ZERO);
+	p = kmem_malloc(kmem_map, PAGE_SIZE, mask | M_ZERO);
 	if (p) {
 		m = virt_to_page(p);
 		m->flags |= PG_KVA;
@@ -65,6 +65,10 @@ get_zeroed_page(gfp_t mask)
 	return (p);
 }
 
+#define	get_zeroed_page(mask)	_get_page((mask) | M_ZERO)
+#define	alloc_page(mask)	virt_to_page(_get_page((mask)))
+#define	__get_free_page(mask)	_get_page((mask))
+
 static inline void
 free_page(unsigned long page)
 {
@@ -73,29 +77,50 @@ free_page(unsigned long page)
 	m = virt_to_page(page);
 	if (m->flags & PG_KVA) {
 		m->flags &= ~PG_KVA;
-		m->object = kernel_object;
+		m->object = kmem_object;
 	}
-	kmem_free(kernel_map, page, PAGE_SIZE);
+	kmem_free(kmem_map, page, PAGE_SIZE);
+}
+
+static inline void
+__free_page(struct page *m)
+{
+	void *p;
+
+	if ((m->flags & PG_KVA) == 0)
+		panic("__free_page: Freed page %p not allocated via wrappers.",
+		    m);
+	p = m->object;
+	m->flags &= ~PG_KVA;
+	m->object = kmem_object;
+	kmem_free(kmem_map, (vm_offset_t)p, PAGE_SIZE);
 }
 
 static inline void
 __free_pages(void *p, unsigned int order)
 {
+	unsigned long start;
 	unsigned long page;
 	vm_page_t m;
 	size_t size;
 
-	size = order << PAGE_SHIFT;
-	for (page = (uintptr_t)p; p < (uintptr_t)p + size; page += PAGE_SIZE) {
+	size = PAGE_SIZE << order;
+	start = (unsigned long)p;
+	for (page = start; page < start + size; page += PAGE_SIZE) {
 		m = virt_to_page(page);
 		if (m->flags & PG_KVA) {
 			m->flags &= ~PG_KVA;
-			m->object = kernel_object;
+			m->object = kmem_object;
 		}
 	}
-	kmem_free(kernel_map, p, size);
+	kmem_free(kmem_map, (vm_offset_t)p, size);
 }
 
+/*
+ * Alloc pages allocates directly from the buddy allocator on linux so
+ * order specifies a power of two bucket of pages and the results
+ * are expected to be aligned on the size as well.
+ */
 static inline struct page *
 alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
@@ -104,9 +129,9 @@ alloc_pages(gfp_t gfp_mask, unsigned int
 	vm_page_t m;
 	size_t size;
 
-	size = order << PAGE_SHIFT;
-	start = kmem_alloc_contig(kernel_map, size, gfp_mask, 0, -1,
-	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+	size = PAGE_SIZE << order;
+	start = kmem_alloc_contig(kmem_map, size, gfp_mask, 0, -1,
+	    size, 0, VM_MEMATTR_DEFAULT);
 	if (start == 0)
 		return (NULL);
 	for (page = start; page < start + size; page += PAGE_SIZE) {
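Two behavioral points fall out of the alloc_pages() hunks, sketched below with
illustrative code that is not part of the commit (the function name is made up,
and GFP_KERNEL is assumed to be defined elsewhere in these compat headers):
the size computation now treats order as a power-of-two exponent rather than a
page count, and the same size is now passed to kmem_alloc_contig() as the
alignment.

static struct page *
example_order3(void)
{
	struct page *m;

	/*
	 * Linux "order" is log2 of the page count, so order = 3 means
	 * 2^3 = 8 pages.  The old formula (order << PAGE_SHIFT) would
	 * have computed only 3 pages' worth here; the new one
	 * (PAGE_SIZE << order) gives the full 8.  Because that value is
	 * also the alignment argument, the region comes back aligned to
	 * its own size, as Linux buddy-allocator callers expect.
	 */
	m = alloc_pages(GFP_KERNEL, 3);
	return (m);
}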