From owner-svn-src-stable-10@freebsd.org  Wed Jul 1 11:28:44 2015
Return-Path:
Delivered-To: svn-src-stable-10@mailman.ysv.freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:1900:2254:206a::19:1])
 by mailman.ysv.freebsd.org (Postfix) with ESMTP id 0BD6998D296;
 Wed, 1 Jul 2015 11:28:44 +0000 (UTC)
 (envelope-from avg@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:1900:2254:2068::e6a:0])
 (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
 (Client did not present a certificate)
 by mx1.freebsd.org (Postfix) with ESMTPS id F04EA2733;
 Wed, 1 Jul 2015 11:28:43 +0000 (UTC)
 (envelope-from avg@FreeBSD.org)
Received: from svn.freebsd.org ([127.0.1.70])
 by svn.freebsd.org (8.14.9/8.14.9) with ESMTP id t61BShGA064592;
 Wed, 1 Jul 2015 11:28:43 GMT
 (envelope-from avg@FreeBSD.org)
Received: (from avg@localhost)
 by svn.freebsd.org (8.14.9/8.14.9/Submit) id t61BShjs064590;
 Wed, 1 Jul 2015 11:28:43 GMT
 (envelope-from avg@FreeBSD.org)
Message-Id: <201507011128.t61BShjs064590@svn.freebsd.org>
X-Authentication-Warning: svn.freebsd.org: avg set sender to avg@FreeBSD.org using -f
From: Andriy Gapon
Date: Wed, 1 Jul 2015 11:28:43 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
 svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r285002 - stable/10/sys/dev/drm2/ttm
X-SVN-Group: stable-10
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
X-BeenThere: svn-src-stable-10@freebsd.org
X-Mailman-Version: 2.1.20
Precedence: list
List-Id: SVN commit messages for only the 10-stable src tree
List-Unsubscribe:
List-Archive:
List-Post:
List-Help:
List-Subscribe:
X-List-Received-Date: Wed, 01 Jul 2015 11:28:44 -0000

Author: avg
Date: Wed Jul 1 11:28:42 2015
New Revision: 285002
URL: https://svnweb.freebsd.org/changeset/base/285002

Log:
  MFC r278153,284416: ttm memory allocation improvements

  If vm_page_alloc_contig() fails in the ttm page allocators, do what
  other callers of vm_page_alloc_contig() do: retry after
  vm_pageout_grow_cache().

  ttm_vm_page_alloc: use vm_page_alloc for pages without the dma32 restriction

  This change re-organizes the code a little to extract the common pieces
  of ttm_alloc_new_pages() and ttm_get_pages() into dedicated functions.
  Also, for requests without address restrictions the regular
  vm_page_alloc() is used.
  Lastly, when vm_page_alloc_contig() fails we call VM_WAIT before calling
  vm_pageout_grow_cache() to ensure that there are enough free pages in
  the first place.

  Note: no MFC to stable/9 because it lacks vm_pageout_grow_cache().

Modified:
  stable/10/sys/dev/drm2/ttm/ttm_bo.c
  stable/10/sys/dev/drm2/ttm/ttm_page_alloc.c

Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/drm2/ttm/ttm_bo.c
==============================================================================
--- stable/10/sys/dev/drm2/ttm/ttm_bo.c	Wed Jul 1 10:47:13 2015	(r285001)
+++ stable/10/sys/dev/drm2/ttm/ttm_bo.c	Wed Jul 1 11:28:42 2015	(r285002)
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 
 #include 
+#include <vm/vm_pageout.h>

 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -1489,15 +1490,23 @@ int ttm_bo_global_init(struct drm_global
 	    container_of(ref, struct ttm_bo_global_ref, ref);
 	struct ttm_bo_global *glob = ref->object;
 	int ret;
+	int tries;

 	sx_init(&glob->device_list_mutex, "ttmdlm");
 	mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
 	glob->mem_glob = bo_ref->mem_glob;
+	tries = 0;
+retry:
 	glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
 	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ, 1, 0, VM_MAX_ADDRESS,
 	    PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);

 	if (unlikely(glob->dummy_read_page == NULL)) {
+		if (tries < 1) {
+			vm_pageout_grow_cache(tries, 0, VM_MAX_ADDRESS);
+			tries++;
+			goto retry;
+		}
 		ret = -ENOMEM;
 		goto out_no_drp;
 	}

Modified: stable/10/sys/dev/drm2/ttm/ttm_page_alloc.c
==============================================================================
--- stable/10/sys/dev/drm2/ttm/ttm_page_alloc.c	Wed Jul 1 10:47:13 2015	(r285001)
+++ stable/10/sys/dev/drm2/ttm/ttm_page_alloc.c	Wed Jul 1 11:28:42 2015	(r285002)
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 
 #include 
+#include <vm/vm_pageout.h>

 #define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(vm_page_t))
 #define SMALL_ALLOCATION	16
@@ -154,6 +155,66 @@ ttm_caching_state_to_vm(enum ttm_caching
 	panic("caching state %d\n", cstate);
 }

+static vm_page_t
+ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
+{
+	vm_page_t p;
+	int tries;
+
+	for (tries = 0; ; tries++) {
+		p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
+		    PAGE_SIZE, 0, memattr);
+		if (p != NULL || tries > 2)
+			return (p);
+
+		/*
+		 * Before growing the cache see if this is just a normal
+		 * memory shortage.
+		 */
+		VM_WAIT;
+		vm_pageout_grow_cache(tries, 0, 0xffffffff);
+	}
+}
+
+static vm_page_t
+ttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
+{
+	vm_page_t p;
+
+	while (1) {
+		p = vm_page_alloc(NULL, 0, req);
+		if (p != NULL)
+			break;
+		VM_WAIT;
+	}
+	pmap_page_set_memattr(p, memattr);
+	return (p);
+}
+
+static vm_page_t
+ttm_vm_page_alloc(int flags, enum ttm_caching_state cstate)
+{
+	vm_page_t p;
+	vm_memattr_t memattr;
+	int req;
+
+	memattr = ttm_caching_state_to_vm(cstate);
+	req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
+	if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
+		req |= VM_ALLOC_ZERO;
+
+	if ((flags & TTM_PAGE_FLAG_DMA32) != 0)
+		p = ttm_vm_page_alloc_dma32(req, memattr);
+	else
+		p = ttm_vm_page_alloc_any(req, memattr);
+
+	if (p != NULL) {
+		p->oflags &= ~VPO_UNMANAGED;
+		p->flags |= PG_FICTITIOUS;
+	}
+	return (p);
+}
+
 static void
 ttm_pool_kobj_release(struct ttm_pool_manager *m)
 {
@@ -472,23 +533,16 @@ static int ttm_alloc_new_pages(struct pg
 	vm_page_t *caching_array;
 	vm_page_t p;
 	int r = 0;
-	unsigned i, cpages, aflags;
+	unsigned i, cpages;
 	unsigned max_cpages = min(count,
 	    (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

-	aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
-	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
-	    VM_ALLOC_ZERO : 0);
-
 	/* allocate array for page caching change */
 	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
 	    M_WAITOK | M_ZERO);

 	for (i = 0, cpages = 0; i < count; ++i) {
-		p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
-		    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
-		    VM_MAX_ADDRESS, PAGE_SIZE, 0,
-		    ttm_caching_state_to_vm(cstate));
+		p = ttm_vm_page_alloc(ttm_alloc_flags, cstate);
 		if (!p) {
 			printf("[TTM] Unable to get page %u\n", i);

@@ -505,8 +559,6 @@ static int ttm_alloc_new_pages(struct pg
 			r = -ENOMEM;
 			goto out;
 		}
-		p->oflags &= ~VPO_UNMANAGED;
-		p->flags |= PG_FICTITIOUS;

 #ifdef CONFIG_HIGHMEM /* KIB: nop */
 		/* gfp flags of highmem page should never be dma32 so we
@@ -688,26 +740,18 @@ static int ttm_get_pages(vm_page_t *page
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct pglist plist;
 	vm_page_t p = NULL;
-	int gfp_flags, aflags;
+	int gfp_flags;
 	unsigned count;
 	int r;

-	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
-	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
-
 	/* No pool for cached pages */
 	if (pool == NULL) {
 		for (r = 0; r < npages; ++r) {
-			p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
-			    (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
-			    VM_MAX_ADDRESS, PAGE_SIZE,
-			    0, ttm_caching_state_to_vm(cstate));
+			p = ttm_vm_page_alloc(flags, cstate);
 			if (!p) {
 				printf("[TTM] Unable to allocate page\n");
 				return -ENOMEM;
 			}
-			p->oflags &= ~VPO_UNMANAGED;
-			p->flags |= PG_FICTITIOUS;
 			pages[r] = p;
 		}
 		return 0;
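
For readers who want the shape of the change without the kernel tree at hand, here is a
minimal standalone userland C sketch of the bounded retry-with-reclaim pattern that the
new ttm_vm_page_alloc_dma32() path follows: attempt the address-restricted allocation a
few times and, between attempts, give the system a chance to free memory.
try_alloc_contig() and reclaim_pages() are hypothetical stand-ins for
vm_page_alloc_contig() and VM_WAIT + vm_pageout_grow_cache(); this is illustrative
only, not kernel code.

/*
 * Userland sketch of the bounded retry-with-reclaim pattern.
 * The helpers below are stand-ins, not real kernel interfaces.
 */
#include <stdio.h>
#include <stdlib.h>

static void *
try_alloc_contig(size_t size)
{
	/* Stand-in for vm_page_alloc_contig(); may fail under pressure. */
	return (malloc(size));
}

static void
reclaim_pages(int pass)
{
	/* Stand-in for VM_WAIT followed by vm_pageout_grow_cache(pass, ...). */
	printf("reclaim pass %d\n", pass);
}

static void *
alloc_with_retry(size_t size)
{
	void *p;
	int tries;

	for (tries = 0; ; tries++) {
		p = try_alloc_contig(size);
		if (p != NULL || tries > 2)
			return (p);	/* success, or give up after three tries */
		/* See whether this is just a transient memory shortage. */
		reclaim_pages(tries);
	}
}

int
main(void)
{
	void *p;

	p = alloc_with_retry(4096);
	printf("allocation %s\n", p != NULL ? "succeeded" : "failed");
	free(p);
	return (0);
}

Capping the number of retries keeps the allocation path from spinning forever when the
restricted range simply cannot be satisfied, mirroring the "tries > 2" cut-off in the
diff above.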