Date: Tue, 1 Jul 2014 06:50:35 +0000 (UTC)
From: Scott Long <scottl@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r268076 - stable/10/sys/x86/x86
Message-ID: <201407010650.s616oZTJ092028@svn.freebsd.org>
Author: scottl
Date: Tue Jul 1 06:50:35 2014
New Revision: 268076
URL: http://svnweb.freebsd.org/changeset/base/268076

Log:
  Merge r266746, 266775:

  Now that there are separate back-end implementations of busdma, the
  bounce implementation shouldn't steal flags from the common front-end.
  Move those flags to the back-end.

  Eliminate the fake contig_dmamap and replace it with a new flag,
  BUS_DMA_KMEM_ALLOC.  They serve the same purpose, but using the flag
  means that the map can be NULL again, which in turn enables significant
  optimizations for the common case of no bouncing.

  Obtained from:	Netflix, Inc.

Modified:
  stable/10/sys/x86/x86/busdma_bounce.c

Modified: stable/10/sys/x86/x86/busdma_bounce.c
==============================================================================
--- stable/10/sys/x86/x86/busdma_bounce.c	Tue Jul 1 06:43:15 2014	(r268075)
+++ stable/10/sys/x86/x86/busdma_bounce.c	Tue Jul 1 06:50:35 2014	(r268076)
@@ -58,16 +58,21 @@ __FBSDID("$FreeBSD$");
 #else
 #define MAX_BPAGES 8192
 #endif
-#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
-#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
+
+enum {
+	BUS_DMA_COULD_BOUNCE	= 0x01,
+	BUS_DMA_MIN_ALLOC_COMP	= 0x02,
+	BUS_DMA_KMEM_ALLOC	= 0x04,
+};
 
 struct bounce_zone;
 
 struct bus_dma_tag {
 	struct bus_dma_tag_common common;
-	int		map_count;
-	bus_dma_segment_t *segments;
-	struct bounce_zone *bounce_zone;
+	int			map_count;
+	int			bounce_flags;
+	bus_dma_segment_t	*segments;
+	struct bounce_zone	*bounce_zone;
 };
 
 struct bounce_page {
@@ -121,7 +126,7 @@ struct bus_dmamap {
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
 
-static struct bus_dmamap nobounce_dmamap, contig_dmamap;
+static struct bus_dmamap nobounce_dmamap;
 
 static void init_bounce_pages(void *dummy);
 static int alloc_bounce_zone(bus_dma_tag_t dmat);
@@ -173,14 +178,14 @@ bounce_bus_dma_tag_create(bus_dma_tag_t 
 		newtag->segments = NULL;
 
 	if (parent != NULL && ((newtag->common.filter != NULL) ||
-	    ((parent->common.flags & BUS_DMA_COULD_BOUNCE) != 0)))
-		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
+	    ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)))
+		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
 
 	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
 	    newtag->common.alignment > 1)
-		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
+		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
 
-	if (((newtag->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+	if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 	    (flags & BUS_DMA_ALLOCNOW) != 0) {
 		struct bounce_zone *bz;
 
@@ -201,7 +206,7 @@ bounce_bus_dma_tag_create(bus_dma_tag_t 
 				error = ENOMEM;
 		}
 		/* Performed initial allocation */
-		newtag->common.flags |= BUS_DMA_MIN_ALLOC_COMP;
+		newtag->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;
 	} else
 		error = 0;
 
@@ -279,7 +284,7 @@ bounce_bus_dmamap_create(bus_dma_tag_t d
 	 * exclusion region, a data alignment that is stricter than 1, and/or
 	 * an active address boundary.
 	 */
-	if (dmat->common.flags & BUS_DMA_COULD_BOUNCE) {
+	if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) {
 		/* Must bounce */
 		if (dmat->bounce_zone == NULL) {
 			if ((error = alloc_bounce_zone(dmat)) != 0)
@@ -307,17 +312,17 @@ bounce_bus_dmamap_create(bus_dma_tag_t d
 		else
 			maxpages = MIN(MAX_BPAGES, Maxmem -
 			    atop(dmat->common.lowaddr));
-		if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
+		if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
 		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
 			pages = MAX(atop(dmat->common.maxsize), 1);
 			pages = MIN(maxpages - bz->total_bpages, pages);
 			pages = MAX(pages, 1);
 			if (alloc_bounce_pages(dmat, pages) < pages)
 				error = ENOMEM;
-			if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP)
+			if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
 			    == 0) {
 				if (error == 0) {
-					dmat->common.flags |=
+					dmat->bounce_flags |=
 					    BUS_DMA_MIN_ALLOC_COMP;
 				}
 			} else
@@ -342,7 +347,7 @@ static int
 bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
 {
 
-	if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) {
+	if (map != NULL && map != &nobounce_dmamap) {
 		if (STAILQ_FIRST(&map->bpages) != NULL) {
 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
 			    __func__, dmat, EBUSY);
@@ -416,13 +421,13 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dm
 		*vaddr = (void *)kmem_alloc_attr(kernel_arena,
 		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
 		    attr);
-		*mapp = &contig_dmamap;
+		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
 	} else {
 		*vaddr = (void *)kmem_alloc_contig(kernel_arena,
 		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
 		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
 		    dmat->common.boundary, attr);
-		*mapp = &contig_dmamap;
+		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
 	}
 	if (*vaddr == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@@ -445,18 +450,18 @@ bounce_bus_dmamem_free(bus_dma_tag_t dma
 {
 	/*
 	 * dmamem does not need to be bounced, so the map should be
-	 * NULL if malloc() was used and contig_dmamap if
-	 * kmem_alloc_contig() was used.
+	 * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
+	 * was used and set if kmem_alloc_contig() was used.
 	 */
-	if (!(map == NULL || map == &contig_dmamap))
+	if (map != NULL)
 		panic("bus_dmamem_free: Invalid map freed\n");
-	if (map == NULL)
+	if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
 		free(vaddr, M_DEVBUF);
 	else
 		kmem_free(kernel_arena, (vm_offset_t)vaddr,
 		    dmat->common.maxsize);
 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
-	    dmat->common.flags);
+	    dmat->bounce_flags);
 }
 
 static void
@@ -608,13 +613,13 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_
 	bus_addr_t curaddr;
 	int error;
 
-	if (map == NULL || map == &contig_dmamap)
+	if (map == NULL)
 		map = &nobounce_dmamap;
 
 	if (segs == NULL)
 		segs = dmat->segments;
 
-	if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
+	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
 		if (map->pagesneeded != 0) {
 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
@@ -626,7 +631,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_
 	while (buflen > 0) {
 		curaddr = buf;
 		sgsize = MIN(buflen, dmat->common.maxsegsz);
-		if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = MIN(sgsize, PAGE_SIZE);
@@ -661,13 +666,13 @@ bounce_bus_dmamap_load_buffer(bus_dma_ta
 	vm_offset_t vaddr;
 	int error;
 
-	if (map == NULL || map == &contig_dmamap)
+	if (map == NULL)
 		map = &nobounce_dmamap;
 
 	if (segs == NULL)
 		segs = dmat->segments;
 
-	if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
+	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
 		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
 		if (map->pagesneeded != 0) {
 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
@@ -691,7 +696,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_ta
 		 */
 		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
 		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
-		if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
@@ -972,7 +977,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_
 	struct bounce_page *bpage;
 
 	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
-	KASSERT(map != NULL && map != &nobounce_dmamap && map != &contig_dmamap,
+	KASSERT(map != NULL && map != &nobounce_dmamap,
 	    ("add_bounce_page: bad map %p", map));
 
 	bz = dmat->bounce_zone;
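For context on why a NULL map matters, here is a minimal, hypothetical sketch (not part of this
commit) of how a driver typically obtains DMA-safe memory through bus_dmamem_alloc(9). After
this change the map handed back for such allocations is NULL rather than the contig_dmamap
sentinel, and the tag's bounce_flags (BUS_DMA_KMEM_ALLOC) records which allocator was used, so
bus_dmamem_free() can choose between free() and kmem_free() without a fake map. The tag
parameters and the mydev_softc structure below are illustrative only, not taken from the commit.

	/*
	 * Illustrative only: a hypothetical driver allocating a descriptor ring.
	 */
	#include <sys/param.h>
	#include <sys/bus.h>
	#include <machine/bus.h>

	struct mydev_softc {
		bus_dma_tag_t	ring_tag;
		bus_dmamap_t	ring_map;	/* NULL for dmamem allocations after r268076 */
		void		*ring_vaddr;
	};

	static int
	mydev_alloc_ring(device_t dev, struct mydev_softc *sc)
	{
		int error;

		/* Parent tag from the bus; page-aligned, 32-bit addressable ring. */
		error = bus_dma_tag_create(bus_get_dma_tag(dev),
		    PAGE_SIZE, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
		    BUS_SPACE_MAXADDR,			/* highaddr */
		    NULL, NULL,				/* filter, filterarg */
		    PAGE_SIZE, 1, PAGE_SIZE,		/* maxsize, nsegments, maxsegsz */
		    0, NULL, NULL, &sc->ring_tag);
		if (error != 0)
			return (error);

		/*
		 * The bounce back-end satisfies this from malloc() or one of the
		 * kmem_alloc_*() allocators and hands back a NULL map; it remembers
		 * which allocator was used in the tag's bounce_flags.
		 */
		error = bus_dmamem_alloc(sc->ring_tag, &sc->ring_vaddr,
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
		if (error != 0) {
			bus_dma_tag_destroy(sc->ring_tag);
			return (error);
		}

		/* ... bus_dmamap_load() the ring and program the hardware ... */
		return (0);
	}

	static void
	mydev_free_ring(struct mydev_softc *sc)
	{
		/* The NULL map tells bus_dmamem_free() this came from dmamem_alloc. */
		bus_dmamem_free(sc->ring_tag, sc->ring_vaddr, sc->ring_map);
		bus_dma_tag_destroy(sc->ring_tag);
	}

Because the returned map is NULL in this common case, the load and unload paths no longer need
to compare against a sentinel map before taking the no-bounce fast path.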