Date:      Fri, 7 Dec 2012 23:18:31 +0000 (UTC)
From:      Jeff Roberson <jeff@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r244000 - in projects/physbio/sys: arm/arm mips/mips
Message-ID:  <201212072318.qB7NIVOJ006677@svn.freebsd.org>

Author: jeff
Date: Fri Dec  7 23:18:30 2012
New Revision: 244000
URL: http://svnweb.freebsd.org/changeset/base/244000

Log:
   - Remember the list of virtual addresses we have mapped in the bus_dmamap
     on architectures that need to invalidate virtual caches.  This makes the
     sync operation buffer-type agnostic.  Rather than mallocing a small
     structure for each virtual address as busdma_machdep-v6.c on arm did, I
     opted to allocate an array sized by the maximum number of physical
     segments supported, on the grounds that there can never be more virtual
     segments than physical ones.
  
  Sponsored by:	EMC / Isilon Storage Division
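
The diffs below use two allocation strategies for the new array: the armv6
file appends it to the map itself with a C99 flexible array member, while the
v4/v5 arm and mips files malloc a separate slist array in
_busdma_alloc_dmamap().  As a rough illustration of the trailing-array
variant, here is a minimal userland sketch; the names (demo_map, sync_entry,
demo_map_record) are invented for the example and are not the kernel's.

#include <stdio.h>
#include <stdlib.h>

/* Stands in for struct sync_list: one entry per mapped virtual range. */
struct sync_entry {
	void	*vaddr;		/* virtual address of the range */
	size_t	datacount;	/* length of the range */
};

/*
 * Stands in for the armv6 bus_dmamap: the sync entries live in a
 * flexible array member at the tail of the map, sized once at creation
 * by the tag's maximum segment count, so load time never mallocs.
 */
struct demo_map {
	int			nsegments;	/* capacity */
	int			sync_count;	/* entries in use */
	struct sync_entry	slist[];	/* trailing array */
};

static struct demo_map *
demo_map_create(int nsegments)
{
	struct demo_map *map;
	size_t mapsize;

	/* One allocation covers both the header and the array. */
	mapsize = sizeof(*map) + sizeof(struct sync_entry) * nsegments;
	map = calloc(1, mapsize);
	if (map == NULL)
		return (NULL);
	map->nsegments = nsegments;
	return (map);
}

/* Mirrors the load-time bookkeeping, including the overflow check. */
static int
demo_map_record(struct demo_map *map, void *vaddr, size_t len)
{
	struct sync_entry *se;

	se = &map->slist[map->sync_count];
	if (++map->sync_count > map->nsegments)
		return (-1);	/* too many segments; caller unwinds */
	se->vaddr = vaddr;
	se->datacount = len;
	return (0);
}

int
main(void)
{
	struct demo_map *map;
	char buf[64];

	map = demo_map_create(4);
	if (map == NULL)
		return (1);
	demo_map_record(map, buf, sizeof(buf));
	printf("recorded %d range(s)\n", map->sync_count);
	free(map);
	return (0);
}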

Modified:
  projects/physbio/sys/arm/arm/busdma_machdep-v6.c
  projects/physbio/sys/arm/arm/busdma_machdep.c
  projects/physbio/sys/mips/mips/busdma_machdep.c

Modified: projects/physbio/sys/arm/arm/busdma_machdep-v6.c
==============================================================================
--- projects/physbio/sys/arm/arm/busdma_machdep-v6.c	Fri Dec  7 22:30:30 2012	(r243999)
+++ projects/physbio/sys/arm/arm/busdma_machdep-v6.c	Fri Dec  7 23:18:30 2012	(r244000)
@@ -108,7 +108,6 @@ struct sync_list {
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
 	bus_addr_t	busaddr;	/* Physical address */
 	bus_size_t	datacount;	/* client data count */
-	STAILQ_ENTRY(sync_list) slinks;
 };
 
 int busdma_swi_pending;
@@ -151,7 +150,8 @@ struct bus_dmamap {
 	bus_dmamap_callback_t *callback;
 	void		      *callback_arg;
 	STAILQ_ENTRY(bus_dmamap) links;
-	STAILQ_HEAD(,sync_list)	slist;
+	int		       sync_count;
+	struct sync_list       slist[];
 };
 
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@@ -436,17 +436,18 @@ out:
 int
 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 {
+	int mapsize;
 	int error;
 
 	error = 0;
 
-	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
-					     M_NOWAIT | M_ZERO);
+	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
+	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
 	if (*mapp == NULL) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
 		return (ENOMEM);
 	}
-	STAILQ_INIT(&((*mapp)->slist));
+	(*mapp)->sync_count = 0;
 
 	if (dmat->segments == NULL) {
 		dmat->segments = (bus_dma_segment_t *)malloc(
@@ -521,8 +522,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
 int
 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
 {
-	if (STAILQ_FIRST(&map->bpages) != NULL ||
-	    STAILQ_FIRST(&map->slist) != NULL) {
+	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
 		    __func__, dmat, EBUSY);
 		return (EBUSY);
@@ -546,6 +546,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
 		 bus_dmamap_t *mapp)
 {
 	int mflags, len;
+	int mapsize;
 
 	if (flags & BUS_DMA_NOWAIT)
 		mflags = M_NOWAIT;
@@ -554,15 +555,15 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
 
 	/* ARM non-snooping caches need a map for the VA cache sync structure */
 
-	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
-					     M_NOWAIT | M_ZERO);
+	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
+	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
 	if (*mapp == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 		    __func__, dmat, dmat->flags, ENOMEM);
 		return (ENOMEM);
 	}
 
-	STAILQ_INIT(&((*mapp)->slist));
+	(*mapp)->sync_count = 0;
 
 	if (dmat->segments == NULL) {
 		dmat->segments = (bus_dma_segment_t *)malloc(
@@ -774,18 +775,14 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
 			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
 		} else {
-			/* add_sync_list(dmat, map, vaddr, sgsize, cflag); */
-			sl = (struct sync_list *)malloc(sizeof(struct sync_list),
-						M_DEVBUF, M_NOWAIT | M_ZERO);
-			if (sl == NULL)
+			sl = &map->slist[map->sync_count];
+			if (++map->sync_count > dmat->nsegments)
 				goto cleanup;
-			STAILQ_INSERT_TAIL(&(map->slist), sl, slinks);
 			sl->vaddr = vaddr;
 			sl->datacount = sgsize;
 			sl->busaddr = curaddr;
 		}
 
-
 		if (dmat->ranges) {
 			struct arm32_dma_range *dr;
 
@@ -1012,12 +1009,6 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, b
 {
 	struct bounce_page *bpage;
 	struct bounce_zone *bz;
-	struct sync_list *sl;
-
-        while ((sl = STAILQ_FIRST(&map->slist)) != NULL) {
-                STAILQ_REMOVE_HEAD(&map->slist, slinks);
-                free(sl, M_DEVBUF);
-        }
 
 	if ((bz = dmat->bounce_zone) != NULL) {
 		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
@@ -1031,6 +1022,7 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, b
 		map->pagesreserved = 0;
 		map->pagesneeded = 0;
 	}
+	map->sync_count = 0;
 }
 
 #ifdef notyetbounceuser
@@ -1090,15 +1082,13 @@ void
 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 {
 	struct bounce_page *bpage;
-	struct sync_list *sl;
+	struct sync_list *sl, *end;
 	bus_size_t len, unalign;
 	vm_offset_t buf, ebuf;
 #ifdef FIX_DMAP_BUS_DMASYNC_POSTREAD
 	vm_offset_t bbuf;
 	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
 #endif
-	int listcount = 0;
-
 		/* if buffer was from user space, it is possible that this
 		 * is not the same vm map. The fix is to map each page in
 		 * the buffer into the current address space (KVM) and then
@@ -1166,29 +1156,26 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 		}
 	}
 
-	sl = STAILQ_FIRST(&map->slist);
-	while (sl) {
-		listcount++;
-		sl = STAILQ_NEXT(sl, slinks);
-	}
-	if ((sl = STAILQ_FIRST(&map->slist)) != NULL) {
+	if (map->sync_count != 0) {
 		/* ARM caches are not self-snooping for dma */
 
+		sl = &map->slist[0];
+		end = &map->slist[map->sync_count];
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
 		    "performing sync", __func__, dmat, dmat->flags, op);
 
 		switch (op) {
 		case BUS_DMASYNC_PREWRITE:
-			while (sl != NULL) {
+			while (sl != end) {
 			    cpu_dcache_wb_range(sl->vaddr, sl->datacount);
 			    l2cache_wb_range(sl->vaddr, sl->busaddr,
 				sl->datacount);
-			    sl = STAILQ_NEXT(sl, slinks);
+			    sl++;
 			}
 			break;
 
 		case BUS_DMASYNC_PREREAD:
-			while (sl != NULL) {
+			while (sl != end) {
 					/* write back the unaligned portions */
 				vm_paddr_t physaddr = sl->busaddr, ephysaddr;
 				buf = sl->vaddr;
@@ -1228,16 +1215,16 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 					cpu_dcache_inv_range(buf, len);
 					l2cache_inv_range(buf, physaddr, len);
 				}
-				sl = STAILQ_NEXT(sl, slinks);
+				sl++;
 			}
 			break;
 
 		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
-			while (sl != NULL) {
+			while (sl != end) {
 				cpu_dcache_wbinv_range(sl->vaddr, sl->datacount);
 				l2cache_wbinv_range(sl->vaddr,
 				    sl->busaddr, sl->datacount);
-				sl = STAILQ_NEXT(sl, slinks);
+				sl++;
 			}
 			break;
 
@@ -1245,7 +1232,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 		case BUS_DMASYNC_POSTREAD:
 			if (!pmap_dmap_iscurrent(map->pmap))
 			     panic("_bus_dmamap_sync: wrong user map. apply fix");
-			while (sl != NULL) {
+			while (sl != end) {
 					/* write back the unaligned portions */
 				vm_paddr_t physaddr;
 				buf = sl->vaddr;
@@ -1278,7 +1265,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 					unalign = arm_dcache_align - unalign;
 					memcpy((void *)ebuf, _tmp_clend, unalign);
 				}
-				sl = STAILQ_NEXT(sl, slinks);
+				sl++;
 			}
 				break;
 #endif /* FIX_DMAP_BUS_DMASYNC_POSTREAD */
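
The sync paths above replace the STAILQ walk with a bounded pointer sweep:
"end" points one past the last valid entry, so the loop needs no list links
and no separate element count.  A self-contained sketch of the idiom, where
writeback_range() is only a stand-in for the cache-maintenance calls such as
cpu_dcache_wb_range(), not a real kernel function:

#include <stdio.h>

struct sync_entry {
	const char	*vaddr;
	size_t		datacount;
};

/* Stand-in for cpu_dcache_wb_range()/l2cache_wb_range(). */
static void
writeback_range(const char *va, size_t len)
{
	printf("wb %p + %zu\n", (const void *)va, len);
}

int
main(void)
{
	struct sync_entry slist[3] = {
		{ "abc", 3 }, { "defg", 4 }, { "hi", 2 }
	};
	int sync_count = 2;	/* only the first two entries are live */
	struct sync_entry *sl, *end;

	sl = &slist[0];
	end = &slist[sync_count];
	while (sl != end) {
		writeback_range(sl->vaddr, sl->datacount);
		sl++;
	}
	return (0);
}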

Modified: projects/physbio/sys/arm/arm/busdma_machdep.c
==============================================================================
--- projects/physbio/sys/arm/arm/busdma_machdep.c	Fri Dec  7 22:30:30 2012	(r243999)
+++ projects/physbio/sys/arm/arm/busdma_machdep.c	Fri Dec  7 23:18:30 2012	(r244000)
@@ -102,6 +102,12 @@ struct bounce_page {
 	STAILQ_ENTRY(bounce_page) links;
 };
 
+struct sync_list {
+	vm_offset_t	vaddr;		/* kva of bounce buffer */
+	bus_addr_t	busaddr;	/* Physical address */
+	bus_size_t	datacount;	/* client data count */
+};
+
 int busdma_swi_pending;
 
 struct bounce_zone {
@@ -131,11 +137,7 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma
 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
 	   "Total bounce pages");
 
-#define DMAMAP_LINEAR		0x1
-#define DMAMAP_MBUF		0x2
-#define DMAMAP_UIO		0x4
 #define DMAMAP_ALLOCATED	0x10
-#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
 #define DMAMAP_COHERENT		0x8
 struct bus_dmamap {
 	struct bp_list	bpages;
@@ -151,7 +153,8 @@ struct bus_dmamap {
 	STAILQ_ENTRY(bus_dmamap) links;
 	bus_dmamap_callback_t *callback;
 	void		      *callback_arg;
-
+	int		       sync_count;
+	struct sync_list       *slist;
 };
 
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@@ -290,10 +293,14 @@ dflt_lock(void *arg, bus_dma_lock_op_t o
 }
 
 static __inline bus_dmamap_t
-_busdma_alloc_dmamap(void)
+_busdma_alloc_dmamap(bus_dma_tag_t dmat)
 {
+	struct sync_list *slist;
 	bus_dmamap_t map;
 
+	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
+	if (slist == NULL)
+		return (NULL);
 	mtx_lock(&busdma_mtx);
 	map = TAILQ_FIRST(&dmamap_freelist);
 	if (map)
@@ -305,13 +312,18 @@ _busdma_alloc_dmamap(void)
 			map->flags = DMAMAP_ALLOCATED;
 	} else
 		map->flags = 0;
-	STAILQ_INIT(&map->bpages);
+	if (map != NULL) {
+		map->slist = slist;
+		STAILQ_INIT(&map->bpages);
+	} else
+		free(slist, M_DEVBUF);
 	return (map);
 }
 
 static __inline void
 _busdma_free_dmamap(bus_dmamap_t map)
 {
+	free(map->slist, M_DEVBUF);
 	if (map->flags & DMAMAP_ALLOCATED)
 		free(map, M_DEVBUF);
 	else {
@@ -494,7 +505,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
 		}
 	}
 
-	newmap = _busdma_alloc_dmamap();
+	newmap = _busdma_alloc_dmamap(dmat);
 	if (newmap == NULL) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
 		return (ENOMEM);
@@ -502,6 +513,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
 	*mapp = newmap;
 	newmap->dmat = dmat;
 	newmap->allocbuffer = NULL;
+	newmap->sync_count = 0;
 	dmat->map_count++;
 
 	/*
@@ -565,7 +577,7 @@ int
 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
 {
 
-	if (STAILQ_FIRST(&map->bpages) != NULL) {
+	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
 		    __func__, dmat, EBUSY);
 		return (EBUSY);
@@ -608,7 +620,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
 	if (flags & BUS_DMA_ZERO)
 		mflags |= M_ZERO;
 
-	newmap = _busdma_alloc_dmamap();
+	newmap = _busdma_alloc_dmamap(dmat);
 	if (newmap == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 		    __func__, dmat, dmat->flags, ENOMEM);
@@ -617,6 +629,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
 	dmat->map_count++;
 	*mapp = newmap;
 	newmap->dmat = dmat;
+	newmap->sync_count = 0;
 	
         if (dmat->maxsize <= PAGE_SIZE &&
 	   (dmat->alignment < dmat->maxsize) &&
@@ -728,6 +741,8 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
+				map->buffer = buf;
+				map->len = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 				    map, links);
 				mtx_unlock(&bounce_lock);
@@ -751,6 +766,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 {
 	bus_size_t sgsize;
 	bus_addr_t curaddr, baddr, bmask;
+	struct sync_list *sl;
 	vm_offset_t vaddr = (vm_offset_t)buf;
 	int seg;
 	int error = 0;
@@ -839,8 +855,16 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 				sgsize = (baddr - curaddr);
 		}
 		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
-		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
+		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
 			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+		} else {
+			sl = &map->slist[map->sync_count];
+			if (++map->sync_count > dmat->nsegments)
+				goto cleanup;
+			sl->vaddr = vaddr;
+			sl->datacount = sgsize;
+			sl->busaddr = curaddr;
+		}
 
 		if (dmat->ranges) {
 			struct arm32_dma_range *dr;
@@ -883,7 +907,7 @@ segdone:
 	}
 
 	*segp = seg;
-
+cleanup:
 	/*
 	 * Did we fit?
 	 */
@@ -906,10 +930,6 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_
 	KASSERT(map != NULL, ("dmamap is NULL"));
 	map->callback = callback;
 	map->callback_arg = callback_arg;
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
-	map->buffer = buf;
-	map->len = buflen;
 	error = _bus_dmamap_load_buffer(dmat,
 	    map, buf, buflen, kernel_pmap,
 	    flags, NULL, &nsegs);
@@ -938,10 +958,6 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat,
 
 	M_ASSERTPKTHDR(m0);
 
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
-	map->buffer = m0;
-	map->len = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
 		struct mbuf *m;
 
@@ -950,7 +966,6 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat,
 				error = _bus_dmamap_load_buffer(dmat,
 				    map, m->m_data, m->m_len,
 				    kernel_pmap, flags, NULL, &nsegs);
-				map->len += m->m_len;
 			}
 		}
 	} else {
@@ -982,10 +997,6 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
 
 	flags |= BUS_DMA_NOWAIT;
 	*nsegs = -1;
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
-	map->buffer = m0;			
-	map->len = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
 		struct mbuf *m;
 
@@ -995,7 +1006,6 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
 						m->m_data, m->m_len,
 						kernel_pmap, flags,
 						segs, nsegs);
-				map->len += m->m_len;
 			}
 		}
 	} else {
@@ -1024,10 +1034,6 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, 
 
 	resid = uio->uio_resid;
 	iov = uio->uio_iov;
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
-	map->buffer = uio;
-	map->len = 0;
 
 	if (uio->uio_segflg == UIO_USERSPACE) {
 		KASSERT(uio->uio_td != NULL,
@@ -1051,7 +1057,6 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, 
 			error = _bus_dmamap_load_buffer(dmat,
 			    map, addr, minlen, pmap, flags, NULL, &nsegs);
 
-			map->len += minlen;
 			resid -= minlen;
 		}
 	}
@@ -1079,11 +1084,11 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, b
 {
 	struct bounce_page *bpage;
 
-	map->flags &= ~DMAMAP_TYPE_MASK;
 	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
 		STAILQ_REMOVE_HEAD(&map->bpages, links);
 		free_bounce_page(dmat, bpage);
 	}
+	map->sync_count = 0;
 	return;
 }
 
@@ -1172,28 +1177,10 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, 
 	}
 }
 
-static __inline int
-_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
-{
-	struct bounce_page *bpage;
-
-	STAILQ_FOREACH(bpage, &map->bpages, links) {
-		if ((vm_offset_t)buf >= bpage->datavaddr &&
-		    (vm_offset_t)buf + len <= bpage->datavaddr +
-		    bpage->datacount)
-			return (1);
-	}
-	return (0);
-
-}
-
 void
 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 {
-	struct mbuf *m;
-	struct uio *uio;
-	int resid;
-	struct iovec *iov;
+	struct sync_list *sl, *end;
 	
 	if (op == BUS_DMASYNC_POSTWRITE)
 		return;
@@ -1202,38 +1189,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 	if (map->flags & DMAMAP_COHERENT)
 		return;
 	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
-	switch(map->flags & DMAMAP_TYPE_MASK) {
-	case DMAMAP_LINEAR:
-		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
-			bus_dmamap_sync_buf(map->buffer, map->len, op);
-		break;
-	case DMAMAP_MBUF:
-		m = map->buffer;
-		while (m) {
-			if (m->m_len > 0 &&
-			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
-				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
-			m = m->m_next;
-		}
-		break;
-	case DMAMAP_UIO:
-		uio = map->buffer;
-		iov = uio->uio_iov;
-		resid = uio->uio_resid;
-		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
-			bus_size_t minlen = resid < iov[i].iov_len ? resid :
-			    iov[i].iov_len;
-			if (minlen > 0) {
-				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
-				    minlen))
-					bus_dmamap_sync_buf(iov[i].iov_base,
-					    minlen, op);
-				resid -= minlen;
-			}
-		}
-		break;
-	default:
-		break;
+	if (map->sync_count) {
+		end = &map->slist[map->sync_count];
+		for (sl = &map->slist[0]; sl != end; sl++)
+			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
 	}
 	cpu_drain_writebuf();
 }
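
Because the v4/v5 map can come from a freelist rather than a fresh malloc,
the sync array here is a separate allocation whose ownership passes to the
map only once both allocations succeed.  A hypothetical userland sketch of
that shape (demo_map_alloc()/demo_map_free() are illustrative names, not the
kernel interfaces):

#include <stdlib.h>

struct sync_entry {
	void	*vaddr;
	size_t	datacount;
};

struct demo_map {
	int			sync_count;
	struct sync_entry	*slist;		/* separately malloc'd */
};

/*
 * Mirrors _busdma_alloc_dmamap(): allocate the sync array first, then
 * obtain the map; on failure nothing leaks, on success the array's
 * ownership moves into the map.
 */
static struct demo_map *
demo_map_alloc(int nsegments)
{
	struct sync_entry *slist;
	struct demo_map *map;

	slist = malloc(sizeof(*slist) * nsegments);
	if (slist == NULL)
		return (NULL);
	map = calloc(1, sizeof(*map));	/* kernel may reuse a freelist map */
	if (map == NULL) {
		free(slist);		/* map failed; give the array back */
		return (NULL);
	}
	map->slist = slist;
	return (map);
}

static void
demo_map_free(struct demo_map *map)
{
	free(map->slist);		/* the array travels with the map */
	free(map);
}

int
main(void)
{
	struct demo_map *map;

	map = demo_map_alloc(8);
	if (map != NULL)
		demo_map_free(map);
	return (0);
}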

Modified: projects/physbio/sys/mips/mips/busdma_machdep.c
==============================================================================
--- projects/physbio/sys/mips/mips/busdma_machdep.c	Fri Dec  7 22:30:30 2012	(r243999)
+++ projects/physbio/sys/mips/mips/busdma_machdep.c	Fri Dec  7 23:18:30 2012	(r244000)
@@ -93,6 +93,12 @@ struct bounce_page {
 	STAILQ_ENTRY(bounce_page) links;
 };
 
+struct sync_list {
+	vm_offset_t	vaddr;		/* kva of bounce buffer */
+	bus_addr_t	busaddr;	/* Physical address */
+	bus_size_t	datacount;	/* client data count */
+};
+
 int busdma_swi_pending;
 
 struct bounce_zone {
@@ -122,10 +128,6 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma
 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
 	   "Total bounce pages");
 
-#define DMAMAP_LINEAR		0x1
-#define DMAMAP_MBUF		0x2
-#define DMAMAP_UIO		0x4
-#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
 #define DMAMAP_UNCACHEABLE	0x8
 #define DMAMAP_ALLOCATED	0x10
 #define DMAMAP_MALLOCUSED	0x20
@@ -144,7 +146,8 @@ struct bus_dmamap {
 	STAILQ_ENTRY(bus_dmamap) links;
 	bus_dmamap_callback_t *callback;
 	void		*callback_arg;
-
+	int		sync_count;
+	struct sync_list *slist;
 };
 
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@@ -268,10 +271,14 @@ dflt_lock(void *arg, bus_dma_lock_op_t o
 }
 
 static __inline bus_dmamap_t
-_busdma_alloc_dmamap(void)
+_busdma_alloc_dmamap(bus_dma_tag_t dmat)
 {
+	struct sync_list *slist;
 	bus_dmamap_t map;
 
+	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
+	if (slist == NULL)
+		return (NULL);
 	mtx_lock(&busdma_mtx);
 	map = TAILQ_FIRST(&dmamap_freelist);
 	if (map)
@@ -283,13 +290,18 @@ _busdma_alloc_dmamap(void)
 			map->flags = DMAMAP_ALLOCATED;
 	} else
 		map->flags = 0;
-	STAILQ_INIT(&map->bpages);
+	if (map != NULL) {
+		map->slist = slist;
+		STAILQ_INIT(&map->bpages);
+	} else
+		free(slist, M_DEVBUF);
 	return (map);
 }
 
 static __inline void 
 _busdma_free_dmamap(bus_dmamap_t map)
 {
+	free(map->slist, M_DEVBUF);
 	if (map->flags & DMAMAP_ALLOCATED)
 		free(map, M_DEVBUF);
 	else {
@@ -472,7 +483,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
 		}
 	}
 
-	newmap = _busdma_alloc_dmamap();
+	newmap = _busdma_alloc_dmamap(dmat);
 	if (newmap == NULL) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
 		return (ENOMEM);
@@ -480,6 +491,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
 	*mapp = newmap;
 	newmap->dmat = dmat;
 	newmap->allocbuffer = NULL;
+	newmap->sync_count = 0;
 	dmat->map_count++;
 
 	/*
@@ -544,7 +556,7 @@ int
 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
 {
 
-	if (STAILQ_FIRST(&map->bpages) != NULL) {
+	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
 		    __func__, dmat, EBUSY);
 		return (EBUSY);
@@ -587,7 +599,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
 	if (flags & BUS_DMA_ZERO)
 		mflags |= M_ZERO;
 
-	newmap = _busdma_alloc_dmamap();
+	newmap = _busdma_alloc_dmamap(dmat);
 	if (newmap == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 		    __func__, dmat, dmat->flags, ENOMEM);
@@ -596,6 +608,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
 	dmat->map_count++;
 	*mapp = newmap;
 	newmap->dmat = dmat;
+	newmap->sync_count = 0;
 
 	/*
 	 * If all the memory is coherent with DMA then we don't need to
@@ -726,6 +739,8 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
+				map->buffer = buf;
+				map->len = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 				    map, links);
 				mtx_unlock(&bounce_lock);
@@ -750,6 +765,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 {
 	bus_size_t sgsize;
 	bus_addr_t curaddr, baddr, bmask;
+	struct sync_list *sl;
 	vm_offset_t vaddr = (vm_offset_t)buf;
 	int seg;
 	int error = 0;
@@ -798,6 +814,13 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
 			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+		} else {
+			sl = &map->slist[map->sync_count];
+			if (++map->sync_count > dmat->nsegments)
+				goto cleanup;
+			sl->vaddr = vaddr;
+			sl->datacount = sgsize;
+			sl->busaddr = curaddr;
 		}
 
 		/*
@@ -826,7 +849,7 @@ segdone:
 	}
 
 	*segp = seg;
-
+cleanup:
 	/*
 	 * Did we fit?
 	 */
@@ -849,10 +872,6 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_
 	KASSERT(map != NULL, ("dmamap is NULL"));
 	map->callback = callback;
 	map->callback_arg = callback_arg;
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_LINEAR;
-	map->buffer = buf;
-	map->len = buflen;
 	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
 	    flags, NULL, &nsegs);
 	if (error == EINPROGRESS)
@@ -880,10 +899,6 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat,
 
 	M_ASSERTPKTHDR(m0);
 
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_MBUF;
-	map->buffer = m0;
-	map->len = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
 		struct mbuf *m;
 
@@ -892,7 +907,6 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat,
 				error = _bus_dmamap_load_buffer(dmat,
 				    map, m->m_data, m->m_len, 
 				    kernel_pmap, flags, NULL, &nsegs);
-				map->len += m->m_len;
 			}
 		}
 	} else {
@@ -924,10 +938,6 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
 
 	flags |= BUS_DMA_NOWAIT;
 	*nsegs = -1;
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_MBUF;
-	map->buffer = m0;			
-	map->len = 0;
 	if (m0->m_pkthdr.len <= dmat->maxsize) {
 		struct mbuf *m;
 
@@ -937,7 +947,6 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
 						m->m_data, m->m_len,
 						kernel_pmap, flags,
 						segs, nsegs);
-				map->len += m->m_len;
 			}
 		}
 	} else {
@@ -966,10 +975,6 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, 
 
 	resid = uio->uio_resid;
 	iov = uio->uio_iov;
-	map->flags &= ~DMAMAP_TYPE_MASK;
-	map->flags |= DMAMAP_UIO;
-	map->buffer = uio;
-	map->len = 0;
 
 	if (uio->uio_segflg == UIO_USERSPACE) {
 		KASSERT(uio->uio_td != NULL,
@@ -994,7 +999,6 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, 
 			error = _bus_dmamap_load_buffer(dmat, map, addr,
 			    minlen, pmap, flags, NULL, &nsegs);
 
-			map->len += minlen;
 			resid -= minlen;
 		}
 	}
@@ -1022,11 +1026,11 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, b
 {
 	struct bounce_page *bpage;
 
-	map->flags &= ~DMAMAP_TYPE_MASK;
 	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
 		STAILQ_REMOVE_HEAD(&map->bpages, links);
 		free_bounce_page(dmat, bpage);
 	}
+	map->sync_count = 0;
 	return;
 }
 
@@ -1154,28 +1158,10 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, 
 	}
 }
 
-static __inline int
-_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
-{
-	struct bounce_page *bpage;
-
-	STAILQ_FOREACH(bpage, &map->bpages, links) {
-		if ((vm_offset_t)buf >= bpage->datavaddr &&
-		    (vm_offset_t)buf + len <= bpage->datavaddr + 
-		    bpage->datacount)
-			return (1);
-	}
-	return (0);
-
-}
-
 void
 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 {
-	struct mbuf *m;
-	struct uio *uio;
-	int resid;
-	struct iovec *iov;
+	struct sync_list *sl, *end;
 	
 	if (op == BUS_DMASYNC_POSTWRITE)
 		return;
@@ -1189,38 +1175,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 		return;
 
 	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
-	switch(map->flags & DMAMAP_TYPE_MASK) {
-	case DMAMAP_LINEAR:
-		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
-			bus_dmamap_sync_buf(map->buffer, map->len, op);
-		break;
-	case DMAMAP_MBUF:
-		m = map->buffer;
-		while (m) {
-			if (m->m_len > 0 &&
-			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
-				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
-			m = m->m_next;
-		}
-		break;
-	case DMAMAP_UIO:
-		uio = map->buffer;
-		iov = uio->uio_iov;
-		resid = uio->uio_resid;
-		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
-			bus_size_t minlen = resid < iov[i].iov_len ? resid :
-			    iov[i].iov_len;
-			if (minlen > 0) {
-				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
-				    minlen))
-					bus_dmamap_sync_buf(iov[i].iov_base,
-					    minlen, op);
-				resid -= minlen;
-			}
-		}
-		break;
-	default:
-		break;
+	if (map->sync_count) {
+		end = &map->slist[map->sync_count];
+		for (sl = &map->slist[0]; sl != end; sl++)
+			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
 	}
 }
 


