Skip site navigation (1)Skip section navigation (2)
Date:      Thu, 27 Dec 2012 04:29:14 +0000 (UTC)
From:      Jeff Roberson <jeff@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r244726 - in projects/physbio/sys: arm/arm ia64/ia64 kern mips/mips powerpc/powerpc sparc64/include sparc64/sparc64 sys x86/x86
Message-ID:  <201212270429.qBR4TEQT006871@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: jeff
Date: Thu Dec 27 04:29:14 2012
New Revision: 244726
URL: http://svnweb.freebsd.org/changeset/base/244726

Log:
   - Implement a bus_dma_memory_t type that is used to encapsulate objects
     and types for various structures that describe memory regions we want
     to dma to.
   - Rename bus_dma_mayblock to _nowait to be more consistent.  Add a memory_t
     parameter so that EINPROGRESS operations will work with any type.
   - Change all mayblock implementations to restart any arbitrary type.
   - Redefine the load routines splitting the parsers from the API policy.
   - Add a physical address loading routine to the backend API.  Currently
     it is empty.
  
  Sponsored by:	EMC / Isilon Storage Division

Modified:
  projects/physbio/sys/arm/arm/busdma_machdep-v6.c
  projects/physbio/sys/arm/arm/busdma_machdep.c
  projects/physbio/sys/ia64/ia64/busdma_machdep.c
  projects/physbio/sys/kern/subr_busdma.c
  projects/physbio/sys/mips/mips/busdma_machdep.c
  projects/physbio/sys/powerpc/powerpc/busdma_machdep.c
  projects/physbio/sys/sparc64/include/bus_dma.h
  projects/physbio/sys/sparc64/sparc64/bus_machdep.c
  projects/physbio/sys/sparc64/sparc64/iommu.c
  projects/physbio/sys/sys/bus_dma.h
  projects/physbio/sys/x86/x86/busdma_machdep.c

Modified: projects/physbio/sys/arm/arm/busdma_machdep-v6.c
==============================================================================
--- projects/physbio/sys/arm/arm/busdma_machdep-v6.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/arm/arm/busdma_machdep-v6.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -142,8 +142,7 @@ struct bus_dmamap {
 	int		       pagesneeded;
 	int		       pagesreserved;
 	bus_dma_tag_t	       dmat;
-	void		      *buf;		/* unmapped buffer pointer */
-	bus_size_t	       buflen;		/* unmapped buffer length */
+	bus_dma_memory_t       mem;
 	pmap_t		       pmap;
 	bus_dmamap_callback_t *callback;
 	void		      *callback_arg;
@@ -694,9 +693,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
-				map->dmat = dmat;
-				map->buf = buf;
-				map->buflen = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 				    map, links);
 				mtx_unlock(&bounce_lock);
@@ -842,10 +838,13 @@ cleanup:
 
 
 void
-__bus_dmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-		      bus_dmamap_callback_t *callback, void *callback_arg)
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+		    bus_dma_memory_t mem, bus_dmamap_callback_t *callback,
+		    void *callback_arg)
 {
 
+	map->mem = mem;
+	map->dmat = dmat;
 	map->callback = callback;
 	map->callback_arg = callback_arg;
 }
@@ -1379,8 +1378,8 @@ busdma_swi(void)
 		mtx_unlock(&bounce_lock);
 		dmat = map->dmat;
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
-		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
-				map->callback, map->callback_arg, /*flags*/0);
+		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
+				    map->callback_arg, BUS_DMA_WAITOK);
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 		mtx_lock(&bounce_lock);
 	}

Modified: projects/physbio/sys/arm/arm/busdma_machdep.c
==============================================================================
--- projects/physbio/sys/arm/arm/busdma_machdep.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/arm/arm/busdma_machdep.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -142,12 +142,11 @@ struct bus_dmamap {
 	int		pagesneeded;
 	int		pagesreserved;
         bus_dma_tag_t	dmat;
+	bus_dma_memory_t mem;
 	int		flags;
-	void 		*buffer;
 	void		*origbuffer;
 	void		*allocbuffer;
 	TAILQ_ENTRY(bus_dmamap)	freelist;
-	int		len;
 	STAILQ_ENTRY(bus_dmamap) links;
 	bus_dmamap_callback_t *callback;
 	void		      *callback_arg;
@@ -740,8 +739,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
-				map->buffer = buf;
-				map->len = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 				    map, links);
 				mtx_unlock(&bounce_lock);
@@ -923,12 +920,14 @@ cleanup:
 }
 
 void
-__bus_dmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-		      bus_dmamap_callback_t *callback, void *callback_arg)
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+		    bus_dma_memory_t mem, bus_dmamap_callback_t *callback,
+		    void *callback_arg)
 {
 
 	KASSERT(dmat != NULL, ("dmatag is NULL"));
 	KASSERT(map != NULL, ("dmamap is NULL"));
+	map->mem = mem;
 	map->callback = callback;
 	map->callback_arg = callback_arg;
 }
@@ -1308,8 +1307,8 @@ busdma_swi(void)
 		mtx_unlock(&bounce_lock);
 		dmat = map->dmat;
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
-		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
-		    map->callback, map->callback_arg, /*flags*/0);
+		bus_dmamap_load_mem(map->dmat, map, &map->mem,
+		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 		mtx_lock(&bounce_lock);
 	}

Modified: projects/physbio/sys/ia64/ia64/busdma_machdep.c
==============================================================================
--- projects/physbio/sys/ia64/ia64/busdma_machdep.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/ia64/ia64/busdma_machdep.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -105,8 +105,7 @@ struct bus_dmamap {
 	int		pagesneeded;
 	int		pagesreserved;
 	bus_dma_tag_t	dmat;
-	void		*buf;		/* unmapped buffer pointer */
-	bus_size_t	buflen;		/* unmapped buffer length */
+	bus_dma_memory_t mem;
 	bus_dmamap_callback_t *callback;
 	void		*callback_arg;
 	STAILQ_ENTRY(bus_dmamap) links;
@@ -535,9 +534,6 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
-				map->dmat = dmat;
-				map->buf = buf;
-				map->buflen = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 				    map, links);
 				mtx_unlock(&bounce_lock);
@@ -615,10 +611,12 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 
 
 void
-__bus_dmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-    bus_dmamap_callback_t *callback, void *callback_arg)
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg)
 {
 	if (map != NULL) {
+		map->dmat = dmat;
+		map->mem = mem;
 		map->callback = callback;
 		map->callback_arg = callback_arg;
 	}
@@ -827,8 +825,8 @@ busdma_swi(void)
 		mtx_unlock(&bounce_lock);
 		dmat = map->dmat;
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
-		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
-		    map->callback, map->callback_arg, /*flags*/0);
+		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
+		    map->callback_arg, BUS_DMA_WAITOK);
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 		mtx_lock(&bounce_lock);
 	}

Modified: projects/physbio/sys/kern/subr_busdma.c
==============================================================================
--- projects/physbio/sys/kern/subr_busdma.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/kern/subr_busdma.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -53,6 +53,185 @@ __FBSDID("$FreeBSD$");
 #include <machine/bus.h>
 
 /*
+ * Load a list of virtual addresses.
+ */
+static int
+_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
+    int flags)
+{
+	int error;
+
+	error = 0;
+	for (; sglist_cnt > 0; sglist_cnt--, list++) {
+		error = _bus_dmamap_load_buffer(dmat, map,
+		    (void *)list->ds_addr, list->ds_len, pmap, flags, NULL,
+		    nsegs);
+		if (error)
+			break;
+	}
+	return (error);
+}
+
+/*
+ * Load a list of physical addresses.
+ */
+static int
+_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
+{
+	int error;
+
+	error = 0;
+	for (; sglist_cnt > 0; sglist_cnt--, list++) {
+		error = _bus_dmamap_load_phys(dmat, map,
+		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
+		    nsegs);
+		if (error)
+			break;
+	}
+	return (error);
+}
+
+/*
+ * Load an mbuf chain.
+ */
+static int
+_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
+{
+	struct mbuf *m;
+	int error;
+
+	M_ASSERTPKTHDR(m0);
+
+	error = 0;
+	for (m = m0; m != NULL && error == 0; m = m->m_next) {
+		if (m->m_len > 0) {
+			error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
+			    m->m_len, kernel_pmap, flags, segs, nsegs);
+		}
+	}
+	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+	    __func__, dmat, flags, error, *nsegs);
+	return (error);
+}
+
+/*
+ * Load from block io.
+ */
+static int
+_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
+    int *nsegs, int flags)
+{
+	int error;
+
+	error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
+	    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
+
+	return (error);
+}
+
+/*
+ * Load a cam control block.
+ */
+static int
+_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
+		    int *nsegs, int flags)
+{
+	struct ccb_ataio *ataio;
+	struct ccb_scsiio *csio;
+	struct ccb_hdr *ccb_h;
+	void *data_ptr;
+	int error;
+	uint32_t dxfer_len;
+	uint16_t sglist_cnt;
+
+	error = 0;
+	ccb_h = &ccb->ccb_h;
+	switch (ccb_h->func_code) {
+	case XPT_SCSI_IO:
+		csio = &ccb->csio;
+		data_ptr = csio->data_ptr;
+		dxfer_len = csio->dxfer_len;
+		sglist_cnt = csio->sglist_cnt;
+		break;
+	case XPT_ATA_IO:
+		ataio = &ccb->ataio;
+		data_ptr = ataio->data_ptr;
+		dxfer_len = ataio->dxfer_len;
+		sglist_cnt = 0;
+		break;
+	default:
+		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
+		    ccb_h->func_code);
+	}
+
+	switch ((ccb_h->flags & CAM_DATA_MASK)) {
+	case CAM_DATA_VADDR:
+		error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
+		    kernel_pmap, flags, NULL, nsegs);
+		break;
+	case CAM_DATA_PADDR:
+		error = _bus_dmamap_load_phys(dmat, map, (vm_paddr_t)data_ptr,
+		    dxfer_len, flags, NULL, nsegs);
+		break;
+	case CAM_DATA_SG:
+		error = _bus_dmamap_load_vlist(dmat, map,
+		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
+		    nsegs, flags);
+		break;
+	case CAM_DATA_SG_PADDR:
+		error = _bus_dmamap_load_plist(dmat, map,
+		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
+		break;
+	case CAM_DATA_BIO:
+		error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
+		    nsegs, flags);
+		break;
+	default:
+		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
+		    ccb_h->flags);
+	}
+	return (error);
+}
+
+/*
+ * Load a uio.
+ */
+static int
+_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
+    pmap_t pmap, int *nsegs, int flags)
+{
+	bus_size_t resid;
+	bus_size_t minlen;
+	struct iovec *iov;
+	caddr_t addr;
+	int error, i;
+
+	resid = uio->uio_resid;
+	iov = uio->uio_iov;
+	error = 0;
+
+	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
+		/*
+		 * Now at the first iovec to load.  Load each iovec
+		 * until we have exhausted the residual count.
+		 */
+
+		addr = (caddr_t) iov[i].iov_base;
+		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
+		if (minlen > 0) {
+			error = _bus_dmamap_load_buffer(dmat, map, addr,
+			    minlen, pmap, flags, NULL, nsegs);
+			resid -= minlen;
+		}
+	}
+
+	return (error);
+}
+
+/*
  * Map the buffer buf into bus space using the dmamap map.
  */
 int
@@ -65,7 +244,8 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_
 	int nsegs;
 
 	if ((flags & BUS_DMA_NOWAIT) == 0)
-		_bus_dmamap_mayblock(dmat, map, callback, callback_arg);
+		_bus_dmamap_waitok(dmat, map, dma_mem_vaddr(buf, buflen),
+		    callback, callback_arg);
 
 	nsegs = -1;
 	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
@@ -94,35 +274,6 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_
 	return (0);
 }
 
-/*
- * Like _bus_dmamap_load(), but for mbufs.
- */
-static __inline int
-_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
-    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
-{
-	struct mbuf *m;
-	int error;
-
-	M_ASSERTPKTHDR(m0);
-
-	flags |= BUS_DMA_NOWAIT;
-	*nsegs = -1;
-	error = 0;
-
-	for (m = m0; m != NULL && error == 0; m = m->m_next) {
-		if (m->m_len > 0) {
-			error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
-			    m->m_len, kernel_pmap, flags, segs, nsegs);
-		}
-	}
-
-	++*nsegs;
-	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
-	    __func__, dmat, flags, error, *nsegs);
-	return (error);
-}
-
 int
 bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
@@ -130,7 +281,10 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat,
 	bus_dma_segment_t *segs;
 	int nsegs, error;
 
+	flags |= BUS_DMA_NOWAIT;
+	nsegs = -1;
 	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
+	++nsegs;
 
 	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
 	if (error)
@@ -149,30 +303,23 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
 {
 	int error;
 
+	flags |= BUS_DMA_NOWAIT;
+	*nsegs = -1;
 	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
+	++*nsegs;
 	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
 	return (error);
 }
 
-/*
- * Like _bus_dmamap_load(), but for uios.
- */
 int
 bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
 {
 	bus_dma_segment_t *segs;
-	int nsegs, error, i;
-	bus_size_t resid;
-	bus_size_t minlen;
-	struct iovec *iov;
-	caddr_t addr;
+	int nsegs, error;
 	pmap_t pmap;
 
 	flags |= BUS_DMA_NOWAIT;
-	resid = uio->uio_resid;
-	iov = uio->uio_iov;
-
 	if (uio->uio_segflg == UIO_USERSPACE) {
 		KASSERT(uio->uio_td != NULL,
 			("bus_dmamap_load_uio: USERSPACE but no proc"));
@@ -181,23 +328,9 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, 
 		pmap = kernel_pmap;
 
 	nsegs = -1;
-	error = 0;
-	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
-		/*
-		 * Now at the first iovec to load.  Load each iovec
-		 * until we have exhausted the residual count.
-		 */
-
-		addr = (caddr_t) iov[i].iov_base;
-		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
-		if (minlen > 0) {
-			error = _bus_dmamap_load_buffer(dmat, map, addr,
-			    minlen, pmap, flags, NULL, &nsegs);
-			resid -= minlen;
-		}
-	}
-
+	error = _bus_dmamap_load_uio(dmat, map, uio, pmap, &nsegs, flags);
 	nsegs++;
+
 	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
 	if (error)
 		(*callback)(callback_arg, segs, 0, 0, error);
@@ -214,91 +347,107 @@ bus_dmamap_load_ccb(bus_dma_tag_t dmat, 
 		    bus_dmamap_callback_t *callback, void *callback_arg,
 		    int flags)
 {
-	struct ccb_ataio *ataio;
-	struct ccb_scsiio *csio;
+	bus_dma_segment_t *segs;
 	struct ccb_hdr *ccb_h;
-	void *data_ptr;
-	uint32_t dxfer_len;
-	uint16_t sglist_cnt;
+	int error;
+	int nsegs;
 
 	ccb_h = &ccb->ccb_h;
 	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
 		callback(callback_arg, NULL, 0, 0);
 		return (0);
 	}
+	if ((flags & BUS_DMA_NOWAIT) == 0)
+		_bus_dmamap_waitok(dmat, map, dma_mem_ccb(ccb), callback,
+		    callback_arg);
+	nsegs = -1;
+	error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
+	nsegs++;
+	if (error == EINPROGRESS)
+		return (error);
 
-	switch (ccb_h->func_code) {
-	case XPT_SCSI_IO:
-		csio = &ccb->csio;
-		data_ptr = csio->data_ptr;
-		dxfer_len = csio->dxfer_len;
-		sglist_cnt = csio->sglist_cnt;
+	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
+	if (error)
+		(*callback)(callback_arg, segs, 0, error);
+	else
+		(*callback)(callback_arg, segs, nsegs, error);
+	/*
+	 * Return ENOMEM to the caller so that it can pass it up the stack.
+	 * This error only happens when NOWAIT is set, so deferral is disabled.
+	 */
+	if (error == ENOMEM)
+		return (error);
+
+	return (0);
+}
+
+int
+bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_memory_t *mem, bus_dmamap_callback_t *callback,
+    void *callback_arg, int flags)
+{
+	bus_dma_segment_t *segs;
+	int error;
+	int nsegs;
+
+	if ((flags & BUS_DMA_NOWAIT) == 0)
+		_bus_dmamap_waitok(dmat, map, *mem, callback, callback_arg);
+
+	nsegs = -1;
+	switch (mem->dm_type) {
+	case BUS_DMAMEM_VADDR:
+		error = _bus_dmamap_load_buffer(dmat, map, mem->dm_vaddr,
+		    mem->dm_opaque, kernel_pmap, flags, NULL, &nsegs);
 		break;
-	case XPT_ATA_IO:
-		ataio = &ccb->ataio;
-		data_ptr = ataio->data_ptr;
-		dxfer_len = ataio->dxfer_len;
-		sglist_cnt = 0;
+	case BUS_DMAMEM_PADDR:
+		error = _bus_dmamap_load_phys(dmat, map, mem->dm_paddr,
+		    mem->dm_opaque, flags, NULL, &nsegs);
 		break;
-	default:
-		panic("bus_dmamap_load_ccb: Unsupported func code %d",
-		    ccb_h->func_code);
-	}
-
-	switch ((ccb_h->flags & CAM_DATA_MASK)) {
-	case CAM_DATA_VADDR:
-		return bus_dmamap_load(dmat,
-				       map,
-				       data_ptr,
-				       dxfer_len,
-				       callback,
-				       callback_arg,
-				       /*flags*/0);
-	case CAM_DATA_PADDR: {
-		bus_dma_segment_t seg;
-
-		seg.ds_addr = (bus_addr_t)(vm_offset_t)data_ptr;
-		seg.ds_len = dxfer_len;
-		callback(callback_arg, &seg, 1, 0);
-		break;
-	}
-	case CAM_DATA_SG: {
-		bus_dma_segment_t *segs;
-		int nsegs;
-		int error;
-		int i;
-
-		flags |= BUS_DMA_NOWAIT;
-		segs = (bus_dma_segment_t *)data_ptr;
-		nsegs = -1;
-		error = 0;
-		for (i = 0; i < sglist_cnt && error == 0; i++) {
-			error = _bus_dmamap_load_buffer(dmat, map,
-			    (void *)segs[i].ds_addr, segs[i].ds_len,
-			    kernel_pmap, flags, NULL, &nsegs);
-		}
-		nsegs++;
-		segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
-		if (error)
-			(*callback)(callback_arg, segs, 0, error);
-		else
-			(*callback)(callback_arg, segs, nsegs, error);
-
-		if (error == ENOMEM)
-			return (error);
+	case BUS_DMAMEM_VLIST:
+		error = _bus_dmamap_load_vlist(dmat, map, mem->dm_list,
+		    mem->dm_opaque, kernel_pmap, &nsegs, flags);
 		break;
-	}
-	case CAM_DATA_SG_PADDR: {
-		bus_dma_segment_t *segs;
-		/* Just use the segments provided */
-		segs = (bus_dma_segment_t *)data_ptr;
-		callback(callback_arg, segs, sglist_cnt, 0);
+	case BUS_DMAMEM_PLIST:
+		error = _bus_dmamap_load_plist(dmat, map, mem->dm_list,
+		    mem->dm_opaque, &nsegs, flags);
+		break;
+	case BUS_DMAMEM_BIO:
+		error = _bus_dmamap_load_bio(dmat, map, mem->dm_bio,
+		    &nsegs, flags);
+		break;
+	case BUS_DMAMEM_UIO:
+		error = _bus_dmamap_load_uio(dmat, map, mem->dm_uio,
+		    /*XXX*/kernel_pmap, &nsegs, flags);
+		break;
+	case BUS_DMAMEM_MBUF:
+		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->dm_mbuf, NULL,
+		    &nsegs, flags);
+		break;
+	case BUS_DMAMEM_CCB:
+		error = _bus_dmamap_load_ccb(dmat, map, mem->dm_ccb, &nsegs,
+		    flags);
 		break;
 	}
-	case CAM_DATA_BIO:
-	default:
-		panic("bus_dmamap_load_ccb: flags 0x%X unimplemented",
-		    ccb_h->flags);
-	}
+	nsegs++;
+
+	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+	    __func__, dmat, flags, error, nsegs + 1);
+
+	if (error == EINPROGRESS)
+		return (error);
+
+	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
+	if (error)
+		(*callback)(callback_arg, segs, 0, error);
+	else
+		(*callback)(callback_arg, segs, nsegs, 0);
+
+	/*
+	 * Return ENOMEM to the caller so that it can pass it up the stack.
+	 * This error only happens when NOWAIT is set, so deferral is disabled.
+	 */
+	if (error == ENOMEM)
+		return (error);
+
 	return (0);
 }

Modified: projects/physbio/sys/mips/mips/busdma_machdep.c
==============================================================================
--- projects/physbio/sys/mips/mips/busdma_machdep.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/mips/mips/busdma_machdep.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -135,12 +135,11 @@ struct bus_dmamap {
 	int		pagesneeded;
 	int		pagesreserved;
 	bus_dma_tag_t	dmat;
+	bus_dma_memory_t mem;
 	int		flags;
-	void 		*buffer;
 	void		*origbuffer;
 	void		*allocbuffer;
 	TAILQ_ENTRY(bus_dmamap)	freelist;
-	int		len;
 	STAILQ_ENTRY(bus_dmamap) links;
 	bus_dmamap_callback_t *callback;
 	void		*callback_arg;
@@ -738,8 +737,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
-				map->buffer = buf;
-				map->len = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 				    map, links);
 				mtx_unlock(&bounce_lock);
@@ -865,12 +862,13 @@ cleanup:
 }
 
 void
-__bus_dmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-    bus_dmamap_callback_t *callback, void *callback_arg)
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg)
 {
 
 	KASSERT(dmat != NULL, ("dmatag is NULL"));
 	KASSERT(map != NULL, ("dmamap is NULL"));
+	map->mem = mem;
 	map->callback = callback;
 	map->callback_arg = callback_arg;
 }
@@ -1292,8 +1290,8 @@ busdma_swi(void)
 		mtx_unlock(&bounce_lock);
 		dmat = map->dmat;
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
-		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
-		    map->callback, map->callback_arg, /*flags*/0);
+		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
+		    map->callback_arg, BUS_DMA_WAITOK);
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 		mtx_lock(&bounce_lock);
 	}

Modified: projects/physbio/sys/powerpc/powerpc/busdma_machdep.c
==============================================================================
--- projects/physbio/sys/powerpc/powerpc/busdma_machdep.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/powerpc/powerpc/busdma_machdep.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -626,9 +626,6 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
-				map->dmat = dmat;
-				map->buf = buf;
-				map->buflen = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
 				    map, links);
 				mtx_unlock(&bounce_lock);
@@ -709,11 +706,14 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 }
 
 void
-__bus_dmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-		      bus_dmamap_callback_t *callback, void *callback_arg)
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+		    bus_dma_memory_t mem, bus_dmamap_callback_t *callback,
+		    void *callback_arg)
 {
 
 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
+		map->dmat = dmat;
+		map->mem = mem;
 		map->callback = callback;
 		map->callback_arg = callback_arg;
 	}
@@ -1036,8 +1036,9 @@ busdma_swi(void)
 		mtx_unlock(&bounce_lock);
 		dmat = map->dmat;
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
-		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
-				map->callback, map->callback_arg, /*flags*/0);
+		bus_dmamap_load_mem(map->dmat, map, &map->mem,
+				    map->callback, map->callback_arg,
+				    BUS_DMA_WAITOK);
 		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 		mtx_lock(&bounce_lock);
 	}

Modified: projects/physbio/sys/sparc64/include/bus_dma.h
==============================================================================
--- projects/physbio/sys/sparc64/include/bus_dma.h	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/sparc64/include/bus_dma.h	Thu Dec 27 04:29:14 2012	(r244726)
@@ -81,7 +81,7 @@ struct bus_dma_methods {
 	int	(*dm_dmamap_load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
 	    void *buf, bus_size_t buflen, struct pmap *pmap, int flags,
 	    bus_dma_segment_t *segs, int *segp);
-	void	(*dm_dmamap_mayblock)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	void	(*dm_dmamap_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
 	    bus_dmamap_callback_t *callback, void *callback_arg);
 	bus_dma_segment_t *(*dm_dmamap_complete)(bus_dma_tag_t dmat,
 	    bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error);
@@ -127,8 +127,8 @@ struct bus_dma_tag {
 #define	_bus_dmamap_load_buffer(t, m, b, l, p, f, s, sp)		\
 	((t)->dt_mt->dm_dmamap_load_buffer((t), (m), (b), (l), (p),	\
 	    (f), (s), (sp)))
-#define	_bus_dmamap_mayblock(t, m, c, ca)				\
-	((t)->dt_mt->dm_dmamap_mayblock((t), (m), (c), (ca)))
+#define	_bus_dmamap_waitok(t, m, mem, c, ca)				\
+	((t)->dt_mt->dm_dmamap_waitok((t), (m), (mem), (c), (ca)))
 #define	_bus_dmamap_complete(t, m, s, n, e)				\
 	((t)->dt_mt->dm_dmamap_complete((t), (m), (s), (n), (e)))
 #define	bus_dmamap_unload(t, p)						\

Modified: projects/physbio/sys/sparc64/sparc64/bus_machdep.c
==============================================================================
--- projects/physbio/sys/sparc64/sparc64/bus_machdep.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/sparc64/sparc64/bus_machdep.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -404,8 +404,8 @@ nexus_dmamap_load_buffer(bus_dma_tag_t d
 }
 
 static void
-nexus_dmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-    bus_dmamap_callback_t *callback, void *callback_arg)
+nexus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg)
 {
 
 }
@@ -521,7 +521,7 @@ static struct bus_dma_methods nexus_dma_
 	nexus_dmamap_create,
 	nexus_dmamap_destroy,
 	nexus_dmamap_load_buffer,
-	nexus_dmamap_mayblock,
+	nexus_dmamap_waitok,
 	nexus_dmamap_complete,
 	nexus_dmamap_unload,
 	nexus_dmamap_sync,

Modified: projects/physbio/sys/sparc64/sparc64/iommu.c
==============================================================================
--- projects/physbio/sys/sparc64/sparc64/iommu.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/sparc64/sparc64/iommu.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -970,8 +970,8 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t 
 }
 
 static void
-iommu_dvmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-    bus_dmamap_callback_t *callback, void *callback_arg)
+iommu_dvmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_memory_t mem, bus_dmamap_callback_t *callback, void *callback_arg)
 {
 }
 
@@ -1089,7 +1089,7 @@ struct bus_dma_methods iommu_dma_methods
 	iommu_dvmamap_create,
 	iommu_dvmamap_destroy,
 	iommu_dvmamap_load_buffer,
-	iommu_dvmamap_mayblock,
+	iommu_dvmamap_waitok,
 	iommu_dvmamap_complete,
 	iommu_dvmamap_unload,
 	iommu_dvmamap_sync,

Modified: projects/physbio/sys/sys/bus_dma.h
==============================================================================
--- projects/physbio/sys/sys/bus_dma.h	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/sys/bus_dma.h	Thu Dec 27 04:29:14 2012	(r244726)
@@ -113,6 +113,7 @@
 struct pmap;
 struct mbuf;
 struct uio;
+struct bio;
 union ccb;
 
 /*
@@ -135,6 +136,128 @@ typedef struct bus_dma_segment {
 } bus_dma_segment_t;
 
 /*
+ *	bus_dma_memory_t 
+ *
+ *	Encapsulates various memory descriptors that devices may DMA
+ *	to or from.
+ */
+
+typedef struct bus_dma_memory {
+	union {
+		void			*dm_vaddr;
+		vm_paddr_t		dm_paddr;
+		bus_dma_segment_t	*dm_list;
+		struct bio		*dm_bio;
+		struct uio		*dm_uio;
+		struct mbuf		*dm_mbuf;
+		union ccb		*dm_ccb;
+	};
+	bus_size_t	dm_opaque;	/* type specific data. */
+	uint32_t	dm_type;	/* Type of memory. */
+} bus_dma_memory_t;
+
+#define	BUS_DMAMEM_VADDR	1	/* Contiguous virtual address. */
+#define	BUS_DMAMEM_PADDR	2	/* Contiguous physical address. */
+#define	BUS_DMAMEM_VLIST	3	/* sglist of kva. */
+#define	BUS_DMAMEM_PLIST	4	/* sglist of physical addresses. */
+#define	BUS_DMAMEM_BIO		5	/* Pointer to a bio (block io). */
+#define	BUS_DMAMEM_UIO		6	/* Pointer to a uio (any io). */
+#define	BUS_DMAMEM_MBUF		7	/* Pointer to a mbuf (network io). */
+#define	BUS_DMAMEM_CCB		8	/* Cam control block. (scsi/ata io). */
+
+static inline bus_dma_memory_t
+dma_mem_vaddr(void *vaddr, bus_size_t len)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_vaddr = vaddr;
+	mem.dm_opaque = len;
+	mem.dm_type = BUS_DMAMEM_VADDR;
+
+	return (mem);
+}
+
+static inline bus_dma_memory_t
+dma_mem_paddr(vm_paddr_t paddr, bus_size_t len)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_paddr = paddr;
+	mem.dm_opaque = len;
+	mem.dm_type = BUS_DMAMEM_PADDR;
+
+	return (mem);
+}
+
+static inline bus_dma_memory_t
+dma_mem_vlist(bus_dma_segment_t *vlist, int sglist_cnt)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_list = vlist;
+	mem.dm_opaque = sglist_cnt;
+	mem.dm_type = BUS_DMAMEM_VLIST;
+
+	return (mem);
+}
+
+static inline bus_dma_memory_t
+dma_mem_plist(bus_dma_segment_t *plist, int sglist_cnt)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_list = plist;
+	mem.dm_opaque = sglist_cnt;
+	mem.dm_type = BUS_DMAMEM_PLIST;
+
+	return (mem);
+}
+
+static inline bus_dma_memory_t
+dma_mem_bio(struct bio *bio)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_bio = bio;
+	mem.dm_type = BUS_DMAMEM_BIO;
+
+	return (mem);
+}
+
+static inline bus_dma_memory_t
+dma_mem_uio(struct uio *uio)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_uio = uio;
+	mem.dm_type = BUS_DMAMEM_UIO;
+
+	return (mem);
+}
+
+static inline bus_dma_memory_t
+dma_mem_mbuf(struct mbuf *mbuf)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_mbuf = mbuf;
+	mem.dm_type = BUS_DMAMEM_MBUF;
+
+	return (mem);
+}
+
+static inline bus_dma_memory_t
+dma_mem_ccb(union ccb *ccb)
+{
+	bus_dma_memory_t mem;
+
+	mem.dm_ccb = ccb;
+	mem.dm_type = BUS_DMAMEM_CCB;
+
+	return (mem);
+}
+
+/*
  * A function that returns 1 if the address cannot be accessed by
  * a device and 0 if it can be.
  */
@@ -229,6 +352,13 @@ int bus_dmamap_load_ccb(bus_dma_tag_t dm
 			int flags);
 
 /*
+ * Loads any memory descriptor.
+ */
+int bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
+			bus_dma_memory_t *mem, bus_dmamap_callback_t *callback,
+			void *callback_arg, int flags);
+
+/*
  * XXX sparc64 uses the same interface, but a much different implementation.
  *     <machine/bus_dma.h> for the sparc64 arch contains the equivalent
  *     declarations.
@@ -286,13 +416,15 @@ void _bus_dmamap_unload(bus_dma_tag_t dm
  * busdma layers.  These are not intended for consumption by driver
  * software.
  */
-void __bus_dmamap_mayblock(bus_dma_tag_t dmat, bus_dmamap_t map,
-    			   bus_dmamap_callback_t *callback, void *callback_arg);
+void __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+			 bus_dma_memory_t mem,
+    			 bus_dmamap_callback_t *callback,
+			 void *callback_arg);
 
-#define	_bus_dmamap_mayblock(dmat, map, callback, callback_arg)		\
+#define	_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg)	\
 	do {								\
 		if ((map) != NULL)					\
-			__bus_dmamap_mayblock(dmat, map, callback,	\
+			__bus_dmamap_waitok(dmat, map, mem, callback,	\
 			    callback_arg);				\
 	} while (0);
 
@@ -300,6 +432,10 @@ int _bus_dmamap_load_buffer(bus_dma_tag_
 			    void *buf, bus_size_t buflen, struct pmap *pmap,
 			    int flags, bus_dma_segment_t *segs, int *segp);
 
+int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+			  vm_paddr_t paddr, bus_size_t buflen,
+			  int flags, bus_dma_segment_t *segs, int *segp);
+
 bus_dma_segment_t *_bus_dmamap_complete(bus_dma_tag_t dmat,
 			   		bus_dmamap_t map,
 					bus_dma_segment_t *segs,

Modified: projects/physbio/sys/x86/x86/busdma_machdep.c
==============================================================================
--- projects/physbio/sys/x86/x86/busdma_machdep.c	Thu Dec 27 02:02:23 2012	(r244725)
+++ projects/physbio/sys/x86/x86/busdma_machdep.c	Thu Dec 27 04:29:14 2012	(r244726)
@@ -122,8 +122,7 @@ struct bus_dmamap {
 	int		       pagesneeded;
 	int		       pagesreserved;
 	bus_dma_tag_t	       dmat;
-	void		      *buf;		/* unmapped buffer pointer */
-	bus_size_t	       buflen;		/* unmapped buffer length */
+	bus_dma_memory_t       mem;
 	bus_dmamap_callback_t *callback;
 	void		      *callback_arg;
 	STAILQ_ENTRY(bus_dmamap) links;
@@ -627,9 +626,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
 		} else {
 			if (reserve_bounce_pages(dmat, map, 1) != 0) {
 				/* Queue us for resources */
-				map->dmat = dmat;
-				map->buf = buf;
-				map->buflen = buflen;
 				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201212270429.qBR4TEQT006871>