Date:      Fri, 7 Apr 2006 22:43:43 GMT
From:      John-Mark Gurney <jmg@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 94782 for review
Message-ID:  <200604072243.k37MhhOL049237@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=94782

Change 94782 by jmg@jmg_arlene on 2006/04/07 22:42:42

	make compile and link...  include machine/bus.h in sys/bus.h so
	we get bus_dma_tag_t defined...
	
	pull dvma out of OFW for initialization...
	
	other minor fixes..
	
	this still isn't complete as we don't enter pages into the iommu
	yet, (nor unmap them)
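
For orientation, a condensed sketch of the initialization flow this change introduces, pieced together from the hviommu.h and hv_pci.c hunks below; it is illustrative rather than a verbatim excerpt:

uint32_t *dvma;
int nrange;

/*
 * The sun4v PCI node carries a two-cell "virtual-dma" property:
 * cell 0 is the DVMA window base, cell 1 is its size.
 */
nrange = OF_getprop_alloc(node, "virtual-dma", sizeof *dvma,
    (void **)&dvma);
KASSERT(nrange == 2, ("virtual-dma property invalid"));

/*
 * hviommu_init() now takes the DVMA window explicitly instead of
 * discovering it itself.
 */
himp = hviommu_init(sc->hs_devhandle, dvma[0], dvma[1]);
sc->hs_dmatag.dt_cookie = himp;
sc->hs_dmatag.dt_mt = &hviommu_dma_methods;

free(dvma, M_OFWPROP);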

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/dev/pci/pci_pci.c#7 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/hviommu.h#2 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/hv_pci.c#27 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/hviommu.c#2 edit
.. //depot/projects/kmacy_sun4v/src/sys/sys/bus.h#4 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/dev/pci/pci_pci.c#7 (text+ko) ====

@@ -244,7 +244,7 @@
 	    device_printf(dev, "  memory decode disabled.\n");
 	if (pcib_is_prefetch_open(sc))
 	    device_printf(dev, "  prefetched decode 0x%jx-0x%jx\n",
-		(uintmax_t)sc->pmembase, (uintmax_t)c->pmemlimit);
+		(uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
 	else
 	    device_printf(dev, "  prefetched decode disabled.\n");
 	if (sc->flags & PCIB_SUBTRACTIVE)

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/hviommu.h#2 (text+ko) ====

@@ -5,6 +5,6 @@
 
 extern struct bus_dma_methods hviommu_dma_methods;
 
-struct hviommu *hviommu_init(devhandle_t dh);
+struct hviommu *hviommu_init(devhandle_t dh, u_long dvmabase, u_long dvmasize);
 
 #endif /* _HVIOMMU_H_ */

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/hv_pci.c#27 (text+ko) ====

@@ -160,6 +160,7 @@
 	uint32_t cell;
 #endif
 	uint64_t reg, nreg;
+	uint32_t *dvma;
 	int br[2];
 	int n, type, error;
 	int i, nrange, rid;
@@ -258,11 +259,18 @@
 		(*btp)->bst_parent = &nexus_bustag;
 		(*btp)->bst_type = type;
 	}
+	free(range, M_OFWPROP);
 
+	nrange = OF_getprop_alloc(node, "virtual-dma", sizeof *dvma,
+	    (void **)&dvma);
+	KASSERT(nrange == 2, ("virtual-dma property invalid"));
+
 	/* Setup bus_dma_tag */
-	himp = hviommu_init(sc->hs_devhandle);
-	sc->hs_dt_cookie = himp;
-	sc->hs_dt_mt = &hviommu_dma_methods;
+	himp = hviommu_init(sc->hs_devhandle, dvma[0], dvma[1]);
+	sc->hs_dmatag.dt_cookie = himp;
+	sc->hs_dmatag.dt_mt = &hviommu_dma_methods;
+
+	free(dvma, M_OFWPROP);
 
 	device_add_child(dev, "pci", -1);
 

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/hviommu.c#2 (text+ko) ====

@@ -101,22 +101,42 @@
  */
 
 #include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mbuf.h>
 #include <sys/mutex.h>
+#include <sys/proc.h>
 #include <sys/uio.h>
 
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+
 #include <machine/bus.h>
 #include <machine/bus_private.h>
 #include <machine/hviommu.h>
+#include <machine/pmap.h>
 #include <machine/resource.h>
 
 #include <sys/rman.h>
 
-#define	IO_PAGE_SHIFT	PAGE_SHIFT_8K
+/*
+ * Tuning constants.
+ */
+#define	IOMMU_MAX_PRE		(32 * 1024)
+#define	IOMMU_MAX_PRE_SEG	3
+
+#define	IO_PAGE_SIZE		PAGE_SIZE_8K
+#define	IO_PAGE_MASK		PAGE_MASK_8K
+#define	IO_PAGE_SHIFT		PAGE_SHIFT_8K
+#define	round_io_page(x)	round_page(x)
+#define	trunc_io_page(x)	trunc_page(x)
 
 MALLOC_DEFINE(M_HVIOMMU, "hviommu", "HyperVisor IOMMU");
 
+TAILQ_HEAD(hviommu_maplruq_head, bus_dmamap);
+
 struct hviommu {
 	struct mtx	him_mtx;
 
@@ -124,11 +144,19 @@
 	u_long		him_dvmabase;
 	u_long		him_dvmasize;
 
+	struct hviommu_maplruq_head him_maplruq;
 	struct rman	him_rman;
 };
 
 #define VA_TO_TSBID(him, va)	((va - (him)->him_dvmabase) >> IO_PAGE_SHIFT)
 
+/*
+ * Always overallocate one page; this is needed to handle alignment of the
+ * buffer, so it makes sense using a lazy allocation scheme.
+ */
+#define	IOMMU_SIZE_ROUNDUP(sz)						\
+	(round_io_page(sz) + IO_PAGE_SIZE)
+
 /* Resource helpers */
 #define IOMMU_RES_TO(v)	((v) >> IO_PAGE_SHIFT)
 #define	IOMMU_RES_START(res)						\
@@ -138,28 +166,45 @@
 #define	IOMMU_RES_SIZE(res)						\
 	((bus_size_t)rman_get_size(res) << IO_PAGE_SHIFT)
 
+/* Helpers for struct bus_dmamap_res */
+#define	BDR_START(r)	IOMMU_RES_START((r)->dr_res)
+#define	BDR_END(r)	IOMMU_RES_END((r)->dr_res)
+#define	BDR_SIZE(r)	IOMMU_RES_SIZE((r)->dr_res)
+
 /* Locking macros. */
 #define	HIM_LOCK(him)	mtx_lock(&him->him_mtx)
 #define	HIM_LOCK_ASSERT(him)	mtx_assert(&him->him_mtx, MA_OWNED)
 #define	HIM_UNLOCK(him)	mtx_unlock(&him->him_mtx)
 
-struct bus_dma_methods hviommu_dma_methods = {
-	.dm_dmamap_create = hviommu_dvmamap_create,
-	.dm_dmamap_destroy = hviommu_dvmamap_destroy,
-	.dm_dmamap_load = hviommu_dvmamap_load,
-	.dm_dmamap_load_mbuf = hviommu_dvmamap_load_mbuf,
-	.dm_dmamap_load_mbuf_sg = hviommu_dvmamap_load_mbuf_sg,
-	.dm_dmamap_load_uio = hviommu_dvmamap_load_uio,
-	.dm_dmamap_unload = hviommu_dvmamap_unload,
-	.dm_dmamap_sync = hviommu_dvmamap_sync,
-	.dm_dmamap_alloc = hviommu_dvmamem_alloc,
-	.dm_dmamap_free = hviommu_dvmamem_free,
-};
+/* LRU queue handling for lazy resource allocation. */
+static __inline void
+hviommu_map_insq(struct hviommu *him, bus_dmamap_t map)
+{
+
+	HIM_LOCK_ASSERT(him);
+	if (!SLIST_EMPTY(&map->dm_reslist)) {
+		if (map->dm_onq)
+			TAILQ_REMOVE(&him->him_maplruq, map, dm_maplruq);
+		TAILQ_INSERT_TAIL(&him->him_maplruq, map, dm_maplruq);
+		map->dm_onq = 1;
+	}
+}
+
+static __inline void
+hviommu_map_remq(struct hviommu *him, bus_dmamap_t map)
+{
+
+	HIM_LOCK_ASSERT(him);
+	if (map->dm_onq)
+		TAILQ_REMOVE(&him->him_maplruq, map, dm_maplruq);
+	map->dm_onq = 0;
+}
 
 struct hviommu *
 hviommu_init(devhandle_t dh, u_long dvmabase, u_long dvmasize)
 {
 	struct hviommu *him;
+	u_long end;
 
 	him = malloc(sizeof *him, M_HVIOMMU, M_WAITOK);
 
@@ -168,8 +213,9 @@
 	him->him_dvmabase = dvmabase;
 	him->him_dvmasize = dvmasize;
 
+	TAILQ_INIT(&him->him_maplruq);
 	him->him_rman.rm_type = RMAN_ARRAY;
-	him->him_rman.rm_desc = "HyperVisor IOMMU Memory";
+	him->him_rman.rm_descr = "HyperVisor IOMMU Memory";
 	end = him->him_dvmabase + him->him_dvmasize - 1;
 	if (rman_init(&him->him_rman) != 0 ||
 	    rman_manage_region(&him->him_rman, him->him_dvmabase >>
@@ -181,7 +227,7 @@
 
 /* XXX - vm_stat_t better be signed */
 static void
-hviommu_remove(struct hviommu *him, vm_offset_t va, vm_stat_t len)
+hviommu_remove(struct hviommu *him, vm_offset_t va, vm_size_t len)
 {
 	uint64_t error;
 	pages_t demapped;
@@ -194,7 +240,7 @@
 	while (len >= 0) {
 		if ((error = hvio_iommu_demap(him->him_handle,
 		    VA_TO_TSBID(him, va), len >> IO_PAGE_SHIFT, &demapped))) {
-			printf("%s: demap: va: %#lx, npages: %#lx, err: %d\n",
+			printf("%s: demap: va: %#lx, npages: %#lx, err: %ld\n",
 			    __func__, va, len >> IO_PAGE_SHIFT, error);
 			demapped = 1;
 		}
@@ -229,12 +275,12 @@
 	sgsize = IOMMU_RES_TO(round_io_page(size));
 	if (t->dt_boundary > 0 && t->dt_boundary < IO_PAGE_SIZE)
 		panic("hviommu_dvmamap_load: illegal boundary specified");
-	res = rman_reserve_resource_bound(&hviommu_dvma_rman, 0L,
+	res = rman_reserve_resource_bound(&him->him_rman, 0L,
 	    IOMMU_RES_TO(t->dt_lowaddr), sgsize,
 	    IOMMU_RES_TO(t->dt_boundary),
 	    RF_ACTIVE | rman_make_alignment_flags(align), NULL);
 	if (res == NULL) {
-		free(bdr, M_IOMMU);
+		free(bdr, M_HVIOMMU);
 		return (ENOMEM);
 	}
 
@@ -354,9 +400,9 @@
 			 */
 			HIM_LOCK(him);
 			freed = 0;
-			last = TAILQ_LAST(&hviommu_maplruq, hviommu_maplruq_head);
+			last = TAILQ_LAST(&him->him_maplruq, hviommu_maplruq_head);
 			do {
-				tm = TAILQ_FIRST(&hviommu_maplruq);
+				tm = TAILQ_FIRST(&him->him_maplruq);
 				complete = tm == last;
 				if (tm == NULL)
 					break;
@@ -397,7 +443,7 @@
 	if ((flags & BUS_DMA_ZERO) != 0)
 		mflags |= M_ZERO;
 
-	if ((*vaddr = malloc(dt->dt_maxsize, M_IOMMU, mflags)) == NULL) {
+	if ((*vaddr = malloc(dt->dt_maxsize, M_HVIOMMU, mflags)) == NULL) {
 		error = ENOMEM;
 		sparc64_dma_free_map(dt, *mapp);
 		return (error);
@@ -422,7 +468,7 @@
 
 	hviommu_dvma_vfree(him, map);
 	sparc64_dma_free_map(dt, map);
-	free(vaddr, M_IOMMU);
+	free(vaddr, M_HVIOMMU);
 }
 
 static int
@@ -534,8 +580,12 @@
 		buflen -= sgsize;
 		vaddr += sgsize;
 
-		hviommu_enter(is, trunc_io_page(dvmaddr), trunc_io_page(curaddr),
+#if 0
+		hviommu_enter(him, trunc_io_page(dvmaddr), trunc_io_page(curaddr),
 		    flags);
+#else
+		panic("enter page into iommu");
+#endif
 
 		/*
 		 * Chop the chunk up into segments of at most maxsegsz, but try
@@ -599,7 +649,7 @@
 	error = hviommu_dvmamap_load_buffer(dt, him, map, buf, buflen, NULL,
 	    flags, dt->dt_segments, &seg, 1);
 
-	HIM_LOCK(is);
+	HIM_LOCK(him);
 	hviommu_map_insq(him, map);
 	if (error != 0) {
 		hviommu_dvmamap_vunload(him, map);
@@ -639,7 +689,7 @@
 		for (m = m0; m != NULL && error == 0; m = m->m_next) {
 			if (m->m_len == 0)
 				continue;
-			error = hviommu_dvmamap_load_buffer(dt, is, map,
+			error = hviommu_dvmamap_load_buffer(dt, him, map,
 			    m->m_data, m->m_len, NULL, flags, dt->dt_segments,
 			    &nsegs, first);
 			first = 0;
@@ -648,7 +698,7 @@
 		error = EINVAL;
 
 	HIM_LOCK(him);
-	hviommu_map_insq(is, map);
+	hviommu_map_insq(him, map);
 	if (error != 0) {
 		hviommu_dvmamap_vunload(him, map);
 		HIM_UNLOCK(him);
@@ -747,7 +797,7 @@
 		if (minlen == 0)
 			continue;
 
-		error = hviommu_dvmamap_load_buffer(dt, is, map,
+		error = hviommu_dvmamap_load_buffer(dt, him, map,
 		    iov[i].iov_base, minlen, td, flags, dt->dt_segments, 
 		    &nsegs, first);
 		first = 0;
@@ -778,8 +828,8 @@
 	if ((map->dm_flags & DMF_LOADED) == 0)
 		return;
 	HIM_LOCK(him);
-	hviommu_dvmamap_vunload(is, map);
-	hviommu_map_insq(is, map);
+	hviommu_dvmamap_vunload(him, map);
+	hviommu_map_insq(him, map);
 	HIM_UNLOCK(him);
 	map->dm_flags &= ~DMF_LOADED;
 }
@@ -791,10 +841,50 @@
 	struct bus_dmamap_res *r;
 	vm_offset_t va;
 	vm_size_t len;
+	size_t synced;
+	uint64_t err;
+	io_sync_direction_t iodir;
 
 	if ((map->dm_flags & DMF_LOADED) == 0)
 		return;
 	/* XXX This is probably bogus. */
-	if ((op & BUS_DMASYNC_PREREAD) != 0 || (op & BUS_DMASYNC_PREWRITE) != 0)
+	iodir = 0;
+	if (op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD))
+		iodir |= IO_SYNC_CPU;
+	if (op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE))
+		iodir |= IO_SYNC_DEVICE;
+
+	if ((op & BUS_DMASYNC_PREREAD) != 0)
+		membar(Sync);
+	HIM_LOCK(him);
+	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
+		va = (vm_offset_t)BDR_START(r);
+		len = r->dr_used;
+		while (len > 0) {
+			if ((err = hvio_dma_sync(him->him_handle, va, len, iodir,
+			    &synced))) {
+				printf("failed to dma_sync: err: %ld, handle: %#lx, va: %#lx, len: %#lx, dir: %d\n",
+				    err, him->him_handle, va, len, iodir);
+				synced = 1;
+			}
+			va += synced;
+			len -= synced;
+		}
+	}
+	HIM_UNLOCK(him);
+	if ((op & BUS_DMASYNC_PREWRITE) != 0)
 		membar(Sync);
 }
+
+struct bus_dma_methods hviommu_dma_methods = {
+	.dm_dmamap_create = hviommu_dvmamap_create,
+	.dm_dmamap_destroy = hviommu_dvmamap_destroy,
+	.dm_dmamap_load = hviommu_dvmamap_load,
+	.dm_dmamap_load_mbuf = hviommu_dvmamap_load_mbuf,
+	.dm_dmamap_load_mbuf_sg = hviommu_dvmamap_load_mbuf_sg,
+	.dm_dmamap_load_uio = hviommu_dvmamap_load_uio,
+	.dm_dmamap_unload = hviommu_dvmamap_unload,
+	.dm_dmamap_sync = hviommu_dvmamap_sync,
+	.dm_dmamem_alloc = hviommu_dvmamem_alloc,
+	.dm_dmamem_free = hviommu_dvmamem_free,
+};
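
As the change description notes, the DMA load path above still panics where pages should be entered into the IOMMU (the #if 0 / panic("enter page into iommu") hunk). Below is a rough sketch of what that missing hviommu_enter() could look like, mirroring the hvio_iommu_demap() loop in hviommu_remove(); the hvio_iommu_map() wrapper, its argument order, the page-list layout and the PCI_MAP_ATTR_* attribute flags are assumptions based on the sun4v hypervisor API, not part of this change:

/*
 * Hypothetical sketch only: assumes hvio_iommu_map() is symmetric with the
 * hvio_iommu_demap() call used above, taking a TSB index, a page count, IO
 * attributes and a list of physical pages, and returning via its last
 * argument how many pages were actually mapped.
 */
static void
hviommu_enter(struct hviommu *him, vm_offset_t va, vm_paddr_t pa, int flags)
{
	uint64_t pglist[1];
	uint64_t error;
	pages_t mapped;

	/* flags (BUS_DMA_*) are ignored in this sketch. */
	KASSERT(va >= him->him_dvmabase,
	    ("hviommu_enter: va %#lx below DVMA base %#lx",
	    va, him->him_dvmabase));

	pglist[0] = trunc_io_page(pa);
	if ((error = hvio_iommu_map(him->him_handle, VA_TO_TSBID(him, va), 1,
	    PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE, pglist, &mapped)))
		printf("%s: map: va: %#lx, pa: %#lx, err: %ld\n",
		    __func__, va, pa, error);
}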

==== //depot/projects/kmacy_sun4v/src/sys/sys/bus.h#4 (text+ko) ====

@@ -29,6 +29,8 @@
 #ifndef _SYS_BUS_H_
 #define _SYS_BUS_H_
 
+#include <machine/bus.h>
+
 /**
  * @defgroup NEWBUS newbus - a generic framework for managing devices
  * @{


