Date: Fri, 1 May 2020 09:46:27 +0000 (UTC)
From: Hans Petter Selasky <hselasky@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject: svn commit: r360525 - in stable/12: sys/arm64/arm64 sys/arm64/include
    sys/compat/linuxkpi/common/include/linux sys/compat/linuxkpi/common/src
    sys/dev/ofw sys/dev/pci sys/kern sys/sys sys/x86/includ...
Message-ID: <202005010946.0419kRVc060976@repo.freebsd.org>
Author: hselasky
Date: Fri May 1 09:46:27 2020
New Revision: 360525
URL: https://svnweb.freebsd.org/changeset/base/360525

Log:
  MFC r346645, r346664, r346687, r347387, r347836, r347088, 347089,
  r346956, r346957, r346958, r347088, r347089, r347385, r353938,
  r350570, r350572 and r350573:

  Implement full bus_dma(9) support in the LinuxKPI and pull in all
  dependencies.

  Bump FreeBSD version to force recompilation of external modules.

  Sponsored by:	Mellanox Technologies

Modified:
  stable/12/sys/arm64/arm64/busdma_bounce.c
  stable/12/sys/arm64/include/bus_dma.h
  stable/12/sys/arm64/include/bus_dma_impl.h
  stable/12/sys/compat/linuxkpi/common/include/linux/device.h
  stable/12/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
  stable/12/sys/compat/linuxkpi/common/include/linux/dmapool.h
  stable/12/sys/compat/linuxkpi/common/include/linux/gfp.h
  stable/12/sys/compat/linuxkpi/common/include/linux/io.h
  stable/12/sys/compat/linuxkpi/common/include/linux/pci.h
  stable/12/sys/compat/linuxkpi/common/include/linux/scatterlist.h
  stable/12/sys/compat/linuxkpi/common/src/linux_pci.c
  stable/12/sys/dev/ofw/ofwpci.c
  stable/12/sys/dev/pci/vga_pci.c
  stable/12/sys/kern/bus_if.m
  stable/12/sys/kern/subr_bus.c
  stable/12/sys/sys/bus.h
  stable/12/sys/sys/bus_dma.h
  stable/12/sys/sys/param.h
  stable/12/sys/x86/include/bus_dma.h
  stable/12/sys/x86/include/busdma_impl.h
  stable/12/sys/x86/iommu/busdma_dmar.c
  stable/12/sys/x86/x86/busdma_bounce.c
  stable/12/usr.sbin/camdd/camdd.c

Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/arm64/arm64/busdma_bounce.c
==============================================================================
--- stable/12/sys/arm64/arm64/busdma_bounce.c	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/arm64/arm64/busdma_bounce.c	Fri May 1 09:46:27 2020	(r360525)
@@ -152,6 +152,8 @@ static bus_addr_t add_bounce_page(bus_dma_tag_t dmat,
 		    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
+    bus_size_t buflen, int *pagesneeded);
 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
     pmap_t pmap, void *buf, bus_size_t buflen, int flags);
 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -271,6 +273,15 @@ out:
 	return (error);
 }
 
+static bool
+bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+	if ((dmat->bounce_flags & BF_COULD_BOUNCE) == 0)
+		return (true);
+	return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
+}
+
 static bus_dmamap_t
 alloc_dmamap(bus_dma_tag_t dmat, int flags)
 {
@@ -539,29 +550,45 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr
 	    dmat->bounce_flags);
 }
 
+static bool
+_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
+    int *pagesneeded)
+{
+	bus_addr_t curaddr;
+	bus_size_t sgsize;
+	int count;
+
+	/*
+	 * Count the number of bounce pages needed in order to
+	 * complete this transfer
+	 */
+	count = 0;
+	curaddr = buf;
+	while (buflen != 0) {
+		sgsize = MIN(buflen, dmat->common.maxsegsz);
+		if (bus_dma_run_filter(&dmat->common, curaddr)) {
+			sgsize = MIN(sgsize,
+			    PAGE_SIZE - (curaddr & PAGE_MASK));
+			if (pagesneeded == NULL)
+				return (true);
+			count++;
+		}
+		curaddr += sgsize;
+		buflen -= sgsize;
+	}
+
+	if (pagesneeded != NULL)
+		*pagesneeded = count;
+	return (count != 0);
+}
+
 static void
 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
     vm_paddr_t buf, bus_size_t buflen, int flags)
 {
-	bus_addr_t curaddr;
-	bus_size_t sgsize;
 
 	if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
-		/*
-		 * Count the number of bounce pages
-		 * needed in order to complete this transfer
-		 */
-		curaddr = buf;
-		while (buflen != 0) {
-			sgsize = MIN(buflen, dmat->common.maxsegsz);
-			if (bus_dma_run_filter(&dmat->common, curaddr)) {
-				sgsize = MIN(sgsize,
-				    PAGE_SIZE - (curaddr & PAGE_MASK));
-				map->pagesneeded++;
-			}
-			curaddr += sgsize;
-			buflen -= sgsize;
-		}
+		_bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
 	}
 }
@@ -1316,6 +1343,7 @@ busdma_swi(void)
 struct bus_dma_impl bus_dma_bounce_impl = {
 	.tag_create = bounce_bus_dma_tag_create,
 	.tag_destroy = bounce_bus_dma_tag_destroy,
+	.id_mapped = bounce_bus_dma_id_mapped,
 	.map_create = bounce_bus_dmamap_create,
 	.map_destroy = bounce_bus_dmamap_destroy,
 	.mem_alloc = bounce_bus_dmamem_alloc,

Modified: stable/12/sys/arm64/include/bus_dma.h
==============================================================================
--- stable/12/sys/arm64/include/bus_dma.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/arm64/include/bus_dma.h	Fri May 1 09:46:27 2020	(r360525)
@@ -9,6 +9,18 @@
 #include <machine/bus_dma_impl.h>
 
 /*
+ * Is DMA address 1:1 mapping of physical address
+ */
+static inline bool
+bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->id_mapped(dmat, buf, buflen));
+}
+
+/*
  * Allocate a handle for mapping from kva/uva/physical
  * address space into bus device space.
  */

Modified: stable/12/sys/arm64/include/bus_dma_impl.h
==============================================================================
--- stable/12/sys/arm64/include/bus_dma_impl.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/arm64/include/bus_dma_impl.h	Fri May 1 09:46:27 2020	(r360525)
@@ -58,6 +58,7 @@ struct bus_dma_impl {
 	    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
 	    void *lockfuncarg, bus_dma_tag_t *dmat);
 	int (*tag_destroy)(bus_dma_tag_t dmat);
+	bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
 	int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
 	int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
 	int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,

Modified: stable/12/sys/compat/linuxkpi/common/include/linux/device.h
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/device.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/device.h	Fri May 1 09:46:27 2020	(r360525)
@@ -108,7 +108,7 @@ struct device {
 	struct class	*class;
 	void		(*release)(struct device *dev);
 	struct kobject	kobj;
-	uint64_t	*dma_mask;
+	void		*dma_priv;
 	void		*driver_data;
 	unsigned int	irq;
 #define	LINUX_IRQ_INVALID	65535

Modified: stable/12/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/dma-mapping.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/dma-mapping.h	Fri May 1 09:46:27 2020	(r360525)
@@ -90,6 +90,16 @@ struct dma_map_ops {
 
 #define	DMA_BIT_MASK(n)	((2ULL << ((n) - 1)) - 1ULL)
 
+int linux_dma_tag_init(struct device *dev, u64 mask);
+void *linux_dma_alloc_coherent(struct device *dev, size_t size,
+    dma_addr_t *dma_handle, gfp_t flag);
+dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len);
+void linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t size);
+int linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+    int nents, enum dma_data_direction dir, struct dma_attrs *attrs);
+void linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+    int nents, enum dma_data_direction dir, struct dma_attrs *attrs);
+
 static inline int
 dma_supported(struct device *dev, u64 mask)
 {
@@ -102,11 +112,10 @@ static inline int
 dma_set_mask(struct device *dev, u64 dma_mask)
 {
 
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+	if (!dev->dma_priv || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-	*dev->dma_mask = dma_mask;
-	return (0);
+	return (linux_dma_tag_init(dev, dma_mask));
 }
 
 static inline int
@@ -134,24 +143,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
     gfp_t flag)
 {
-	vm_paddr_t high;
-	size_t align;
-	void *mem;
-
-	if (dev != NULL && dev->dma_mask)
-		high = *dev->dma_mask;
-	else if (flag & GFP_DMA32)
-		high = BUS_SPACE_MAXADDR_32BIT;
-	else
-		high = BUS_SPACE_MAXADDR;
-	align = PAGE_SIZE << get_order(size);
-	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
-	    VM_MEMATTR_DEFAULT);
-	if (mem)
-		*dma_handle = vtophys(mem);
-	else
-		*dma_handle = 0;
-	return (mem);
+	return (linux_dma_alloc_coherent(dev, size, dma_handle, flag));
 }
 
 static inline void *
@@ -164,25 +156,27 @@ dma_zalloc_coherent(struct device *dev, size_t size, d
 
 static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-    dma_addr_t dma_handle)
+    dma_addr_t dma_addr)
 {
 
+	linux_dma_unmap(dev, dma_addr, size);
 	kmem_free((vm_offset_t)cpu_addr, size);
 }
 
-/* XXX This only works with no iommu. */
 static inline dma_addr_t
 dma_map_single_attrs(struct device *dev, void *ptr, size_t size,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 
-	return vtophys(ptr);
+	return (linux_dma_map_phys(dev, vtophys(ptr), size));
 }
 
 static inline void
-dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size,
+dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, size_t size,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+
+	linux_dma_unmap(dev, dma_addr, size);
 }
 
 static inline dma_addr_t
@@ -190,26 +184,23 @@ dma_map_page_attrs(struct device *dev, struct page *pa
     size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 
-	return (VM_PAGE_TO_PHYS(page) + offset);
+	return (linux_dma_map_phys(dev, VM_PAGE_TO_PHYS(page) + offset, size));
 }
 
 static inline int
 dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	struct scatterlist *sg;
-	int i;
 
-	for_each_sg(sgl, sg, nents, i)
-		sg_dma_address(sg) = sg_phys(sg);
-
-	return (nents);
+	return (linux_dma_map_sg_attrs(dev, sgl, nents, dir, attrs));
 }
 
 static inline void
 dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+
+	linux_dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
 }
 
 static inline dma_addr_t
@@ -217,13 +208,15 @@ dma_map_page(struct device *dev, struct page *page,
     unsigned long offset, size_t size, enum dma_data_direction direction)
 {
 
-	return VM_PAGE_TO_PHYS(page) + offset;
+	return (linux_dma_map_phys(dev, VM_PAGE_TO_PHYS(page) + offset, size));
 }
 
 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
     enum dma_data_direction direction)
 {
+
+	linux_dma_unmap(dev, dma_address, size);
 }
 
 static inline void
@@ -273,7 +266,7 @@ static inline int
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 
-	return (0);
+	return (dma_addr == 0);
 }
 
 static inline unsigned int dma_set_max_seg_size(struct device *dev,

Modified: stable/12/sys/compat/linuxkpi/common/include/linux/dmapool.h
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/dmapool.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/dmapool.h	Fri May 1 09:46:27 2020	(r360525)
@@ -37,44 +37,35 @@
 #include <linux/device.h>
 #include <linux/slab.h>
 
-struct dma_pool {
-	uma_zone_t	pool_zone;
-};
+struct dma_pool;
+struct dma_pool *linux_dma_pool_create(char *name, struct device *dev,
+    size_t size, size_t align, size_t boundary);
+void linux_dma_pool_destroy(struct dma_pool *pool);
+void *linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+    dma_addr_t *handle);
+void linux_dma_pool_free(struct dma_pool *pool, void *vaddr,
+    dma_addr_t dma_addr);
 
 static inline struct dma_pool *
 dma_pool_create(char *name, struct device *dev, size_t size,
     size_t align, size_t boundary)
 {
-	struct dma_pool *pool;
 
-	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
-	align--;
-	/*
-	 * XXX Eventually this could use a separate allocf to honor boundary
-	 * and physical address requirements of the device.
-	 */
-	pool->pool_zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL,
-	    align, UMA_ZONE_OFFPAGE|UMA_ZONE_HASH);
-
-	return (pool);
+	return (linux_dma_pool_create(name, dev, size, align, boundary));
 }
 
 static inline void
 dma_pool_destroy(struct dma_pool *pool)
 {
-	uma_zdestroy(pool->pool_zone);
-	kfree(pool);
+
+	linux_dma_pool_destroy(pool);
 }
 
 static inline void *
 dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 {
-	void *vaddr;
 
-	vaddr = uma_zalloc(pool->pool_zone, mem_flags);
-	if (vaddr)
-		*handle = vtophys(vaddr);
-	return (vaddr);
+	return (linux_dma_pool_alloc(pool, mem_flags, handle));
 }
 
 static inline void *
@@ -85,9 +76,10 @@ dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags
 }
 
 static inline void
-dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr)
+dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
 {
-	uma_zfree(pool->pool_zone, vaddr);
+
+	linux_dma_pool_free(pool, vaddr, dma_addr);
 }

Modified: stable/12/sys/compat/linuxkpi/common/include/linux/gfp.h
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/gfp.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/gfp.h	Fri May 1 09:46:27 2020	(r360525)
@@ -56,6 +56,7 @@
 #define	__GFP_IO	0
 #define	__GFP_NO_KSWAPD	0
+#define	__GFP_KSWAPD_RECLAIM	0
 #define	__GFP_WAIT	M_WAITOK
 
 #define	__GFP_DMA32	(1U << 24)	/* LinuxKPI only */
 #define	__GFP_BITS_SHIFT 25

Modified: stable/12/sys/compat/linuxkpi/common/include/linux/io.h
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/io.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/io.h	Fri May 1 09:46:27 2020	(r360525)
@@ -42,6 +42,32 @@
  * XXX This is all x86 specific.  It should be bus space access.
  */
 
+
+/* rmb and wmb are declared in machine/atomic.h, so should be included first. */
+#ifndef __io_br
+#define	__io_br()	__compiler_membar()
+#endif
+
+#ifndef __io_ar
+#ifdef rmb
+#define	__io_ar()	rmb()
+#else
+#define	__io_ar()	__compiler_membar()
+#endif
+#endif
+
+#ifndef __io_bw
+#ifdef wmb
+#define	__io_bw()	wmb()
+#else
+#define	__io_bw()	__compiler_membar()
+#endif
+#endif
+
+#ifndef __io_aw
+#define	__io_aw()	__compiler_membar()
+#endif
+
 /* Access MMIO registers atomically without barriers and byte swapping. */
 
 static inline uint8_t
@@ -112,9 +138,9 @@ readb(const volatile void *addr)
 {
 	uint8_t v;
 
-	__compiler_membar();
+	__io_br();
 	v = *(const volatile uint8_t *)addr;
-	__compiler_membar();
+	__io_ar();
 	return (v);
 }
 #define	readb(addr)		readb(addr)
@@ -123,9 +149,9 @@ readb(const volatile void *addr)
 static inline void
 writeb(uint8_t v, volatile void *addr)
 {
-	__compiler_membar();
+	__io_bw();
 	*(volatile uint8_t *)addr = v;
-	__compiler_membar();
+	__io_aw();
 }
 #define	writeb(v, addr)		writeb(v, addr)
 
@@ -135,9 +161,9 @@ readw(const volatile void *addr)
 {
 	uint16_t v;
 
-	__compiler_membar();
-	v = *(const volatile uint16_t *)addr;
-	__compiler_membar();
+	__io_br();
+	v = le16toh(__raw_readw(addr));
+	__io_ar();
 	return (v);
 }
 #define	readw(addr)		readw(addr)
@@ -146,9 +172,9 @@ readw(const volatile void *addr)
 static inline void
 writew(uint16_t v, volatile void *addr)
 {
-	__compiler_membar();
-	*(volatile uint16_t *)addr = v;
-	__compiler_membar();
+	__io_bw();
+	__raw_writew(htole16(v), addr);
+	__io_aw();
 }
 #define	writew(v, addr)		writew(v, addr)
 
@@ -158,9 +184,9 @@ readl(const volatile void *addr)
 {
 	uint32_t v;
 
-	__compiler_membar();
-	v = *(const volatile uint32_t *)addr;
-	__compiler_membar();
+	__io_br();
+	v = le32toh(__raw_readl(addr));
+	__io_ar();
 	return (v);
 }
 #define	readl(addr)		readl(addr)
@@ -169,9 +195,9 @@ readl(const volatile void *addr)
 static inline void
 writel(uint32_t v, volatile void *addr)
 {
-	__compiler_membar();
-	*(volatile uint32_t *)addr = v;
-	__compiler_membar();
+	__io_bw();
+	__raw_writel(htole32(v), addr);
+	__io_aw();
 }
 #define	writel(v, addr)		writel(v, addr)
 
@@ -183,9 +209,9 @@ readq(const volatile void *addr)
 {
 	uint64_t v;
 
-	__compiler_membar();
-	v = *(const volatile uint64_t *)addr;
-	__compiler_membar();
+	__io_br();
+	v = le64toh(__raw_readq(addr));
+	__io_ar();
 	return (v);
 }
 #define	readq(addr)		readq(addr)
@@ -193,9 +219,9 @@ readq(const volatile void *addr)
 static inline void
 writeq(uint64_t v, volatile void *addr)
 {
-	__compiler_membar();
-	*(volatile uint64_t *)addr = v;
-	__compiler_membar();
+	__io_bw();
+	__raw_writeq(htole64(v), addr);
+	__io_aw();
 }
 #define	writeq(v, addr)		writeq(v, addr)
 #endif
@@ -206,7 +232,7 @@ writeq(uint64_t v, volatile void *addr)
 static inline uint8_t
 readb_relaxed(const volatile void *addr)
 {
-	return (*(const volatile uint8_t *)addr);
+	return (__raw_readb(addr));
 }
 #define	readb_relaxed(addr)	readb_relaxed(addr)
 
@@ -214,7 +240,7 @@ readb_relaxed(const volatile void *addr)
 static inline void
 writeb_relaxed(uint8_t v, volatile void *addr)
 {
-	*(volatile uint8_t *)addr = v;
+	__raw_writeb(v, addr);
 }
 #define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)
 
@@ -222,7 +248,7 @@ writeb_relaxed(uint8_t v, volatile void *addr)
 static inline uint16_t
 readw_relaxed(const volatile void *addr)
 {
-	return (*(const volatile uint16_t *)addr);
+	return (le16toh(__raw_readw(addr)));
 }
 #define	readw_relaxed(addr)	readw_relaxed(addr)
 
@@ -230,7 +256,7 @@ readw_relaxed(const volatile void *addr)
 static inline void
 writew_relaxed(uint16_t v, volatile void *addr)
 {
-	*(volatile uint16_t *)addr = v;
+	__raw_writew(htole16(v), addr);
 }
 #define	writew_relaxed(v, addr)	writew_relaxed(v, addr)
 
@@ -238,7 +264,7 @@ writew_relaxed(uint16_t v, volatile void *addr)
 static inline uint32_t
 readl_relaxed(const volatile void *addr)
 {
-	return (*(const volatile uint32_t *)addr);
+	return (le32toh(__raw_readl(addr)));
 }
 #define	readl_relaxed(addr)	readl_relaxed(addr)
 
@@ -246,7 +272,7 @@ readl_relaxed(const volatile void *addr)
 static inline void
 writel_relaxed(uint32_t v, volatile void *addr)
 {
-	*(volatile uint32_t *)addr = v;
+	__raw_writel(htole32(v), addr);
 }
 #define	writel_relaxed(v, addr)	writel_relaxed(v, addr)
 
@@ -256,14 +282,14 @@ writel_relaxed(uint32_t v, volatile void *addr)
 static inline uint64_t
 readq_relaxed(const volatile void *addr)
 {
-	return (*(const volatile uint64_t *)addr);
+	return (le64toh(__raw_readq(addr)));
 }
 #define	readq_relaxed(addr)	readq_relaxed(addr)
 
 static inline void
 writeq_relaxed(uint64_t v, volatile void *addr)
 {
-	*(volatile uint64_t *)addr = v;
+	__raw_writeq(htole64(v), addr);
 }
 #define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
 #endif
@@ -290,7 +316,13 @@ ioread16(const volatile void *addr)
 static inline uint16_t
 ioread16be(const volatile void *addr)
 {
-	return (bswap16(readw(addr)));
+	uint16_t v;
+
+	__io_br();
+	v = (be16toh(__raw_readw(addr)));
+	__io_ar();
+
+	return (v);
 }
 #define	ioread16be(addr)	ioread16be(addr)
 
@@ -306,7 +338,13 @@ ioread32(const volatile void *addr)
 static inline uint32_t
 ioread32be(const volatile void *addr)
 {
-	return (bswap32(readl(addr)));
+	uint32_t v;
+
+	__io_br();
+	v = (be32toh(__raw_readl(addr)));
+	__io_ar();
+
+	return (v);
 }
 #define	ioread32be(addr)	ioread32be(addr)
 
@@ -338,7 +376,9 @@ iowrite32(uint32_t v, volatile void *addr)
 static inline void
 iowrite32be(uint32_t v, volatile void *addr)
 {
-	writel(bswap32(v), addr);
+	__io_bw();
+	__raw_writel(htobe32(v), addr);
+	__io_aw();
 }
 #define	iowrite32be(v, addr)	iowrite32be(v, addr)

Modified: stable/12/sys/compat/linuxkpi/common/include/linux/pci.h
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/pci.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/pci.h	Fri May 1 09:46:27 2020	(r360525)
@@ -212,6 +212,7 @@ struct pci_driver {
 
 struct pci_bus {
 	struct pci_dev	*self;
+	int		domain;
 	int		number;
 };
 
@@ -226,7 +227,6 @@ struct pci_dev {
 	struct list_head	links;
 	struct pci_driver	*pdrv;
 	struct pci_bus		*bus;
-	uint64_t		dma_mask;
 	uint16_t		device;
 	uint16_t		vendor;
 	uint16_t		subsystem_vendor;
@@ -279,26 +279,6 @@ linux_pci_find_irq_dev(unsigned int irq)
 	return (found);
 }
 
-static inline unsigned long
-pci_resource_start(struct pci_dev *pdev, int bar)
-{
-	struct resource_list_entry *rle;
-
-	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
-		return (0);
-	return rle->start;
-}
-
-static inline unsigned long
-pci_resource_len(struct pci_dev *pdev, int bar)
-{
-	struct resource_list_entry *rle;
-
-	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
-		return (0);
-	return rle->count;
-}
-
 static inline int
 pci_resource_type(struct pci_dev *pdev, int bar)
 {
@@ -470,6 +450,9 @@ linux_pci_disable_msi(struct pci_dev *pdev)
 	pdev->irq = pdev->dev.irq;
 	pdev->msi_enabled = false;
 }
+
+unsigned long	pci_resource_start(struct pci_dev *pdev, int bar);
+unsigned long	pci_resource_len(struct pci_dev *pdev, int bar);
 
 static inline bus_addr_t
 pci_bus_address(struct pci_dev *pdev, int bar)

Modified: stable/12/sys/compat/linuxkpi/common/include/linux/scatterlist.h
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/scatterlist.h	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/scatterlist.h	Fri May 1 09:46:27 2020	(r360525)
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
+struct bus_dmamap;
 struct scatterlist {
 	unsigned long page_link;
 #define	SG_PAGE_LINK_CHAIN	0x1UL
@@ -43,7 +44,8 @@ struct scatterlist {
 #define	SG_PAGE_LINK_MASK	0x3UL
 	unsigned int offset;
 	unsigned int length;
-	dma_addr_t address;
+	dma_addr_t dma_address;
+	struct bus_dmamap *dma_map;	/* FreeBSD specific */
 };
 
 CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);
@@ -77,7 +79,7 @@ struct sg_page_iter {
 #define	sg_chain_ptr(sg)	\
 	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))
 
-#define	sg_dma_address(sg)	(sg)->address
+#define	sg_dma_address(sg)	(sg)->dma_address
 #define	sg_dma_len(sg)		(sg)->length
 
 #define	for_each_sg_page(sgl, iter, nents, pgoffset)	\
@@ -444,7 +446,7 @@ _sg_iter_init(struct scatterlist *sgl, struct sg_page_
 static inline dma_addr_t
 sg_page_iter_dma_address(struct sg_page_iter *spi)
 {
-	return (spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT));
+	return (spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT));
 }
 
 static inline struct page *

Modified: stable/12/sys/compat/linuxkpi/common/src/linux_pci.c
==============================================================================
--- stable/12/sys/compat/linuxkpi/common/src/linux_pci.c	Fri May 1 06:10:09 2020	(r360524)
+++ stable/12/sys/compat/linuxkpi/common/src/linux_pci.c	Fri May 1 09:46:27 2020	(r360525)
@@ -29,16 +29,17 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/bus.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>
 #include <sys/sysctl.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
-#include <sys/bus.h>
 #include <sys/fcntl.h>
 #include <sys/file.h>
 #include <sys/filio.h>
 #include <sys/pciio.h>
+#include <sys/pctrie.h>
 #include <sys/rwlock.h>
 
 #include <vm/vm.h>
@@ -86,6 +87,83 @@ static device_method_t pci_methods[] = {
 	DEVMETHOD_END
 };
 
+struct linux_dma_priv {
+	uint64_t	dma_mask;
+	struct mtx	lock;
+	bus_dma_tag_t	dmat;
+	struct pctrie	ptree;
+};
+#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
+#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)
+
+static int
+linux_pdev_dma_init(struct pci_dev *pdev)
+{
+	struct linux_dma_priv *priv;
+	int error;
+
+	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
+	pdev->dev.dma_priv = priv;
+
+	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
+
+	pctrie_init(&priv->ptree);
+
+	/* create a default DMA tag */
+	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
+	if (error) {
+		mtx_destroy(&priv->lock);
+		free(priv, M_DEVBUF);
+		pdev->dev.dma_priv = NULL;
+	}
+	return (error);
+}
+
+static int
+linux_pdev_dma_uninit(struct pci_dev *pdev)
+{
+	struct linux_dma_priv *priv;
+
+	priv = pdev->dev.dma_priv;
+	if (priv->dmat)
+		bus_dma_tag_destroy(priv->dmat);
+	mtx_destroy(&priv->lock);
+	free(priv, M_DEVBUF);
+	pdev->dev.dma_priv = NULL;
+	return (0);
+}
+
+int
+linux_dma_tag_init(struct device *dev, u64 dma_mask)
+{
+	struct linux_dma_priv *priv;
+	int error;
+
+	priv = dev->dma_priv;
+
+	if (priv->dmat) {
+		if (priv->dma_mask == dma_mask)
+			return (0);
+
+		bus_dma_tag_destroy(priv->dmat);
+	}
+
+	priv->dma_mask = dma_mask;
+
+	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
+	    1, 0,			/* alignment, boundary */
+	    dma_mask,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filtfunc, filtfuncarg */
+	    BUS_SPACE_MAXSIZE,		/* maxsize */
+	    1,				/* nsegments */
+	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockfuncarg */
+	    &priv->dmat);
+	return (-error);
+}
+
 static struct pci_driver *
 linux_pci_find(device_t dev, const struct pci_device_id **idp)
 {
@@ -142,7 +220,6 @@ linux_pci_attach(device_t dev)
 	struct pci_driver *pdrv;
 	const struct pci_device_id *id;
 	device_t parent;
-	devclass_t devclass;
 	int error;
 
 	linux_set_current(curthread);
@@ -151,7 +228,6 @@ linux_pci_attach(device_t dev)
 
 	pdev = device_get_softc(dev);
 	parent = device_get_parent(dev);
-	devclass = device_get_devclass(parent);
 	if (pdrv->isdrm) {
 		dinfo = device_get_ivars(parent);
 		device_set_ivars(dev, dinfo);
@@ -169,7 +245,6 @@ linux_pci_attach(device_t dev)
 	pdev->subsystem_device = dinfo->cfg.subdevice;
 	pdev->class = pci_get_class(dev);
 	pdev->revision = pci_get_revid(dev);
-	pdev->dev.dma_mask = &pdev->dma_mask;
 	pdev->pdrv = pdrv;
 	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
 	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
@@ -181,10 +256,14 @@ linux_pci_attach(device_t dev)
 	else
 		pdev->dev.irq = LINUX_IRQ_INVALID;
 	pdev->irq = pdev->dev.irq;
+	error = linux_pdev_dma_init(pdev);
+	if (error)
+		goto out_dma_init;
 
 	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
 	pbus->self = pdev;
 	pbus->number = pci_get_bus(dev);
+	pbus->domain = pci_get_domain(dev);
 	pdev->bus = pbus;
 
 	spin_lock(&pci_lock);
@@ -192,15 +271,19 @@ linux_pci_attach(device_t dev)
 	spin_unlock(&pci_lock);
 
 	error = pdrv->probe(pdev, id);
-	if (error) {
-		free(pdev->bus, M_DEVBUF);
-		spin_lock(&pci_lock);
-		list_del(&pdev->links);
-		spin_unlock(&pci_lock);
-		put_device(&pdev->dev);
-		error = -error;
-	}
-	return (error);
+	if (error)
+		goto out_probe;
+	return (0);
+
+out_probe:
+	free(pdev->bus, M_DEVBUF);
+	linux_pdev_dma_uninit(pdev);
+out_dma_init:
+	spin_lock(&pci_lock);
+	list_del(&pdev->links);
+	spin_unlock(&pci_lock);
+	put_device(&pdev->dev);
+	return (-error);
 }
 
 static int
@@ -212,7 +295,9 @@ linux_pci_detach(device_t dev)
 
 	pdev = device_get_softc(dev);
 	pdev->pdrv->remove(pdev);
+
 	free(pdev->bus, M_DEVBUF);
+	linux_pdev_dma_uninit(pdev);
 
 	spin_lock(&pci_lock);
 	list_del(&pdev->links);
@@ -354,6 +439,36 @@ linux_pci_register_driver(struct pci_driver *pdrv)
 	return (_linux_pci_register_driver(pdrv, dc));
 }
 
+unsigned long
+pci_resource_start(struct pci_dev *pdev, int bar)
+{
+	struct resource_list_entry *rle;
+	rman_res_t newstart;
+	device_t dev;
+
+	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
+		return (0);
+	dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
+	    PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+	MPASS(dev != NULL);
+	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
+		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
+		    (uintmax_t)rle->start);
+		return (0);
+	}
+	return (newstart);
+}
+
+unsigned long
+pci_resource_len(struct pci_dev *pdev, int bar)
+{
+	struct resource_list_entry *rle;
+
+	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
+		return (0);
+	return (rle->count);
+}
+
 int
 linux_pci_register_drm_driver(struct pci_driver *pdrv)
 {
@@ -397,4 +512,417 @@ linux_pci_unregister_drm_driver(struct pci_driver *pdr
 	if (bus != NULL)
 		devclass_delete_driver(bus, &pdrv->bsddriver);
 	mtx_unlock(&Giant);
+}
+
+CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));
+
+struct linux_dma_obj {
+	void		*vaddr;
+	uint64_t	dma_addr;
+	bus_dmamap_t	dmamap;
+};
+
+static uma_zone_t linux_dma_trie_zone;
+static uma_zone_t linux_dma_obj_zone;
+
+static void
+linux_dma_init(void *arg)
+{
+
+	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
+	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
+	    UMA_ALIGN_PTR, 0);
+	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
+	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, 0);
+
+}
+SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);
+
+static void
+linux_dma_uninit(void *arg)
+{
+
+	uma_zdestroy(linux_dma_obj_zone);
+	uma_zdestroy(linux_dma_trie_zone);
+}
+SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);
+
+static void *
+linux_dma_trie_alloc(struct pctrie *ptree)
+{
+
+	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
+}
+
+static void
+linux_dma_trie_free(struct pctrie *ptree, void *node)
+{
+
+	uma_zfree(linux_dma_trie_zone, node);
+}
+
+
+PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
+    linux_dma_trie_free);
+
+void *
+linux_dma_alloc_coherent(struct device *dev, size_t size,
+    dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct linux_dma_priv *priv;
+	vm_paddr_t high;
+	size_t align;
+	void *mem;
+
+	if (dev == NULL || dev->dma_priv == NULL) {

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
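For context on how the new path is exercised from a driver: after this change the LinuxKPI dma_set_mask(), dma_map_single() and dma_mapping_error() wrappers route through linux_dma_tag_init() and linux_dma_map_phys(), and thus through bus_dma(9), instead of assuming a 1:1 physical mapping. A minimal consumer-side sketch (hypothetical driver code, not part of this commit; function and buffer names are made up for illustration):

/*
 * Hypothetical LinuxKPI driver fragment: map a physically contiguous
 * kernel buffer for device access using the wrappers changed above.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int
example_map_buf(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t busaddr;

	/* Creates or replaces the per-device busdma tag via linux_dma_tag_init(). */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48)) != 0)
		return (-EIO);

	/* Backed by linux_dma_map_phys(); may set up a bounce mapping. */
	busaddr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, busaddr))
		return (-ENOMEM);

	/* ... hand busaddr to the device and wait for completion ... */

	dma_unmap_single(&pdev->dev, busaddr, len, DMA_TO_DEVICE);
	return (0);
}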