+struct vmmaphd {
+	struct vmmap *lh_first;
+};
+#define	VMMAP_HASH_SIZE	64
+#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
+#define	VM_HASH(addr)	(((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK)
+
+/*
+ * Hash of live ioremap()/vmap() ranges, keyed by starting address, so
+ * that iounmap()/vunmap() can recover the mapping size that the Linux
+ * prototypes do not pass back.
+ */
+static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
+static struct mtx vmmaplock;
+
 int
 is_vmalloc_addr(const void *addr)
 {
 	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
 }
 
+static void
+vmmap_add(void *addr, unsigned long size)
+{
+	struct vmmap *vmmap;
+
+	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
+	mtx_lock(&vmmaplock);
+	vmmap->vm_size = size;
+	vmmap->vm_addr = addr;
+	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
+	mtx_unlock(&vmmaplock);
+}
+
+static struct vmmap *
+vmmap_remove(void *addr)
+{
+	struct vmmap *vmmap;
+
+	mtx_lock(&vmmaplock);
+	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
+		if (vmmap->vm_addr == addr)
+			break;
+	if (vmmap)
+		LIST_REMOVE(vmmap, vm_next);
+	mtx_unlock(&vmmaplock);
+
+	return (vmmap);
+}
+
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || \
+    defined(__aarch64__) || defined(__riscv)
+void *
+_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
+{
+	void *addr;
+
+	addr = pmap_mapdev_attr(phys_addr, size, attr);
+	if (addr == NULL)
+		return (NULL);
+	vmmap_add(addr, size);
+
+	return (addr);
+}
+#endif
+
+void
+iounmap(void *addr)
+{
+	struct vmmap *vmmap;
+
+	vmmap = vmmap_remove(addr);
+	if (vmmap == NULL)
+		return;
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || \
+    defined(__aarch64__) || defined(__riscv)
+	pmap_unmapdev(addr, vmmap->vm_size);
+#endif
+	kfree(vmmap);
+}
+
+void *
+vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
+{
+	vm_offset_t off;
+	size_t size;
+
+	size = count * PAGE_SIZE;
+	off = kva_alloc(size);
+	if (off == 0)
+		return (NULL);
+	vmmap_add((void *)off, size);
+	pmap_qenter(off, pages, count);
+
+	return ((void *)off);
+}
+
+void
+vunmap(void *addr)
+{
+	struct vmmap *vmmap;
+
+	vmmap = vmmap_remove(addr);
+	if (vmmap == NULL)
+		return;
+	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
+	kva_free((vm_offset_t)addr, vmmap->vm_size);
+	kfree(vmmap);
+}
+
 vm_fault_t
 lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
     unsigned long pfn, pgprot_t prot)
@@ -575,3 +682,21 @@ linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
 
 	linux_free_pages(page, 0);
 }
+
+static void
+lkpi_page_init(void *arg)
+{
+	int i;
+
+	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
+	for (i = 0; i < VMMAP_HASH_SIZE; i++)
+		LIST_INIT(&vmmaphead[i]);
+}
+SYSINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_init, NULL);
+
+static void
+lkpi_page_uninit(void *arg)
+{
+	mtx_destroy(&vmmaplock);
+}
+SYSUNINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_uninit, NULL);
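
A note on the design: Linux's ioremap() and vmap() return only a pointer, and iounmap()/vunmap() take only that pointer back, so the shim must remember each mapping's size itself; that is what the vmmaphead hash provides. Below is a minimal userspace model of the same open-hash bookkeeping, for experimentation outside the kernel. It assumes PAGE_SHIFT is 12 and omits the kernel mutex; the demo scaffolding (main and the asserts) is illustrative only and not part of the patch.

/*
 * Minimal userspace model of the vmmap hash, assuming PAGE_SHIFT == 12
 * and omitting the kernel mutex.  Illustrative only; not part of the
 * patch.  Compile with: cc -o vmmap_demo vmmap_demo.c
 */
#include <sys/queue.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	PAGE_SHIFT	12
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	(((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK)

struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};
static LIST_HEAD(vmmaphd, vmmap) vmmaphead[VMMAP_HASH_SIZE];

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = malloc(sizeof(*vmmap));
	if (vmmap == NULL)
		abort();
	vmmap->vm_addr = addr;
	vmmap->vm_size = size;
	/* Newest mapping goes to the front of its bucket. */
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	/* Walk only the bucket the starting address hashes to. */
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap != NULL)
		LIST_REMOVE(vmmap, vm_next);
	return (vmmap);
}

int
main(void)
{
	void *p = (void *)(uintptr_t)0x10000;
	int i;

	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	vmmap_add(p, 4096);
	assert(vmmap_remove(p) != NULL);	/* found and unlinked */
	assert(vmmap_remove(p) == NULL);	/* second lookup misses */
	printf("ok\n");
	return (0);
}

Hashing on the address shifted by PAGE_SHIFT works because every address handed out by kva_alloc() or pmap_mapdev_attr() is page-aligned, so the low bits carry no information.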
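
For context on how the pair is exercised, here is a hedged sketch of a LinuxKPI consumer mapping a handful of discontiguous pages through vmap(), e.g. from a driver attach routine. vmap_selftest is a hypothetical name and the include list assumes the stock LinuxKPI header layout; note that the shim ignores the flags and prot arguments, since pmap_qenter() always installs plain kernel read/write mappings.

/*
 * Hedged sketch of a LinuxKPI consumer using vmap()/vunmap().
 * vmap_selftest is a hypothetical name, not part of the patch.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define	NPAGES	4

static int
vmap_selftest(void)
{
	struct page *pages[NPAGES];
	char *va;
	int i;

	for (i = 0; i < NPAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (pages[i] == NULL)
			goto fail;
	}
	/*
	 * Map the scattered pages into one contiguous KVA range; the
	 * shim ignores the flags/prot arguments.
	 */
	va = vmap(pages, NPAGES, 0, 0);
	if (va == NULL)
		goto fail;
	va[0] = 1;				/* touch the first byte */
	va[NPAGES * PAGE_SIZE - 1] = 1;		/* ...and the last */
	vunmap(va);		/* size comes back out of the vmmap hash */
	for (i = 0; i < NPAGES; i++)
		__free_page(pages[i]);
	return (0);
fail:
	while (i-- > 0)
		__free_page(pages[i]);
	return (-ENOMEM);
}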
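
The ioremap()/iounmap() path follows the same pattern. In the sketch below, probe_regs, bar_paddr, bar_len, and the 0x10 register offset are invented for illustration; only ioremap, writel, and iounmap are real LinuxKPI entry points. The point to notice is that the caller passes only the address to iounmap(), and the size reaches pmap_unmapdev() via the hash entry recorded by _ioremap_attr().

/*
 * Hypothetical ioremap()/iounmap() consumer; names and the register
 * offset are made up for illustration.
 */
#include <linux/errno.h>
#include <linux/io.h>

static int
probe_regs(resource_size_t bar_paddr, unsigned long bar_len)
{
	void __iomem *regs;

	regs = ioremap(bar_paddr, bar_len);	/* records bar_len via vmmap_add() */
	if (regs == NULL)
		return (-ENOMEM);
	writel(1, regs + 0x10);			/* hypothetical device register */
	iounmap(regs);				/* size recovered from the hash */
	return (0);
}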