Date: Wed, 27 May 2020 01:24:13 +0000 (UTC)
From: Justin Hibbits <jhibbits@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r361544 - in head/sys: conf powerpc/aim powerpc/booke powerpc/include powerpc/powerpc powerpc/ps3 powerpc/pseries
Message-ID: <202005270124.04R1ODAh030113@repo.freebsd.org>
Author: jhibbits
Date: Wed May 27 01:24:12 2020
New Revision: 361544
URL: https://svnweb.freebsd.org/changeset/base/361544

Log:
  powerpc/mmu: Convert PowerPC pmap drivers to ifunc from kobj

  With IFUNC support in the kernel, we can finally get rid of our
  poor man's ifunc for pmap, which was built on kobj.  Since moea64
  also uses a second-tier kobj for its own private methods, this adds
  a second pmap install function (pmap_mmu_init()) to perform pmap
  'post-install, pre-bootstrap' initialization, before the IFUNCs get
  initialized.

  Reviewed by:	bdragon

Deleted:
  head/sys/powerpc/aim/moea64_if.m
  head/sys/powerpc/powerpc/mmu_if.m

Modified:
  head/sys/conf/files.powerpc
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/aim/mmu_oea64.h
  head/sys/powerpc/aim/mmu_radix.c
  head/sys/powerpc/aim/moea64_native.c
  head/sys/powerpc/booke/booke_machdep.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/booke/pmap_32.c
  head/sys/powerpc/booke/pmap_64.c
  head/sys/powerpc/include/mmuvar.h
  head/sys/powerpc/include/pmap.h
  head/sys/powerpc/powerpc/machdep.c
  head/sys/powerpc/powerpc/pmap_dispatch.c
  head/sys/powerpc/ps3/mmu_ps3.c
  head/sys/powerpc/pseries/mmu_phyp.c

Modified: head/sys/conf/files.powerpc
==============================================================================
--- head/sys/conf/files.powerpc	Wed May 27 01:04:03 2020	(r361543)
+++ head/sys/conf/files.powerpc	Wed May 27 01:24:12 2020	(r361544)
@@ -135,7 +135,6 @@ powerpc/aim/aim_machdep.c	optional	aim
 powerpc/aim/mmu_oea.c		optional	aim powerpc
 powerpc/aim/mmu_oea64.c		optional	aim
 powerpc/aim/mmu_radix.c		optional	aim powerpc64
-powerpc/aim/moea64_if.m		optional	aim
 powerpc/aim/moea64_native.c	optional	aim
 powerpc/aim/mp_cpudep.c		optional	aim
 powerpc/aim/slb.c		optional	aim powerpc64
@@ -260,7 +259,6 @@ powerpc/powerpc/iommu_if.m	standard
 powerpc/powerpc/machdep.c	standard
 powerpc/powerpc/mem.c		optional	mem
 powerpc/powerpc/minidump_machdep.c	optional	powerpc64
-powerpc/powerpc/mmu_if.m	standard
 powerpc/powerpc/mp_machdep.c	optional	smp
 powerpc/powerpc/nexus.c		standard
 powerpc/powerpc/openpic.c	standard

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Wed May 27 01:04:03 2020	(r361543)
+++ head/sys/powerpc/aim/mmu_oea.c	Wed May 27 01:24:12 2020	(r361544)
@@ -149,8 +149,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/mmuvar.h>
 #include <machine/trap.h>
 
-#include "mmu_if.h"
-
 #define	MOEA_DEBUG
 
 #define TODO	panic("%s: not implemented", __func__);
@@ -267,125 +265,123 @@ static int	moea_enter_locked(pmap_t, vm_offset_t, vm_
 static void	moea_syncicache(vm_paddr_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
 static u_int	moea_clear_bit(vm_page_t, int);
-static void	moea_kremove(mmu_t, vm_offset_t);
+static void	moea_kremove(vm_offset_t);
 int		moea_pte_spill(vm_offset_t);
 
 /*
  * Kernel MMU interface
  */
-void moea_clear_modify(mmu_t, vm_page_t);
-void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
-void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+void moea_clear_modify(vm_page_t);
+void moea_copy_page(vm_page_t, vm_page_t);
+void moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
 	vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
-	int8_t);
-void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+int moea_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+	int8_t);
+void moea_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
 	vm_prot_t);
-void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
-vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void moea_init(mmu_t);
-boolean_t moea_is_modified(mmu_t, vm_page_t);
-boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t moea_is_referenced(mmu_t, vm_page_t);
-int moea_ts_referenced(mmu_t, vm_page_t);
-vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void moea_page_init(mmu_t, vm_page_t);
-int moea_page_wired_mappings(mmu_t, vm_page_t);
-void moea_pinit(mmu_t, pmap_t);
-void moea_pinit0(mmu_t, pmap_t);
-void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void moea_qremove(mmu_t, vm_offset_t, int);
-void moea_release(mmu_t, pmap_t);
-void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea_remove_all(mmu_t, vm_page_t);
-void moea_remove_write(mmu_t, vm_page_t);
-void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea_zero_page(mmu_t, vm_page_t);
-void moea_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea_activate(mmu_t, struct thread *);
-void moea_deactivate(mmu_t, struct thread *);
-void moea_cpu_bootstrap(mmu_t, int);
-void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-void *moea_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
-void moea_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
-void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
-boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
-void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
-void moea_scan_init(mmu_t mmu);
-vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
-void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
-boolean_t moea_page_is_mapped(mmu_t mmu, vm_page_t m);
-static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
+void moea_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t moea_extract(pmap_t, vm_offset_t);
+vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
+void moea_init(void);
+boolean_t moea_is_modified(vm_page_t);
+boolean_t moea_is_prefaultable(pmap_t, vm_offset_t);
+boolean_t moea_is_referenced(vm_page_t);
+int moea_ts_referenced(vm_page_t);
+vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+boolean_t moea_page_exists_quick(pmap_t, vm_page_t);
+void moea_page_init(vm_page_t);
+int moea_page_wired_mappings(vm_page_t);
+int moea_pinit(pmap_t);
+void moea_pinit0(pmap_t);
+void moea_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void moea_qenter(vm_offset_t, vm_page_t *, int);
+void moea_qremove(vm_offset_t, int);
+void moea_release(pmap_t);
+void moea_remove(pmap_t, vm_offset_t, vm_offset_t);
+void moea_remove_all(vm_page_t);
+void moea_remove_write(vm_page_t);
+void moea_unwire(pmap_t, vm_offset_t, vm_offset_t);
+void moea_zero_page(vm_page_t);
+void moea_zero_page_area(vm_page_t, int, int);
+void moea_activate(struct thread *);
+void moea_deactivate(struct thread *);
+void moea_cpu_bootstrap(int);
+void moea_bootstrap(vm_offset_t, vm_offset_t);
+void *moea_mapdev(vm_paddr_t, vm_size_t);
+void *moea_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+void moea_unmapdev(vm_offset_t, vm_size_t);
+vm_paddr_t moea_kextract(vm_offset_t);
+void moea_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
+void moea_kenter(vm_offset_t, vm_paddr_t);
+void moea_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+boolean_t moea_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void moea_sync_icache(pmap_t, vm_offset_t, vm_size_t);
+void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
+void moea_scan_init(void);
+vm_offset_t moea_quick_enter_page(vm_page_t m);
+void moea_quick_remove_page(vm_offset_t addr);
+boolean_t moea_page_is_mapped(vm_page_t m);
+static int moea_map_user_ptr(pmap_t pm,
 	volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
-static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+static int moea_decode_kernel_ptr(vm_offset_t addr,
 	int *is_user, vm_offset_t *decoded_addr);
 
-static mmu_method_t moea_methods[] = {
-	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
-	MMUMETHOD(mmu_copy_page,	moea_copy_page),
-	MMUMETHOD(mmu_copy_pages,	moea_copy_pages),
-	MMUMETHOD(mmu_enter,		moea_enter),
-	MMUMETHOD(mmu_enter_object,	moea_enter_object),
-	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
-	MMUMETHOD(mmu_extract,		moea_extract),
-	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
-	MMUMETHOD(mmu_init,		moea_init),
-	MMUMETHOD(mmu_is_modified,	moea_is_modified),
-	MMUMETHOD(mmu_is_prefaultable,	moea_is_prefaultable),
-	MMUMETHOD(mmu_is_referenced,	moea_is_referenced),
-	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
-	MMUMETHOD(mmu_map,		moea_map),
-	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
-	MMUMETHOD(mmu_page_init,	moea_page_init),
-	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
-	MMUMETHOD(mmu_pinit,		moea_pinit),
-	MMUMETHOD(mmu_pinit0,		moea_pinit0),
-	MMUMETHOD(mmu_protect,		moea_protect),
-	MMUMETHOD(mmu_qenter,		moea_qenter),
-	MMUMETHOD(mmu_qremove,		moea_qremove),
-	MMUMETHOD(mmu_release,		moea_release),
-	MMUMETHOD(mmu_remove,		moea_remove),
-	MMUMETHOD(mmu_remove_all,	moea_remove_all),
-	MMUMETHOD(mmu_remove_write,	moea_remove_write),
-	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
-	MMUMETHOD(mmu_unwire,		moea_unwire),
-	MMUMETHOD(mmu_zero_page,	moea_zero_page),
-	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
-	MMUMETHOD(mmu_activate,		moea_activate),
-	MMUMETHOD(mmu_deactivate,	moea_deactivate),
-	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),
-	MMUMETHOD(mmu_quick_enter_page, moea_quick_enter_page),
-	MMUMETHOD(mmu_quick_remove_page, moea_quick_remove_page),
-	MMUMETHOD(mmu_page_is_mapped,	moea_page_is_mapped),
+static struct pmap_funcs moea_methods = {
+	.clear_modify = moea_clear_modify,
+	.copy_page = moea_copy_page,
+	.copy_pages = moea_copy_pages,
+	.enter = moea_enter,
+	.enter_object = moea_enter_object,
+	.enter_quick = moea_enter_quick,
+	.extract = moea_extract,
+	.extract_and_hold = moea_extract_and_hold,
+	.init = moea_init,
+	.is_modified = moea_is_modified,
+	.is_prefaultable = moea_is_prefaultable,
+	.is_referenced = moea_is_referenced,
+	.ts_referenced = moea_ts_referenced,
+	.map = moea_map,
+	.page_exists_quick = moea_page_exists_quick,
+	.page_init = moea_page_init,
+	.page_wired_mappings = moea_page_wired_mappings,
+	.pinit = moea_pinit,
+	.pinit0 = moea_pinit0,
+	.protect = moea_protect,
+	.qenter = moea_qenter,
+	.qremove = moea_qremove,
+	.release = moea_release,
+	.remove = moea_remove,
+	.remove_all = moea_remove_all,
+	.remove_write = moea_remove_write,
+	.sync_icache = moea_sync_icache,
+	.unwire = moea_unwire,
+	.zero_page = moea_zero_page,
+	.zero_page_area = moea_zero_page_area,
+	.activate = moea_activate,
+	.deactivate = moea_deactivate,
+	.page_set_memattr = moea_page_set_memattr,
+	.quick_enter_page = moea_quick_enter_page,
+	.quick_remove_page = moea_quick_remove_page,
+	.page_is_mapped = moea_page_is_mapped,
 
 	/* Internal interfaces */
-	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
-	MMUMETHOD(mmu_cpu_bootstrap,	moea_cpu_bootstrap),
-	MMUMETHOD(mmu_mapdev_attr,	moea_mapdev_attr),
-	MMUMETHOD(mmu_mapdev,		moea_mapdev),
-	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
-	MMUMETHOD(mmu_kextract,		moea_kextract),
-	MMUMETHOD(mmu_kenter,		moea_kenter),
-	MMUMETHOD(mmu_kenter_attr,	moea_kenter_attr),
-	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
-	MMUMETHOD(mmu_scan_init,	moea_scan_init),
-	MMUMETHOD(mmu_dumpsys_map,	moea_dumpsys_map),
-	MMUMETHOD(mmu_map_user_ptr,	moea_map_user_ptr),
-	MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr),
-
-	{ 0, 0 }
+	.bootstrap = moea_bootstrap,
+	.cpu_bootstrap = moea_cpu_bootstrap,
+	.mapdev_attr = moea_mapdev_attr,
+	.mapdev = moea_mapdev,
+	.unmapdev = moea_unmapdev,
+	.kextract = moea_kextract,
+	.kenter = moea_kenter,
+	.kenter_attr = moea_kenter_attr,
+	.dev_direct_mapped = moea_dev_direct_mapped,
+	.dumpsys_pa_init = moea_scan_init,
+	.dumpsys_map_chunk = moea_dumpsys_map,
+	.map_user_ptr = moea_map_user_ptr,
+	.decode_kernel_ptr = moea_decode_kernel_ptr,
 };
 
-MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
+MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods);
 
 static __inline uint32_t
 moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
@@ -627,7 +623,7 @@ om_cmp(const void *a, const void *b)
 }
 
 void
-moea_cpu_bootstrap(mmu_t mmup, int ap)
+moea_cpu_bootstrap(int ap)
 {
 	u_int sdr;
 	int i;
@@ -665,7 +661,7 @@ moea_cpu_bootstrap(mmu_t mmup, int ap)
 }
 
 void
-moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+moea_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
 {
 	ihandle_t	mmui;
 	phandle_t	chosen, mmu;
@@ -921,7 +917,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm
 			/* Enter the pages */
 			for (off = 0; off < translations[i].om_len; off += PAGE_SIZE)
-				moea_kenter(mmup, translations[i].om_va + off,
+				moea_kenter(translations[i].om_va + off,
 				    translations[i].om_pa + off);
 		}
 	}
@@ -933,7 +929,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm
 		;
 	Maxmem = powerpc_btop(phys_avail[i + 1]);
 
-	moea_cpu_bootstrap(mmup,0);
+	moea_cpu_bootstrap(0);
 	mtmsr(mfmsr() | PSL_DR | PSL_IR);
 	pmap_bootstrapped++;
 
@@ -954,7 +950,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm
 	thread0.td_kstack = va;
 	thread0.td_kstack_pages = kstack_pages;
 	for (i = 0; i < kstack_pages; i++) {
-		moea_kenter(mmup, va, pa);
+		moea_kenter(va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
@@ -967,7 +963,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm
 	va = virtual_avail;
 	virtual_avail += round_page(msgbufsize);
 	while (va < virtual_avail) {
-		moea_kenter(mmup, va, pa);
+		moea_kenter(va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
@@ -980,7 +976,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm
 	va = virtual_avail;
 	virtual_avail += DPCPU_SIZE;
 	while (va < virtual_avail) {
-		moea_kenter(mmup, va, pa);
+		moea_kenter(va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
@@ -992,7 +988,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm
  * space can be accessed in any way.
  */
 void
-moea_activate(mmu_t mmu, struct thread *td)
+moea_activate(struct thread *td)
 {
 	pmap_t	pm, pmr;
 
@@ -1010,7 +1006,7 @@ moea_activate(mmu_t mmu, struct thread *td)
 }
 
 void
-moea_deactivate(mmu_t mmu, struct thread *td)
+moea_deactivate(struct thread *td)
 {
 	pmap_t	pm;
 
@@ -1020,7 +1016,7 @@ moea_deactivate(mmu_t mmu, struct thread *td)
 }
 
 void
-moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct	pvo_entry key, *pvo;
 
@@ -1038,7 +1034,7 @@ moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_
 }
 
 void
-moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
+moea_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
 	vm_offset_t	dst;
 	vm_offset_t	src;
 
@@ -1050,7 +1046,7 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t md
 }
 
 void
-moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 {
 	void *a_cp, *b_cp;
@@ -1077,7 +1073,7 @@ moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t
  * Zero a page of physical memory by temporarily mapping it into the tlb.
  */
 void
-moea_zero_page(mmu_t mmu, vm_page_t m)
+moea_zero_page(vm_page_t m)
 {
 	vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);
 
@@ -1086,7 +1082,7 @@ moea_zero_page(mmu_t mmu, vm_page_t m)
 }
 
 void
-moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
+moea_zero_page_area(vm_page_t m, int off, int size)
 {
 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 	void *va = (void *)(pa + off);
 
@@ -1095,19 +1091,19 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, i
 }
 
 vm_offset_t
-moea_quick_enter_page(mmu_t mmu, vm_page_t m)
+moea_quick_enter_page(vm_page_t m)
 {
 
 	return (VM_PAGE_TO_PHYS(m));
 }
 
 void
-moea_quick_remove_page(mmu_t mmu, vm_offset_t addr)
+moea_quick_remove_page(vm_offset_t addr)
 {
 }
 
 boolean_t
-moea_page_is_mapped(mmu_t mmu, vm_page_t m)
+moea_page_is_mapped(vm_page_t m)
 {
 	return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
 }
@@ -1118,7 +1114,7 @@ moea_page_is_mapped(mmu_t mmu, vm_page_t m)
 * will be wired down.
 */
 int
-moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+moea_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     u_int flags, int8_t psind)
 {
 	int error;
@@ -1216,7 +1212,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page
 * corresponding offset from m_start are mapped.
 */
 void
-moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
+moea_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
 	vm_page_t m;
@@ -1239,7 +1235,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t st
 }
 
 void
-moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
+moea_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
@@ -1252,7 +1248,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va,
 }
 
 vm_paddr_t
-moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
+moea_extract(pmap_t pm, vm_offset_t va)
 {
 	struct	pvo_entry *pvo;
 	vm_paddr_t pa;
@@ -1273,7 +1269,7 @@ moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
 * protection.
 */
 vm_page_t
-moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+moea_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	struct	pvo_entry *pvo;
 	vm_page_t m;
@@ -1293,7 +1289,7 @@ moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offse
 }
 
 void
-moea_init(mmu_t mmu)
+moea_init()
 {
 
 	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
@@ -1306,7 +1302,7 @@ moea_init(mmu_t mmu)
 }
 
 boolean_t
-moea_is_referenced(mmu_t mmu, vm_page_t m)
+moea_is_referenced(vm_page_t m)
 {
 	boolean_t rv;
 
@@ -1319,7 +1315,7 @@ moea_is_referenced(mmu_t mmu, vm_page_t m)
 }
 
 boolean_t
-moea_is_modified(mmu_t mmu, vm_page_t m)
+moea_is_modified(vm_page_t m)
 {
 	boolean_t rv;
 
@@ -1339,7 +1335,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
 }
 
 boolean_t
-moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+moea_is_prefaultable(pmap_t pmap, vm_offset_t va)
 {
 	struct pvo_entry *pvo;
 	boolean_t rv;
 
@@ -1352,7 +1348,7 @@ moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset
 }
 
 void
-moea_clear_modify(mmu_t mmu, vm_page_t m)
+moea_clear_modify(vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -1370,7 +1366,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
 * Clear the write and modified bits in each of the given page's mappings.
 */
 void
-moea_remove_write(mmu_t mmu, vm_page_t m)
+moea_remove_write(vm_page_t m)
 {
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
@@ -1425,7 +1421,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 * optimal aging of shared pages.
 */
 int
-moea_ts_referenced(mmu_t mmu, vm_page_t m)
+moea_ts_referenced(vm_page_t m)
 {
 	int count;
 
@@ -1441,7 +1437,7 @@ moea_ts_referenced(mmu_t mmu, vm_page_t m)
 * Modify the WIMG settings of all mappings for a page.
 */
 void
-moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
+moea_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 	struct	pvo_entry *pvo;
 	struct	pvo_head *pvo_head;
@@ -1481,14 +1477,14 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memat
 * Map a wired page into kernel virtual address space.
 */
 void
-moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
+moea_kenter(vm_offset_t va, vm_paddr_t pa)
 {
 
-	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+	moea_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
 }
 
 void
-moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+moea_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 {
 	u_int		pte_lo;
 	int		error;
@@ -1517,7 +1513,7 @@ moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t
 * address.
 */
 vm_paddr_t
-moea_kextract(mmu_t mmu, vm_offset_t va)
+moea_kextract(vm_offset_t va)
 {
 	struct	pvo_entry *pvo;
 	vm_paddr_t pa;
@@ -1541,10 +1537,10 @@ moea_kextract(mmu_t mmu, vm_offset_t va)
 * Remove a wired page from kernel virtual address space.
 */
 void
-moea_kremove(mmu_t mmu, vm_offset_t va)
+moea_kremove(vm_offset_t va)
 {
 
-	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
+	moea_remove(kernel_pmap, va, va + PAGE_SIZE);
 }
 
 /*
@@ -1553,7 +1549,7 @@ moea_kremove(mmu_t mmu, vm_offset_t va)
 * called in this thread. This is used internally in copyin/copyout.
 */
 int
-moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+moea_map_user_ptr(pmap_t pm, volatile const void *uaddr,
    void **kaddr, size_t ulen, size_t *klen)
 {
 	size_t l;
@@ -1592,7 +1588,7 @@ moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const
 * address space.
 */
 static int
-moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+moea_decode_kernel_ptr(vm_offset_t addr, int *is_user,
    vm_offset_t *decoded_addr)
 {
 	vm_offset_t user_sr;
 
@@ -1621,7 +1617,7 @@ moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, in
 * first usable address after the mapped region.
 */
 vm_offset_t
-moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
+moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
 {
 	vm_offset_t	sva, va;
@@ -1629,7 +1625,7 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_s
 	sva = *virt;
 	va = sva;
 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
-		moea_kenter(mmu, va, pa_start);
+		moea_kenter(va, pa_start);
 	*virt = va;
 	return (sva);
 }
@@ -1642,7 +1638,7 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_s
 * subset of pmaps for proper page aging.
 */
 boolean_t
-moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
+moea_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	int loops;
 	struct pvo_entry *pvo;
@@ -1666,7 +1662,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page
 }
 
 void
-moea_page_init(mmu_t mmu __unused, vm_page_t m)
+moea_page_init(vm_page_t m)
 {
 
 	m->md.mdpg_attrs = 0;
@@ -1679,7 +1675,7 @@ moea_page_init(mmu_t mmu __unused, vm_page_t m)
 * that are wired.
 */
 int
-moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
+moea_page_wired_mappings(vm_page_t m)
 {
 	struct pvo_entry *pvo;
 	int count;
@@ -1697,8 +1693,8 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
 
 static u_int	moea_vsidcontext;
 
-void
-moea_pinit(mmu_t mmu, pmap_t pmap)
+int
+moea_pinit(pmap_t pmap)
 {
 	int	i, mask;
 	u_int	entropy;
@@ -1708,7 +1704,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 	entropy = 0;
 	__asm __volatile("mftb %0" : "=r"(entropy));
 
-	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
+	if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap))
 	    == NULL) {
 		pmap->pmap_phys = pmap;
 	}
@@ -1752,7 +1748,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 			for (i = 0; i < 16; i++)
 				pmap->pm_sr[i] = VSID_MAKE(i, hash);
 			mtx_unlock(&moea_vsid_mutex);
-			return;
+			return (1);
 		}
 
 		mtx_unlock(&moea_vsid_mutex);
@@ -1763,11 +1759,11 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 * Initialize the pmap associated with process 0.
 */
 void
-moea_pinit0(mmu_t mmu, pmap_t pm)
+moea_pinit0(pmap_t pm)
 {
 
 	PMAP_LOCK_INIT(pm);
-	moea_pinit(mmu, pm);
+	moea_pinit(pm);
 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 }
 
@@ -1775,7 +1771,7 @@ moea_pinit0(mmu_t mmu, pmap_t pm)
 * Set the physical protection on the specified range of this map as requested.
 */
 void
-moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
+moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
 {
 	struct	pvo_entry *pvo, *tpvo, key;
@@ -1785,7 +1781,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm
 	    ("moea_protect: non current pmap"));
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
-		moea_remove(mmu, pm, sva, eva);
+		moea_remove(pm, sva, eva);
 		return;
 	}
 
@@ -1825,13 +1821,13 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm
 * references recorded. Existing mappings in the region are overwritten.
 */
 void
-moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
+moea_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
-		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
+		moea_kenter(va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
 }
@@ -1842,19 +1838,19 @@ moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m,
 * temporary mappings entered by moea_qenter.
 */
 void
-moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
+moea_qremove(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
-		moea_kremove(mmu, va);
+		moea_kremove(va);
 		va += PAGE_SIZE;
 	}
 }
 
 void
-moea_release(mmu_t mmu, pmap_t pmap)
+moea_release(pmap_t pmap)
 {
 	int idx, mask;
 
@@ -1876,7 +1872,7 @@ moea_release(mmu_t mmu, pmap_t pmap)
 * Remove the given range of addresses from the specified map.
 */
 void
-moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct	pvo_entry *pvo, *tpvo, key;
 
@@ -1897,7 +1893,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_
 * will reflect changes in pte's back to the vm_page.
 */
 void
-moea_remove_all(mmu_t mmu, vm_page_t m)
+moea_remove_all(vm_page_t m)
 {
 	struct	pvo_head *pvo_head;
 	struct	pvo_entry *pvo, *next_pvo;
@@ -2600,7 +2596,7 @@ moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size
 }
 
 boolean_t
-moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {
 	int i;
 
@@ -2623,14 +2619,14 @@ moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_si
 * NOT real memory.
 */
 void *
-moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+	return (moea_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
 }
 
 void *
-moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
+moea_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 {
 	vm_offset_t va, tmpva, ppa, offset;
 	int i;
@@ -2654,7 +2650,7 @@ moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t s
 		panic("moea_mapdev: Couldn't alloc kernel virtual memory");
 
 	for (tmpva = va; size > 0;) {
-		moea_kenter_attr(mmu, tmpva, ppa, ma);
+		moea_kenter_attr(tmpva, ppa, ma);
 		tlbie(tmpva);
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
@@ -2665,7 +2661,7 @@ moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t s
 }
 
 void
-moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
+moea_unmapdev(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t base, offset;
 
@@ -2682,7 +2678,7 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t siz
 }
 
 static void
-moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+moea_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t lim;
@@ -2706,7 +2702,7 @@ moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va,
 }
 
 void
-moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
+moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
 {
 
 	*va = (void *)pa;
 }
@@ -2715,7 +2711,7 @@ moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
 
 void
-moea_scan_init(mmu_t mmu)
+moea_scan_init()
 {
 	struct pvo_entry *pvo;
 	vm_offset_t va;

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Wed May 27 01:04:03 2020	(r361543)
+++ head/sys/powerpc/aim/mmu_oea64.c	Wed May 27 01:24:12 2020	(r361544)
@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/_inttypes.h>
 #include <machine/cpu.h>
+#include <machine/ifunc.h>
 #include <machine/platform.h>
 #include <machine/frame.h>
 #include <machine/md_var.h>
@@ -96,11 +97,9 @@ __FBSDID("$FreeBSD$");
 #include <machine/mmuvar.h>
 
 #include "mmu_oea64.h"
-#include "mmu_if.h"
-#include "moea64_if.h"
 
 void moea64_release_vsid(uint64_t vsid);
-uintptr_t moea64_get_unique_vsid(void);
+uintptr_t moea64_get_unique_vsid(void);
 
 #define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
 #define	ENABLE_TRANS(msr)	mtmsr(msr)
 
@@ -111,7 +110,7 @@ uintptr_t moea64_get_unique_vsid(void);
 
 /*
  * Locking semantics:
- * 
+ *
  * There are two locks of interest: the page locks and the pmap locks, which
 * protect their individual PVO lists and are locked in that order. The contents
 * of all PVO entries are protected by the locks of their respective pmaps.
@@ -122,7 +121,7 @@ uintptr_t moea64_get_unique_vsid(void);
 #define	PV_LOCK_COUNT	PA_LOCK_COUNT
 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
- 
+
 /*
 * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
 * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
@@ -184,7 +183,7 @@ uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */
 static struct	pvo_entry *moea64_bpvo_pool;
 static int	moea64_bpvo_pool_index = 0;
 static int	moea64_bpvo_pool_size = 0;
-SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD, 
+SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
 	   &moea64_bpvo_pool_index, 0, "");
 
 #define	BPVO_POOL_SIZE	327680 /* Sensible historical default value */
@@ -210,11 +209,11 @@ u_int	moea64_pte_overflow = 0;
 u_int	moea64_pvo_entries = 0;
 u_int	moea64_pvo_enter_calls = 0;
 u_int	moea64_pvo_remove_calls = 0;
-SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
 	   &moea64_pte_valid, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
 	   &moea64_pte_overflow, 0, "");
-SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
 	   &moea64_pvo_entries, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
 	   &moea64_pvo_enter_calls, 0, "");
@@ -233,149 +232,147 @@ int moea64_large_page_shift = 0;
 
 /*
 * PVO calls.
 */
-static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
+static int	moea64_pvo_enter(struct pvo_entry *pvo,
 	    struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
-static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
-static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
-static void	moea64_pvo_remove_from_page_locked(mmu_t mmu,
+static void	moea64_pvo_remove_from_pmap(struct pvo_entry *pvo);
+static void	moea64_pvo_remove_from_page(struct pvo_entry *pvo);
+static void	moea64_pvo_remove_from_page_locked(
 	    struct pvo_entry *pvo, vm_page_t m);
 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
 
 /*
 * Utility routines.
 */
-static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
-static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
-static void		moea64_kremove(mmu_t, vm_offset_t);
-static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
+static boolean_t	moea64_query_bit(vm_page_t, uint64_t);
+static u_int		moea64_clear_bit(vm_page_t, uint64_t);
+static void		moea64_kremove(vm_offset_t);
+static void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
 			    vm_paddr_t pa, vm_size_t sz);
 static void		moea64_pmap_init_qpages(void);
 
 /*
 * Kernel MMU interface
 */
-void moea64_clear_modify(mmu_t, vm_page_t);
-void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
-void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+void moea64_clear_modify(vm_page_t);
+void moea64_copy_page(vm_page_t, vm_page_t);
+void moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
-   u_int flags, int8_t psind);
-void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+int moea64_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
+   u_int flags, int8_t psind);
+void moea64_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
-void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
-vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void moea64_init(mmu_t);
-boolean_t moea64_is_modified(mmu_t, vm_page_t);
-boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t moea64_is_referenced(mmu_t, vm_page_t);
-int moea64_ts_referenced(mmu_t, vm_page_t);
-vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void moea64_page_init(mmu_t, vm_page_t);
-int moea64_page_wired_mappings(mmu_t, vm_page_t);
-void moea64_pinit(mmu_t, pmap_t);
-void moea64_pinit0(mmu_t, pmap_t);
-void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void moea64_qremove(mmu_t, vm_offset_t, int);
-void moea64_release(mmu_t, pmap_t);
-void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea64_remove_pages(mmu_t, pmap_t);
-void moea64_remove_all(mmu_t, vm_page_t);
-void moea64_remove_write(mmu_t, vm_page_t);
-void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea64_zero_page(mmu_t, vm_page_t);
-void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea64_activate(mmu_t, struct thread *);
-void moea64_deactivate(mmu_t, struct thread *);
-void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
-void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
-void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
-void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
-void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
+void moea64_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t moea64_extract(pmap_t, vm_offset_t);
+vm_page_t moea64_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
+void moea64_init(void);
+boolean_t moea64_is_modified(vm_page_t);
+boolean_t moea64_is_prefaultable(pmap_t, vm_offset_t);
+boolean_t moea64_is_referenced(vm_page_t);
+int moea64_ts_referenced(vm_page_t);
+vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+boolean_t moea64_page_exists_quick(pmap_t, vm_page_t);
+void moea64_page_init(vm_page_t);
+int moea64_page_wired_mappings(vm_page_t);
+int moea64_pinit(pmap_t);
+void moea64_pinit0(pmap_t);
+void moea64_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void moea64_qenter(vm_offset_t, vm_page_t *, int);
+void moea64_qremove(vm_offset_t, int);
+void moea64_release(pmap_t);
+void moea64_remove(pmap_t, vm_offset_t, vm_offset_t);
+void moea64_remove_pages(pmap_t);
+void moea64_remove_all(vm_page_t);
+void moea64_remove_write(vm_page_t);
+void moea64_unwire(pmap_t, vm_offset_t, vm_offset_t);
+void moea64_zero_page(vm_page_t);
+void moea64_zero_page_area(vm_page_t, int, int);
+void moea64_activate(struct thread *);
+void moea64_deactivate(struct thread *);
+void *moea64_mapdev(vm_paddr_t, vm_size_t);
+void *moea64_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+void moea64_unmapdev(vm_offset_t, vm_size_t);
+vm_paddr_t moea64_kextract(vm_offset_t);
+void moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+void moea64_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
+void moea64_kenter(vm_offset_t, vm_paddr_t);
+boolean_t moea64_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void moea64_sync_icache(pmap_t, vm_offset_t, vm_size_t);
+void moea64_dumpsys_map(vm_paddr_t pa, size_t sz,
    void **va);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
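
For readers following the mechanism described in the log message: under the old kobj scheme, every pmap_*() call went through a method-table lookup at run time, while an ELF ifunc runs a resolver function once, when the symbol is bound, and every later call jumps directly to the implementation the resolver returned. Below is a minimal, hypothetical userland sketch of that pattern, assuming a GNU/ELF toolchain (GCC or Clang); the moea_/booke_ names, the have_oea_mmu flag, and resolve_pmap_zero_page are invented stand-ins for the per-MMU drivers selected at boot, not the kernel's actual code, and the kernel wraps the raw attribute in its own helpers (note the <machine/ifunc.h> include added in the diff above).

    /*
     * ifunc_demo.c - illustrative sketch only, not FreeBSD kernel code.
     * Build on an ELF platform: cc -o ifunc_demo ifunc_demo.c
     */
    #include <stdio.h>

    /* Two "MMU driver" implementations of the same pmap operation. */
    static void moea_zero_page(void *p)  { printf("moea: zero %p\n", p); }
    static void booke_zero_page(void *p) { printf("booke: zero %p\n", p); }

    /* Stand-in for the platform probe; in the kernel this is known at boot. */
    static int have_oea_mmu = 1;

    /*
     * The resolver runs once, when pmap_zero_page is first bound.  It
     * returns the function that every subsequent pmap_zero_page() call
     * jumps to directly -- no per-call method lookup, unlike kobj dispatch.
     */
    static void (*resolve_pmap_zero_page(void))(void *)
    {
    	return (have_oea_mmu ? moea_zero_page : booke_zero_page);
    }

    void pmap_zero_page(void *p)
        __attribute__((ifunc("resolve_pmap_zero_page")));

    int main(void)
    {
    	char page[16];

    	pmap_zero_page(page);	/* resolved once, then called directly */
    	return (0);
    }

This ordering is also why the commit grows a second install hook: a resolver can only return the right entry point if the chosen struct pmap_funcs has already been installed by the time the kernel's ifuncs are resolved, so pmap_mmu_init() gives a driver such as moea64 a place to do its 'post-install, pre-bootstrap' setup first. kobj imposed no such constraint, since its method table was consulted again on every call.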