Date:      Fri, 4 Oct 2019 15:27:11 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject:   svn commit: r353106 - in stable/12/sys: arm64/arm64 arm64/include riscv/include riscv/riscv
Message-ID: <201910041527.x94FRBbA036857@repo.freebsd.org>
Author: markj
Date: Fri Oct 4 15:27:10 2019
New Revision: 353106
URL: https://svnweb.freebsd.org/changeset/base/353106

Log:
  MFC r352826:
  Implement pmap_page_is_mapped() correctly on arm64 and riscv.

Modified:
  stable/12/sys/arm64/arm64/pmap.c
  stable/12/sys/arm64/include/pmap.h
  stable/12/sys/riscv/include/pmap.h
  stable/12/sys/riscv/riscv/pmap.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/arm64/arm64/pmap.c
==============================================================================
--- stable/12/sys/arm64/arm64/pmap.c	Fri Oct 4 15:24:16 2019	(r353105)
+++ stable/12/sys/arm64/arm64/pmap.c	Fri Oct 4 15:27:10 2019	(r353106)
@@ -4250,6 +4250,27 @@ restart:
 }
 
 /*
+ * Returns true if the given page is mapped individually or as part of
+ * a 2mpage.  Otherwise, returns false.
+ */
+bool
+pmap_page_is_mapped(vm_page_t m)
+{
+	struct rwlock *lock;
+	bool rv;
+
+	if ((m->oflags & VPO_UNMANAGED) != 0)
+		return (false);
+	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+	rw_rlock(lock);
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+	rw_runlock(lock);
+	return (rv);
+}
+
+/*
  * Destroy all managed, non-wired mappings in the given user-space
  * pmap.  This pmap cannot be active on any processor besides the
  * caller.

Modified: stable/12/sys/arm64/include/pmap.h
==============================================================================
--- stable/12/sys/arm64/include/pmap.h	Fri Oct 4 15:24:16 2019	(r353105)
+++ stable/12/sys/arm64/include/pmap.h	Fri Oct 4 15:27:10 2019	(r353106)
@@ -152,6 +152,7 @@ vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);
 void	pmap_kremove_device(vm_offset_t, vm_size_t);
 void	*pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t ma);
+bool	pmap_page_is_mapped(vm_page_t m);
 bool	pmap_ps_enabled(pmap_t pmap);
 
 void	*pmap_mapdev(vm_offset_t, vm_size_t);
@@ -168,8 +169,6 @@ bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t *
 int	pmap_fault(pmap_t, uint64_t, uint64_t);
 
 struct pcb *pmap_switch(struct thread *, struct thread *);
-
-#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 
 static inline int
 pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)

Modified: stable/12/sys/riscv/include/pmap.h
==============================================================================
--- stable/12/sys/riscv/include/pmap.h	Fri Oct 4 15:24:16 2019	(r353105)
+++ stable/12/sys/riscv/include/pmap.h	Fri Oct 4 15:27:10 2019	(r353106)
@@ -149,6 +149,7 @@ void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_pad
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);
 void	pmap_kremove_device(vm_offset_t, vm_size_t);
+bool	pmap_page_is_mapped(vm_page_t m);
 bool	pmap_ps_enabled(pmap_t);
 
 void	*pmap_mapdev(vm_offset_t, vm_size_t);
@@ -161,8 +162,6 @@ void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t 
 
 bool	pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
     pt_entry_t **);
-
-#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 
 int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t);
 

Modified: stable/12/sys/riscv/riscv/pmap.c
==============================================================================
--- stable/12/sys/riscv/riscv/pmap.c	Fri Oct 4 15:24:16 2019	(r353105)
+++ stable/12/sys/riscv/riscv/pmap.c	Fri Oct 4 15:27:10 2019	(r353106)
@@ -3545,6 +3545,27 @@ restart:
 	return (count);
 }
 
+/*
+ * Returns true if the given page is mapped individually or as part of
+ * a 2mpage.  Otherwise, returns false.
+ */
+bool
+pmap_page_is_mapped(vm_page_t m)
+{
+	struct rwlock *lock;
+	bool rv;
+
+	if ((m->oflags & VPO_UNMANAGED) != 0)
+		return (false);
+	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+	rw_rlock(lock);
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+	rw_runlock(lock);
+	return (rv);
+}
+
 static void
 pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv, struct spglist *free,
     bool superpage)
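
For context: before this change, pmap_page_is_mapped() on arm64 and riscv
was the header macro removed above, which only inspects the page's own
pv_list.  A page mapped solely as part of a 2mpage keeps its pv entry on
the superpage pv list reached through pa_to_pvh(), so the macro could
report such a page as unmapped.  A short recap of the difference, using
only names that appear in the diff (a sketch for illustration, not new
code from the commit):

	/*
	 * Old definition (removed from both pmap.h headers): only the
	 * page's own 4K pv list is examined, so a page mapped only as
	 * part of a 2mpage looks unmapped.
	 */
	#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))

	/*
	 * New definition (added to both pmap.c files): unmanaged pages
	 * are never considered mapped; for managed, non-fictitious
	 * pages the 2mpage pv list (pa_to_pvh()) is consulted as well,
	 * with the check performed under the page's PV list lock.
	 */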