Date: Wed, 21 Oct 2015 14:57:59 +0000 (UTC) From: Ian Lepore <ian@FreeBSD.org> To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r289699 - in head/sys/mips: include mips Message-ID: <201510211457.t9LEvx3H000101@repo.freebsd.org>
next in thread | raw e-mail | index | archive | help
Author: ian Date: Wed Oct 21 14:57:59 2015 New Revision: 289699 URL: https://svnweb.freebsd.org/changeset/base/289699 Log: Switch from a stub to a real implementation of pmap_page_set_memattr() for mips, and implement support for VM_MEMATTR_UNCACHEABLE. This will be used in upcoming changes to support BUS_DMA_COHERENT in bus_dmamem_alloc(). Reviewed by: adrian, imp Modified: head/sys/mips/include/pmap.h head/sys/mips/mips/pmap.c Modified: head/sys/mips/include/pmap.h ============================================================================== --- head/sys/mips/include/pmap.h Wed Oct 21 13:59:00 2015 (r289698) +++ head/sys/mips/include/pmap.h Wed Oct 21 14:57:59 2015 (r289699) @@ -74,6 +74,7 @@ struct md_page { }; #define PV_TABLE_REF 0x02 /* referenced */ +#define PV_MEMATTR_UNCACHEABLE 0x04 #define ASID_BITS 8 #define ASIDGEN_BITS (32 - ASID_BITS) @@ -165,7 +166,6 @@ extern vm_paddr_t dump_avail[PHYS_AVAIL_ #define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) -#define pmap_page_set_memattr(m, ma) (void)0 void pmap_bootstrap(void); void *pmap_mapdev(vm_paddr_t, vm_size_t); @@ -179,6 +179,7 @@ void pmap_kenter_temporary_free(vm_paddr void pmap_flush_pvcache(vm_page_t m); int pmap_emulate_modified(pmap_t pmap, vm_offset_t va); void pmap_grow_direct_page_cache(void); +void pmap_page_set_memattr(vm_page_t, vm_memattr_t); #endif /* _KERNEL */ Modified: head/sys/mips/mips/pmap.c ============================================================================== --- head/sys/mips/mips/pmap.c Wed Oct 21 13:59:00 2015 (r289698) +++ head/sys/mips/mips/pmap.c Wed Oct 21 14:57:59 2015 (r289699) @@ -314,6 +314,15 @@ pmap_lmem_unmap(void) } #endif /* !__mips_n64 */ +static __inline int +is_cacheable_page(vm_paddr_t pa, vm_page_t m) +{ + + return ((m->md.pv_flags & PV_MEMATTR_UNCACHEABLE) == 0 && + is_cacheable_mem(pa)); + +} + /* * Page table entry 
lookup routines. */ @@ -2009,7 +2018,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, newpte |= PTE_W; if (is_kernel_pmap(pmap)) newpte |= PTE_G; - if (is_cacheable_mem(pa)) + if (is_cacheable_page(pa, m)) newpte |= PTE_C_CACHE; else newpte |= PTE_C_UNCACHED; @@ -2280,7 +2289,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_ if ((m->oflags & VPO_UNMANAGED) == 0) *pte |= PTE_MANAGED; - if (is_cacheable_mem(pa)) + if (is_cacheable_page(pa, m)) *pte |= PTE_C_CACHE; else *pte |= PTE_C_UNCACHED; @@ -2650,9 +2659,12 @@ pmap_quick_enter_page(vm_page_t m) pa = VM_PAGE_TO_PHYS(m); - if (MIPS_DIRECT_MAPPABLE(pa)) - return (MIPS_PHYS_TO_DIRECT(pa)); - + if (MIPS_DIRECT_MAPPABLE(pa)) { + if (m->md.pv_flags & PV_MEMATTR_UNCACHEABLE) + return (MIPS_PHYS_TO_DIRECT_UNCACHED(pa)); + else + return (MIPS_PHYS_TO_DIRECT(pa)); + } critical_enter(); sysm = &sysmap_lmem[PCPU_GET(cpuid)]; @@ -2660,7 +2672,7 @@ pmap_quick_enter_page(vm_page_t m) pte = pmap_pte(kernel_pmap, sysm->base); *pte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | - (is_cacheable_mem(pa) ? PTE_C_CACHE : PTE_C_UNCACHED); + (is_cacheable_page(pa, m) ? PTE_C_CACHE : PTE_C_UNCACHED); sysm->valid1 = 1; return (sysm->base); @@ -3520,3 +3532,27 @@ pmap_flush_pvcache(vm_page_t m) } } } + +void +pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) +{ + + /* + * It appears that this function can only be called before any mappings + * for the page are established. If this ever changes, this code will + * need to walk the pv_list and make each of the existing mappings + * uncacheable, being careful to sync caches and PTEs (and maybe + * invalidate TLB?) for any current mapping it modifies. + */ + if (TAILQ_FIRST(&m->md.pv_list) != NULL) + panic("Can't change memattr on page with existing mappings"); + + /* + * The only memattr we support is UNCACHEABLE, translate the (semi-)MI + * representation of that into our internal flag in the page MD struct. 
+ */ + if (ma == VM_MEMATTR_UNCACHEABLE) + m->md.pv_flags |= PV_MEMATTR_UNCACHEABLE; + else + m->md.pv_flags &= ~PV_MEMATTR_UNCACHEABLE; +}
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201510211457.t9LEvx3H000101>