Date: Wed, 31 Mar 2010 02:43:58 +0000 (UTC)
From: Marcel Moolenaar <marcel@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject: svn commit: r205956 - in stable/8/sys: amd64/amd64 arm/arm i386/i386
	i386/xen ia64/ia64 kern mips/mips powerpc/aim powerpc/booke
	powerpc/include powerpc/powerpc sparc64/sparc64 sun4v/sun4v vm
Message-ID: <201003310243.o2V2hwa9070387@svn.freebsd.org>
Author: marcel
Date: Wed Mar 31 02:43:58 2010
New Revision: 205956
URL: http://svn.freebsd.org/changeset/base/205956

Log:
  MFC rev 198341 and 198342:
  o Introduce vm_sync_icache() for making the I-cache coherent with
    the memory or D-cache, depending on the semantics of the platform.
    vm_sync_icache() is basically a wrapper around pmap_sync_icache()
    that translates the vm_map_t argument to pmap_t.
  o Introduce pmap_sync_icache() to all PMAP implementations. For powerpc
    it replaces the pmap_page_executable() function, added to solve
    the I-cache problem in uiomove_fromphys().
  o In proc_rwmem() call vm_sync_icache() when writing to a page that
    has execute permissions. This ensures that when breakpoints are
    written, the I-cache will be coherent and the process will actually
    hit the breakpoint.
  o This also fixes the Book-E PMAP implementation that was missing
    necessary locking while trying to deal with the I-cache coherency
    in pmap_enter() (read: mmu_booke_enter_locked).

Modified:
  stable/8/sys/amd64/amd64/pmap.c
  stable/8/sys/arm/arm/pmap.c
  stable/8/sys/i386/i386/pmap.c
  stable/8/sys/i386/xen/pmap.c
  stable/8/sys/ia64/ia64/pmap.c
  stable/8/sys/kern/sys_process.c
  stable/8/sys/mips/mips/pmap.c
  stable/8/sys/powerpc/aim/mmu_oea.c
  stable/8/sys/powerpc/aim/mmu_oea64.c
  stable/8/sys/powerpc/booke/pmap.c
  stable/8/sys/powerpc/include/pmap.h
  stable/8/sys/powerpc/powerpc/mmu_if.m
  stable/8/sys/powerpc/powerpc/pmap_dispatch.c
  stable/8/sys/powerpc/powerpc/uio_machdep.c
  stable/8/sys/sparc64/sparc64/pmap.c
  stable/8/sys/sun4v/sun4v/pmap.c
  stable/8/sys/vm/pmap.h
  stable/8/sys/vm/vm_extern.h
  stable/8/sys/vm/vm_glue.c

Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)
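For readers new to this interface, the pieces below connect as follows.
This is a condensed, illustrative sketch of the code added by this
commit; the authoritative versions are in the diffs that follow.

	/* 1. kern/sys_process.c: proc_rwmem(), after a successful write
	 *    to a page mapped with execute permission: */
	if (!error && writing && (out_prot & VM_PROT_EXECUTE))
		vm_sync_icache(map, uva, len);

	/* 2. vm/vm_glue.c: the MI wrapper only translates the map to
	 *    its pmap: */
	void
	vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
	{
		pmap_sync_icache(map->pmap, va, sz);
	}

	/* 3. <machine>/pmap.c: pmap_sync_icache() does whatever the
	 *    hardware needs; on amd64, i386, mips, sparc64 and sun4v the
	 *    stub is empty, while ia64 and powerpc flush the affected
	 *    range. */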
Modified: stable/8/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/8/sys/amd64/amd64/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/amd64/amd64/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -4756,6 +4756,11 @@ if (oldpmap) /* XXX FIXME */
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.

Modified: stable/8/sys/arm/arm/pmap.c
==============================================================================
--- stable/8/sys/arm/arm/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/arm/arm/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -2869,14 +2869,14 @@ pmap_kenter_internal(vm_offset_t va, vm_
 	if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) {
 		vm_page_lock_queues();
 		if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva) {
-			/* release vm_page lock for pv_entry UMA */
+			/* release vm_page lock for pv_entry UMA */
 			vm_page_unlock_queues();
 			if ((pve = pmap_get_pv_entry()) == NULL)
 				panic("pmap_kenter_internal: no pv entries");
 			vm_page_lock_queues();
 			PMAP_LOCK(pmap_kernel());
 			pmap_enter_pv(m, pve, pmap_kernel(), va,
-			    PVF_WRITE | PVF_UNMAN);
+			    PVF_WRITE | PVF_UNMAN);
 			pmap_fix_cache(m, pmap_kernel(), va);
 			PMAP_UNLOCK(pmap_kernel());
 		} else {
@@ -4573,6 +4573,12 @@ pmap_mincore(pmap_t pmap, vm_offset_t ad
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.

Modified: stable/8/sys/i386/i386/pmap.c
==============================================================================
--- stable/8/sys/i386/i386/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/i386/i386/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -4859,6 +4859,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.

Modified: stable/8/sys/i386/xen/pmap.c
==============================================================================
--- stable/8/sys/i386/xen/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/i386/xen/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -4175,6 +4175,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.

Modified: stable/8/sys/ia64/ia64/pmap.c
==============================================================================
--- stable/8/sys/ia64/ia64/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/ia64/ia64/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -2263,6 +2263,33 @@ out:
 	return (prevpm);
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+	pmap_t oldpm;
+	struct ia64_lpte *pte;
+	vm_offset_t lim;
+	vm_size_t len;
+
+	sz += va & 31;
+	va &= ~31;
+	sz = (sz + 31) & ~31;
+
+	PMAP_LOCK(pm);
+	oldpm = pmap_switch(pm);
+	while (sz > 0) {
+		lim = round_page(va);
+		len = MIN(lim - va, sz);
+		pte = pmap_find_vhpt(va);
+		if (pte != NULL && pmap_present(pte))
+			ia64_sync_icache(va, len);
+		va += len;
+		sz -= len;
+	}
+	pmap_switch(oldpm);
+	PMAP_UNLOCK(pm);
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
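A note on the ia64 version above: ia64_sync_icache() works on whole
32-byte blocks, so pmap_sync_icache() first widens the range to that
alignment. The helper below restates the arithmetic with a worked
example; it is illustrative only and not part of the commit (the
32-byte granule is the assumption the ia64 code itself makes).

	/* Illustrative only: the rounding the ia64 code performs,
	 * assuming a 32-byte cache-line granule. */
	static void
	icache_align_range(vm_offset_t *va, vm_size_t *sz)
	{
		*sz += *va & 31;		/* grow by the bytes cut off below */
		*va &= ~(vm_offset_t)31;	/* align start down to a line */
		*sz = (*sz + 31) & ~(vm_size_t)31; /* round length up to lines */
	}

	/* Example: va = 0x100b, sz = 0x05 becomes va = 0x1000,
	 * sz = 0x20: the original bytes [0x100b, 0x1010) are covered
	 * by the whole lines [0x1000, 0x1020). */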
Modified: stable/8/sys/kern/sys_process.c
==============================================================================
--- stable/8/sys/kern/sys_process.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/kern/sys_process.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -341,6 +341,10 @@ proc_rwmem(struct proc *p, struct uio *u
 		 */
 		error = uiomove_fromphys(&m, page_offset, len, uio);
 
+		/* Make the I-cache coherent for breakpoints. */
+		if (!error && writing && (out_prot & VM_PROT_EXECUTE))
+			vm_sync_icache(map, uva, len);
+
 		/*
 		 * Release the page.
 		 */
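The sys_process.c hunk above is the heart of the change: breakpoints
planted through ptrace(2) go through proc_rwmem(), which now makes the
I-cache coherent after the write. A minimal userland sketch of that
path follows; error handling is elided, and the opcode 0x7fe00008 (a
PowerPC unconditional trap) is an assumption for illustration only, as
the breakpoint encoding is machine-dependent.

	#include <sys/types.h>
	#include <sys/ptrace.h>

	/*
	 * Plant a breakpoint in a stopped, traced child.  PT_WRITE_I
	 * goes through proc_rwmem(), which now syncs the I-cache for
	 * us.  0x7fe00008 is a placeholder trap instruction.
	 */
	static int
	set_breakpoint(pid_t pid, caddr_t addr, int *saved)
	{
		*saved = ptrace(PT_READ_I, pid, addr, 0); /* original insn */
		return (ptrace(PT_WRITE_I, pid, addr, 0x7fe00008));
	}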
Modified: stable/8/sys/mips/mips/pmap.c
==============================================================================
--- stable/8/sys/mips/mips/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/mips/mips/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -2903,6 +2903,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.

Modified: stable/8/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- stable/8/sys/powerpc/aim/mmu_oea.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/powerpc/aim/mmu_oea.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -330,7 +330,7 @@ void moea_unmapdev(mmu_t, vm_offset_t, v
 vm_offset_t moea_kextract(mmu_t, vm_offset_t);
 void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
 boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea_page_executable(mmu_t, vm_page_t);
+static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 
 static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
@@ -357,6 +357,7 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_remove,		moea_remove),
 	MMUMETHOD(mmu_remove_all,	moea_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea_remove_write),
+	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
 	MMUMETHOD(mmu_zero_page,	moea_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
@@ -371,7 +372,6 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_kextract,		moea_kextract),
 	MMUMETHOD(mmu_kenter,		moea_kenter),
 	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
-	MMUMETHOD(mmu_page_executable,	moea_page_executable),
 
 	{ 0, 0 }
 };
@@ -2361,12 +2361,6 @@ moea_dev_direct_mapped(mmu_t mmu, vm_off
 	return (EFAULT);
 }
 
-boolean_t
-moea_page_executable(mmu_t mmu, vm_page_t pg)
-{
-	return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC);
-}
-
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where it is mapped. This
@@ -2426,3 +2420,27 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va,
 		kmem_free(kernel_map, base, size);
 	}
 }
+
+static void
+moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+	struct pvo_entry *pvo;
+	vm_offset_t lim;
+	vm_paddr_t pa;
+	vm_size_t len;
+
+	PMAP_LOCK(pm);
+	while (sz > 0) {
+		lim = round_page(va);
+		len = MIN(lim - va, sz);
+		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+		if (pvo != NULL) {
+			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+			    (va & ADDR_POFF);
+			moea_syncicache(pa, len);
+		}
+		va += len;
+		sz -= len;
+	}
+	PMAP_UNLOCK(pm);
+}

Modified: stable/8/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/8/sys/powerpc/aim/mmu_oea64.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/powerpc/aim/mmu_oea64.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -362,7 +362,7 @@ static boolean_t moea64_query_bit(vm_pag
 static u_int	moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
 static void	moea64_kremove(mmu_t, vm_offset_t);
 static void	moea64_syncicache(pmap_t pmap, vm_offset_t va,
-		    vm_offset_t pa);
+		    vm_offset_t pa, vm_size_t sz);
 static void	tlbia(void);
 
 /*
@@ -403,7 +403,7 @@ void moea64_unmapdev(mmu_t, vm_offset_t,
 vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
 void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
 boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea64_page_executable(mmu_t, vm_page_t);
+static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 
 static mmu_method_t moea64_bridge_methods[] = {
 	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
@@ -430,6 +430,7 @@ static mmu_method_t moea64_bridge_method
 	MMUMETHOD(mmu_remove,		moea64_remove),
 	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
+	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
 	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
@@ -444,7 +445,6 @@ static mmu_method_t moea64_bridge_method
 	MMUMETHOD(mmu_kextract,		moea64_kextract),
 	MMUMETHOD(mmu_kenter,		moea64_kenter),
 	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
-	MMUMETHOD(mmu_page_executable,	moea64_page_executable),
 
 	{ 0, 0 }
 };
@@ -1256,12 +1256,12 @@ moea64_enter_locked(pmap_t pmap, vm_offs
 	 * mapped executable and cacheable.
 	 */
 	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-		moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m));
+		moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 	}
 }
 
 static void
-moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
+moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
 {
 
 	/*
@@ -1278,16 +1278,16 @@ moea64_syncicache(pmap_t pmap, vm_offset
 	 * If PMAP is not bootstrapped, we are likely to be
 	 * in real mode.
 	 */
-		__syncicache((void *)pa,PAGE_SIZE);
+		__syncicache((void *)pa, sz);
 	} else if (pmap == kernel_pmap) {
-		__syncicache((void *)va,PAGE_SIZE);
+		__syncicache((void *)va, sz);
 	} else {
 		/* Use the scratch page to set up a temp mapping */
 
 		mtx_lock(&moea64_scratchpage_mtx);
 
 		moea64_set_scratchpage_pa(1,pa);
-		__syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
+		__syncicache((void *)moea64_scratchpage_va[1], sz);
 		mtx_unlock(&moea64_scratchpage_mtx);
 	}
 
@@ -1803,8 +1803,9 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 			    pvo->pvo_pmap, PVO_VADDR(pvo));
 			if ((pvo->pvo_pte.lpte.pte_lo &
 			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-				moea64_syncicache(pm, sva,
-				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+				moea64_syncicache(pm, sva,
+				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+				    PAGE_SIZE);
 			}
 		}
 		UNLOCK_TABLE();
@@ -2431,12 +2432,6 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_o
 	return (error);
 }
 
-boolean_t
-moea64_page_executable(mmu_t mmu, vm_page_t pg)
-{
-	return (!moea64_query_bit(pg, LPTE_NOEXEC));
-}
-
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where it is mapped. This
@@ -2479,3 +2474,26 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t v
 	kmem_free(kernel_map, base, size);
 }
 
+static void
+moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+	struct pvo_entry *pvo;
+	vm_offset_t lim;
+	vm_paddr_t pa;
+	vm_size_t len;
+
+	PMAP_LOCK(pm);
+	while (sz > 0) {
+		lim = round_page(va);
+		len = MIN(lim - va, sz);
+		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+		if (pvo != NULL) {
+			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+			    (va & ADDR_POFF);
+			moea64_syncicache(pm, va, pa, len);
+		}
+		va += len;
+		sz -= len;
+	}
+	PMAP_UNLOCK(pm);
+}

Modified: stable/8/sys/powerpc/booke/pmap.c
==============================================================================
--- stable/8/sys/powerpc/booke/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/powerpc/booke/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -319,7 +319,8 @@ static vm_offset_t mmu_booke_kextract(mm
 static void	mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
 static void	mmu_booke_kremove(mmu_t, vm_offset_t);
 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);
+static void	mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
+    vm_size_t);
 static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
     vm_size_t, vm_size_t *);
 static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
@@ -357,6 +358,7 @@ static mmu_method_t mmu_booke_methods[]
 	MMUMETHOD(mmu_remove,		mmu_booke_remove),
 	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
 	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
+	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
@@ -370,7 +372,6 @@ static mmu_method_t mmu_booke_methods[]
 	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
-	MMUMETHOD(mmu_page_executable,	mmu_booke_page_executable),
 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
 
	/* dumpsys() support */
@@ -1682,21 +1683,6 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t
 			__syncicache((void *)va, PAGE_SIZE);
 			sync = 0;
 		}
-
-		if (sync) {
-			/* Create a temporary mapping. */
-			pmap = PCPU_GET(curpmap);
-
-			va = 0;
-			pte = pte_find(mmu, pmap, va);
-			KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
-
-			flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;
-
-			pte_enter(mmu, pmap, m, va, flags);
-			__syncicache((void *)va, PAGE_SIZE);
-			pte_remove(mmu, pmap, va, PTBL_UNHOLD);
-		}
 	}
 
 /*
@@ -1991,25 +1977,47 @@ mmu_booke_remove_write(mmu_t mmu, vm_pag
 	vm_page_flag_clear(m, PG_WRITEABLE);
 }
 
-static boolean_t
-mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
+static void
+mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
-	pv_entry_t pv;
 	pte_t *pte;
-	boolean_t executable;
+	pmap_t pmap;
+	vm_page_t m;
+	vm_offset_t addr;
+	vm_paddr_t pa;
+	int active, valid;
+
+	va = trunc_page(va);
+	sz = round_page(sz);
 
-	executable = FALSE;
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
-		PMAP_LOCK(pv->pv_pmap);
-		pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
-		if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
-			executable = TRUE;
-		PMAP_UNLOCK(pv->pv_pmap);
-		if (executable)
-			break;
+	vm_page_lock_queues();
+	pmap = PCPU_GET(curpmap);
+	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
+	while (sz > 0) {
+		PMAP_LOCK(pm);
+		pte = pte_find(mmu, pm, va);
+		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
+		if (valid)
+			pa = PTE_PA(pte);
+		PMAP_UNLOCK(pm);
+		if (valid) {
+			if (!active) {
+				/* Create a mapping in the active pmap. */
+				addr = 0;
+				m = PHYS_TO_VM_PAGE(pa);
+				PMAP_LOCK(pmap);
+				pte_enter(mmu, pmap, m, addr,
+				    PTE_SR | PTE_VALID | PTE_UR);
+				__syncicache((void *)addr, PAGE_SIZE);
+				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
+				PMAP_UNLOCK(pmap);
+			} else
+				__syncicache((void *)va, PAGE_SIZE);
+		}
+		va += PAGE_SIZE;
+		sz -= PAGE_SIZE;
 	}
-
-	return (executable);
+	vm_page_unlock_queues();
 }
 
 /*

Modified: stable/8/sys/powerpc/include/pmap.h
==============================================================================
--- stable/8/sys/powerpc/include/pmap.h	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/powerpc/include/pmap.h	Wed Mar 31 02:43:58 2010	(r205956)
@@ -171,7 +171,6 @@ void		pmap_bootstrap(vm_offset_t, vm_off
 void		pmap_kenter(vm_offset_t va, vm_offset_t pa);
 void		pmap_kremove(vm_offset_t);
 void		*pmap_mapdev(vm_offset_t, vm_size_t);
-boolean_t	pmap_page_executable(vm_page_t);
 void		pmap_unmapdev(vm_offset_t, vm_size_t);
 void		pmap_deactivate(struct thread *);
 vm_offset_t	pmap_kextract(vm_offset_t);

Modified: stable/8/sys/powerpc/powerpc/mmu_if.m
==============================================================================
--- stable/8/sys/powerpc/powerpc/mmu_if.m	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/powerpc/powerpc/mmu_if.m	Wed Mar 31 02:43:58 2010	(r205956)
@@ -789,15 +789,21 @@ METHOD boolean_t dev_direct_mapped {
 
 
 /**
- * @brief Evaluate if a physical page has an executable mapping
- *
- * @param _pg		physical page
- *
- * @retval bool		TRUE if a physical mapping exists for the given page.
+ * @brief Enforce instruction cache coherency. Typically called after a
+ * region of memory has been modified and before execution of or within
+ * that region is attempted. Setting breakpoints in a process through
+ * ptrace(2) is one example of when the instruction cache needs to be
+ * made coherent.
+ *
+ * @param _pm		the physical map of the virtual address
+ * @param _va		the virtual address of the modified region
+ * @param _sz		the size of the modified region
  */
-METHOD boolean_t page_executable {
+METHOD void sync_icache {
	mmu_t		_mmu;
-	vm_page_t	_pg;
+	pmap_t		_pm;
+	vm_offset_t	_va;
+	vm_size_t	_sz;
 };
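For reference, mmu_if.m is compiled into kobj(9) glue at build time, so
the METHOD above becomes a dispatch stub that pmap_dispatch.c (below)
invokes as MMU_SYNC_ICACHE(). The generated inline follows the standard
kobj method pattern, roughly as sketched here; this is a sketch of that
pattern, not the actual generated output, and may differ in detail.

	typedef void mmu_sync_icache_t(mmu_t _mmu, pmap_t _pm,
	    vm_offset_t _va, vm_size_t _sz);

	static __inline void MMU_SYNC_ICACHE(mmu_t _mmu, pmap_t _pm,
	    vm_offset_t _va, vm_size_t _sz)
	{
		kobjop_t _m;

		/* Look up the implementation registered via MMUMETHOD(). */
		KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_sync_icache);
		((mmu_sync_icache_t *)_m)(_mmu, _pm, _va, _sz);
	}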
Modified: stable/8/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- stable/8/sys/powerpc/powerpc/pmap_dispatch.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/powerpc/powerpc/pmap_dispatch.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -457,12 +457,12 @@ pmap_dev_direct_mapped(vm_offset_t pa, v
 	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
 }
 
-boolean_t
-pmap_page_executable(vm_page_t pg)
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
-
-	CTR2(KTR_PMAP, "%s(%p)", __func__, pg);
-	return (MMU_PAGE_EXECUTABLE(mmu_obj, pg));
+
+	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
+	return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
 }
 
 vm_offset_t

Modified: stable/8/sys/powerpc/powerpc/uio_machdep.c
==============================================================================
--- stable/8/sys/powerpc/powerpc/uio_machdep.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/powerpc/powerpc/uio_machdep.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -107,9 +107,6 @@ uiomove_fromphys(vm_page_t ma[], vm_offs
 					sf_buf_free(sf);
 					goto out;
 				}
-				if (uio->uio_rw == UIO_WRITE &&
-				    pmap_page_executable(m))
-					__syncicache(cp, cnt);
 				break;
 			case UIO_SYSSPACE:
 				if (uio->uio_rw == UIO_READ)

Modified: stable/8/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/8/sys/sparc64/sparc64/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/sparc64/sparc64/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -1995,6 +1995,11 @@ pmap_activate(struct thread *td)
 	mtx_unlock_spin(&sched_lock);
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.

Modified: stable/8/sys/sun4v/sun4v/pmap.c
==============================================================================
--- stable/8/sys/sun4v/sun4v/pmap.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/sun4v/sun4v/pmap.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -424,6 +424,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.

Modified: stable/8/sys/vm/pmap.h
==============================================================================
--- stable/8/sys/vm/pmap.h	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/vm/pmap.h	Wed Mar 31 02:43:58 2010	(r205956)
@@ -133,6 +133,7 @@ void		 pmap_remove(pmap_t, vm_offset_t,
 void		 pmap_remove_all(vm_page_t m);
 void		 pmap_remove_pages(pmap_t);
 void		 pmap_remove_write(vm_page_t m);
+void		 pmap_sync_icache(pmap_t, vm_offset_t, vm_size_t);
 void		 pmap_zero_page(vm_page_t);
 void		 pmap_zero_page_area(vm_page_t, int off, int size);
 void		 pmap_zero_page_idle(vm_page_t);

Modified: stable/8/sys/vm/vm_extern.h
==============================================================================
--- stable/8/sys/vm/vm_extern.h	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/vm/vm_extern.h	Wed Mar 31 02:43:58 2010	(r205956)
@@ -63,6 +63,7 @@ int vm_forkproc(struct thread *, struct
 void vm_waitproc(struct proc *);
 int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
 void vm_set_page_size(void);
+void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t);
 struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
 struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
 int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);

Modified: stable/8/sys/vm/vm_glue.c
==============================================================================
--- stable/8/sys/vm/vm_glue.c	Wed Mar 31 02:20:22 2010	(r205955)
+++ stable/8/sys/vm/vm_glue.c	Wed Mar 31 02:43:58 2010	(r205956)
@@ -309,6 +309,13 @@ vm_imgact_unmap_page(struct sf_buf *sf)
 	vm_page_unlock_queues();
 }
 
+void
+vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
+{
+
+	pmap_sync_icache(map->pmap, va, sz);
+}
+
 struct kstack_cache_entry {
 	vm_object_t ksobj;
 	struct kstack_cache_entry *next_ks_entry;
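The vm_glue.c hunk above gives machine-independent code an entry point
keyed on the vm_map_t it already holds, which is exactly what callers
like proc_rwmem() have at hand. A hypothetical caller would look like
the following; copy_code_to_map(), target_map, code, dst and len are
placeholders invented for illustration, not kernel symbols.

	/*
	 * Hypothetical MI consumer: after depositing instructions into
	 * another process's address space, make them visible to the
	 * target's instruction fetch.
	 */
	error = copy_code_to_map(target_map, code, dst, len);
	if (error == 0)
		vm_sync_icache(target_map, dst, len);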