From owner-svn-src-head@FreeBSD.ORG  Wed Jul 29 19:38:37 2009
Return-Path: <owner-svn-src-head@FreeBSD.ORG>
Delivered-To: svn-src-head@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34])
	by hub.freebsd.org (Postfix) with ESMTP id 6C82610657CA;
	Wed, 29 Jul 2009 19:38:37 +0000 (UTC) (envelope-from kib@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c])
	by mx1.freebsd.org (Postfix) with ESMTP id AC5E28FC1A;
	Wed, 29 Jul 2009 19:38:33 +0000 (UTC) (envelope-from kib@FreeBSD.org)
Received: from svn.freebsd.org (localhost [127.0.0.1])
	by svn.freebsd.org (8.14.3/8.14.3) with ESMTP id n6TJcXtP080410;
	Wed, 29 Jul 2009 19:38:33 GMT (envelope-from kib@svn.freebsd.org)
Received: (from kib@localhost)
	by svn.freebsd.org (8.14.3/8.14.3/Submit) id n6TJcXMP080408;
	Wed, 29 Jul 2009 19:38:33 GMT (envelope-from kib@svn.freebsd.org)
Message-Id: <200907291938.n6TJcXMP080408@svn.freebsd.org>
From: Konstantin Belousov <kib@FreeBSD.org>
Date: Wed, 29 Jul 2009 19:38:33 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-head@freebsd.org
X-SVN-Group: head
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc: 
Subject: svn commit: r195949 - head/sys/i386/xen
X-BeenThere: svn-src-head@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: SVN commit messages for the src tree for head/-current
	<svn-src-head.freebsd.org>
List-Unsubscribe: <http://lists.freebsd.org/mailman/listinfo/svn-src-head>,
	<mailto:svn-src-head-unsubscribe@freebsd.org>
List-Archive: <http://lists.freebsd.org/pipermail/svn-src-head>
List-Post: <mailto:svn-src-head@freebsd.org>
List-Help: <mailto:svn-src-head-request@freebsd.org?subject=help>
List-Subscribe: <http://lists.freebsd.org/mailman/listinfo/svn-src-head>,
	<mailto:svn-src-head-subscribe@freebsd.org>
X-List-Received-Date: Wed, 29 Jul 2009 19:38:38 -0000

Author: kib
Date: Wed Jul 29 19:38:33 2009
New Revision: 195949
URL: http://svn.freebsd.org/changeset/base/195949

Log:
  Fix XEN build breakage by implementing pmap_invalidate_cache_range()
  and using it when appropriate.  Merge an analogue of the r195836
  optimization to XEN.
  
  Approved by:	re (kensmith)

Modified:
  head/sys/i386/xen/pmap.c

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c	Wed Jul 29 19:19:13 2009	(r195948)
+++ head/sys/i386/xen/pmap.c	Wed Jul 29 19:38:33 2009	(r195949)
@@ -121,6 +121,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/sf_buf.h>
 #include
 #include
 #include
@@ -759,7 +760,7 @@ pmap_init(void)
  * Determine the appropriate bits to set in a PTE or PDE for a specified
  * caching mode.
  */
-static int
+int
 pmap_cache_bits(int mode, boolean_t is_pde)
 {
 	int pat_flag, pat_index, cache_bits;
@@ -988,6 +989,40 @@ pmap_invalidate_cache(void)
 }
 #endif /* !SMP */
 
+void
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+{
+
+	KASSERT((sva & PAGE_MASK) == 0,
+	    ("pmap_invalidate_cache_range: sva not page-aligned"));
+	KASSERT((eva & PAGE_MASK) == 0,
+	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+
+	if (cpu_feature & CPUID_SS)
+		; /* If "Self Snoop" is supported, do nothing. */
+	else if (cpu_feature & CPUID_CLFSH) {
+
+		/*
+		 * Otherwise, do per-cache line flush.  Use the mfence
+		 * instruction to insure that previous stores are
+		 * included in the write-back.  The processor
+		 * propagates flush to other processors in the cache
+		 * coherence domain.
+		 */
+		mfence();
+		for (; sva < eva; sva += cpu_clflush_line_size)
+			clflush(sva);
+		mfence();
+	} else {
+
+		/*
+		 * No targeted cache flush methods are supported by CPU,
+		 * globally invalidate cache as a last resort.
+		 */
+		pmap_invalidate_cache();
+	}
+}
+
 /*
  * Are we current address space or kernel?  N.B. We return FALSE when
  * a pmap's page table is in use because a kernel thread is borrowing
@@ -3865,7 +3900,8 @@ pmap_clear_reference(vm_page_t m)
 void *
 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 {
-	vm_offset_t va, tmpva, offset;
+	vm_offset_t va, offset;
+	vm_size_t tmpsize;
 
 	offset = pa & PAGE_MASK;
 	size = roundup(offset + size, PAGE_SIZE);
@@ -3878,14 +3914,10 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
 	if (!va)
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
-	for (tmpva = va; size > 0; ) {
-		pmap_kenter_attr(tmpva, pa, mode);
-		size -= PAGE_SIZE;
-		tmpva += PAGE_SIZE;
-		pa += PAGE_SIZE;
-	}
-	pmap_invalidate_range(kernel_pmap, va, tmpva);
-	pmap_invalidate_cache();
+	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
+		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
+	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
+	pmap_invalidate_cache_range(va, va + size);
 
 	return ((void *)(va + offset));
 }
@@ -3927,16 +3959,49 @@ pmap_unmapdev(vm_offset_t va, vm_size_t
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+	struct sysmaps *sysmaps;
+	vm_offset_t sva, eva;
 
 	m->md.pat_mode = ma;
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		return;
 
 	/*
 	 * If "m" is a normal page, flush it from the cache.
+	 * See pmap_invalidate_cache_range().
+	 *
+	 * First, try to find an existing mapping of the page by sf
+	 * buffer.  sf_buf_invalidate_cache() modifies mapping and
+	 * flushes the cache.
 	 */
-	if ((m->flags & PG_FICTITIOUS) == 0) {
-		/* If "Self Snoop" is supported, do nothing. */
-		if (!(cpu_feature & CPUID_SS))
-			pmap_invalidate_cache();
+	if (sf_buf_invalidate_cache(m))
+		return;
+
+	/*
+	 * If page is not mapped by sf buffer, but CPU does not
+	 * support self snoop, map the page transient and do
+	 * invalidation.  In the worst case, whole cache is flushed by
+	 * pmap_invalidate_cache_range().
+	 */
+	if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
+		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+		mtx_lock(&sysmaps->lock);
+		if (*sysmaps->CMAP2)
+			panic("pmap_page_set_memattr: CMAP2 busy");
+		sched_pin();
+		PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
+		    xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M |
+		    pmap_cache_bits(m->md.pat_mode, 0));
+		invlcaddr(sysmaps->CADDR2);
+		sva = (vm_offset_t)sysmaps->CADDR2;
+		eva = sva + PAGE_SIZE;
+	} else
+		sva = eva = 0; /* gcc */
+	pmap_invalidate_cache_range(sva, eva);
+	if (sva != 0) {
+		PT_SET_MA(sysmaps->CADDR2, 0);
+		sched_unpin();
+		mtx_unlock(&sysmaps->lock);
 	}
 }
 
@@ -3950,6 +4015,7 @@ pmap_change_attr(va, size, mode)
 	pt_entry_t *pte;
 	u_int opte, npte;
 	pd_entry_t *pde;
+	boolean_t changed;
 
 	base = trunc_page(va);
 	offset = va & PAGE_MASK;
@@ -3971,6 +4037,8 @@ pmap_change_attr(va, size, mode)
 		return (EINVAL);
 	}
 
+	changed = FALSE;
+
 	/*
 	 * Ok, all the pages exist and are 4k, so run through them updating
 	 * their cache mode.
@@ -3988,6 +4056,8 @@ pmap_change_attr(va, size, mode)
 			npte |= pmap_cache_bits(mode, 0);
 			PT_SET_VA_MA(pte, npte, TRUE);
 		} while (npte != opte && (*pte != npte));
+		if (npte != opte)
+			changed = TRUE;
 		tmpva += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
@@ -3996,8 +4066,10 @@ pmap_change_attr(va, size, mode)
 	 * Flush CPU caches to make sure any data isn't cached that shouldn't
 	 * be, etc.
 	 */
-	pmap_invalidate_range(kernel_pmap, base, tmpva);
-	pmap_invalidate_cache();
+	if (changed) {
+		pmap_invalidate_range(kernel_pmap, base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva);
+	}
 
 	return (0);
 }
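
For readers who want to experiment with the new cache-flush path outside
the kernel, the CLFLUSH loop at the heart of pmap_invalidate_cache_range()
can be reproduced in userland.  The sketch below is an illustration, not
FreeBSD kernel code: the hypothetical flush_cache_range() uses the SSE2
intrinsics _mm_mfence() and _mm_clflush() from <emmintrin.h> in place of
the kernel's mfence() and clflush() wrappers, and the 64-byte line size is
an assumption, where the kernel instead tests CPUID_SS/CPUID_CLFSH and
uses cpu_clflush_line_size as reported by CPUID.

/*
 * Userland sketch of the per-line flush in pmap_invalidate_cache_range().
 * Assumptions (not from the commit): SSE2 intrinsics are available and
 * the cache line is 64 bytes.
 */
#include <emmintrin.h>		/* _mm_mfence(), _mm_clflush() */
#include <stdint.h>
#include <stdlib.h>

#define	CACHE_LINE	64	/* assumed; kernel uses cpu_clflush_line_size */

static void
flush_cache_range(uintptr_t sva, uintptr_t eva)
{

	/*
	 * Fence on both sides of the loop, mirroring the mfence()
	 * bracketing in the committed code: the first fence makes
	 * preceding stores part of the write-back, the second keeps
	 * later accesses from passing the flushes.
	 */
	_mm_mfence();
	for (; sva < eva; sva += CACHE_LINE)
		_mm_clflush((const void *)sva);
	_mm_mfence();
}

int
main(void)
{
	char *buf;

	/* A page-aligned range, as the KASSERTs in the commit require. */
	buf = aligned_alloc(4096, 4096);
	if (buf == NULL)
		return (1);
	buf[0] = 1;		/* dirty a cache line */
	flush_cache_range((uintptr_t)buf, (uintptr_t)(buf + 4096));
	free(buf);
	return (0);
}

The fence placement is the point of the exercise; it is also why the loop
must walk sva up toward eva one cpu_clflush_line_size step at a time, so
that every line in the page-aligned range passes through CLFLUSH exactly
once between the two fences.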