Date: Fri, 3 Feb 2017 12:03:10 +0000 (UTC)
From: Konstantin Belousov <kib@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
        svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject: svn commit: r313148 - in stable/11/sys: amd64/amd64 amd64/include
        i386/i386 i386/include
Message-ID: <201702031203.v13C3Aki058840@repo.freebsd.org>
Author: kib
Date: Fri Feb  3 12:03:10 2017
New Revision: 313148

URL: https://svnweb.freebsd.org/changeset/base/313148

Log:
  MFC r312555:
  Use SFENCE for ordering CLFLUSHOPT.

Modified:
  stable/11/sys/amd64/amd64/pmap.c
  stable/11/sys/amd64/include/cpufunc.h
  stable/11/sys/i386/i386/pmap.c
  stable/11/sys/i386/include/cpufunc.h
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/11/sys/amd64/amd64/pmap.c	Fri Feb  3 12:01:28 2017	(r313147)
+++ stable/11/sys/amd64/amd64/pmap.c	Fri Feb  3 12:03:10 2017	(r313148)
@@ -1863,16 +1863,16 @@ pmap_invalidate_cache_range(vm_offset_t
 			return;
 
 		/*
-		 * Otherwise, do per-cache line flush.  Use the mfence
+		 * Otherwise, do per-cache line flush.  Use the sfence
 		 * instruction to insure that previous stores are
 		 * included in the write-back.  The processor
 		 * propagates flush to other processors in the cache
 		 * coherence domain.
 		 */
-		mfence();
+		sfence();
 		for (; sva < eva; sva += cpu_clflush_line_size)
 			clflushopt(sva);
-		mfence();
+		sfence();
 	} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 		if (pmap_kextract(sva) == lapic_paddr)
@@ -1916,7 +1916,9 @@ pmap_invalidate_cache_pages(vm_page_t *p
 	    ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
 		pmap_invalidate_cache();
 	else {
-		if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+		if (useclflushopt)
+			sfence();
+		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		for (i = 0; i < count; i++) {
 			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
@@ -1928,7 +1930,9 @@ pmap_invalidate_cache_pages(vm_page_t *p
 				clflush(daddr);
 			}
 		}
-		if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+		if (useclflushopt)
+			sfence();
+		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 	}
 }

Modified: stable/11/sys/amd64/include/cpufunc.h
==============================================================================
--- stable/11/sys/amd64/include/cpufunc.h	Fri Feb  3 12:01:28 2017	(r313147)
+++ stable/11/sys/amd64/include/cpufunc.h	Fri Feb  3 12:03:10 2017	(r313148)
@@ -327,6 +327,13 @@ mfence(void)
 }
 
 static __inline void
+sfence(void)
+{
+
+	__asm __volatile("sfence" : : : "memory");
+}
+
+static __inline void
 ia32_pause(void)
 {
 	__asm __volatile("pause");

Modified: stable/11/sys/i386/i386/pmap.c
==============================================================================
--- stable/11/sys/i386/i386/pmap.c	Fri Feb  3 12:01:28 2017	(r313147)
+++ stable/11/sys/i386/i386/pmap.c	Fri Feb  3 12:03:10 2017	(r313148)
@@ -1284,16 +1284,16 @@ pmap_invalidate_cache_range(vm_offset_t
 			return;
 #endif
 		/*
-		 * Otherwise, do per-cache line flush.  Use the mfence
+		 * Otherwise, do per-cache line flush.  Use the sfence
 		 * instruction to insure that previous stores are
 		 * included in the write-back.  The processor
 		 * propagates flush to other processors in the cache
 		 * coherence domain.
 		 */
-		mfence();
+		sfence();
 		for (; sva < eva; sva += cpu_clflush_line_size)
 			clflushopt(sva);
-		mfence();
+		sfence();
 	} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 #ifdef DEV_APIC
@@ -5348,12 +5348,14 @@ pmap_flush_page(vm_page_t m)
 		eva = sva + PAGE_SIZE;
 
 		/*
-		 * Use mfence despite the ordering implied by
+		 * Use mfence or sfence despite the ordering implied by
 		 * mtx_{un,}lock() because clflush on non-Intel CPUs
 		 * and clflushopt are not guaranteed to be ordered by
 		 * any other instruction.
 		 */
-		if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+		if (useclflushopt)
+			sfence();
+		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		for (; sva < eva; sva += cpu_clflush_line_size) {
 			if (useclflushopt)
@@ -5361,7 +5363,9 @@ pmap_flush_page(vm_page_t m)
 			else
 				clflush(sva);
 		}
-		if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+		if (useclflushopt)
+			sfence();
+		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		*cmap_pte2 = 0;
 		sched_unpin();

Modified: stable/11/sys/i386/include/cpufunc.h
==============================================================================
--- stable/11/sys/i386/include/cpufunc.h	Fri Feb  3 12:01:28 2017	(r313147)
+++ stable/11/sys/i386/include/cpufunc.h	Fri Feb  3 12:03:10 2017	(r313148)
@@ -158,6 +158,13 @@ mfence(void)
 	__asm __volatile("mfence" : : : "memory");
 }
 
+static __inline void
+sfence(void)
+{
+
+	__asm __volatile("sfence" : : : "memory");
+}
+
 #ifdef _KERNEL
 
 #define	HAVE_INLINE_FFS