From owner-svn-src-all@FreeBSD.ORG Sun Dec 28 18:19:07 2014
Message-Id: <201412281819.sBSIJ6lJ094382@svn.freebsd.org>
From: Ian Lepore <ian@FreeBSD.org>
Date: Sun, 28 Dec 2014 18:19:06 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-head@freebsd.org
Subject: svn commit: r276334 - head/sys/arm/include

Author: ian
Date: Sun Dec 28 18:19:05 2014
New Revision: 276334
URL: https://svnweb.freebsd.org/changeset/base/276334

Log:
  Add new TLB and cache maintenance functions for armv6 and armv7.  These
  are inline functions that handle all the routine maintenance operations
  except the flush-all and invalidate-all routines, which are required
  only during early kernel init.

  These inline functions should be much faster than the old mechanism,
  which involved jumping through the big cpufuncs table, especially for
  common operations such as invalidating a single TLB entry.

  Note that nothing calls these yet; this is just required infrastructure
  for upcoming changes to the pmap-v6 code.

Modified:
  head/sys/arm/include/cpu-v6.h

Modified: head/sys/arm/include/cpu-v6.h
==============================================================================
--- head/sys/arm/include/cpu-v6.h	Sun Dec 28 18:12:56 2014	(r276333)
+++ head/sys/arm/include/cpu-v6.h	Sun Dec 28 18:19:05 2014	(r276334)
@@ -155,4 +155,242 @@ _RF0(cp15_cbar_get, CP15_CBAR(%0))
 #undef _WF0
 #undef _WF1
 
+/*
+ * TLB maintenance operations.
+ */
+
+/* Local (i.e., not broadcasting) operations. */
+
+/* Flush all TLB entries (even global). */
+static __inline void
+tlb_flush_all_local(void)
+{
+
+	dsb();
+	_CP15_TLBIALL();
+	dsb();
+}
+
+/* Flush all non-global TLB entries. */
+static __inline void
+tlb_flush_all_ng_local(void)
+{
+
+	dsb();
+	_CP15_TLBIASID(CPU_ASID_KERNEL);
+	dsb();
+}
+
+/* Flush a single TLB entry (even global). */
+static __inline void
+tlb_flush_local(vm_offset_t sva)
+{
+
+	dsb();
+	_CP15_TLBIMVA((sva & ~PAGE_MASK) | CPU_ASID_KERNEL);
+	dsb();
+}
+
+/* Flush a range of TLB entries (even global). */
+static __inline void
+tlb_flush_range_local(vm_offset_t sva, vm_size_t size)
+{
+	vm_offset_t va;
+	vm_offset_t eva = sva + size;
+
+	dsb();
+	for (va = sva; va < eva; va += PAGE_SIZE)
+		_CP15_TLBIMVA((va & ~PAGE_MASK) | CPU_ASID_KERNEL);
+	dsb();
+}
+
+/* Broadcasting operations. */
+#ifndef SMP
+
+#define	tlb_flush_all()			tlb_flush_all_local()
+#define	tlb_flush_all_ng()		tlb_flush_all_ng_local()
+#define	tlb_flush(sva)			tlb_flush_local(sva)
+#define	tlb_flush_range(sva, size)	tlb_flush_range_local(sva, size)
+
+#else /* SMP */
+
+static __inline void
+tlb_flush_all(void)
+{
+
+	dsb();
+	_CP15_TLBIALLIS();
+	dsb();
+}
+
+static __inline void
+tlb_flush_all_ng(void)
+{
+
+	dsb();
+	_CP15_TLBIASIDIS(CPU_ASID_KERNEL);
+	dsb();
+}
+
+static __inline void
+tlb_flush(vm_offset_t sva)
+{
+
+	dsb();
+	_CP15_TLBIMVAAIS(sva);
+	dsb();
+}
+
+static __inline void
+tlb_flush_range(vm_offset_t sva, vm_size_t size)
+{
+	vm_offset_t va;
+	vm_offset_t eva = sva + size;
+
+	dsb();
+	for (va = sva; va < eva; va += PAGE_SIZE)
+		_CP15_TLBIMVAAIS(va);
+	dsb();
+}
+#endif /* SMP */
+
+/*
+ * Cache maintenance operations.
+ */
+
+/* Sync I and D caches to PoU */
+static __inline void
+icache_sync(vm_offset_t sva, vm_size_t size)
+{
+	vm_offset_t va;
+	vm_offset_t eva = sva + size;
+
+	dsb();
+	for (va = sva; va < eva; va += arm_dcache_align) {
+#ifdef SMP
+		_CP15_DCCMVAU(va);
+#else
+		_CP15_DCCMVAC(va);
+#endif
+	}
+	dsb();
+#ifdef SMP
+	_CP15_ICIALLUIS();
+#else
+	_CP15_ICIALLU();
+#endif
+	dsb();
+	isb();
+}
+
+/* Invalidate I cache */
+static __inline void
+icache_inv_all(void)
+{
+#ifdef SMP
+	_CP15_ICIALLUIS();
+#else
+	_CP15_ICIALLU();
+#endif
+	dsb();
+	isb();
+}
+
+/* Write back D-cache to PoU */
+static __inline void
+dcache_wb_pou(vm_offset_t sva, vm_size_t size)
+{
+	vm_offset_t va;
+	vm_offset_t eva = sva + size;
+
+	dsb();
+	for (va = sva; va < eva; va += arm_dcache_align) {
+#ifdef SMP
+		_CP15_DCCMVAU(va);
+#else
+		_CP15_DCCMVAC(va);
+#endif
+	}
+	dsb();
+}
+
+/* Invalidate D-cache to PoC */
+static __inline void
+dcache_inv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+{
+	vm_offset_t va;
+	vm_offset_t eva = sva + size;
+
+	/* invalidate L1 first */
+	for (va = sva; va < eva; va += arm_dcache_align) {
+		_CP15_DCIMVAC(va);
+	}
+	dsb();
+
+	/* then L2 */
+	cpu_l2cache_inv_range(pa, size);
+	dsb();
+
+	/* then L1 again */
+	for (va = sva; va < eva; va += arm_dcache_align) {
+		_CP15_DCIMVAC(va);
+	}
+	dsb();
+}
+
+/* Write back D-cache to PoC */
+static __inline void
+dcache_wb_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+{
+	vm_offset_t va;
+	vm_offset_t eva = sva + size;
+
+	dsb();
+
+	for (va = sva; va < eva; va += arm_dcache_align) {
+		_CP15_DCCMVAC(va);
+	}
+	dsb();
+
+	cpu_l2cache_wb_range(pa, size);
+}
+
+/* Write back and invalidate D-cache to PoC */
+static __inline void
+dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+{
+	vm_offset_t va;
+	vm_offset_t eva = sva + size;
+
+	dsb();
+
+	/* write back L1 first */
+	for (va = sva; va < eva; va += arm_dcache_align) {
+		_CP15_DCCMVAC(va);
+	}
+	dsb();
+
+	/* then write back and invalidate L2 */
+	cpu_l2cache_wbinv_range(pa, size);
+
+	/* then invalidate L1 */
+	for (va = sva; va < eva; va += arm_dcache_align) {
+		_CP15_DCIMVAC(va);
+	}
+	dsb();
+}
+
+/* Set TTB0 register */
+static __inline void
+cp15_ttbr_set(uint32_t reg)
+{
+	dsb();
+	_CP15_TTB_SET(reg);
+	dsb();
+	_CP15_BPIALL();
+	dsb();
+	isb();
+	tlb_flush_all_ng_local();
+}
+
 #endif /* !MACHINE_CPU_V6_H */
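
As a rough illustration of the intended use (hypothetical code, not part
of this commit -- nothing in the tree calls the new inlines yet), a
pmap-style caller could pair a page-table update with a single-entry or
ranged TLB flush.  The PTE handling below is invented for the sketch;
only tlb_flush() and tlb_flush_range() come from the new cpu-v6.h:

	/*
	 * Hypothetical callers, for illustration only.  pt_entry_t and the
	 * page-table layout come from the usual arm pmap headers.
	 */
	#include <machine/cpu-v6.h>

	static void
	example_kremove(pt_entry_t *pte, vm_offset_t va)
	{

		*pte = 0;		/* clear the mapping... */
		tlb_flush(va);		/* ...then drop just this TLB entry */
	}

	static void
	example_kremove_range(pt_entry_t *ptep, vm_offset_t sva, vm_size_t size)
	{
		vm_size_t off;

		for (off = 0; off < size; off += PAGE_SIZE)
			ptep[off / PAGE_SIZE] = 0;	/* clear each mapping */
		tlb_flush_range(sva, size);	/* one ranged invalidate */
	}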
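
Similarly, the PoC cache routines are the sort of thing a DMA path would
use.  Again hypothetical: buf, buf_pa, and len are assumed to describe a
physically contiguous DMA buffer, but the write-back-before-device-reads
and invalidate-before-CPU-reads pattern is the standard one:

	/* Device is about to read the buffer: push CPU writes to memory. */
	dcache_wb_poc((vm_offset_t)buf, buf_pa, len);

	/* ... device DMA happens here ... */

	/* Device wrote the buffer: discard stale CPU-cached lines before
	   the CPU reads the new data. */
	dcache_inv_poc((vm_offset_t)buf, buf_pa, len);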