From owner-svn-src-head@FreeBSD.ORG Tue Jan 20 11:10:26 2015
Message-Id: <201501201110.t0KBAPaY047005@svn.freebsd.org>
From: Ruslan Bukin <br@FreeBSD.org>
Date: Tue, 20 Jan 2015 11:10:25 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r277414 - in head/sys/mips: include mips
X-SVN-Group: head
List-Id: SVN commit messages for the src tree for head/-current

Author: br
Date: Tue Jan 20 11:10:25 2015
New Revision: 277414
URL: https://svnweb.freebsd.org/changeset/base/277414

Log:
  Add 128-byte cache flushing routines. Leave CNMIPS untouched, as these
  functions depend on the config2 register.
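The new routines align each request to whole 128-byte lines with the
round_line128()/trunc_line128() helpers, which are not part of the hunks
quoted below. A minimal sketch, assuming they follow the same pattern as the
existing 16- and 32-byte helpers in cache_mipsNN.c:

  /* Sketch only -- assumed to mirror the round_line16/32, trunc_line16/32 macros. */
  #define	round_line128(x)	(((x) + 127) & ~127)	/* round up to a line boundary */
  #define	trunc_line128(x)	((x) & ~127)		/* round down to a line boundary */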
Modified:
  head/sys/mips/include/cache_mipsNN.h
  head/sys/mips/mips/cache.c
  head/sys/mips/mips/cache_mipsNN.c

Modified: head/sys/mips/include/cache_mipsNN.h
==============================================================================
--- head/sys/mips/include/cache_mipsNN.h	Tue Jan 20 09:07:28 2015	(r277413)
+++ head/sys/mips/include/cache_mipsNN.h	Tue Jan 20 11:10:25 2015	(r277414)
@@ -57,7 +57,6 @@ void mipsNN_pdcache_inv_range_16(vm_offs
 void	mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
-#ifdef CPU_CNMIPS
 void	mipsNN_icache_sync_all_128(void);
 void	mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
@@ -66,7 +65,6 @@ void mipsNN_pdcache_wbinv_range_128(vm_o
 void	mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);
-#endif
 void	mipsNN_sdcache_wbinv_all_32(void);
 void	mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);

Modified: head/sys/mips/mips/cache.c
==============================================================================
--- head/sys/mips/mips/cache.c	Tue Jan 20 09:07:28 2015	(r277413)
+++ head/sys/mips/mips/cache.c	Tue Jan 20 11:10:25 2015	(r277414)
@@ -104,7 +104,6 @@ mips_config_cache(struct mips_cpuinfo *
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_32;
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_128;
 		mips_cache_ops.mco_icache_sync_range =
@@ -112,7 +111,6 @@ mips_config_cache(struct mips_cpuinfo *
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_128;
 		break;
-#endif
 
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
@@ -172,7 +170,6 @@ mips_config_cache(struct mips_cpuinfo *
 		    mipsNN_pdcache_wb_range_32;
 #endif
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_pdcache_wbinv_all =
 		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
@@ -188,7 +185,6 @@ mips_config_cache(struct mips_cpuinfo *
 		    mips_cache_ops.mco_intern_pdcache_wb_range =
 		    mipsNN_pdcache_wb_range_128;
 		break;
-#endif
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
 		mips_cache_ops.mco_pdcache_wbinv_all =

Modified: head/sys/mips/mips/cache_mipsNN.c
==============================================================================
--- head/sys/mips/mips/cache_mipsNN.c	Tue Jan 20 09:07:28 2015	(r277413)
+++ head/sys/mips/mips/cache_mipsNN.c	Tue Jan 20 11:10:25 2015	(r277414)
@@ -647,6 +647,225 @@ mipsNN_pdcache_wb_range_128(vm_offset_t
 	SYNC;
 }
 
+#else
+
+void
+mipsNN_icache_sync_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + picache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	mips_intern_dcache_wbinv_all();
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	mips_intern_dcache_wb_range(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = picache_stride;
+	loopcount = picache_loopcount;
+
+	mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_wbinv_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + pdcache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+
+void
+mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = pdcache_stride;
+	loopcount = pdcache_loopcount;
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += 128;
+	}
+
+	SYNC;
+}
+
 #endif
 
 void
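Nothing is expected to call the mipsNN_*_128 functions directly:
mips_config_cache() installs them in mips_cache_ops when the probed L1 line
size is 128 bytes, and consumers reach them through the machine-independent
wrapper macros. A usage sketch, assuming the standard mips_icache_sync_range()
wrapper from <machine/cache.h> and a hypothetical caller sync_new_code():

  /*
   * Hypothetical caller (assumes <sys/param.h> and <machine/cache.h>):
   * after copying instructions into a buffer, synchronize the caches for
   * that range.  The 128-byte icache routine above first writes the
   * D-cache back (mips_intern_dcache_wb_range) and then invalidates the
   * affected I-cache lines, so one wrapper call is enough.
   */
  static void
  sync_new_code(vm_offset_t va, vm_size_t len)
  {
  	mips_icache_sync_range(va, len);
  }

The (32 * 128) constants in the range routines reflect the same design as the
16- and 32-byte variants: lines are flushed in 32-line chunks via
cache_r4k_op_32lines_128() (presumably an unrolled block of CACHE
instructions, as with the other line sizes), and only the tail falls back to
single-line cache_op_r4k_line() operations, keeping loop overhead off the hot
flush paths.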