Date:      Fri, 15 May 2015 18:10:01 +0000 (UTC)
From:      Ian Lepore <ian@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r282984 - head/sys/arm/include
Message-ID:  <201505151810.t4FIA1PG089228@svn.freebsd.org>

Author: ian
Date: Fri May 15 18:10:00 2015
New Revision: 282984
URL: https://svnweb.freebsd.org/changeset/base/282984

Log:
  Add assertions that the addresses passed to the TLB maintenance routines
  are page-aligned.
  
  Perform cache writebacks and invalidations in the correct order (inner to
  outer or vice versa, depending on the operation), and add comments that
  explain the chosen ordering.
  
  Consistently use 'va' as the variable name for virtual addresses.
  
  Submitted by:	Michal Meloun <meloun@miracle.cz>
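
For context, the alignment invariant behind the new KASSERTs can be shown in
standalone C. In the sketch below, plain assert() stands in for KASSERT() and
a 4 KiB page size is assumed; it is an illustration of the check, not the
kernel code:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u			/* assumed 4 KiB ARM small page */
#define PAGE_MASK (PAGE_SIZE - 1)

/* Stand-in for the kernel's tlb_flush(); the real routine issues a CP15
 * TLBIMVA operation after this check. */
static void
tlb_flush_sketch(uintptr_t va)
{
	/*
	 * The new invariant: callers pass page-aligned addresses, which is
	 * why the old (sva & ~PAGE_MASK) masking could be dropped.
	 */
	assert((va & PAGE_MASK) == 0);
}

int
main(void)
{
	tlb_flush_sketch(0x2000);	/* aligned: passes the assertion */
	return (0);
}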

Modified:
  head/sys/arm/include/cpu-v6.h

Modified: head/sys/arm/include/cpu-v6.h
==============================================================================
--- head/sys/arm/include/cpu-v6.h	Fri May 15 18:07:58 2015	(r282983)
+++ head/sys/arm/include/cpu-v6.h	Fri May 15 18:10:00 2015	(r282984)
@@ -39,7 +39,6 @@
 #include "machine/cpuinfo.h"
 #include "machine/sysreg.h"
 
-
 #define CPU_ASID_KERNEL 0
 
 vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
@@ -281,24 +280,29 @@ tlb_flush_all_ng_local(void)
 
 /* Flush single TLB entry (even global). */
 static __inline void
-tlb_flush_local(vm_offset_t sva)
+tlb_flush_local(vm_offset_t va)
 {
 
+	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
+
 	dsb();
-	_CP15_TLBIMVA((sva & ~PAGE_MASK ) | CPU_ASID_KERNEL);
+	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
 	dsb();
 }
 
 /* Flush range of TLB entries (even global). */
 static __inline void
-tlb_flush_range_local(vm_offset_t sva, vm_size_t size)
+tlb_flush_range_local(vm_offset_t va, vm_size_t size)
 {
-	vm_offset_t va;
-	vm_offset_t eva = sva + size;
+	vm_offset_t eva = va + size;
+
+	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
+	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
+	    size));
 
 	dsb();
-	for (va = sva; va < eva; va += PAGE_SIZE)
-		_CP15_TLBIMVA((va & ~PAGE_MASK ) | CPU_ASID_KERNEL);
+	for (; va < eva; va += PAGE_SIZE)
+		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
 	dsb();
 }
 
@@ -324,22 +328,27 @@ tlb_flush_all_ng(void)
 }
 
 static __inline void
-tlb_flush(vm_offset_t sva)
+tlb_flush(vm_offset_t va)
 {
 
+	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
+
 	dsb();
-	_CP15_TLBIMVAAIS(sva);
+	_CP15_TLBIMVAAIS(va);
 	dsb();
 }
 
 static __inline void
-tlb_flush_range(vm_offset_t sva,  vm_size_t size)
+tlb_flush_range(vm_offset_t va,  vm_size_t size)
 {
-	vm_offset_t va;
-	vm_offset_t eva = sva + size;
+	vm_offset_t eva = va + size;
+
+	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
+	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
+	    size));
 
 	dsb();
-	for (va = sva; va < eva; va += PAGE_SIZE)
+	for (; va < eva; va += PAGE_SIZE)
 		_CP15_TLBIMVAAIS(va);
 	dsb();
 }
@@ -347,8 +356,8 @@ tlb_flush_range(vm_offset_t sva,  vm_siz
 
 #define tlb_flush_all() 		tlb_flush_all_local()
 #define tlb_flush_all_ng() 		tlb_flush_all_ng_local()
-#define tlb_flush(sva) 			tlb_flush_local(sva)
-#define tlb_flush_range(sva, size) 	tlb_flush_range_local(sva, size)
+#define tlb_flush(va) 			tlb_flush_local(va)
+#define tlb_flush_range(va, size) 	tlb_flush_range_local(va, size)
 
 #endif /* SMP */
 
@@ -358,13 +367,13 @@ tlb_flush_range(vm_offset_t sva,  vm_siz
 
 /*  Sync I and D caches to PoU */
 static __inline void
-icache_sync(vm_offset_t sva, vm_size_t size)
+icache_sync(vm_offset_t va, vm_size_t size)
 {
-	vm_offset_t va;
-	vm_offset_t eva = sva + size;
+	vm_offset_t eva = va + size;
 
 	dsb();
-	for (va = sva; va < eva; va += cpuinfo.dcache_line_size) {
+	va &= ~cpuinfo.dcache_line_mask;
+	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
 #if __ARM_ARCH >= 7 && defined SMP
 		_CP15_DCCMVAU(va);
 #else
@@ -409,13 +418,13 @@ bpb_inv_all(void)
 
 /* Write back D-cache to PoU */
 static __inline void
-dcache_wb_pou(vm_offset_t sva, vm_size_t size)
+dcache_wb_pou(vm_offset_t va, vm_size_t size)
 {
-	vm_offset_t va;
-	vm_offset_t eva = sva + size;
+	vm_offset_t eva = va + size;
 
 	dsb();
-	for (va = sva; va < eva; va += cpuinfo.dcache_line_size) {
+	va &= ~cpuinfo.dcache_line_mask;
+	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
 #if __ARM_ARCH >= 7 && defined SMP
 		_CP15_DCCMVAU(va);
 #else
@@ -425,40 +434,46 @@ dcache_wb_pou(vm_offset_t sva, vm_size_t
 	dsb();
 }
 
-/* Invalidate D-cache to PoC */
+/*
+ * Invalidate D-cache to PoC
+ *
+ * Caches are invalidated from outermost to innermost as fresh cachelines
+ * flow in this direction. In the given range, if there was no dirty
+ * cacheline in any cache before, no stale cacheline should remain in them
+ * after this operation finishes.
+ */
 static __inline void
-dcache_inv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 {
-	vm_offset_t va;
-	vm_offset_t eva = sva + size;
-
-	/* invalidate L1 first */
-	for (va = sva; va < eva; va += cpuinfo.dcache_line_size) {
-		_CP15_DCIMVAC(va);
-	}
-	dsb();
+	vm_offset_t eva = va + size;
 
-	/* then L2 */
- 	cpu_l2cache_inv_range(pa, size);
 	dsb();
+	/* invalidate L2 first */
+	cpu_l2cache_inv_range(pa, size);
 
-	/* then L1 again */
-	for (va = sva; va < eva; va += cpuinfo.dcache_line_size) {
+	/* then L1 */
+	va &= ~cpuinfo.dcache_line_mask;
+	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
 		_CP15_DCIMVAC(va);
 	}
 	dsb();
 }
 
-/* Write back D-cache to PoC */
+/*
+ * Write back D-cache to PoC
+ *
+ * Caches are written back from innermost to outermost as dirty cachelines
+ * flow in this direction. In the given range, no dirty cacheline should
+ * remain in any cache after this operation finishes.
+ */
 static __inline void
-dcache_wb_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 {
-	vm_offset_t va;
-	vm_offset_t eva = sva + size;
+	vm_offset_t eva = va + size;
 
 	dsb();
-
-	for (va = sva; va < eva; va += cpuinfo.dcache_line_size) {
+	va &= ~cpuinfo.dcache_line_mask;
+	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
 		_CP15_DCCMVAC(va);
 	}
 	dsb();
@@ -474,9 +489,9 @@ dcache_wbinv_poc(vm_offset_t sva, vm_pad
 	vm_offset_t eva = sva + size;
 
 	dsb();
-
 	/* write back L1 first */
-	for (va = sva; va < eva; va += cpuinfo.dcache_line_size) {
+	va = sva & ~cpuinfo.dcache_line_mask;
+	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
 		_CP15_DCCMVAC(va);
 	}
 	dsb();
@@ -485,7 +500,8 @@ dcache_wbinv_poc(vm_offset_t sva, vm_pad
 	cpu_l2cache_wbinv_range(pa, size);
 
 	/* then invalidate L1 */
-	for (va = sva; va < eva; va += cpuinfo.dcache_line_size) {
+	va = sva & ~cpuinfo.dcache_line_mask;
+	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
 		_CP15_DCIMVAC(va);
 	}
 	dsb();
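
The two mechanical patterns in the diff can also be illustrated outside the
kernel. The sketch below is plain C with print-only stand-ins for the CP15
and L2-controller primitives; the function names, the fixed 32-byte line
size, and the output format are assumptions for illustration, not kernel
interfaces. It rounds the start address down to a cache-line boundary the
way the new loops do, invalidates outermost to innermost, and writes back
innermost to outermost:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative line size; the kernel reads the real value from cpuinfo. */
#define DCACHE_LINE_SIZE 32u
#define DCACHE_LINE_MASK ((uintptr_t)DCACHE_LINE_SIZE - 1)

/* Print-only stand-ins for the real maintenance primitives. */
static void l1_inv_line(uintptr_t va) { printf("L1 inv %#lx\n", (unsigned long)va); }
static void l1_wb_line(uintptr_t va)  { printf("L1 wb  %#lx\n", (unsigned long)va); }
static void l2_inv_range(uintptr_t pa, size_t sz) { printf("L2 inv %#lx+%zu\n", (unsigned long)pa, sz); }
static void l2_wb_range(uintptr_t pa, size_t sz)  { printf("L2 wb  %#lx+%zu\n", (unsigned long)pa, sz); }

/*
 * Invalidate to PoC: outer level first, so a line the inner level might
 * refetch mid-operation has already been dropped from L2.
 */
static void
inv_poc_sketch(uintptr_t va, uintptr_t pa, size_t size)
{
	uintptr_t eva = va + size;

	l2_inv_range(pa, size);
	/* Round down so a partially covered first line is included. */
	for (va &= ~DCACHE_LINE_MASK; va < eva; va += DCACHE_LINE_SIZE)
		l1_inv_line(va);
}

/*
 * Write back to PoC: inner level first, so dirty data migrates toward
 * memory in the same direction evictions naturally flow.
 */
static void
wb_poc_sketch(uintptr_t va, uintptr_t pa, size_t size)
{
	uintptr_t eva = va + size;

	for (va &= ~DCACHE_LINE_MASK; va < eva; va += DCACHE_LINE_SIZE)
		l1_wb_line(va);
	l2_wb_range(pa, size);
}

int
main(void)
{
	/* An unaligned 100-byte range: the first line touched is 0x1000. */
	inv_poc_sketch(0x1005, 0x8005, 100);
	wb_poc_sketch(0x1005, 0x8005, 100);
	return (0);
}

dcache_wbinv_poc in the diff combines both directions: write back L1 first,
then write back and invalidate L2, then invalidate L1, so the write-back
half runs inner to outer and the invalidate half runs outer to inner.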


