Date:      Sun, 21 Jul 2019 03:26:26 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r350191 - head/sys/arm64/arm64
Message-ID:  <201907210326.x6L3QQ4H041474@repo.freebsd.org>

Author: alc
Date: Sun Jul 21 03:26:26 2019
New Revision: 350191
URL: https://svnweb.freebsd.org/changeset/base/350191

Log:
  Introduce pmap_store(), and use it to replace pmap_load_store() in places
  where the page table entry was previously invalid.  (Note that I did not
  replace pmap_load_store() when it was followed by a TLB invalidation, even
  if we are not using the return value from pmap_load_store().)
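
  The difference matters because atomic_swap_64() returns the previous
  entry, which is only meaningful when a valid mapping may be replaced;
  a plain atomic_store_64() suffices when the entry is known to be
  invalid.  A minimal sketch of the pattern this commit applies (the
  helper function below is hypothetical; the accessors are the ones
  defined in pmap.c, with pmap_store() added by this revision):

	static void
	example_install_pte(pt_entry_t *l3, pt_entry_t new_l3)
	{
		/*
		 * The entry is known to be invalid, so no previous
		 * mapping can be lost by a plain store and no TLB
		 * invalidation is required.
		 */
		KASSERT(pmap_load(l3) == 0, ("entry is already valid"));
		pmap_store(l3, new_l3);
		dsb(ishst);	/* make the store visible to the table walker */
	}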
  
  Correct an error in pmap_enter().  A test for determining when to set
  PGA_WRITEABLE was always true, even if the mapping was read-only.
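
  The old test could never fail because, with the usual arm64 pte.h
  definitions (ATTR_AP(x) being ((x) << 6) and ATTR_AP_RW being
  ATTR_AP(0)), the mask ATTR_AP(ATTR_AP_RW) evaluates to zero, and
  (new_l3 & 0) == 0 holds for every entry.  The corrected test, taken
  from the diff below, keys on ATTR_SW_DBM instead, which the r350004
  dirty-bit scheme sets only for writable mappings:

	if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
	    (new_l3 & ATTR_SW_DBM) != 0)
		vm_page_aflag_set(m, PGA_WRITEABLE);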
  
  In pmap_enter_l2(), when replacing an empty kernel page table page by a
  superpage mapping, clear the old l2 entry and issue a TLB invalidation.  My
  reading of the ARM architecture manual leads me to believe that the TLB
  could hold an intermediate entry referencing the empty kernel page table
  page even though it contains no valid mappings.
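
  Condensed sketch of the resulting kernel-address path in
  pmap_enter_l2(), assembled from the diff below with the intervening
  code elided:

	mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
	if (pmap_insert_pt_page(pmap, mt, false))	/* retain the empty PTP */
		panic("pmap_enter_l2: trie insert failed");
	pmap_clear(l2);			/* unlink the empty page table page */
	pmap_invalidate_page(pmap, va);	/* and flush any intermediate TLB entry */

	/* ... intervening code elided ... */

	pmap_store(l2, new_l2);		/* map the superpage */
	dsb(ishst);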
  
  Replace a couple of direct uses of atomic_clear_64() with the new
  pmap_clear_bits().
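
  pmap_clear_bits() is not defined in this diff; presumably, like the
  neighboring accessors, it is a thin wrapper around the atomic
  primitive, roughly:

	#define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)

  so the change keeps every page table entry update behind the
  pmap_*() accessor macros rather than altering behavior.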
  
  In a couple of comments, replace the term "paging-structure caches", which is
  an Intel-specific term for the caches that hold intermediate entries in the
  page table, with wording that is more consistent with the ARM architecture
  manual.
  
  Reviewed by:	markj
  X-MFC after:	r350004
  Differential Revision:	https://reviews.freebsd.org/D20998

Modified:
  head/sys/arm64/arm64/pmap.c

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c	Sun Jul 21 03:19:54 2019	(r350190)
+++ head/sys/arm64/arm64/pmap.c	Sun Jul 21 03:26:26 2019	(r350191)
@@ -328,6 +328,7 @@ static __inline vm_page_t pmap_remove_pt_page(pmap_t p
 #define	pmap_load_clear(table)		atomic_swap_64(table, 0)
 #define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
 #define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
+#define	pmap_store(table, entry)	atomic_store_64(table, entry)
 
 /********************/
 /* Inline functions */
@@ -637,7 +638,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t mi
 				    (vm_offset_t)l2);
 				freemempos += PAGE_SIZE;
 
-				pmap_load_store(&pagetable_dmap[l1_slot],
+				pmap_store(&pagetable_dmap[l1_slot],
 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
 
 				memset(l2, 0, PAGE_SIZE);
@@ -655,7 +656,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t mi
 
 				l2_slot = pmap_l2_index(va);
 				KASSERT(l2_slot != 0, ("..."));
-				pmap_load_store(&l2[l2_slot],
+				pmap_store(&l2[l2_slot],
 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 				    ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
 			}
@@ -667,7 +668,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t mi
 		    (physmap[i + 1] - pa) >= L1_SIZE;
 		    pa += L1_SIZE, va += L1_SIZE) {
 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
-			pmap_load_store(&pagetable_dmap[l1_slot],
+			pmap_store(&pagetable_dmap[l1_slot],
 			    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 			    ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
 		}
@@ -682,7 +683,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t mi
 				    (vm_offset_t)l2);
 				freemempos += PAGE_SIZE;
 
-				pmap_load_store(&pagetable_dmap[l1_slot],
+				pmap_store(&pagetable_dmap[l1_slot],
 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
 
 				memset(l2, 0, PAGE_SIZE);
@@ -692,7 +693,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t mi
 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
 			    pa += L2_SIZE, va += L2_SIZE) {
 				l2_slot = pmap_l2_index(va);
-				pmap_load_store(&l2[l2_slot],
+				pmap_store(&l2[l2_slot],
 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 				    ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
 			}
@@ -727,7 +728,7 @@ pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm
 		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
 
 		pa = pmap_early_vtophys(l1pt, l2pt);
-		pmap_load_store(&l1[l1_slot],
+		pmap_store(&l1[l1_slot],
 		    (pa & ~Ln_TABLE_MASK) | L1_TABLE);
 		l2pt += PAGE_SIZE;
 	}
@@ -757,7 +758,7 @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm
 		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
 
 		pa = pmap_early_vtophys(l1pt, l3pt);
-		pmap_load_store(&l2[l2_slot],
+		pmap_store(&l2[l2_slot],
 		    (pa & ~Ln_TABLE_MASK) | L2_TABLE);
 		l3pt += PAGE_SIZE;
 	}
@@ -1540,7 +1541,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, str
 
 		l0index = ptepindex - (NUL2E + NUL1E);
 		l0 = &pmap->pm_l0[l0index];
-		pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
+		pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
 	} else if (ptepindex >= NUL2E) {
 		vm_pindex_t l0index, l1index;
 		pd_entry_t *l0, *l1;
@@ -1566,7 +1567,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, str
 
 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
-		pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
+		pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
 	} else {
 		vm_pindex_t l0index, l1index;
 		pd_entry_t *l0, *l1, *l2;
@@ -1608,7 +1609,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, str
 
 		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
-		pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+		pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
 	}
 
 	pmap_resident_count_inc(pmap, 1);
@@ -1781,7 +1782,7 @@ pmap_growkernel(vm_offset_t addr)
 			if ((nkpg->flags & PG_ZERO) == 0)
 				pmap_zero_page(nkpg);
 			paddr = VM_PAGE_TO_PHYS(nkpg);
-			pmap_load_store(l1, paddr | L1_TABLE);
+			pmap_store(l1, paddr | L1_TABLE);
 			continue; /* try again */
 		}
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
@@ -3018,7 +3019,7 @@ pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_ent
 	pmap_invalidate_range_nopin(pmap, va, va + size);
 
 	/* Create the new mapping */
-	pmap_load_store(pte, newpte);
+	pmap_store(pte, newpte);
 	dsb(ishst);
 
 	critical_exit();
@@ -3304,12 +3305,9 @@ havel3:
 			/*
 			 * No, might be a protection or wiring change.
 			 */
-			if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
-				if ((new_l3 & ATTR_AP(ATTR_AP_RW)) ==
-				    ATTR_AP(ATTR_AP_RW)) {
-					vm_page_aflag_set(m, PGA_WRITEABLE);
-				}
-			}
+			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
+			    (new_l3 & ATTR_SW_DBM) != 0)
+				vm_page_aflag_set(m, PGA_WRITEABLE);
 			goto validate;
 		}
 
@@ -3415,7 +3413,7 @@ validate:
 		}
 	} else {
 		/* New mapping */
-		pmap_load_store(l3, new_l3);
+		pmap_store(l3, new_l3);
 		dsb(ishst);
 	}
 
@@ -3517,12 +3515,16 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t 
 		vm_page_free_pages_toq(&free, true);
 		if (va >= VM_MAXUSER_ADDRESS) {
 			/*
-			 * Both pmap_remove_l2() and pmap_remove_l3() will
-			 * leave the kernel page table page zero filled.
+			 * Both pmap_remove_l2() and pmap_remove_l3_range()
+			 * will leave the kernel page table page zero filled.
+			 * Nonetheless, the TLB could have an intermediate
+			 * entry for the kernel page table page.
 			 */
 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
 			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_l2: trie insert failed");
+			pmap_clear(l2);
+			pmap_invalidate_page(pmap, va);
 		} else
 			KASSERT(pmap_load(l2) == 0,
 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
@@ -3536,10 +3538,13 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t 
 			SLIST_INIT(&free);
 			if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
 				/*
-				 * Although "va" is not mapped, paging-structure
-				 * caches could nonetheless have entries that
+				 * Although "va" is not mapped, the TLB could
+				 * nonetheless have intermediate entries that
 				 * refer to the freed page table pages.
 				 * Invalidate those entries.
+				 *
+				 * XXX redundant invalidation (See
+				 * _pmap_unwire_l3().)
 				 */
 				pmap_invalidate_page(pmap, va);
 				vm_page_free_pages_toq(&free, true);
@@ -3564,7 +3569,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t 
 	/*
 	 * Map the superpage.
 	 */
-	(void)pmap_load_store(l2, new_l2);
+	pmap_store(l2, new_l2);
 	dsb(ishst);
 
 	atomic_add_long(&pmap_l2_mappings, 1);
@@ -3767,7 +3772,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, v
 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
 		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
 
-	pmap_load_store(l3, l3_val);
+	pmap_store(l3, l3_val);
 	dsb(ishst);
 
 	return (mpte);
@@ -3840,7 +3845,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t 
 			 * demote the mapping and fall through.
 			 */
 			if (sva + L2_SIZE == va_next && eva >= va_next) {
-				atomic_clear_64(l2, ATTR_SW_WIRED);
+				pmap_clear_bits(l2, ATTR_SW_WIRED);
 				pmap->pm_stats.wired_count -= L2_SIZE /
 				    PAGE_SIZE;
 				continue;
@@ -3865,7 +3870,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t 
 			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
 			 * the System MMU may write to the entry concurrently.
 			 */
-			atomic_clear_64(l3, ATTR_SW_WIRED);
+			pmap_clear_bits(l3, ATTR_SW_WIRED);
 			pmap->pm_stats.wired_count--;
 		}
 	}
@@ -3944,8 +3949,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_
 				nbits = 0;
 				if ((srcptepaddr & ATTR_SW_DBM) != 0)
 					nbits |= ATTR_AP_RW_BIT;
-				(void)pmap_load_store(l2,
-				    (srcptepaddr & ~mask) | nbits);
+				pmap_store(l2, (srcptepaddr & ~mask) | nbits);
 				pmap_resident_count_inc(dst_pmap, L2_SIZE /
 				    PAGE_SIZE);
 				atomic_add_long(&pmap_l2_mappings, 1);
@@ -3994,8 +3998,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_
 				nbits = 0;
 				if ((ptetemp & ATTR_SW_DBM) != 0)
 					nbits |= ATTR_AP_RW_BIT;
-				(void)pmap_load_store(dst_pte,
-				    (ptetemp & ~mask) | nbits);
+				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
 				pmap_resident_count_inc(dst_pmap, 1);
 			} else {
 				SLIST_INIT(&free);
@@ -4003,8 +4006,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_
 				    &free)) {
 					/*
 					 * Although "addr" is not mapped,
-					 * paging-structure caches could
-					 * nonetheless have entries that refer
+					 * the TLB could nonetheless have
+					 * intermediate entries that refer
 					 * to the freed page table pages.
 					 * Invalidate those entries.
 					 *


