Date:      Wed, 18 Sep 2019 14:27:09 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject:   svn commit: r352484 - stable/12/sys/arm64/arm64
Message-ID:  <201909181427.x8IER9L7054404@repo.freebsd.org>

Author: alc
Date: Wed Sep 18 14:27:09 2019
New Revision: 352484
URL: https://svnweb.freebsd.org/changeset/base/352484

Log:
  MFC r350546
    Because of AArch64's weak memory consistency model, we need to include a
    memory barrier between the stores for initializing a page table page and
    the store for adding that page to the page table.  Otherwise, a page table
    walk by another processor's MMU could see the page table page before it
    sees the initialized entries.
  
    Simplify pmap_growkernel().  In particular, eliminate an unnecessary TLB
    invalidation.
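
    The pattern being enforced is, in outline, the following.  This is an
    illustrative sketch with hypothetical names (publish_table_page,
    table_slot, Ln_ENTRIES), not the kernel's actual code:

#include <stdint.h>

/* Store-store barrier within the inner shareable domain. */
#define	dmb_ishst()	__asm __volatile("dmb ishst" ::: "memory")

#define	Ln_ENTRIES	512		/* PTEs per 4KB page table page */

/*
 * Zero a new page table page, then link it into its parent table.
 * Without the barrier, another CPU's table walker could observe the
 * store to *table_slot before the zeroing stores and walk through
 * stale, non-zero entries in the new page.
 */
void
publish_table_page(uint64_t *new_table, volatile uint64_t *table_slot,
    uint64_t pa, uint64_t attrs)
{
	int i;

	for (i = 0; i < Ln_ENTRIES; i++)	/* initializing stores */
		new_table[i] = 0;
	dmb_ishst();			/* order initialization before publish */
	*table_slot = pa | attrs;	/* publishing store */
}

    The pmap_growkernel() simplification can drop the TLB invalidation
    because the L2 entry being written was previously invalid, and AArch64
    table walkers do not cache invalid entries, so the invalid-to-valid
    transition requires no break-before-make sequence or invalidation.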

Modified:
  stable/12/sys/arm64/arm64/pmap.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/arm64/arm64/pmap.c
==============================================================================
--- stable/12/sys/arm64/arm64/pmap.c	Wed Sep 18 08:02:03 2019	(r352483)
+++ stable/12/sys/arm64/arm64/pmap.c	Wed Sep 18 14:27:09 2019	(r352484)
@@ -1525,6 +1525,16 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, str
 		pmap_zero_page(m);
 
 	/*
+	 * Because of AArch64's weak memory consistency model, we must have a
+	 * barrier here to ensure that the stores for zeroing "m", whether by
+	 * pmap_zero_page() or an earlier function, are visible before adding
+	 * "m" to the page table.  Otherwise, a page table walk by another
+	 * processor's MMU could see the mapping to "m" and a stale, non-zero
+	 * PTE within "m".
+	 */
+	dmb(ishst);
+
+	/*
 	 * Map the pagetable page into the process address space, if
 	 * it isn't already there.
 	 */
@@ -1775,12 +1785,14 @@ pmap_growkernel(vm_offset_t addr)
 				panic("pmap_growkernel: no memory to grow kernel");
 			if ((nkpg->flags & PG_ZERO) == 0)
 				pmap_zero_page(nkpg);
+			/* See the dmb() in _pmap_alloc_l3(). */
+			dmb(ishst);
 			paddr = VM_PAGE_TO_PHYS(nkpg);
 			pmap_store(l1, paddr | L1_TABLE);
 			continue; /* try again */
 		}
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
-		if ((pmap_load(l2) & ATTR_AF) != 0) {
+		if (pmap_load(l2) != 0) {
 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
 				kernel_vm_end = vm_map_max(kernel_map);
@@ -1796,9 +1808,10 @@ pmap_growkernel(vm_offset_t addr)
 			panic("pmap_growkernel: no memory to grow kernel");
 		if ((nkpg->flags & PG_ZERO) == 0)
 			pmap_zero_page(nkpg);
+		/* See the dmb() in _pmap_alloc_l3(). */
+		dmb(ishst);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
-		pmap_load_store(l2, paddr | L2_TABLE);
-		pmap_invalidate_page(kernel_pmap, kernel_vm_end);
+		pmap_store(l2, paddr | L2_TABLE);
 
 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
@@ -5537,6 +5550,10 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_
 	/*
 	 * If the page table page is not leftover from an earlier promotion,
 	 * or the mapping attributes have changed, (re)initialize the L3 table.
+	 *
+	 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
+	 * performs a dsb().  That dsb() ensures that the stores for filling
+	 * "l3" are visible before "l3" is added to the page table.
 	 */
 	if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
 		pmap_fill_l3(l3, newl3);
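
    For the demotion path above, no new dmb() is needed because the
    break-before-make sequence in pmap_update_entry() already issues a
    dsb(), which is stronger.  In outline (again a sketch with hypothetical
    names, not the kernel's actual code):

#include <stdint.h>

#define	dsb_ishst()	__asm __volatile("dsb ishst" ::: "memory")
#define	L2_TABLE	0x3		/* AArch64 table descriptor bits */
#define	L3_ENTRIES	512
#define	L3_SIZE		4096

/* Stand-in for pmap_fill_l3(): 512 consecutive 4KB mappings. */
static void
fill_l3(uint64_t *l3, uint64_t newl3)
{
	int i;

	for (i = 0; i < L3_ENTRIES; i++)
		l3[i] = newl3 + (uint64_t)i * L3_SIZE;
}

void
demote_l2(volatile uint64_t *l2_slot, uint64_t *l3_page, uint64_t l3_phys,
    uint64_t newl3)
{
	fill_l3(l3_page, newl3);	/* stores filling the new L3 page */
	*l2_slot = 0;			/* break: clear the old L2 mapping */
	dsb_ishst();			/* orders all prior stores, incl. fill */
	/* ... a real implementation invalidates the TLB here ... */
	*l2_slot = l3_phys | L2_TABLE;	/* make: install the L3 table */
}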


