Date: Thu, 2 Jul 2015 15:17:31 +0000 (UTC)
From: Andrew Turner <andrew@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r285045 - head/sys/arm64/arm64
Message-ID: <201507021517.t62FHVMv003857@repo.freebsd.org>
Author: andrew
Date: Thu Jul 2 15:17:30 2015
New Revision: 285045
URL: https://svnweb.freebsd.org/changeset/base/285045

Log:
  Use pmap_load to load table entries. This simplifies finding places where
  we access the tables.

  Obtained from:	ABT Systems Ltd
  Sponsored by:	The FreeBSD Foundation

Modified:
  head/sys/arm64/arm64/pmap.c

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c	Thu Jul 2 15:02:59 2015	(r285044)
+++ head/sys/arm64/arm64/pmap.c	Thu Jul 2 15:17:30 2015	(r285045)
@@ -807,7 +807,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 	PMAP_LOCK(pmap);
 retry:
 	l3p = pmap_l3(pmap, va);
-	if (l3p != NULL && (l3 = *l3p) != 0) {
+	if (l3p != NULL && (l3 = pmap_load(l3p)) != 0) {
 		if (((l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
 		    ((prot & VM_PROT_WRITE) == 0)) {
 			if (vm_page_pa_tryrelock(pmap, l3 & ~ATTR_MASK, &pa))
@@ -1186,7 +1186,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t
 		l1index = ptepindex >> (L1_SHIFT - L2_SHIFT);
 		l1 = &pmap->pm_l1[l1index];
-		if (*l1 == 0) {
+		if (pmap_load(l1) == 0) {
 			/* recurse for allocating page dir */
 			if (_pmap_alloc_l3(pmap, NUPDE + l1index,
 			    lockp) == NULL) {
@@ -1233,8 +1233,8 @@ retry:
 	 * If the page table page is mapped, we just increment the
 	 * hold count, and activate it.
 	 */
-	if (l2 != NULL && *l2 != 0) {
-		m = PHYS_TO_VM_PAGE(*l2 & ~ATTR_MASK);
+	if (l2 != NULL && pmap_load(l2) != 0) {
+		m = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
 		m->wire_count++;
 	} else {
 		/*
@@ -1318,7 +1318,7 @@ pmap_growkernel(vm_offset_t addr)
 		addr = kernel_map->max_offset;
 	while (kernel_vm_end < addr) {
 		l1 = pmap_l1(kernel_pmap, kernel_vm_end);
-		if (*l1 == 0) {
+		if (pmap_load(l1) == 0) {
 			/* We need a new PDP entry */
 			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
 			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
@@ -1333,7 +1333,7 @@ pmap_growkernel(vm_offset_t addr)
 			continue; /* try again */
 		}
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
-		if ((*l2 & ATTR_AF) != 0) {
+		if ((pmap_load(l2) & ATTR_AF) != 0) {
 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
 			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 				kernel_vm_end = kernel_map->max_offset;
@@ -1682,7 +1682,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 			break;
 		l1 = pmap_l1(pmap, sva);
-		if (*l1 == 0) {
+		if (pmap_load(l1) == 0) {
 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
 			if (va_next < sva)
 				va_next = eva;
@@ -1721,7 +1721,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 		    sva += L3_SIZE) {
 			if (l3 == NULL)
 				panic("l3 == NULL");
-			if (*l3 == 0) {
+			if (pmap_load(l3) == 0) {
 				if (va != va_next) {
 					pmap_invalidate_range(pmap, va, sva);
 					va = va_next;
@@ -1833,7 +1833,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 	for (; sva < eva; sva = va_next) {
 		l1 = pmap_l1(pmap, sva);
-		if (*l1 == 0) {
+		if (pmap_load(l1) == 0) {
 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
 			if (va_next < sva)
 				va_next = eva;
@@ -2177,8 +2177,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 		 * attempt to allocate a page table page. If this
 		 * attempt fails, we don't retry. Instead, we give up.
 		 */
-		if (l2 != NULL && *l2 != 0) {
-			mpte = PHYS_TO_VM_PAGE(*l2 & ~ATTR_MASK);
+		if (l2 != NULL && pmap_load(l2) != 0) {
+			mpte =
+			    PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
 			mpte->wire_count++;
 		} else {
 			/*
@@ -2277,7 +2278,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
 		l1 = pmap_l1(pmap, sva);
-		if (*l1 == 0) {
+		if (pmap_load(l1) == 0) {
 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
 			if (va_next < sva)
 				va_next = eva;
@@ -2289,16 +2290,16 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva
 			va_next = eva;
 		l2 = pmap_l1_to_l2(l1, sva);
-		if (*l2 == 0)
+		if (pmap_load(l2) == 0)
 			continue;
 		if (va_next > eva)
 			va_next = eva;
 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
 		    sva += L3_SIZE) {
-			if (*l3 == 0)
+			if (pmap_load(l3) == 0)
 				continue;
-			if ((*l3 & ATTR_SW_WIRED) == 0)
+			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
 				panic("pmap_unwire: l3 %#jx is missing "
 				    "ATTR_SW_WIRED", (uintmax_t)*l3);
@@ -2496,7 +2497,7 @@ restart:
 			}
 		}
 		l3 = pmap_l3(pmap, pv->pv_va);
-		if (l3 != NULL && (*l3 & ATTR_SW_WIRED) != 0)
+		if (l3 != NULL && (pmap_load(l3) & ATTR_SW_WIRED) != 0)
 			count++;
 		PMAP_UNLOCK(pmap);
 	}
@@ -2714,7 +2715,7 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 	rv = FALSE;
 	PMAP_LOCK(pmap);
 	l3 = pmap_l3(pmap, addr);
-	if (l3 != NULL && *l3 != 0) {
+	if (l3 != NULL && pmap_load(l3) != 0) {
 		rv = TRUE;
 	}
 	PMAP_UNLOCK(pmap);
@@ -2858,7 +2859,7 @@ retry:
 		KASSERT((*l2 & ATTR_DESCR_MASK) == L2_TABLE,
 		    ("pmap_ts_referenced: found an invalid l2 table"));
 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
-		if ((*l3 & ATTR_AF) != 0) {
+		if ((pmap_load(l3) & ATTR_AF) != 0) {
 			if (safe_to_clear_referenced(pmap, *l3)) {
 				/*
 				 * TODO: We don't handle the access flag
@@ -2866,7 +2867,7 @@ retry:
 				 * the exception handler.
 				 */
 				panic("TODO: safe_to_clear_referenced\n");
-			} else if ((*l3 & ATTR_SW_WIRED) == 0) {
+			} else if ((pmap_load(l3) & ATTR_SW_WIRED) == 0) {
 				/*
 				 * Wired pages cannot be paged out so
 				 * doing accessed bit emulation for
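[Editorial note, not part of the commit mail.] The change is purely mechanical: every bare
dereference of a page-table or page-directory entry pointer (*l1, *l2, *l3) is routed
through a pmap_load() accessor, so that reads of the tables are easy to grep for and their
semantics (for example, making the load atomic) can be changed in a single place later.
The following is a minimal sketch of what such an accessor can look like; the pd_entry_t
and pt_entry_t typedefs and the trivial body are illustrative assumptions, not the actual
definitions from the arm64 pmap headers.

	/*
	 * Illustrative sketch only -- not the FreeBSD definition.  It assumes
	 * pd_entry_t and pt_entry_t are both 64-bit integers, so one accessor
	 * covers l1, l2 and l3 entries alike.
	 */
	#include <stdint.h>

	typedef uint64_t pd_entry_t;	/* page-directory (l1/l2) entry -- assumed */
	typedef uint64_t pt_entry_t;	/* page-table (l3) entry -- assumed */

	static inline pt_entry_t
	pmap_load(pt_entry_t *table)
	{

		/* One greppable place where the load could later be made atomic. */
		return (*table);
	}

With an accessor of that shape in place, a test such as "if (*l1 == 0)" becomes
"if (pmap_load(l1) == 0)", which is the transformation every hunk above performs.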