Date:        Fri, 14 Dec 2018 18:50:32 +0000 (UTC)
From:        Mark Johnston <markj@FreeBSD.org>
To:          src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:     svn commit: r342093 - head/sys/riscv/riscv
Message-ID:  <201812141850.wBEIoWN9029120@repo.freebsd.org>
Author: markj
Date: Fri Dec 14 18:50:32 2018
New Revision: 342093
URL: https://svnweb.freebsd.org/changeset/base/342093

Log:
  Clean up the riscv pmap_bootstrap() implementation.

  - Build up phys_avail[] in a single loop, excluding memory used by the
    loaded kernel.
  - Fix an array indexing bug in the aforementioned phys_avail[]
    initialization.[1]
  - Remove some unneeded code copied from the arm64 implementation.

  PR:             231515 [1]
  Reviewed by:    jhb
  MFC after:      2 weeks
  Sponsored by:   The FreeBSD Foundation
  Differential Revision:  https://reviews.freebsd.org/D18464

Modified:
  head/sys/riscv/riscv/pmap.c

Modified: head/sys/riscv/riscv/pmap.c
==============================================================================
--- head/sys/riscv/riscv/pmap.c    Fri Dec 14 18:40:08 2018    (r342092)
+++ head/sys/riscv/riscv/pmap.c    Fri Dec 14 18:50:32 2018    (r342093)
@@ -211,7 +211,7 @@ __FBSDID("$FreeBSD$");
 
 /* The list of all the user pmaps */
 LIST_HEAD(pmaplist, pmap);
-static struct pmaplist allpmaps;
+static struct pmaplist allpmaps = LIST_HEAD_INITIALIZER();
 
 struct pmap kernel_pmap_store;
 
@@ -508,17 +508,12 @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm
 void
 pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
 {
-        u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot;
-        uint64_t kern_delta;
-        pt_entry_t *l2;
-        vm_offset_t va, freemempos;
+        u_int l1_slot, l2_slot, avail_slot, map_slot;
+        vm_offset_t freemempos;
         vm_offset_t dpcpu, msgbufpv;
-        vm_paddr_t pa, min_pa, max_pa;
+        vm_paddr_t end, max_pa, min_pa, pa, start;
         int i;
 
-        kern_delta = KERNBASE - kernstart;
-        physmem = 0;
-
         printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
         printf("%lx\n", l1pt);
         printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
@@ -527,21 +522,16 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart,
         kernel_pmap_store.pm_l1 = (pd_entry_t *)l1pt;
         PMAP_LOCK_INIT(kernel_pmap);
 
-        /*
-         * Initialize the global pv list lock.
-         */
         rw_init(&pvh_global_lock, "pmap pv global");
 
-        LIST_INIT(&allpmaps);
+        /* Assume the address we were loaded to is a valid physical address. */
+        min_pa = max_pa = kernstart;
 
-        /* Assume the address we were loaded to is a valid physical address */
-        min_pa = max_pa = KERNBASE - kern_delta;
-
         /*
          * Find the minimum physical address. physmap is sorted,
          * but may contain empty ranges.
          */
-        for (i = 0; i < (physmap_idx * 2); i += 2) {
+        for (i = 0; i < physmap_idx * 2; i += 2) {
                 if (physmap[i] == physmap[i + 1])
                         continue;
                 if (physmap[i] <= min_pa)
@@ -556,67 +546,18 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart,
         /* Create a direct map region early so we can use it for pa -> va */
         pmap_bootstrap_dmap(l1pt, min_pa, max_pa);
 
-        va = KERNBASE;
-        pa = KERNBASE - kern_delta;
-
         /*
-         * Start to initialize phys_avail by copying from physmap
-         * up to the physical address KERNBASE points at.
-         */
-        map_slot = avail_slot = 0;
-        for (; map_slot < (physmap_idx * 2); map_slot += 2) {
-                if (physmap[map_slot] == physmap[map_slot + 1])
-                        continue;
-
-                if (physmap[map_slot] <= pa &&
-                    physmap[map_slot + 1] > pa)
-                        break;
-
-                phys_avail[avail_slot] = physmap[map_slot];
-                phys_avail[avail_slot + 1] = physmap[map_slot + 1];
-                physmem += (phys_avail[avail_slot + 1] -
-                    phys_avail[avail_slot]) >> PAGE_SHIFT;
-                avail_slot += 2;
-        }
-
-        /* Add the memory before the kernel */
-        if (physmap[avail_slot] < pa) {
-                phys_avail[avail_slot] = physmap[map_slot];
-                phys_avail[avail_slot + 1] = pa;
-                physmem += (phys_avail[avail_slot + 1] -
-                    phys_avail[avail_slot]) >> PAGE_SHIFT;
-                avail_slot += 2;
-        }
-        used_map_slot = map_slot;
-
-        /*
          * Read the page table to find out what is already mapped.
          * This assumes we have mapped a block of memory from KERNBASE
          * using a single L1 entry.
          */
-        l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
+        (void)pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
 
         /* Sanity check the index, KERNBASE should be the first VA */
         KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
 
-        /* Find how many pages we have mapped */
-        for (; l2_slot < Ln_ENTRIES; l2_slot++) {
-                if ((l2[l2_slot] & PTE_V) == 0)
-                        break;
+        freemempos = roundup2(KERNBASE + kernlen, PAGE_SIZE);
 
-                /* Check locore used L2 superpages */
-                KASSERT((l2[l2_slot] & PTE_RX) != 0,
-                    ("Invalid bootstrap L2 table"));
-
-                va += L2_SIZE;
-                pa += L2_SIZE;
-        }
-
-        va = roundup2(va, L2_SIZE);
-
-        freemempos = KERNBASE + kernlen;
-        freemempos = roundup2(freemempos, PAGE_SIZE);
-
         /* Create the l3 tables for the early devmap */
         freemempos = pmap_bootstrap_l3(l1pt,
             VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos);
@@ -642,31 +583,32 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart,
 
         pa = pmap_early_vtophys(l1pt, freemempos);
 
-        /* Finish initialising physmap */
-        map_slot = used_map_slot;
-        for (; avail_slot < (PHYS_AVAIL_SIZE - 2) &&
-            map_slot < (physmap_idx * 2); map_slot += 2) {
-                if (physmap[map_slot] == physmap[map_slot + 1]) {
-                        continue;
-                }
+        /* Initialize phys_avail. */
+        for (avail_slot = map_slot = physmem = 0; map_slot < physmap_idx * 2;
+            map_slot += 2) {
+                start = physmap[map_slot];
+                end = physmap[map_slot + 1];
 
-                /* Have we used the current range? */
-                if (physmap[map_slot + 1] <= pa) {
+                if (start == end)
                         continue;
-                }
+                if (start >= kernstart && end <= pa)
+                        continue;
 
-                /* Do we need to split the entry? */
-                if (physmap[map_slot] < pa) {
+                if (start < kernstart && end > kernstart)
+                        end = kernstart;
+                else if (start < pa && end > pa)
+                        start = pa;
+                phys_avail[avail_slot] = start;
+                phys_avail[avail_slot + 1] = end;
+                physmem += (end - start) >> PAGE_SHIFT;
+                avail_slot += 2;
+
+                if (end != physmap[map_slot + 1] && end > pa) {
                         phys_avail[avail_slot] = pa;
                         phys_avail[avail_slot + 1] = physmap[map_slot + 1];
-                } else {
-                        phys_avail[avail_slot] = physmap[map_slot];
-                        phys_avail[avail_slot + 1] = physmap[map_slot + 1];
+                        physmem += (physmap[map_slot + 1] - pa) >> PAGE_SHIFT;
+                        avail_slot += 2;
                 }
-                physmem += (phys_avail[avail_slot + 1] -
-                    phys_avail[avail_slot]) >> PAGE_SHIFT;
-
-                avail_slot += 2;
         }
         phys_avail[avail_slot] = 0;
         phys_avail[avail_slot + 1] = 0;
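An aside on the allpmaps hunk above: queue(3) list heads can be initialized statically with LIST_HEAD_INITIALIZER(), which is what allows the LIST_INIT(&allpmaps) call to be dropped from pmap_bootstrap(). The macro ignores its argument, so the empty parentheses in the diff are fine. A minimal userland illustration follows; the element type "item" is made up for the example and is not part of the commit:

#include <sys/queue.h>
#include <stdio.h>

struct item {
        int value;
        LIST_ENTRY(item) link;      /* linkage field for the list */
};

/* Statically initialized head; equivalent to calling LIST_INIT() at runtime. */
static LIST_HEAD(itemlist, item) items = LIST_HEAD_INITIALIZER(items);

int
main(void)
{
        struct item a = { .value = 42 };

        LIST_INSERT_HEAD(&items, &a, link);
        printf("%d\n", LIST_FIRST(&items)->value);
        return (0);
}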
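For readers who want to see the new phys_avail logic in isolation, here is a standalone userland sketch that runs the same single-loop construction on made-up data. The contents of physmap[], kernstart, and pa below are stand-in example values only; in the kernel they are the global memory map, the kernel load address, and the first free physical address past the bootstrap allocations (from pmap_early_vtophys()), as in the diff above.

/*
 * Userland sketch of the single-pass phys_avail construction added in
 * r342093.  NOT kernel code: the inputs are example values, and
 * PAGE_SHIFT/PHYS_AVAIL_SIZE are redefined locally.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PHYS_AVAIL_SIZE 16

int
main(void)
{
        /* Example memory map: low RAM, then the range holding the kernel. */
        uint64_t physmap[] = {
                0x00100000, 0x00800000,
                0x80000000, 0xc0000000,
        };
        int physmap_idx = 2;                    /* number of ranges */
        uint64_t kernstart = 0x80000000;        /* kernel load address */
        uint64_t pa = 0x80600000;               /* end of bootstrap allocations */
        uint64_t phys_avail[PHYS_AVAIL_SIZE] = { 0 };
        uint64_t start, end, physmem;
        int avail_slot, map_slot, i;

        for (avail_slot = map_slot = physmem = 0; map_slot < physmap_idx * 2;
            map_slot += 2) {
                start = physmap[map_slot];
                end = physmap[map_slot + 1];

                if (start == end)
                        continue;               /* skip empty ranges */
                if (start >= kernstart && end <= pa)
                        continue;               /* wholly used by the kernel */

                /* Clip the range against the kernel and its bootstrap data. */
                if (start < kernstart && end > kernstart)
                        end = kernstart;
                else if (start < pa && end > pa)
                        start = pa;
                phys_avail[avail_slot] = start;
                phys_avail[avail_slot + 1] = end;
                physmem += (end - start) >> PAGE_SHIFT;
                avail_slot += 2;

                /*
                 * If the range was clipped and its clipped end still lies
                 * above the bootstrap allocations, add a second entry for
                 * the remainder above pa.
                 */
                if (end != physmap[map_slot + 1] && end > pa) {
                        phys_avail[avail_slot] = pa;
                        phys_avail[avail_slot + 1] = physmap[map_slot + 1];
                        physmem += (physmap[map_slot + 1] - pa) >> PAGE_SHIFT;
                        avail_slot += 2;
                }
        }
        phys_avail[avail_slot] = 0;
        phys_avail[avail_slot + 1] = 0;

        for (i = 0; i < avail_slot; i += 2)
                printf("phys_avail: 0x%jx - 0x%jx\n",
                    (uintmax_t)phys_avail[i], (uintmax_t)phys_avail[i + 1]);
        printf("physmem: %ju pages\n", (uintmax_t)physmem);
        return (0);
}

Running it prints the resulting usable ranges and page count: empty ranges and ranges wholly consumed by the kernel are skipped, and ranges overlapping the kernel image or its bootstrap allocations are clipped, all in a single pass.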