From owner-svn-src-all@freebsd.org Mon Nov 4 00:35:41 2019 Return-Path: Delivered-To: svn-src-all@mailman.nyi.freebsd.org Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2610:1c1:1:606c::19:1]) by mailman.nyi.freebsd.org (Postfix) with ESMTP id 8DD481AD819; Mon, 4 Nov 2019 00:35:41 +0000 (UTC) (envelope-from jhibbits@FreeBSD.org) Received: from mxrelay.nyi.freebsd.org (mxrelay.nyi.freebsd.org [IPv6:2610:1c1:1:606c::19:3]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) server-signature RSA-PSS (4096 bits) client-signature RSA-PSS (4096 bits) client-digest SHA256) (Client CN "mxrelay.nyi.freebsd.org", Issuer "Let's Encrypt Authority X3" (verified OK)) by mx1.freebsd.org (Postfix) with ESMTPS id 475v3K3GBzz3GcN; Mon, 4 Nov 2019 00:35:41 +0000 (UTC) (envelope-from jhibbits@FreeBSD.org) Received: from repo.freebsd.org (repo.freebsd.org [IPv6:2610:1c1:1:6068::e6a:0]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (Client did not present a certificate) by mxrelay.nyi.freebsd.org (Postfix) with ESMTPS id 4C4011C114; Mon, 4 Nov 2019 00:35:41 +0000 (UTC) (envelope-from jhibbits@FreeBSD.org) Received: from repo.freebsd.org ([127.0.1.37]) by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id xA40Zfxd003049; Mon, 4 Nov 2019 00:35:41 GMT (envelope-from jhibbits@FreeBSD.org) Received: (from jhibbits@localhost) by repo.freebsd.org (8.15.2/8.15.2/Submit) id xA40ZfRX003048; Mon, 4 Nov 2019 00:35:41 GMT (envelope-from jhibbits@FreeBSD.org) Message-Id: <201911040035.xA40ZfRX003048@repo.freebsd.org> X-Authentication-Warning: repo.freebsd.org: jhibbits set sender to jhibbits@FreeBSD.org using -f From: Justin Hibbits Date: Mon, 4 Nov 2019 00:35:41 +0000 (UTC) To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r354326 - head/sys/powerpc/booke X-SVN-Group: head X-SVN-Commit-Author: jhibbits X-SVN-Commit-Paths: head/sys/powerpc/booke X-SVN-Commit-Revision: 354326 X-SVN-Commit-Repository: base 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-BeenThere: svn-src-all@freebsd.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: "SVN commit messages for the entire src tree \(except for " user" and " projects" \)" List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Mon, 04 Nov 2019 00:35:41 -0000 Author: jhibbits Date: Mon Nov 4 00:35:40 2019 New Revision: 354326 URL: https://svnweb.freebsd.org/changeset/base/354326 Log: powerpc/pmap: Make use of tlb1_mapin_region in pmap_mapdev_attr() tlb1_mapin_region() and pmap_mapdev_attr() do roughly the same thing -- map a chunk of physical address space (memory or MMIO) into virtual, but do it in differing ways. Unify the code, settling on pmap_mapdev_attr()'s algorithm, to simplify and unify the logic. This fixes a bug with growing the kernel mappings in mmu_booke_bootstrap(), where part of the mapping was not getting done, leading to a hang when the unmapped VAs were accessed. 
Modified: head/sys/powerpc/booke/pmap.c Modified: head/sys/powerpc/booke/pmap.c ============================================================================== --- head/sys/powerpc/booke/pmap.c Sun Nov 3 22:17:49 2019 (r354325) +++ head/sys/powerpc/booke/pmap.c Mon Nov 4 00:35:40 2019 (r354326) @@ -237,11 +237,11 @@ static void tlb_print_entry(int, uint32_t, uint32_t, u static void tlb1_read_entry(tlb_entry_t *, unsigned int); static void tlb1_write_entry(tlb_entry_t *, unsigned int); static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); -static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t); +static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t, int); static vm_size_t tsize2size(unsigned int); static unsigned int size2tsize(vm_size_t); -static unsigned int ilog2(unsigned long); +static unsigned long ilog2(unsigned long); static void set_mas4_defaults(void); @@ -1619,10 +1619,16 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_o debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n", kernel_pdir, data_end); + /* Pre-round up to 1MB. This wastes some space, but saves TLB entries */ + data_end = roundup2(data_end, 1 << 20); debugf(" data_end: 0x%"PRI0ptrX"\n", data_end); + debugf(" kernstart: %p\n", kernstart); + debugf(" kernsize: %lx\n", kernsize); + if (data_end - kernstart > kernsize) { kernsize += tlb1_mapin_region(kernstart + kernsize, - kernload + kernsize, (data_end - kernstart) - kernsize); + kernload + kernsize, (data_end - kernstart) - kernsize, + _TLB_ENTRY_MEM); } data_end = kernstart + kernsize; debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end); @@ -1819,7 +1825,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_o * Round so it fits into a single mapping. 
*/ tlb1_mapin_region(DMAP_BASE_ADDRESS, 0, - phys_avail[i + 1]); + phys_avail[i + 1], _TLB_ENTRY_MEM); #endif /*******************************************************/ @@ -3500,30 +3506,8 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz #endif res = (void *)va; - do { - sz = 1 << (ilog2(size) & ~1); - /* Align size to PA */ - if (pa % sz != 0) { - do { - sz >>= 2; - } while (pa % sz != 0); - } - /* Now align from there to VA */ - if (va % sz != 0) { - do { - sz >>= 2; - } while (va % sz != 0); - } - if (bootverbose) - printf("Wiring VA=%p to PA=%jx (size=%lx)\n", - (void *)va, (uintmax_t)pa, (long)sz); - if (tlb1_set_entry(va, pa, sz, - _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0) - return (NULL); - size -= sz; - pa += sz; - va += sz; - } while (size > 0); + if (tlb1_mapin_region(va, pa, size, tlb_calc_wimg(pa, ma)) != size) + return (NULL); return (res); } @@ -3864,7 +3848,7 @@ tlb1_write_entry(tlb_entry_t *e, unsigned int idx) /* * Return the largest uint value log such that 2^log <= num. */ -static unsigned int +static unsigned long ilog2(unsigned long num) { long lz; @@ -3952,69 +3936,49 @@ tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_ } /* - * Map in contiguous RAM region into the TLB1 using maximum of - * KERNEL_REGION_MAX_TLB_ENTRIES entries. - * - * If necessary round up last entry size and return total size - * used by all allocated entries. + * Map in contiguous RAM region into the TLB1. 
*/ -vm_size_t -tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) +static vm_size_t +tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size, int wimge) { - vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES]; - vm_size_t mapped, pgsz, base, mask; - int idx, nents; + vm_offset_t base; + vm_size_t mapped, sz, ssize; - /* Round up to the next 1M */ - size = roundup2(size, 1 << 20); - mapped = 0; - idx = 0; base = va; - pgsz = 64*1024*1024; - while (mapped < size) { - while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) { - while (pgsz > (size - mapped)) - pgsz >>= 2; - pgs[idx++] = pgsz; - mapped += pgsz; - } + ssize = size; - /* We under-map. Correct for this. */ - if (mapped < size) { - while (pgs[idx - 1] == pgsz) { - idx--; - mapped -= pgsz; - } - /* XXX We may increase beyond out starting point. */ - pgsz <<= 2; - pgs[idx++] = pgsz; - mapped += pgsz; + while (size > 0) { + sz = 1UL << (ilog2(size) & ~1); + /* Align size to PA */ + if (pa % sz != 0) { + do { + sz >>= 2; + } while (pa % sz != 0); } + /* Now align from there to VA */ + if (va % sz != 0) { + do { + sz >>= 2; + } while (va % sz != 0); + } + /* Now align from there to VA */ + if (bootverbose) + printf("Wiring VA=%p to PA=%jx (size=%lx)\n", + (void *)va, (uintmax_t)pa, (long)sz); + if (tlb1_set_entry(va, pa, sz, + _TLB_ENTRY_SHARED | wimge) < 0) + return (mapped); + size -= sz; + pa += sz; + va += sz; } - nents = idx; - mask = pgs[0] - 1; - /* Align address to the boundary */ - if (va & mask) { - va = (va + mask) & ~mask; - pa = (pa + mask) & ~mask; - } - - for (idx = 0; idx < nents; idx++) { - pgsz = pgs[idx]; - debugf("%u: %jx -> %jx, size=%jx\n", idx, (uintmax_t)pa, - (uintmax_t)va, (uintmax_t)pgsz); - tlb1_set_entry(va, pa, pgsz, - _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM); - pa += pgsz; - va += pgsz; - } - mapped = (va - base); if (bootverbose) printf("mapped size 0x%"PRIxPTR" (wasted space 0x%"PRIxPTR")\n", - mapped, mapped - size); + mapped, mapped - ssize); + return (mapped); }