From owner-svn-src-stable@FreeBSD.ORG  Sun Sep 20 19:32:11 2009
Delivered-To: svn-src-stable@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34])
	by hub.freebsd.org (Postfix) with ESMTP id 3BBDE106566B;
	Sun, 20 Sep 2009 19:32:11 +0000 (UTC) (envelope-from marius@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c])
	by mx1.freebsd.org (Postfix) with ESMTP id 28BBD8FC13;
	Sun, 20 Sep 2009 19:32:11 +0000 (UTC)
Received: from svn.freebsd.org (localhost [127.0.0.1])
	by svn.freebsd.org (8.14.3/8.14.3) with ESMTP id n8KJWBre065891;
	Sun, 20 Sep 2009 19:32:11 GMT (envelope-from marius@svn.freebsd.org)
Received: (from marius@localhost)
	by svn.freebsd.org (8.14.3/8.14.3/Submit) id n8KJWBZu065887;
	Sun, 20 Sep 2009 19:32:11 GMT (envelope-from marius@svn.freebsd.org)
Message-Id: <200909201932.n8KJWBZu065887@svn.freebsd.org>
From: Marius Strobl
Date: Sun, 20 Sep 2009 19:32:11 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-6@freebsd.org
X-SVN-Group: stable-6
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc:
Subject: svn commit: r197370 - in stable/6/sys: . conf contrib/pf dev/cxgb
	sparc64/include sparc64/sparc64
X-BeenThere: svn-src-stable@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: SVN commit messages for all the -stable branches of the src tree
X-List-Received-Date: Sun, 20 Sep 2009 19:32:11 -0000

Author: marius
Date: Sun Sep 20 19:32:10 2009
New Revision: 197370

URL: http://svn.freebsd.org/changeset/base/197370

Log:
  MFC: r195149 (partial)
  - Work around the broken loader behavior of not demapping no longer
    used kernel TLB slots when unloading the kernel or modules, which
    results in havoc when loading a kernel and modules which take up
    fewer TLB slots afterwards, as the unused but locked ones aren't
    accounted for in virtual_avail.  Eventually this should be fixed
    in the loader, which isn't straightforward though, and the kernel
    should be robust against this anyway. [1]
  - Remove the no longer used global msgbuf_phys.
  - Remove the redundant ekva parameter of pmap_bootstrap().
  - Correct some outdated function names in ktr(9) invocations.
  Requested by:	jhb [1]

Modified:
  stable/6/sys/   (props changed)
  stable/6/sys/conf/   (props changed)
  stable/6/sys/contrib/pf/   (props changed)
  stable/6/sys/dev/cxgb/   (props changed)
  stable/6/sys/sparc64/include/pmap.h
  stable/6/sys/sparc64/sparc64/machdep.c
  stable/6/sys/sparc64/sparc64/pmap.c

Modified: stable/6/sys/sparc64/include/pmap.h
==============================================================================
--- stable/6/sys/sparc64/include/pmap.h	Sun Sep 20 19:26:04 2009	(r197369)
+++ stable/6/sys/sparc64/include/pmap.h	Sun Sep 20 19:32:10 2009	(r197370)
@@ -77,7 +77,7 @@ struct pmap {
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
 
-void	pmap_bootstrap(vm_offset_t ekva);
+void	pmap_bootstrap(void);
 vm_paddr_t	pmap_kextract(vm_offset_t va);
 void	pmap_kenter(vm_offset_t va, vm_page_t m);
 void	pmap_kremove(vm_offset_t);
@@ -105,8 +105,6 @@ extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
 
-extern	vm_paddr_t msgbuf_phys;
-
 #ifdef PMAP_STATS
 SYSCTL_DECL(_debug_pmap_stats);
 

Modified: stable/6/sys/sparc64/sparc64/machdep.c
==============================================================================
--- stable/6/sys/sparc64/sparc64/machdep.c	Sun Sep 20 19:26:04 2009	(r197369)
+++ stable/6/sys/sparc64/sparc64/machdep.c	Sun Sep 20 19:32:10 2009	(r197370)
@@ -275,6 +275,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
 	char *env;
 	struct pcpu *pc;
 	vm_offset_t end;
+	vm_offset_t va;
 	caddr_t kmdp;
 	phandle_t child;
 	phandle_t root;
@@ -338,19 +339,28 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
 	 * Panic if there is no metadata.  Most likely the kernel was booted
 	 * directly, instead of through loader(8).
 	 */
-	if (mdp == NULL || kmdp == NULL) {
-		printf("sparc64_init: no loader metadata.\n"
+	if (mdp == NULL || kmdp == NULL || end == 0 ||
+	    kernel_tlb_slots == 0 || kernel_tlbs == NULL) {
+		printf("sparc64_init: missing loader metadata.\n"
 		    "This probably means you are not using loader(8).\n");
 		panic("sparc64_init");
 	}
 
 	/*
-	 * Sanity check the kernel end, which is important.
-	 */
-	if (end == 0) {
-		printf("sparc64_init: warning, kernel end not specified.\n"
-		    "Attempting to continue anyway.\n");
-		end = (vm_offset_t)_end;
+	 * Work around the broken loader behavior of not demapping no
+	 * longer used kernel TLB slots when unloading the kernel or
+	 * modules.
+	 */
+	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
+	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
+		printf("demapping unused kernel TLB slot (va %#lx - %#lx)\n",
+		    va, va + PAGE_SIZE_4M - 1);
+		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+		    ASI_DMMU_DEMAP, 0);
+		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+		    ASI_IMMU_DEMAP, 0);
+		flush(KERNBASE);
+		kernel_tlb_slots--;
 	}
 
 	cache_init(child);
@@ -371,7 +381,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
 	/*
 	 * Initialize virtual memory and calculate physmem.
 	 */
-	pmap_bootstrap(end);
+	pmap_bootstrap();
 
 	/*
 	 * Initialize tunables.
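
To make the new loop's arithmetic concrete: each locked kernel TLB slot maps
one 4MB superpage starting at KERNBASE, so the loop walks down from the
highest slot the loader left locked to the first superpage boundary past the
new kernel image. Below is a minimal userland sketch of that walk. The
KERNBASE value, the slot count, and end are made-up example numbers, and
demap() is a hypothetical stand-in for the stxa()/flush() MMU sequence in the
diff, which is only meaningful in kernel mode.

#include <stdio.h>

#define PAGE_SIZE_4M	(4UL * 1024 * 1024)	/* one locked TLB slot maps 4MB */
#define KERNBASE	0xc0000000UL		/* illustrative kernel base VA */

/* Power-of-two round-up, like the kernel's roundup2(). */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

/* Hypothetical stand-in for the stxa()/flush() demap sequence. */
static void
demap(unsigned long va)
{
	printf("demapping unused kernel TLB slot (va %#lx - %#lx)\n",
	    va, va + PAGE_SIZE_4M - 1);
}

int
main(void)
{
	/* Say the previously loaded kernel and modules locked 6 slots... */
	unsigned long kernel_tlb_slots = 6;
	/* ...but the newly loaded image already ends inside slot 3. */
	unsigned long end = KERNBASE + 3 * PAGE_SIZE_4M + 0x1234;
	unsigned long va;

	/*
	 * Walk down from the highest locked slot to the first superpage
	 * boundary past the kernel, releasing each unused slot, as the
	 * loop added to sparc64_init() does.
	 */
	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
		demap(va);
		kernel_tlb_slots--;
	}
	printf("%lu locked slots remain\n", kernel_tlb_slots);
	return (0);
}

This demaps slots 5 and 4 and leaves kernel_tlb_slots at 4; with the counter
corrected this way, the pmap.c change below can derive virtual_avail directly
as KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M instead of trusting the stale
ekva passed in by the caller.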
Modified: stable/6/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/6/sys/sparc64/sparc64/pmap.c	Sun Sep 20 19:26:04 2009	(r197369)
+++ stable/6/sys/sparc64/sparc64/pmap.c	Sun Sep 20 19:32:10 2009	(r197370)
@@ -111,10 +111,9 @@ __FBSDID("$FreeBSD$");
 #endif
 
 /*
- * Virtual and physical address of message buffer
+ * Virtual address of message buffer
  */
 struct msgbuf *msgbufp;
-vm_paddr_t msgbuf_phys;
 
 int pmap_pagedaemon_waken;
 
@@ -271,7 +270,7 @@ om_cmp(const void *a, const void *b)
  * Bootstrap the system enough to run with virtual memory.
  */
 void
-pmap_bootstrap(vm_offset_t ekva)
+pmap_bootstrap(void)
 {
 	struct pmap *pm;
 	struct tte *tp;
@@ -349,8 +348,8 @@ pmap_bootstrap(vm_offset_t ekva)
 	/*
 	 * Allocate and map the message buffer.
 	 */
-	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
-	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
+	pa = pmap_bootstrap_alloc(MSGBUF_SIZE);
+	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
 
 	/*
 	 * Patch the virtual address and the tsb mask into the trap table.
@@ -399,10 +398,11 @@ pmap_bootstrap(vm_offset_t ekva)
 	}
 
 	/*
-	 * Set the start and end of KVA.  The kernel is loaded at the first
-	 * available 4MB super page, so round up to the end of the page.
+	 * Set the start and end of KVA.  The kernel is loaded starting
+	 * at the first available 4MB super page, so we advance to the
+	 * end of the last one used for it.
 	 */
-	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
+	virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
 	virtual_end = vm_max_kernel_address;
 	kernel_vm_end = vm_max_kernel_address;
 
@@ -422,8 +422,7 @@ pmap_bootstrap(vm_offset_t ekva)
 	 * coloured properly, since we're allocating from phys_avail so the
 	 * memory won't have an associated vm_page_t.
 	 */
-	pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
-	    PAGE_SIZE);
+	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE);
 	kstack0_phys = pa;
 	virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) *
 	    PAGE_SIZE;
@@ -560,7 +559,7 @@ pmap_bootstrap_alloc(vm_size_t size)
 	vm_paddr_t pa;
 	int i;
 
-	size = round_page(size);
+	size = roundup(size, PAGE_SIZE * DCACHE_COLORS);
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		if (phys_avail[i + 1] - phys_avail[i] < size)
 			continue;
@@ -928,7 +927,7 @@ pmap_kremove_flags(vm_offset_t va)
 	struct tte *tp;
 
 	tp = tsb_kvtotte(va);
-	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
+	CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
 	    tp->tte_data);
 	TTE_ZERO(tp);
 }
@@ -1330,7 +1329,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 	}
 
 	CTR6(KTR_PMAP,
-	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
+	    "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
 	    pm->pm_context[curcpu], m, va, pa, prot, wired);
 
 	/*
@@ -1338,7 +1337,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 	 * changed, must be protection or wiring change.
 	 */
 	if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
-		CTR0(KTR_PMAP, "pmap_enter: update");
+		CTR0(KTR_PMAP, "pmap_enter_locked: update");
 		PMAP_STATS_INC(pmap_nenter_update);
 
 		/*
@@ -1394,12 +1393,12 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 	 * If there is an existing mapping, but its for a different
 	 * phsyical address, delete the old mapping.
 	 */
 	if (tp != NULL) {
-		CTR0(KTR_PMAP, "pmap_enter: replace");
+		CTR0(KTR_PMAP, "pmap_enter_locked: replace");
 		PMAP_STATS_INC(pmap_nenter_replace);
 		pmap_remove_tte(pm, NULL, tp, va);
 		tlb_page_demap(pm, va);
 	} else {
-		CTR0(KTR_PMAP, "pmap_enter: new");
+		CTR0(KTR_PMAP, "pmap_enter_locked: new");
 		PMAP_STATS_INC(pmap_nenter_new);
 	}
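
One closing note on the pmap_bootstrap_alloc() change above: rounding every
request up to PAGE_SIZE * DCACHE_COLORS hands out whole dcache colour sets,
keeping successive bootstrap allocations colour-aligned, which is why the
explicit roundup() at the kstack0 allocation site could be dropped. Below is
a minimal sketch of the difference between the two rounding policies,
assuming PAGE_SIZE is 8KB and DCACHE_COLORS is 2; these are illustrative
stand-ins for the real values in the sparc64 headers.

#include <stdio.h>

/* Assumed example values, for illustration only. */
#define PAGE_SIZE	8192UL			/* 8KB base pages */
#define DCACHE_COLORS	2UL			/* 16KB virtually-indexed dcache */

/* Generic round-up like the kernel's roundup(); y need not be a power of 2. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
#define round_page(x)	roundup((x), PAGE_SIZE)

int
main(void)
{
	unsigned long size = 20000;	/* an arbitrary bootstrap request */

	/* Old behaviour: page granularity only (3 pages = 24576). */
	printf("round_page(%lu)     = %lu\n", size, round_page(size));

	/*
	 * New behaviour: whole colour sets (2 sets = 32768), so the next
	 * bootstrap allocation starts on a colour boundary again.
	 */
	printf("colour roundup(%lu) = %lu\n", size,
	    roundup(size, PAGE_SIZE * DCACHE_COLORS));
	return (0);
}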