From owner-svn-src-projects@FreeBSD.ORG  Sun Jun 10 13:15:13 2012
Return-Path: <owner-svn-src-projects@FreeBSD.ORG>
Delivered-To: svn-src-projects@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [69.147.83.52])
	by hub.freebsd.org (Postfix) with ESMTP id D0897106564A;
	Sun, 10 Jun 2012 13:15:13 +0000 (UTC) (envelope-from cherry@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c])
	by mx1.freebsd.org (Postfix) with ESMTP id BAC4E8FC08;
	Sun, 10 Jun 2012 13:15:13 +0000 (UTC)
Received: from svn.freebsd.org (localhost [127.0.0.1])
	by svn.freebsd.org (8.14.4/8.14.4) with ESMTP id q5ADFDjN098988;
	Sun, 10 Jun 2012 13:15:13 GMT (envelope-from cherry@svn.freebsd.org)
Received: (from cherry@localhost)
	by svn.freebsd.org (8.14.4/8.14.4/Submit) id q5ADFD6b098983;
	Sun, 10 Jun 2012 13:15:13 GMT (envelope-from cherry@svn.freebsd.org)
Message-Id: <201206101315.q5ADFD6b098983@svn.freebsd.org>
From: "Cherry G. Mathew" <cherry@FreeBSD.org>
Date: Sun, 10 Jun 2012 13:15:13 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
X-SVN-Group: projects
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc:
Subject: svn commit: r236850 - in projects/amd64_xen_pv/sys/amd64: amd64 include xen
X-BeenThere: svn-src-projects@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: "SVN commit messages for the src " projects" tree"
X-List-Received-Date: Sun, 10 Jun 2012 13:15:13 -0000

Author: cherry
Date: Sun Jun 10 13:15:13 2012
New Revision: 236850

URL: http://svn.freebsd.org/changeset/base/236850

Log:
  - Fine tune the pages that are added to the crashdump.
  - Enable the msgbufp initialisation. Note: console is not ready yet.
  - Further pmap tweaks to make boot progress.

  Approved by:	gibbs (implicit)

Modified:
  projects/amd64_xen_pv/sys/amd64/amd64/uma_machdep.c
  projects/amd64_xen_pv/sys/amd64/include/pmap.h
  projects/amd64_xen_pv/sys/amd64/xen/machdep.c
  projects/amd64_xen_pv/sys/amd64/xen/pmap.c

Modified: projects/amd64_xen_pv/sys/amd64/amd64/uma_machdep.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/amd64/uma_machdep.c	Sun Jun 10 13:10:21 2012	(r236849)
+++ projects/amd64_xen_pv/sys/amd64/amd64/uma_machdep.c	Sun Jun 10 13:15:13 2012	(r236850)
@@ -68,6 +68,12 @@ uma_small_alloc(uma_zone_t zone, int byt
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
 	va = (void *)PHYS_TO_DMAP(pa);
+#ifdef XEN
+	/* XXX: temp fix, dmap not yet implemented. */
+	vm_offset_t vaddr = (vm_offset_t) va;
+	pmap_map(&vaddr, pa, pa + roundup(bytes, PAGE_SIZE),
+		 VM_PROT_READ | VM_PROT_WRITE);
+#endif
 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
 		pagezero(va);
 	return (va);

Modified: projects/amd64_xen_pv/sys/amd64/include/pmap.h
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/include/pmap.h	Sun Jun 10 13:10:21 2012	(r236849)
+++ projects/amd64_xen_pv/sys/amd64/include/pmap.h	Sun Jun 10 13:15:13 2012	(r236850)
@@ -345,7 +345,11 @@ extern vm_paddr_t dump_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
+#ifdef XEN
+#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT /* XXX: review */
+#else
 #define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
+#endif
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
 
 void	pmap_bootstrap(vm_paddr_t *);

Modified: projects/amd64_xen_pv/sys/amd64/xen/machdep.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/machdep.c	Sun Jun 10 13:10:21 2012	(r236849)
+++ projects/amd64_xen_pv/sys/amd64/xen/machdep.c	Sun Jun 10 13:15:13 2012	(r236850)
@@ -57,6 +57,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -65,6 +66,7 @@
 #include
 #include
 #include
+#include	/* XXX: remove with RB_XXX */
 #include
 #include
 #include
@@ -104,7 +106,7 @@ xen_pfn_t *xen_phys_machine;
 
 #define	PHYSMAP_SIZE	(2 * VM_PHYSSEG_MAX)
 vm_offset_t pa_index = 0;
 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
-vm_paddr_t dump_avail[2] = {0, 0}; /* XXX: todo */
+vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
 
 struct pcpu __pcpu[MAXCPU];
@@ -307,6 +309,7 @@ initxen(struct start_info *si)
 	physmem = si->nr_pages;
 	Maxmem = si->nr_pages + 1;
 	memset(phys_avail, 0, sizeof phys_avail);
+	memset(dump_avail, 0 , sizeof dump_avail);
 
 	/*
 	 * Setup kernel tls registers. pcpu needs them, and other
@@ -329,6 +332,9 @@ initxen(struct start_info *si)
 	/* Address of lowest unused page */
 	physfree = VTOP(si->pt_base + si->nr_pt_frames * PAGE_SIZE);
 
+	/* Init basic tunables, hz, msgbufsize etc */
+	init_param1();
+
 	/* page tables */
 	pmap_bootstrap(&physfree);
 
@@ -346,8 +352,11 @@ initxen(struct start_info *si)
 		("Attempt to use unmapped va\n"));
 
 	/* Register the rest of free physical memory with phys_avail[] */
-	phys_avail[pa_index++] = physfree;
+	/* dump_avail[] starts at index 1 */
+	phys_avail[pa_index++] = physfree;
+	dump_avail[pa_index] = physfree;
 	phys_avail[pa_index++] = ptoa(physmem);
+	dump_avail[pa_index] = ptoa(physmem);
 
 	/*
 	 * This may be done better later if it gets more high level
@@ -375,8 +384,6 @@ initxen(struct start_info *si)
 	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
 	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
 #endif
-	/* Init basic tunables, hz etc */
-	init_param1();
 
 	/* gdt */
 	vm_paddr_t gdt0_frame = phystomach(VTOP(gdt));
@@ -432,8 +439,13 @@ initxen(struct start_info *si)
 
 	/* Event handling */
 	init_event_callbacks();
+
+	cninit(); /* Console subsystem init */
+
 	identify_cpu(); /* Final stage of CPU initialization */
+
 	init_param2(physmem);
+
 	//msgbufinit(msgbufp, msgbufsize);
 	//fpuinit();
 
@@ -454,8 +466,6 @@ initxen(struct start_info *si)
 	_ufssel = GSEL(GUFS32_SEL, SEL_UPL);
 	_ugssel = GSEL(GUGS32_SEL, SEL_UPL);
 
-	gdtset = 1;
-
 	/* console */
 	printk("Hello world!\n");
 

Modified: projects/amd64_xen_pv/sys/amd64/xen/pmap.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Sun Jun 10 13:10:21 2012	(r236849)
+++ projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Sun Jun 10 13:15:13 2012	(r236850)
@@ -121,6 +121,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include
 #include
 
 #ifdef SMP
@@ -128,6 +129,7 @@ __FBSDID("$FreeBSD$");
 #endif
 
 #include
+#include
 #include
 #include
 
@@ -198,7 +200,6 @@ vallocpages(vm_paddr_t *firstaddr, int n
 	/* Make sure we are still inside of available mapped va. */
 	KASSERT(PTOV(*firstaddr) <= (xenstack + 512 * 1024),
 		("Attempt to use unmapped va\n"));
-
 	return (ret);
 }
 
@@ -495,8 +496,8 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	KASSERT(pa_index == 0,
 		("reclaimed page table pages are not the lowest available!"));
 
-	phys_avail[pa_index] = VTOP(xen_start_info->pt_base);
-	phys_avail[pa_index + 1] = phys_avail[pa_index] +
+	dump_avail[pa_index + 1] = phys_avail[pa_index] = VTOP(xen_start_info->pt_base);
+	dump_avail[pa_index + 2] = phys_avail[pa_index + 1] = phys_avail[pa_index] +
 		ptoa(xen_start_info->nr_pt_frames - 1);
 	pa_index += 2;
 
@@ -519,8 +520,6 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	/* XXX: Check we don't overlap xen pgdir entries. */
 	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
-	virtual_avail = xenstack + 1052 * 1024;
-
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
@@ -532,36 +531,70 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 
 	tsz = mmu_map_t_size();
 
-#if 0
-	/* XXX test */
-	vm_offset_t va = virtual_avail + 4096 * 1024;
-
-	vm_paddr_t pa = phys_avail[pa_index - 1];
-
-	pmap_kenter(va, pa);
+	/* Steal some memory (backing physical pages, and kva) */
+	physmem -= atop(round_page(msgbufsize));
 
-	memset((void *)va, 0, PAGE_SIZE);
-	while(1);
-	/* test XXX */
-#endif
+	msgbufp = (void *) pmap_map(&virtual_avail,
+		ptoa(physmem), ptoa(physmem) + round_page(msgbufsize),
+		VM_PROT_READ | VM_PROT_WRITE);
 }
 
 void
 pmap_page_init(vm_page_t m)
 {
-	KASSERT(0, ("XXX: TODO\n"));
+	/* XXX: TODO - pv_lists */
+
 }
 
+/*
+ * Map in backing memory from kernel_vm_end to addr,
+ * and update kernel_vm_end.
+ */
 void
 pmap_growkernel(vm_offset_t addr)
 {
-	KASSERT(0, ("XXX: TODO\n"));
+	KASSERT(kernel_vm_end < addr, ("trying to shrink kernel VA!"));
+
+	addr = trunc_page(addr);
+
+	char tbuf[tsz]; /* Safe to do this on the stack since tsz is
+			 * effectively const.
+			 */
+
+	mmu_map_t tptr = tbuf;
+
+	struct mmu_map_mbackend mb = {
+		ptmb_mappedalloc,
+		ptmb_mappedfree,
+		ptmb_ptov,
+		ptmb_vtop
+	};
+
+	mmu_map_t_init(tptr, &mb);
+
+	for (;addr <= kernel_vm_end;addr += PAGE_SIZE) {
+
+		if (mmu_map_inspect_va(kernel_pmap, tptr, addr)) {
+			continue;
+		}
+		int pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
+		vm_page_t m = vm_page_alloc(NULL, 0, pflags);
+		KASSERT(m != NULL, ("Backing page alloc failed!"));
+		vm_paddr_t pa =m->phys_addr;
+
+		pmap_kenter(addr, pa);
+	}
+
+	mmu_map_t_fini(tptr);
 }
 
 void
 pmap_init(void)
 {
-	KASSERT(0, ("XXX: TODO\n"));
+	/* XXX: review the use of gdtset for the purpose below */
+	gdtset = 1; /* xpq may assert for locking sanity from this point onwards */
+
+	/* XXX: switch the mmu_map.c backend to something more sane */
 }
 
 void
@@ -602,10 +635,36 @@ pmap_lazyfix_action(void)
 }
 #endif /* SMP */
 
+/*
+ * Add a list of wired pages to the kva
+ * this routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ * XXX: TODO SMP.
+ * Note: SMP coherent. Uses a ranged shootdown IPI.
+ */
+
 void
 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 {
-	KASSERT(0, ("XXX: TODO\n"));
+	KASSERT(count > 0, ("count > 0"));
+	KASSERT(sva == trunc_page(sva),
+		("sva not page aligned"));
+	KASSERT(ma != NULL, ("ma != NULL"));
+	vm_page_t m;
+	vm_paddr_t pa;
+
+	while (count--) {
+		m = *ma++;
+		pa = VM_PAGE_TO_PHYS(m);
+		pmap_kenter(sva, pa);
+		sva += PAGE_SIZE;
+	}
+	// XXX: TODO: pmap_invalidate_range(kernel_pmap, sva, sva + count *
+	// PAGE_SIZE);
+
 }
 
 void
@@ -710,13 +769,11 @@ pmap_kenter(vm_offset_t va, vm_paddr_t p
 	}
 
 	/* Backing page tables are in place, let xen do the maths */
-
 	PT_SET_MA(va, xpmap_ptom(pa) | PG_RW | PG_V | PG_U);
 	PT_UPDATES_FLUSH();
 
 	mmu_map_release_va(kernel_pmap, tptr, va);
 	mmu_map_t_fini(tptr);
-
 }
 
 /*
@@ -754,33 +811,29 @@ pmap_map(vm_offset_t *virt, vm_paddr_t s
 	vm_offset_t va, sva;
 
 	va = sva = *virt;
 
+	CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x", va, start, end, prot);
 	while (start < end) {
-#if 0
-		if (PTOV(start) < xenstack + 512 * 1024) { /* XXX:
-							      remove me */
-			continue;
-		}
-#endif
-
 		pmap_kenter(va, start);
 		va += PAGE_SIZE;
 		start += PAGE_SIZE;
-
-		while(1);
 	}
 
 	// XXX: pmap_invalidate_range(kernel_pmap, sva, va);
 	*virt = va;
+
 	return (sva);
 }
 
 void
 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
-	KASSERT(0, ("XXX: TODO\n"));
+	/*
+	 * XXX: TODO - ignore for now - we need to revisit this as
+	 * soon as kdb is up.
+	 */
 }
 
 void
@@ -811,7 +864,12 @@ pmap_copy_page(vm_page_t src, vm_page_t 
 void
 pmap_zero_page(vm_page_t m)
 {
-	KASSERT(0, ("XXX: TODO\n"));
+	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+	/* XXX: temp fix, dmap not yet implemented. */
+	pmap_kenter(va, m->phys_addr);
+
+	pagezero((void *)va);
 }
 
 void
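
[Editorial illustration, not part of the commit above: the dump_avail[]/phys_avail[] hunks rely on the
convention that both arrays hold zero-terminated {start, end} pairs of physical addresses, and, as the
"dump_avail[] starts at index 1" comment notes, this commit writes each dump_avail[] entry one slot
higher than its phys_avail[] counterpart, leaving dump_avail[0] == 0. The standalone userland sketch
below only illustrates that layout; it is not FreeBSD code, and the names walk_ranges and xpaddr_t are
made up for the example.]

    #include <stdio.h>

    typedef unsigned long long xpaddr_t;	/* illustrative stand-in for vm_paddr_t */

    /*
     * Walk a zero-terminated array of {start, end} physical address pairs,
     * the layout used by phys_avail[].  dump_avail[] uses the same layout,
     * but its first pair begins at index 1, which is why the hunks above
     * store into dump_avail[] at the already-incremented pa_index.
     */
    static void
    walk_ranges(const xpaddr_t *avail, int first)
    {
    	int i;

    	for (i = first; avail[i + 1] != 0; i += 2)
    		printf("range: 0x%llx - 0x%llx\n", avail[i], avail[i + 1]);
    }

    int
    main(void)
    {
    	/* Hypothetical contents: a single free region. */
    	xpaddr_t phys_avail[] = { 0x400000, 0x8000000, 0, 0 };
    	xpaddr_t dump_avail[] = { 0, 0x400000, 0x8000000, 0, 0 };

    	walk_ranges(phys_avail, 0);	/* phys_avail[] pairs start at index 0 */
    	walk_ranges(dump_avail, 1);	/* dump_avail[] pairs start at index 1 */
    	return (0);
    }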