Date: Wed, 15 Mar 2006 08:32:05 GMT
From: Kip Macy <kmacy@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 93341 for review
Message-ID: <200603150832.k2F8W5en020334@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=93341

Change 93341 by kmacy@kmacy_storage:sun4v_work on 2006/03/15 08:32:03

	don't zero cleanwin on context switch as it is handled in user_rtt
	add support for direct-mapped physical addresses using large pages
	allocate early processes' hash tables out of a block allocated in pmap_bootstrap
	implement pmap_copy to avoid unnecessary read faults

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#6 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#26 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/swtch.S#10 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#9 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#6 (text+ko) ====

@@ -6,7 +6,7 @@
 struct tte_hash;
 typedef struct tte_hash *tte_hash_t;
 
-void tte_hash_init(void);
+void tte_hash_init(vm_paddr_t);
 
 tte_hash_t tte_hash_kernel_create(vm_offset_t, uint64_t);

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#26 (text+ko) ====

@@ -103,6 +103,7 @@
 int sparc64_nmemreg;
 
 extern vm_paddr_t mmu_fault_status_area;
+vm_paddr_t proc0_mem;
 
 /*
@@ -130,13 +131,6 @@
  */
 struct pmap kernel_pmap_store;
 
-/*
- * pmap_copy_page vas NOT mp-safe
- */
-vm_offset_t pmap_copy_0, pmap_copy_1;
-
-
-
 hv_tsb_info_t kernel_td[MAX_TSB_INFO];
 
 /*
@@ -363,6 +357,8 @@
 	pmap->pm_active |= 1;
 #endif
 	pmap->pm_hashscratch = tte_hash_set_scratchpad_user(pmap->pm_hash);
+	printf("hashscratch=%lx\n", pmap->pm_hashscratch);
+
 	pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
 	PCPU_SET(curpmap, pmap);
 	critical_exit();
@@ -506,33 +502,19 @@
 
 	pmap_scrub_pages(kernel_td[TSB4M_INDEX].hvtsb_pa, tsb_4m_size);
 
-	/*
-	 * setup direct mappings
-	 *
-	 */
-	for (pa = PAGE_SIZE_4M; pa < phys_avail[2]; pa += PAGE_SIZE_4M)
-		tsb_set_tte(&kernel_td[TSB4M_INDEX], TLB_PHYS_TO_DIRECT(pa),
-		    pa | TTE_KERNEL | VTD_4M, 0);
 	/*
 	 * allocate MMU fault status areas for all CPUS
	 */
 	mmu_fault_status_area = pmap_bootstrap_alloc(MMFSA_SIZE*MAXCPU);
 
+	proc0_mem = pmap_bootstrap_alloc(PAGE_SIZE*4*40);
 	/*
 	 * Allocate and map the message buffer.
 	 */
 	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
 	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
 
-#ifdef notyet
-	/* XXX this tries to map at a wacky address */
-	for (i = 0; i < (MSGBUF_SIZE / PAGE_SIZE); i++)
-		tsb_set_tte(&kernel_td[TSB8K_INDEX],
-		    ((vm_offset_t)msgbufp) + i*PAGE_SIZE,
-		    msgbuf_phys + i*PAGE_SIZE | TTE_KERNEL | VTD_8K, 0);
-#endif
-
 	/*
 	 * Allocate a kernel stack with guard page for thread0 and map it into
 	 * the kernel tsb.
@@ -548,20 +530,20 @@
 		tsb_set_tte(&kernel_td[TSB8K_INDEX], va, pa | TTE_KERNEL | VTD_8K, 0);
 	}
 
-	/* short-term MP-unsafe hack for pmap_copy_page
-	 */
-	pmap_copy_0 = virtual_avail;
-	virtual_avail += PAGE_SIZE;
-	pmap_copy_1 = virtual_avail;
-	virtual_avail += PAGE_SIZE;
-
-
 	/*
 	 * Calculate the last available physical address.
 	 */
 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
 		;
 	Maxmem = sparc64_btop(phys_avail[i + 1]);
 
+	/*
+	 * setup direct mappings
+	 *
+	 */
+	for (pa = PAGE_SIZE_4M; pa < phys_avail[2]; pa += PAGE_SIZE_4M) {
+		tsb_set_tte(&kernel_td[TSB4M_INDEX], TLB_PHYS_TO_DIRECT(pa),
+		    pa | TTE_KERNEL | VTD_4M, 0);
+	}
 	/*
 	 * Add the prom mappings to the kernel tsb.
@@ -713,22 +695,67 @@
 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
 {
-	IMPLEMENTME;
+	vm_offset_t addr, end_addr;
+
+	end_addr = src_addr + len;
+
+	printf("pmap_copy(0x%lx, %ld, 0x%lx)\n", dst_addr, len, src_addr);
+	if (dst_addr != src_addr)
+		return;
+
+	/*
+	 * Don't let optional prefaulting of pages make us go
+	 * way below the low water mark of free pages or way
+	 * above high water mark of used pv entries.
+	 */
+	if (cnt.v_free_count < cnt.v_free_reserved ||
+	    pv_entry_count > pv_entry_high_water)
+		return;
+
+	vm_page_lock_queues();
+	if (dst_pmap < src_pmap) {
+		PMAP_LOCK(dst_pmap);
+		PMAP_LOCK(src_pmap);
+	} else {
+		PMAP_LOCK(src_pmap);
+		PMAP_LOCK(dst_pmap);
+	}
+	sched_pin();
+	for (addr = src_addr; addr < end_addr; addr += PAGE_SIZE) {
+		tte_t *src_tte, *dst_tte, tte_data;
+		vm_page_t m;
+
+		src_tte = tte_hash_lookup(src_pmap->pm_hash, addr);
+		tte_data = src_tte ? *src_tte : 0;
+		if ((tte_data & VTD_MANAGED) != 0) {
+			if ((dst_tte = tte_hash_lookup(dst_pmap->pm_hash, addr)) == NULL) {
+				m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
+				tte_hash_insert(dst_pmap->pm_hash, addr, tte_data & ~(VTD_W|VTD_REF));
+				dst_pmap->pm_stats.resident_count++;
+				pmap_insert_entry(dst_pmap, addr, m);
+			}
+		}
+
+
+	}
+
+	sched_unpin();
+	vm_page_unlock_queues();
+	PMAP_UNLOCK(src_pmap);
+	PMAP_UNLOCK(dst_pmap);
+
 }
 
 void
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
-	/* XXX NOT mp-safe */
-	tsb_set_tte(&kernel_pmap->pm_tsb, pmap_copy_0,
-	    VM_PAGE_TO_PHYS(src) | TTE_KERNEL | VTD_8K, 0);
-	tsb_set_tte(&kernel_pmap->pm_tsb, pmap_copy_1,
-	    VM_PAGE_TO_PHYS(dst) | TTE_KERNEL | VTD_8K, 0);
-
-	bcopy((char *)pmap_copy_0, (char *)pmap_copy_1, PAGE_SIZE);
+	vm_paddr_t srcpa, dstpa;
+	srcpa = VM_PAGE_TO_PHYS(src);
+	dstpa = VM_PAGE_TO_PHYS(dst);
+
+	bcopy((char *)TLB_PHYS_TO_DIRECT(srcpa), (char *)TLB_PHYS_TO_DIRECT(dstpa), PAGE_SIZE);
+
-	pmap_invalidate_page(kernel_pmap, pmap_copy_0);
-	pmap_invalidate_page(kernel_pmap, pmap_copy_1);
 }
 
 /*
@@ -974,7 +1001,7 @@
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
 
-	tte_hash_init();
+	tte_hash_init(proc0_mem);
 
 }
 
@@ -1239,6 +1266,7 @@
 	    ("max context limit hit - need to implement context recycling"));
 
 	pmap->pm_hash = tte_hash_create(pmap->pm_context, &pmap->pm_hashscratch);
+	printf("hashscratch=%lx\n", pmap->pm_hashscratch);
 	pmap->pm_tsb_ra = tsb_init(&pmap->pm_tsb, &pmap->pm_tsbscratch);
 	pmap->pm_active = 0;
 	TAILQ_INIT(&pmap->pm_pvlist);

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/swtch.S#10 (text+ko) ====

@@ -40,6 +40,7 @@
 
 #define	PCB_REG	%g6
 
+#define	MAGIC_EXIT	ta 0x71
 #define	MAGIC_TRAP_ON	ta 0x77
 #define	MAGIC_TRAP_OFF	ta 0x78
 /*
@@ -97,7 +98,6 @@
	 * pointer and program counter.
	 */
 2:	flushw
-	wrpr	%g0, 0, %cleanwin
 	stx	%fp, [PCB_REG + PCB_SP]
	stx	%i7, [PCB_REG + PCB_PC]
 
@@ -184,7 +184,8 @@
	cmp	%g0, %o0
	be	%xcc, 4f
	  nop
-	illtrap
+	MAGIC_TRAP_ON
+	MAGIC_EXIT
 4:
	/*
	 * install the new secondary context number in the cpu.
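
A note on the pmap.c hunks above: once pmap_bootstrap() installs the 4MB TTEs,
every physical page is reachable through the permanent direct-mapped window,
which is why pmap_copy_page() collapses to a single bcopy() with no temporary
8K mappings and no per-copy invalidations. A minimal sketch of that copy path,
assuming the sun4v definitions of TLB_PHYS_TO_DIRECT(), VM_PAGE_TO_PHYS() and
PAGE_SIZE are in scope (copy_page_via_direct_map is an illustrative name; this
mirrors the diff rather than adding anything new):

	/*
	 * Sketch only: copy one physical page to another through the
	 * direct-mapped window.  Both translations are permanent 4MB
	 * mappings created in pmap_bootstrap(), so nothing has to be
	 * mapped, unmapped or invalidated for each copy.
	 */
	static void
	copy_page_via_direct_map(vm_page_t src, vm_page_t dst)
	{
		char *src_va = (char *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(src));
		char *dst_va = (char *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(dst));

		bcopy(src_va, dst_va, PAGE_SIZE);
	}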
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#9 (text+ko) ====

@@ -49,6 +49,8 @@
 #include <machine/smp.h>
 #include <machine/mmu.h>
 #include <machine/tte.h>
+#include <machine/vmparam.h>
+#include <machine/tlb.h>
 #include <machine/tte_hash.h>
 
 #define HASH_SIZE	4
@@ -85,7 +87,7 @@
 };
 
 static struct tte_hash kernel_tte_hash;
-
+static vm_paddr_t proc0_mem;
 /*
  * Data for the tte_hash allocation mechanism
@@ -115,12 +117,13 @@
 }
 
 void
-tte_hash_init(void)
+tte_hash_init(vm_paddr_t bootmem)
 {
 
	thzone = uma_zcreate("TTE_HASH", sizeof(struct tte_hash), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	tte_hash_max = maxproc;
	uma_zone_set_obj(thzone, &thzone_obj, tte_hash_max);
+	proc0_mem = bootmem;
 }
 
@@ -144,39 +147,46 @@
 tte_hash_create(uint64_t context, uint64_t *scratchval)
 {
	tte_hash_t th;
-	vm_page_t m, hash_pages[HASH_SIZE];
+	vm_page_t m, tm;
	int i;
-	static int color;
-
+	static int proc0_mem_allocated;
+
	th = get_tte_hash();
 
	th->th_size = HASH_SIZE;
	th->th_entries = 0;
	th->th_context = (uint16_t)context;
 
-	th->th_hashtable = (tte_hash_entry_t)kmem_alloc_nofault(kernel_map,
-	    PAGE_SIZE*HASH_SIZE);
+	m = NULL;
+
+
+	if (proc0_mem_allocated < 40) {
+
+		printf("skipping vm_page_alloc_contig\n");
+		proc0_mem_allocated++;
+		th->th_hashtable = (void *)TLB_PHYS_TO_DIRECT(proc0_mem);
+		proc0_mem += PAGE_SIZE*HASH_SIZE;
+		goto done;
+	}
 
-	printf("th->th_hashtable=%p ", th->th_hashtable);
-	for (i = 0; i < HASH_SIZE;) {
-		m = vm_page_alloc(NULL, color++,
-		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
-		    VM_ALLOC_ZERO);
-		printf("PHYS(m)=0x%010lx ", VM_PAGE_TO_PHYS(m));
+	printf("calling vm_page_alloc_contig\n");
+	while (m == NULL) {
+		m = vm_page_alloc_contig(HASH_SIZE, 2*PAGE_SIZE_4M,
+		    (1UL<<28), 0, 0);
		if (m == NULL)
			VM_WAIT;
-		else {
-			hash_pages[i++] = m;
-		}
+	}
+	printf("PHYS(m)=0x%010lx ", VM_PAGE_TO_PHYS(m));
+	for (i = 0, tm = m; i < HASH_SIZE; i++, tm++) {
+		if (tm->flags & PG_ZERO)
+			pmap_zero_page(tm);
	}
-	printf("entered\n");
-	pmap_qenter((vm_offset_t)th->th_hashtable, hash_pages, HASH_SIZE);
-	for (i = 0; i < HASH_SIZE; i++) {
-		if ((hash_pages[i]->flags & PG_ZERO) == 0)
-			pmap_zero_page(hash_pages[i]);
-	}
-	*scratchval = ((vm_offset_t)th->th_hashtable) | ((vm_offset_t)th->th_size);
+	th->th_hashtable = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
+done:
+	printf("th->th_hashtable %p\n", th->th_hashtable);
+	*scratchval = (uint64_t)((vm_offset_t)th->th_hashtable) | ((vm_offset_t)th->th_size);
+	printf("hash_create done\n");
 
	return (th);
 }
 
@@ -186,6 +196,7 @@
	vm_page_t m, hash_pages[MAX_HASH_SIZE];
	int i;
 
+	panic("FIXME");
	for (i = 0; i < th->th_size; i++)
		hash_pages[i] = PHYS_TO_VM_PAGE(vtophys(((char *)th->th_hashtable) + i*PAGE_SIZE));
 
@@ -315,7 +326,7 @@
	 */
	hash_scratch = ((vm_offset_t)th->th_hashtable) | ((vm_offset_t)th->th_size);
-
+	printf("hash_scratch=0x%lx\n", hash_scratch);
	set_hash_user_scratchpad(hash_scratch);
 
	return hash_scratch;
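
For what it's worth, the early-allocation scheme is easy to model on its own:
pmap_bootstrap() reserves PAGE_SIZE*4*40 bytes, tte_hash_init() records that
block, and the first 40 calls to tte_hash_create() carve one HASH_SIZE-page
table each off it through the direct map before falling back to
vm_page_alloc_contig(). A stand-alone sketch of the carving pattern follows;
hash_bootmem_init()/hash_bootmem_carve() are illustrative names, the 8K page
size is assumed, and a plain uintptr_t stands in for the physical address plus
direct-map translation:

	#include <stdint.h>

	#define PAGE_SIZE	8192	/* assumed sun4v base page size */
	#define HASH_SIZE	4	/* pages per user hash table, as in the diff */
	#define NBOOT_HASHES	40	/* tables reserved out of the bootstrap block */

	static uintptr_t bootmem;		/* next free byte in the bootstrap block */
	static int	 boot_hashes_used;

	/* Record the block handed over by pmap_bootstrap(), as tte_hash_init() now does. */
	static void
	hash_bootmem_init(uintptr_t block)
	{
		bootmem = block;
	}

	/*
	 * Carve one HASH_SIZE-page table off the bootstrap block; return 0
	 * once the reserved tables are gone, at which point the caller would
	 * switch to vm_page_alloc_contig() as tte_hash_create() does.
	 */
	static uintptr_t
	hash_bootmem_carve(void)
	{
		uintptr_t table;

		if (boot_hashes_used == NBOOT_HASHES)
			return (0);
		boot_hashes_used++;
		table = bootmem;
		bootmem += (uintptr_t)PAGE_SIZE * HASH_SIZE;
		return (table);
	}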