From owner-p4-projects@FreeBSD.ORG  Sat Nov  8 12:53:40 2003
Date: Sat, 8 Nov 2003 12:53:38 -0800 (PST)
Message-Id: <200311082053.hA8KrcW1006185@repoman.freebsd.org>
From: Juli Mallett
To: Perforce Change Reviews
Subject: PERFORCE change 41753 for review

http://perforce.freebsd.org/chv.cgi?CH=41753

Change 41753 by jmallett@jmallett_dalek on 2003/11/08 12:53:13

	IFC, update, stub, hack, slash, etc.

Affected files ...

.. //depot/projects/mips/sys/dev/arcbios/arcbios.c#10 edit
.. //depot/projects/mips/sys/mips/include/atomic.h#18 edit
.. //depot/projects/mips/sys/mips/include/cpu.h#13 edit
.. //depot/projects/mips/sys/mips/include/md_var.h#9 edit
.. //depot/projects/mips/sys/mips/include/param.h#13 edit
.. //depot/projects/mips/sys/mips/mips/machdep.c#37 edit
.. //depot/projects/mips/sys/mips/mips/pmap.c#22 edit
.. //depot/projects/mips/sys/mips/mips/sig_machdep.c#2 edit
.. //depot/projects/mips/sys/mips/mips/support.S#8 edit
.. //depot/projects/mips/sys/mips/mips/tlb.c#12 edit
.. //depot/projects/mips/sys/mips/mips/vm_machdep.c#7 edit

Differences ...

==== //depot/projects/mips/sys/dev/arcbios/arcbios.c#10 (text+ko) ====

@@ -242,7 +242,6 @@
 arcbios_cnattach(void)
 {
 	arcbios_consdev.cn_pri = CN_NORMAL;
-	arcbios_consdev.cn_dev = makedev(MAJOR_AUTO, 0);
 	make_dev(&arcbios_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "arccons");
 	cnadd(&arcbios_consdev);
 	return;

==== //depot/projects/mips/sys/mips/include/atomic.h#18 (text+ko) ====

@@ -67,7 +67,7 @@
 {								\
 	int r;							\
 	mips_read_membar();					\
-	atomic_cmpset_ ## type(p, old, val);			\
+	r = atomic_cmpset_ ## type(p, old, val);		\
 	return (r);						\
 }

==== //depot/projects/mips/sys/mips/include/cpu.h#13 (text+ko) ====

@@ -136,4 +136,8 @@
 #define	MIPS_NOT_SUPP	0x8000
 #endif

+void	cpu_halt(void);
+void	cpu_reset(void);
+void	swi_vm(void *);
+
 #endif /* !_MACHINE_CPU_H_ */

==== //depot/projects/mips/sys/mips/include/md_var.h#9 (text+ko) ====

@@ -36,10 +36,7 @@
 extern	int	cpu_id;
 extern	int	fpu_id;

-void	cpu_halt(void);
-void	cpu_reset(void);
 void	mips_init(void);
-void	swi_vm(void *);

 /* Platform call-downs. */
 void	platform_identify(void);

==== //depot/projects/mips/sys/mips/include/param.h#13 (text+ko) ====

@@ -21,8 +21,11 @@
 /*
  * Pages of u. area and kstack (with pcb) respectively.
  */
-#define	UAREA_PAGES	1
-#define	KSTACK_PAGES	4
+#ifndef KSTACK_PAGES
+#define	KSTACK_PAGES	2	/* pages of kstack (with pcb) */
+#endif
+#define	KSTACK_GUARD_PAGES	1	/* pages of kstack guard; 0 disables */
+#define	UAREA_PAGES	1	/* pages of u-area */

 /*
  * On mips, UPAGES is fixed by sys/arch/mips/mips/locore code

==== //depot/projects/mips/sys/mips/mips/machdep.c#37 (text+ko) ====

@@ -249,6 +249,12 @@
 }

 void
+cpu_idle(void)
+{
+	/* Insert code to halt (until next interrupt) for the idle loop */
+}
+
+void
 cpu_reset(void)
 {
 	printf("Resetting...\n");

==== //depot/projects/mips/sys/mips/mips/pmap.c#22 (text+ko) ====

@@ -573,6 +573,30 @@
 	return 0;
 }

+/*
+ * Routine:	pmap_extract_and_hold
+ * Function:
+ *	Atomically extract and hold the physical page
+ *	with the given pmap and virtual address pair
+ *	if that mapping permits the given protection.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+	vm_paddr_t pa;
+	vm_page_t m;
+
+	m = NULL;
+	mtx_lock(&Giant);
+	if ((pa = pmap_extract(pmap, va)) != 0) {
+		m = PHYS_TO_VM_PAGE(pa);
+		vm_page_lock_queues();
+		vm_page_hold(m);
+		vm_page_unlock_queues();
+	}
+	mtx_unlock(&Giant);
+	return (m);
+}

 /***************************************************
  * Low level mapping routines.....
@@ -653,233 +677,7 @@
 	return MIPS_PHYS_TO_KSEG0(start);
 }

-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack for a new thread.
- * This routine directly affects the fork perf for a process and thread.
- */
 void
-pmap_new_thread(struct thread *td, int pages)
-{
-	int i;
-	vm_object_t ksobj;
-	vm_offset_t ks;
-	vm_page_t m;
-	pt_entry_t *ptek, oldpte;
-
-	/* Bounds check */
-	if (pages <= 1)
-		pages = KSTACK_PAGES;
-	else if (pages > KSTACK_MAX_PAGES)
-		pages = KSTACK_MAX_PAGES;
-
-	/*
-	 * allocate object for the kstack
-	 */
-	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
-	td->td_kstack_obj = ksobj;
-
-#ifdef KSTACK_GUARD
-	/* get a kernel virtual address for the kstack for this thread */
-	ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
-	if (ks == NULL)
-		panic("pmap_new_thread: kstack allocation failed");
-
-	/* Set the first page to be the unmapped guard page. */
-	ptek = pmap_pte(kernel_pmap, ks);
-	oldpte = *ptek;
-	*ptek = 0;
-	if (oldpte & PG_V)
-		pmap_invalidate_page(kernel_pmap, ks);
-	/* move to the next page, which is where the real stack starts. */
-	ks += PAGE_SIZE;
-	td->td_kstack = ks;
-	ptek++;
-#else
-	/* get a kernel virtual address for the kstack for this thread */
-	ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
-	if (ks == NULL)
-		panic("pmap_new_thread: kstack allocation failed");
-	td->td_kstack = ks;
-	ptek = pmap_pte(kernel_pmap, ks);
-#endif
-	/*
-	 * Knowing the number of pages allocated is useful when you
-	 * want to deallocate them.
-	 */
-	td->td_kstack_pages = pages;
-
-	/*
-	 * For the length of the stack, link in a real page of ram for each
-	 * page of stack.
-	 */
-	for (i = 0; i < pages; i++) {
-		/*
-		 * Get a kernel stack page
-		 */
-		m = vm_page_grab(ksobj, i,
-		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
-
-		/*
-		 * Enter the page into the kernel address space.
-		 */
-		oldpte = ptek[i];
-		ptek[i] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m))
-		    | PG_V;
-		if (oldpte & PG_V)
-			pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
-
-		vm_page_lock_queues();
-		vm_page_wakeup(m);
-		vm_page_flag_clear(m, PG_ZERO);
-		m->valid = VM_PAGE_BITS_ALL;
-		vm_page_unlock_queues();
-	}
-}
-
-/*
- * Dispose the kernel stack for a thread that has exited.
- * This routine directly impacts the exit perf of a thread.
- */
-void
-pmap_dispose_thread(td)
-	struct thread *td;
-{
-	int i;
-	int pages;
-	vm_object_t ksobj;
-	vm_offset_t ks;
-	vm_page_t m;
-
-	pages = td->td_kstack_pages;
-	ksobj = td->td_kstack_obj;
-	ks = td->td_kstack;
-
-	tlb_remove_pages(kernel_pmap, ks, pages);
-	for (i = 0; i < pages; i++) {
-		m = vm_page_lookup(ksobj, i);
-		if (m == NULL)
-			panic("pmap_dispose_thread: kstack already missing?");
-		vm_page_lock_queues();
-		vm_page_busy(m);
-		vm_page_unwire(m, 0);
-		vm_page_free(m);
-		vm_page_unlock_queues();
-	}
-
-	/*
-	 * Free the space that this stack was mapped to in the kernel
-	 * address map.
-	 */
-#ifdef KSTACK_GUARD
-	kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
-#else
-	kmem_free(kernel_map, ks, pages * PAGE_SIZE);
-#endif
-	vm_object_deallocate(ksobj);
-}
-
-/*
- * Set up a variable sized alternate kstack.
- */
-void
-pmap_new_altkstack(struct thread *td, int pages)
-{
-	/* shuffle the original stack */
-	td->td_altkstack_obj = td->td_kstack_obj;
-	td->td_altkstack = td->td_kstack;
-	td->td_altkstack_pages = td->td_kstack_pages;
-
-	pmap_new_thread(td, pages);
-}
-
-void
-pmap_dispose_altkstack(td)
-	struct thread *td;
-{
-	pmap_dispose_thread(td);
-
-	/* restore the original kstack */
-	td->td_kstack = td->td_altkstack;
-	td->td_kstack_obj = td->td_altkstack_obj;
-	td->td_kstack_pages = td->td_altkstack_pages;
-	td->td_altkstack = 0;
-	td->td_altkstack_obj = NULL;
-	td->td_altkstack_pages = 0;
-}
-
-/*
- * Allow the kernel stack for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(td)
-	struct thread *td;
-{
-	int i;
-	int pages;
-	vm_object_t ksobj;
-	vm_offset_t ks;
-	vm_page_t m;
-
-	/*
-	 * Make sure we aren't fpcurthread.
-	 */
-	/*
-	alpha_fpstate_save(td, 1);
-	*/
-
-	pages = td->td_kstack_pages;
-	ksobj = td->td_kstack_obj;
-	ks = td->td_kstack;
-	for (i = 0; i < pages; i++) {
-		m = vm_page_lookup(ksobj, i);
-		if (m == NULL)
-			panic("pmap_swapout_thread: kstack already missing?");
-		vm_page_lock_queues();
-		vm_page_dirty(m);
-		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
-		pmap_kremove(ks + i * PAGE_SIZE);
-	}
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-pmap_swapin_thread(td)
-	struct thread *td;
-{
-	int i, rv;
-	int pages;
-	vm_object_t ksobj;
-	vm_offset_t ks;
-	vm_page_t m;
-
-	pages = td->td_kstack_pages;
-	ksobj = td->td_kstack_obj;
-	ks = td->td_kstack;
-	for (i = 0; i < pages; i++) {
-		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
-		if (m->valid != VM_PAGE_BITS_ALL) {
-			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
-			if (rv != VM_PAGER_OK)
-				panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
-			m = vm_page_lookup(ksobj, i);
-			m->valid = VM_PAGE_BITS_ALL;
-		}
-		vm_page_lock_queues();
-		vm_page_wire(m);
-		vm_page_wakeup(m);
-		vm_page_unlock_queues();
-	}
-}
-
-void
 pmap_pinit0(pmap)
 	struct pmap *pmap;
 {
@@ -1386,6 +1184,13 @@
 	return (void *) MIPS_PHYS_TO_KSEG0(pa - (i * PAGE_SIZE));
 }

+vm_page_t
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+{
+	pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+	return (NULL);
+}
+
 /*
  * pmap_object_init_pt preloads the ptes for a given object
  * into the specified pmap.  This eliminates the blast of soft
@@ -1394,24 +1199,12 @@
 void
 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
		    vm_object_t object, vm_pindex_t pindex,
-		    vm_size_t size, int limit)
+		    vm_size_t size)
 {
-	panic("pmap_object_init_pt unimplemented\n");
-}
-/*
- * pmap_prefault provides a quick way of clustering
- * pagefaults into a processes address space.  It is a "cousin"
- * of pmap_object_init_pt, except it runs at page fault time instead
- * of mmap time.
- */
-void
-pmap_prefault(pmap, addra, entry)
-	pmap_t pmap;
-	vm_offset_t addra;
-	vm_map_entry_t entry;
-{
-	panic("pmap_prefault unimplemented\n");
+	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	KASSERT(object->type == OBJT_DEVICE,
+	    ("pmap_object_init_pt: non-device object"));
 }

 /*
@@ -1716,6 +1509,24 @@
 }

 /*
+ * pmap_is_prefaultable:
+ *
+ *	Return whether or not the specified virtual address is eligible
+ *	for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
+{
+	pt_entry_t *pte;
+
+	pte = pmap_pte(pmap, va);
+	if (pte_valid(pte))
+		return (FALSE);
+	return (TRUE);
+}
+
+
+/*
  * Clear the modify bits on the specified physical page.
  */
 void
@@ -1888,5 +1699,3 @@

 	return addr;
 }
-
-

==== //depot/projects/mips/sys/mips/mips/sig_machdep.c#2 (text+ko) ====

@@ -26,9 +26,31 @@
  */
 #include
+#include
 #include
 #include
 #include
+#include
+#include
+
+/*
+ * Build siginfo_t for SA thread
+ */
+void
+cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
+{
+	struct proc *p;
+	struct thread *td;
+
+	td = curthread;
+	p = td->td_proc;
+	PROC_LOCK_ASSERT(p, MA_OWNED);
+
+	bzero(si, sizeof(*si));
+	si->si_signo = sig;
+	si->si_code = code;
+	/* XXXKSE fill other fields */
+}

 int
 sigreturn(struct thread *td, struct sigreturn_args *uap)

==== //depot/projects/mips/sys/mips/mips/support.S#8 (text+ko) ====

@@ -173,7 +173,9 @@
		base.  This function is safe to call during an interrupt con-
		text.

-	fuword()	Fetches a word of data from the user-space address base.
+	fuword()	Fetches a word (long) of data from the user-space address base.
+
+	fuword32()	Fetches a word (int, 32-bits) of data from the user-space address base.
  */

 /*
@@ -217,6 +219,16 @@
 END(fuword)

 /*
+ * fuword32(9)
+ *	int fuword32(const void *addr)
+ */
+ENTRY(fuword32)
+	li	v0, -1
+	jr	ra
+	nop
+END(fuword32)
+
+/*
  * Stubs for copy(9) XXX
	copyin()	Copies len bytes of data from the user-space address uaddr
			to the kernel-space address kaddr.
==== //depot/projects/mips/sys/mips/mips/tlb.c#12 (text+ko) ====

@@ -163,12 +163,13 @@
 	mips_wr_entryhi(ehi);
 	mips_tlbp();
 	i = mips_rd_index();
-	mips_wr_entrylo0(pte0 | PG_G);
-	mips_wr_entrylo1(pte1 | PG_G);
+	mips_wr_entrylo0(pte0);
+	mips_wr_entrylo1(pte1);
 	mips_wr_entryhi(ehi);
-	if (i < 0)
+	if (i < 0) {
+		tlb_invalidate_page(va);
 		mips_tlbwr();
-	else
+	} else
 		mips_tlbwi();
 }

==== //depot/projects/mips/sys/mips/mips/vm_machdep.c#7 (text+ko) ====

@@ -34,10 +34,11 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
-#include
 #include
 #include

@@ -76,32 +77,37 @@
 }

 void
-cpu_wait(struct proc *p)
+cpu_thread_exit(struct thread *td)
+{
+}
+
+void
+cpu_thread_clean(struct thread *td)
 {
 }

 void
-cpu_thread_exit(struct thread *td)
+cpu_thread_setup(struct thread *td)
 {
 }

 void
-cpu_thread_clean(struct thread *td)
+cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
 {
 }

 void
-cpu_thread_setup(struct thread *td)
+cpu_thread_swapin(struct thread *td)
 {
 }

 void
-cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
+cpu_thread_swapout(struct thread *td)
 {
 }

 void
-cpu_set_upcall(struct thread *td, void *pcb)
+cpu_set_upcall(struct thread *td, struct thread *td0)
 {
 }

@@ -110,6 +116,17 @@
 {
 }

+struct sf_buf *
+sf_buf_alloc(struct vm_page *m)
+{
+	return (NULL);
+}
+
+void
+sf_buf_free(void *addr, void *args)
+{
+}
+
 void
 swi_vm(void *m)
 {
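
A note on the atomic.h hunk above: that one-line change is a real bug fix, not
churn.  The wrapper macro declared a local "r" but never assigned it, so the
result of the compare-and-set was thrown away and callers received an
uninitialized value.  The fragment below is an illustrative sketch only of
roughly what the corrected wrapper expands to for one width; the
atomic_cmpset_acq_32 name, the uint32_t types, and the stand-in extern
declarations are assumptions made so the sketch is self-contained, not code
taken from the tree.

	/*
	 * Illustrative sketch only -- not part of the change above.
	 * Approximates the expansion of the corrected wrapper macro in
	 * atomic.h for a single (assumed 32-bit) width.
	 */
	#include <stdint.h>

	/* Stand-ins for primitives provided by the MIPS <machine/atomic.h>. */
	extern void mips_read_membar(void);
	extern int atomic_cmpset_32(volatile uint32_t *p, uint32_t old,
		    uint32_t val);

	static inline int
	atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t old, uint32_t val)
	{
		int r;

		mips_read_membar();
		/*
		 * The fix: keep the compare-and-set result.  Before the
		 * change, the call's return value was discarded and the
		 * uninitialized "r" was returned to the caller.
		 */
		r = atomic_cmpset_32(p, old, val);
		return (r);
	}

The barrier-then-cmpset ordering shown here simply mirrors what the diff
context shows for the wrapper; the substance of the fix is only the captured
return value.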