From owner-p4-projects@FreeBSD.ORG Mon Mar 3 11:27:28 2008
Return-Path:
Delivered-To: p4-projects@freebsd.org
Received: by hub.freebsd.org (Postfix, from userid 32767)
	id 00EB81065670; Mon, 3 Mar 2008 11:27:28 +0000 (UTC)
Delivered-To: perforce@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34])
	by hub.freebsd.org (Postfix) with ESMTP id B354C106566B
	for ; Mon, 3 Mar 2008 11:27:27 +0000 (UTC)
	(envelope-from raj@freebsd.org)
Received: from repoman.freebsd.org (repoman.freebsd.org [IPv6:2001:4f8:fff6::29])
	by mx1.freebsd.org (Postfix) with ESMTP id AD47B8FC18
	for ; Mon, 3 Mar 2008 11:27:27 +0000 (UTC)
	(envelope-from raj@freebsd.org)
Received: from repoman.freebsd.org (localhost [127.0.0.1])
	by repoman.freebsd.org (8.14.1/8.14.1) with ESMTP id m23BRRWv018201
	for ; Mon, 3 Mar 2008 11:27:27 GMT
	(envelope-from raj@freebsd.org)
Received: (from perforce@localhost)
	by repoman.freebsd.org (8.14.1/8.14.1/Submit) id m23BRQRV018196
	for perforce@freebsd.org; Mon, 3 Mar 2008 11:27:26 GMT
	(envelope-from raj@freebsd.org)
Date: Mon, 3 Mar 2008 11:27:26 GMT
Message-Id: <200803031127.m23BRQRV018196@repoman.freebsd.org>
X-Authentication-Warning: repoman.freebsd.org: perforce set sender to raj@freebsd.org using -f
From: Rafal Jaworowski
To: Perforce Change Reviews
Cc:
Subject: PERFORCE change 136727 for review
X-BeenThere: p4-projects@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: p4 projects tree changes
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Mon, 03 Mar 2008 11:27:28 -0000

http://perforce.freebsd.org/chv.cgi?CH=136727

Change 136727 by raj@raj_mimi on 2008/03/03 11:27:09

	Cleanup, style(9) adjustments.

Affected files ...

.. //depot/projects/e500/sys/dev/tsec/if_tsec.c#7 edit
.. //depot/projects/e500/sys/powerpc/booke/clock.c#3 edit
.. //depot/projects/e500/sys/powerpc/booke/copyinout.c#3 edit
.. //depot/projects/e500/sys/powerpc/booke/interrupt.c#7 edit
.. //depot/projects/e500/sys/powerpc/booke/locore.S#5 edit
.. //depot/projects/e500/sys/powerpc/booke/machdep.c#12 edit
.. //depot/projects/e500/sys/powerpc/booke/pmap.c#12 edit
.. //depot/projects/e500/sys/powerpc/booke/support.S#2 edit
.. //depot/projects/e500/sys/powerpc/booke/trap.c#7 edit
.. //depot/projects/e500/sys/powerpc/booke/trap_subr.S#6 edit
.. //depot/projects/e500/sys/powerpc/booke/uio_machdep.c#3 edit
.. //depot/projects/e500/sys/powerpc/booke/vm_machdep.c#4 edit
.. //depot/projects/e500/sys/powerpc/mpc85xx/ocpbus.c#7 edit
.. //depot/projects/e500/sys/powerpc/mpc85xx/pci_ocp.c#4 edit

Differences ...

==== //depot/projects/e500/sys/dev/tsec/if_tsec.c#7 (text+ko) ====

@@ -168,8 +168,8 @@
		uint8_t addr[6];
	} curmac;
	uint32_t a[6];
+	int count, i;
	char *cp;
-	int count, i;

	/* Use the currently programmed MAC address by default. */
	curmac.reg[0] = TSEC_READ(sc, TSEC_REG_MACSTNADDR1);
@@ -376,9 +376,9 @@
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
+	int i;
	char *macbufp;
	char *curmac;
-	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

@@ -836,6 +836,7 @@
static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{
+
	if (vaddr == NULL)
		return;

@@ -1247,15 +1248,17 @@
static int
tsec_suspend(device_t dev)
{
+
	/* TODO not implemented! */
-	return (0);
+	return (ENODEV);
}

static int
tsec_resume(device_t dev)
{
+
	/* TODO not implemented! */
-	return (0);
+	return (ENODEV);
}

static void

==== //depot/projects/e500/sys/powerpc/booke/clock.c#3 (text+ko) ====

@@ -262,6 +262,7 @@
void
cpu_stopprofclock(void)
{
+
}

/*

==== //depot/projects/e500/sys/powerpc/booke/copyinout.c#3 (text+ko) ====

@@ -71,10 +71,10 @@
int setfault(faultbuf);	/* defined in locore.S */

static int
-is_uaddr(const void *addr) {
+is_uaddr(const void *addr)
+{
+	int rv = ((vm_offset_t)addr <= VM_MAXUSER_ADDRESS) ? 1 : 0;

-	int rv = ((vm_offset_t)addr <= VM_MAXUSER_ADDRESS) ? 1 : 0;
-
	return rv;
}
@@ -213,6 +213,7 @@
int
suword32(void *addr, int32_t word)
{
+
	return (suword(addr, (long)word));
}

@@ -266,12 +267,14 @@
int32_t
fuword32(const void *addr)
{
+
	return ((int32_t)fuword(addr));
}

uint32_t
casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval)
{
+
	return (casuword((volatile u_long *)base, oldval, newval));
}

==== //depot/projects/e500/sys/powerpc/booke/interrupt.c#7 (text+ko) ====

@@ -26,7 +26,6 @@
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $FreeBSD: src/sys/powerpc/powerpc/interrupt.c,v 1.4 2005/01/07 02:29:20 imp Exp $
 */

/*
@@ -34,6 +33,7 @@
 */

#include 	/* RCS ID & Copyright macro defns */
+__FBSDID("$FreeBSD$");

#include 
#include 
@@ -96,6 +96,7 @@
void
powerpc_crit_interrupt(struct trapframe *framep)
{
+
	printf("powerpc_crit_interrupt: critical interrupt!\n");
	dump_frame(framep);
	trap(framep);
@@ -103,6 +104,7 @@
void
powerpc_mchk_interrupt(struct trapframe *framep)
{
+
	printf("powerpc_mchk_interrupt: machine check interrupt!\n");
	dump_frame(framep);
	trap(framep);

==== //depot/projects/e500/sys/powerpc/booke/locore.S#5 (text+ko) ====

@@ -23,7 +23,10 @@
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
 */
+
#include "assym.s"

#include 
@@ -94,7 +97,7 @@
/*
 * Initial cleanup
 */
-	li	%r16, 0x200	/* Keep debug exceptions for CW. */
+	li	%r16, 0x200	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r16
	isync
#if 0
@@ -345,7 +348,7 @@
	/* Select TLB1 */
	ori	%r3, %r3, 0x08
-
+
	isync
	tlbivax	0, %r3
	isync
@@ -468,7 +471,7 @@
	.align 4
GLOBAL(kstack0_space)
	.space	16384
-
+
/*
 * Compiled KERNBASE locations
 */

==== //depot/projects/e500/sys/powerpc/booke/machdep.c#12 (text+ko) ====

@@ -175,7 +175,7 @@
static int cacheline_size = CACHELINESIZE;

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
-	CTLFLAG_RD, &cacheline_size, 0, "");
+    CTLFLAG_RD, &cacheline_size, 0, "");

static void cpu_e500_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_e500_startup, NULL)
@@ -189,12 +189,14 @@
void
setPQL2(int *const size, int *const ways)
{
+
	return;
}

static void
cpu_e500_startup(void *dummy)
{
+
	/* Initialise the decrementer-based clock. */
	decr_init();

@@ -232,6 +234,7 @@
static char *
kenv_next(char *cp)
{
+
	if (cp != NULL) {
		while (*cp != 0)
			cp++;
@@ -245,8 +248,8 @@
void
dump_kenv(void)
{
+	int len;
	char *cp;
-	int len;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
@@ -297,6 +300,7 @@
void
print_kernel_section_addr(void)
{
+
	debugf("kernel image addresses:\n");
	debugf(" kernel_text = 0x%08x\n", (u_int32_t)kernel_text);
	debugf(" _etext (sdata) = 0x%08x\n", (u_int32_t)_etext);
@@ -329,7 +333,7 @@
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL) {
		bootinfo = (struct bootinfo *)preload_search_info(kmdp,
-			MODINFO_METADATA|MODINFOMD_BOOTINFO);
+		    MODINFO_METADATA|MODINFOMD_BOOTINFO);

		boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
		kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
@@ -425,7 +429,7 @@
	/* Finish setting up thread0. */
	thread0.td_kstack = (vm_offset_t)kstack0_space;
	thread0.td_pcb = (struct pcb *)
-		(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
+	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

@@ -511,6 +515,7 @@
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
+
	return (0);
}

@@ -518,6 +523,7 @@
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
+
	return (ENXIO);
}

@@ -531,6 +537,7 @@
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
+
	pcb->pcb_lr = tf->srr0;
	pcb->pcb_sp = tf->fixreg[1];
}
@@ -585,8 +592,7 @@
	pcb = td->td_pcb;
	tf = td->td_frame;

-	if (mcp->mc_vers != _MC_VERSION ||
-	    mcp->mc_len != sizeof(*mcp))
+	if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
@@ -596,25 +602,6 @@
	return (0);
}

-#if 0
-/* Build siginfo_t for SA thread. */
-void
-cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
-{
-	struct proc *p;
-	struct thread *td;
-
-	td = curthread;
-	p = td->td_proc;
-	PROC_LOCK_ASSERT(p, MA_OWNED);
-
-	bzero(si, sizeof(*si));
-	si->si_signo = sig;
-	si->si_code = code;
-	/* XXXKSE fill other fields */
-}
-#endif
-
int
sigreturn(struct thread *td, struct sigreturn_args *uap)
{
@@ -641,7 +628,7 @@
	PROC_UNLOCK(p);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
-		td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
+	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	return (EJUSTRETURN);
}
@@ -650,6 +637,7 @@
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{
+
	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif
@@ -715,6 +703,7 @@
void
cpu_halt(void)
{
+
	mtmsr(mfmsr() & ~(PSL_CE | PSL_EE | PSL_ME | PSL_DE));
	while (1);
}
@@ -732,6 +721,7 @@
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
+
	/* No debug registers on PowerPC */
	return (ENOSYS);
}
@@ -739,6 +729,7 @@
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
+
	/* No debug registers on PowerPC */
	return (ENOSYS);
}
@@ -746,6 +737,7 @@
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
+
	return (0);
}

@@ -830,7 +822,7 @@
	rndfsize = ((sizeof(sf) + 15) / 16) * 16;

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
-		catcher, sig);
+	    catcher, sig);

	/*
	 * Save user context
@@ -848,9 +840,9 @@
	 * Allocate and validate space for the signal handler context.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
-	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
+	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)((caddr_t)td->td_sigstk.ss_sp +
-			td->td_sigstk.ss_size - rndfsize);
+		    td->td_sigstk.ss_size - rndfsize);
	} else {
		sfp = (struct sigframe *)(tf->fixreg[1] - rndfsize);
	}
@@ -917,7 +909,7 @@
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
-		tf->srr0, tf->fixreg[1]);
+	    tf->srr0, tf->fixreg[1]);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
@@ -966,5 +958,6 @@
int
mem_valid(vm_offset_t addr, int len)
{
+
	return (1);
}

==== //depot/projects/e500/sys/powerpc/booke/pmap.c#12 (text+ko) ====

@@ -49,9 +49,10 @@
 * 0xfff0_0000 - 0xffff_ffff : I/O devices region
 */

-#include 
#include 
+__FBSDID("$FreeBSD$");

+#include 
#include 
#include 
#include 
@@ -133,7 +134,7 @@
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
-		vm_prot_t, boolean_t);
+    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
@@ -195,7 +196,7 @@
static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, u_int32_t);
static void __tlb1_set_entry(unsigned int, vm_offset_t, vm_offset_t,
-		vm_size_t, u_int32_t, unsigned int, unsigned int);
+    vm_size_t, u_int32_t, unsigned int, unsigned int);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

@@ -269,48 +270,54 @@
 * Kernel MMU interface
 */
static vm_offset_t mmu_booke_addr_hint(mmu_t, vm_object_t, vm_offset_t, vm_size_t);
-static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
-static void mmu_booke_clear_modify(mmu_t, vm_page_t);
-static void mmu_booke_clear_reference(mmu_t, vm_page_t);
-static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
-static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
-static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
-static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
-		vm_page_t, vm_prot_t);
-static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
+static void mmu_booke_clear_modify(mmu_t, vm_page_t);
+static void mmu_booke_clear_reference(mmu_t, vm_page_t);
+static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
+    vm_offset_t);
+static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
+static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, boolean_t);
+static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
+    vm_page_t, vm_prot_t);
+static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
-static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-static void mmu_booke_init(mmu_t);
+static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
+    vm_prot_t);
+static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
-static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
-	int);
-static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
-static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t,
-		vm_pindex_t, vm_size_t);
+static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
+    int);
+static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
+static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
+    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-static void mmu_booke_page_init(mmu_t, vm_page_t);
-static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
-static void mmu_booke_pinit(mmu_t, pmap_t);
-static void mmu_booke_pinit0(mmu_t, pmap_t);
-static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
-static void mmu_booke_release(mmu_t, pmap_t);
-static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-static void mmu_booke_remove_all(mmu_t, vm_page_t);
-static void mmu_booke_remove_write(mmu_t, vm_page_t);
-static void mmu_booke_zero_page(mmu_t, vm_page_t);
-static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
-static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
-static void mmu_booke_activate(mmu_t, struct thread *);
-static void mmu_booke_deactivate(mmu_t, struct thread *);
-static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
-static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
+static void mmu_booke_page_init(mmu_t, vm_page_t);
+static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
+static void mmu_booke_pinit(mmu_t, pmap_t);
+static void mmu_booke_pinit0(mmu_t, pmap_t);
+static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
+    vm_prot_t);
+static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
+static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
+static void mmu_booke_release(mmu_t, pmap_t);
+static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
+static void mmu_booke_remove_all(mmu_t, vm_page_t);
+static void mmu_booke_remove_write(mmu_t, vm_page_t);
+static void mmu_booke_zero_page(mmu_t, vm_page_t);
+static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
+static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
+static void mmu_booke_activate(mmu_t, struct thread *);
+static void mmu_booke_deactivate(mmu_t, struct thread *);
+static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
+static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
+static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
-static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
-static void mmu_booke_kremove(mmu_t, vm_offset_t);
+static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
+static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);

@@ -379,6 +386,7 @@
static __inline int
track_modified_needed(pmap_t pmap, vm_offset_t va)
{
+
	if (pmap == kernel_pmap)
		return ((va < kmi.clean_sva) || (va >= kmi.clean_eva));
	else
@@ -389,7 +397,7 @@
static __inline void
tlb0_get_tlbconf(void)
{
-	u_int32_t tlb0_cfg;
+	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_size = tlb0_cfg & TLBCFG_NENTRY_MASK;
@@ -441,6 +449,7 @@
static void
ptbl_buf_free(struct ptbl_buf *buf)
{
+
	//debugf("ptbl_buf_free: s (buf = 0x%08x)\n", (u_int32_t)buf);

	mtx_lock(&ptbl_buf_freelist_lock);
@@ -483,17 +492,17 @@
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
+	struct ptbl_buf *pbuf;
	unsigned int pidx;
-	struct ptbl_buf *pbuf;
	int i;

	//int su = (pmap == kernel_pmap);
	//debugf("ptbl_alloc: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-		("ptbl_alloc: invalid pdir_idx"));
+	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
-		("pte_alloc: valid ptbl entry exists!"));
+	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
@@ -515,8 +524,7 @@
	}

	/* Map in allocated pages into kernel_pmap. */
-	mmu_booke_qenter(mmu, (vm_offset_t)pmap->pm_pdir[pdir_idx], mtbl,
-	    PTBL_PAGES);
+	mmu_booke_qenter(mmu, (vm_offset_t)pmap->pm_pdir[pdir_idx], mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)pmap->pm_pdir[pdir_idx], PTBL_PAGES * PAGE_SIZE);
@@ -541,7 +549,7 @@
	//debugf("ptbl_free: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-		("ptbl_free: invalid pdir_idx"));
+	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

@@ -582,14 +590,15 @@
	//	(u_int32_t)pmap, su, pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-		("ptbl_unhold: invalid pdir_idx"));
+	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
-		("ptbl_unhold: unholding kernel ptbl!"));
+	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
-	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), ("ptbl_unhold: non kva ptbl"));
+	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
+	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
@@ -605,7 +614,7 @@
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);
-
+
		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}
@@ -629,9 +638,9 @@
	//debugf("ptbl_hold: s (pmap = 0x%08x pdir_idx = %d)\n", (u_int32_t)pmap, pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-		("ptbl_hold: invalid pdir_idx"));
+	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
-		("ptbl_hold: holding kernel ptbl!"));
+	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

@@ -655,8 +664,7 @@
	debugf("pv_alloc: s\n");

	pv_entry_count++;
-	if ((pv_entry_count > pv_entry_high_water) &&
-	    (pagedaemon_waken == 0)) {
+	if ((pv_entry_count > pv_entry_high_water) && (pagedaemon_waken == 0)) {
		pagedaemon_waken = 1;
		wakeup (&vm_pages_needed);
	}
@@ -778,7 +786,7 @@
	}

	/* Referenced pages. */
-	if (PTE_ISREFERENCED(pte))
+	if (PTE_ISREFERENCED(pte))
		vm_page_flag_set(m, PG_REFERENCED);

	/* Remove pv_entry from pv_list. */
@@ -1044,7 +1052,7 @@
		if (sz == 0) {
empty:
			memmove(mp, mp + 1,
-				(cnt - (mp - availmem_regions)) * sizeof(*mp));
+			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
@@ -1076,16 +1084,17 @@
	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
-		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", availmem_regions[i].mr_start,
-		    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
-		    availmem_regions[i].mr_size);
+		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
+		    availmem_regions[i].mr_start,
+		    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
+		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] = availmem_regions[i].mr_start +
-					hwphyssz - physsz;
+				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
@@ -1093,7 +1102,8 @@
		}

		phys_avail[j] = availmem_regions[i].mr_start;
-		phys_avail[j + 1] = availmem_regions[i].mr_start + availmem_regions[i].mr_size;
+		phys_avail[j + 1] = availmem_regions[i].mr_start +
+		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
@@ -1117,13 +1127,13 @@
	debugf("kernel_pmap = 0x%08x\n", (u_int32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_kptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
-		kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
+	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
-	for (i = 0; i < kernel_ptbls; i++) {
+	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
-			(pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
-	}
+		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
+
	kernel_pmap->pm_tid = KERNEL_TID;
	kernel_pmap->pm_active = ~0;

@@ -1161,7 +1171,8 @@
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
-	return pte_vatopa(mmu, kernel_pmap, va);
+
+	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
@@ -1182,7 +1193,7 @@
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
-		NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
@@ -1316,7 +1327,7 @@
	//debugf("mmu_booke_kremove: s (va = 0x%08x)\n", va);

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)),
-		("mmu_booke_kremove: invalid va"));
+	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

@@ -1353,6 +1364,7 @@
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
+
	//struct thread *td;
	//struct proc *p;

@@ -1382,6 +1394,7 @@
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{
+
	//debugf("mmu_booke_release: s\n");

	PMAP_LOCK_DESTROY(pmap);
@@ -1404,7 +1417,7 @@
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
-		vm_prot_t prot, boolean_t wired)
+    vm_prot_t prot, boolean_t wired)
{
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
@@ -1415,7 +1428,7 @@

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
-		vm_prot_t prot, boolean_t wired)
+    vm_prot_t prot, boolean_t wired)
{
	pte_t *pte;
	vm_paddr_t pa;
@@ -1446,7 +1459,7 @@
	 * changed, must be protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
-	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
+	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		//debugf("mmu_booke_enter_locked: update\n");

@@ -1465,8 +1478,7 @@

		/* Save the old bits and clear the ones we're interested in. */
		flags = pte->flags;
-		pte->flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX |
-		    PTE_MODIFIED);
+		pte->flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
@@ -1571,7 +1583,7 @@
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
-		vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
+    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;
@@ -1589,13 +1601,14 @@

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
-		vm_prot_t prot)
+    vm_prot_t prot)
{
+
	//debugf("mmu_booke_enter_quick: s\n");

	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
-		prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	PMAP_UNLOCK(pmap);

	//debugf("mmu_booke_enter_quick e\n");
@@ -1619,10 +1632,10 @@

	if (su) {
		KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)),
-				("mmu_booke_enter: kernel pmap, non kernel va"));
+		    ("mmu_booke_enter: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
-				("mmu_booke_enter: user pmap, non user va"));
+		    ("mmu_booke_enter: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
@@ -1690,7 +1703,7 @@
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
-		vm_offset_t pa_end, int prot)
+    vm_offset_t pa_end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;
@@ -1764,8 +1777,9 @@
 */
static void
mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
-		vm_size_t len, vm_offset_t src_addr)
+    vm_size_t len, vm_offset_t src_addr)
{
+
}

/*
@@ -1773,7 +1787,7 @@
 */
static void
mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
-		vm_prot_t prot)
+    vm_prot_t prot)
{
	vm_offset_t va;
	vm_page_t m;
@@ -1805,7 +1819,8 @@
				vm_page_flag_set(m, PG_REFERENCED);

				/* Flush mapping from TLB0. */
-				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | PTE_REFERENCED);
+				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
+				    PTE_REFERENCED);
				tlb0_flush_entry(pmap, va);
			}
		}
@@ -1836,16 +1851,18 @@

			/* Handle modified pages. */
			if (PTE_ISMODIFIED(pte)) {
-				if (track_modified_needed(pv->pv_pmap, pv->pv_va))
+				if (track_modified_needed(pv->pv_pmap,
+				    pv->pv_va))
					vm_page_dirty(m);
			}

			/* Referenced pages. */
-			if (PTE_ISREFERENCED(pte))
+			if (PTE_ISREFERENCED(pte))
				vm_page_flag_set(m, PG_REFERENCED);

			/* Flush mapping from TLB0. */
-			pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | PTE_REFERENCED);
+			pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
+			    PTE_REFERENCED);
			tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
		}
	}
@@ -1882,7 +1899,7 @@
 */
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
-		vm_prot_t prot)
+    vm_prot_t prot)
{
	pte_t *pte;
	vm_page_t m;
@@ -1916,6 +1933,7 @@
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
{
+
	TAILQ_INIT(&m->md.pv_list);
}

@@ -1951,6 +1969,7 @@
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{
+
	//debugf("mmu_booke_zero_page: s\n");
	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
	//debugf("mmu_booke_zero_page: e\n");
@@ -2057,6 +2076,7 @@
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{
+
	return (FALSE);
}

@@ -2080,7 +2100,8 @@
			goto make_sure_to_unlock;

		if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
-			pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | PTE_REFERENCED);
+			pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
+			    PTE_REFERENCED);
			tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
		}
	}
@@ -2310,11 +2331,11 @@
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
-		vm_object_t object, vm_pindex_t pindex, vm_size_t size)
+    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
-		("mmu_booke_object_init_pt: non-device object"));
+	    ("mmu_booke_object_init_pt: non-device object"));
}

/*
@@ -2323,14 +2344,16 @@
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{
+
	TODO;
	return (0);
}

static vm_offset_t
mmu_booke_addr_hint(mmu_t mmu, vm_object_t object, vm_offset_t va,
-		vm_size_t size)
+    vm_size_t size)
{
+
	return (va);
}

@@ -2419,7 +2442,7 @@
	pmap->pm_tid = tid;

	//debugf("tid_alloc: e (%02d next = %02d)\n", tid, next_tid);
-	return tid;
+	return (tid);
}

#if 0
@@ -2475,8 +2498,7 @@
/**************************************************************************/

static void
-tlb_print_entry(int i, u_int32_t mas1,
-    u_int32_t mas2, u_int32_t mas3, u_int32_t mas7)
+tlb_print_entry(int i, u_int32_t mas1, u_int32_t mas2, u_int32_t mas3, u_int32_t mas7)
{
	int as;
	char desc[3];

>>> TRUNCATED FOR MAIL (1000 lines) <<<