Date: Thu, 19 Feb 2009 14:25:40 GMT
From: Arnar Mar Sig <antab@FreeBSD.org>
To: Perforce Change Reviews <perforce@FreeBSD.org>
Subject: PERFORCE change 157933 for review
Message-ID: <200902191425.n1JEPe04066469@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=157933

Change 157933 by antab@antab_farm on 2009/02/19 14:25:27

	More AVR32 code.
	The kernel is now able to go through 5 kernel thread switches before
	giving up. It crashes somewhere in cpu_switch because the old kstack
	is no longer in the TLB; not sure why this doesn't happen before the
	fifth switch.

Affected files ...

.. //depot/projects/avr32/src/sys/avr32/avr32/cpu.c#5 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/exception.S#4 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/genassym.c#3 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/intr.c#4 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/locore.S#2 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/machdep.c#6 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#4 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/support.S#4 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/switch.S#3 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/tlb.c#3 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/trap.c#4 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/vm_machdep.c#5 edit
.. //depot/projects/avr32/src/sys/avr32/conf/NGW100#6 edit
.. //depot/projects/avr32/src/sys/avr32/include/atomic.h#3 edit
.. //depot/projects/avr32/src/sys/avr32/include/db_machdep.h#3 edit
.. //depot/projects/avr32/src/sys/avr32/include/intr.h#4 edit
.. //depot/projects/avr32/src/sys/avr32/include/param.h#2 edit
.. //depot/projects/avr32/src/sys/avr32/include/pcb.h#4 edit
.. //depot/projects/avr32/src/sys/avr32/include/pmap.h#2 edit
.. //depot/projects/avr32/src/sys/avr32/include/proc.h#3 edit
.. //depot/projects/avr32/src/sys/avr32/include/reg.h#5 edit
.. //depot/projects/avr32/src/sys/avr32/include/reg_ocd.h#1 add
.. //depot/projects/avr32/src/sys/avr32/include/tlb.h#3 edit
.. //depot/projects/avr32/src/sys/avr32/include/trap.h#2 edit
.. //depot/projects/avr32/src/sys/conf/Makefile.avr32#2 edit
.. //depot/projects/avr32/src/sys/conf/files.avr32#5 edit

Differences ...

==== //depot/projects/avr32/src/sys/avr32/avr32/cpu.c#5 (text+ko) ====

@@ -59,6 +59,7 @@
#include <machine/intr.h>
#include <machine/reg.h>
#include <machine/reg_sys.h>
+#include <machine/reg_ocd.h>
#include <machine/reg_intc.h>
#include <machine/reg_usart.h>
#include <machine/at32ap700x.h>
@@ -71,6 +72,18 @@
	/* Set exception vector */
	sysreg_write(EVBA, (uint32_t)&_evba);
	__asm__ __volatile__ ("csrf %0" : : "i"(AT32_SYS_SR_EM));
+
+#if 0 // defined(DDB)
+	/*
+	 * Enable Debug mode in monitor mode. Allow peripherals to run
+	 * while in debug mode so we can report tru uart.
+	 */
+	ocdreg_write(DC,
+	    bit_offset(OCD, DC, DBE) |
+	    bit_offset(OCD, DC, RID) |
+	    bit_offset(OCD, DC, MM));
+	__asm__ __volatile__ ("csrf %0" : : "i"(AT32_SYS_SR_DM));
+#endif
 }
 
 void
@@ -88,7 +101,7 @@
 void
 cpu_reset(void)
 {
-	avr32_impl();
+	ocdreg_write(DC, bit_offset(OCD, DC, RES));
 }
 
 /**

==== //depot/projects/avr32/src/sys/avr32/avr32/exception.S#4 (text+ko) ====

@@ -64,14 +64,6 @@
	.long	AT32AP700X_BASE + AT32AP700X_INTC_OFFSET + \
		AT32_INTC_ICR0 - (4 * num);
 
-#if 0
-#define IRQ(num) \
-	GLOBAL(intr_handle##num); \
-	sub r12, pc, (. 
- i##num); \ - bral panic; \ -i##num: .asciz "IRQ!"; - -#endif .section .text.evba,"ax",@progbits .align 2 @@ -79,11 +71,11 @@ .align 2 /* 0x00 Unrecoverable exception */ bral handle_critical .align 2 /* 0x04 TLB multiple hit */ - bral handle_critical + bral tlb_critical .align 2 /* 0x08 Bus error data fetch */ - bral handle_critical + bral handle_bus_data_fetch_error .align 2 /* 0x0C Bus error instruction fetch */ - bral handle_critical + bral handle_bus_instruction_fetch_error .align 2 /* 0x10 nmi */ bral handle_mni .align 2 /* 0x14 Instruction Address */ @@ -100,7 +92,7 @@ bral handle_illegal_opcode .align 2 /* 0x2C FPU */ bral handle_illegal_opcode -.align 2 /* 0x30 Coprocessor absent */ +.align 2 /* 0x30 Coprocessor absent */ bral handle_illegal_opcode .align 2 /* 0x34 Data Address (Read) */ bral handle_address_fault @@ -131,7 +123,10 @@ .section .text.evba.syscall /* 0x100 Supervisor call */ .global supervisor_call supervisor_call: - rete + PUSH_TRAPFRAME(SUP) + /* call C syscall handler */ + POP_TRAPFRAME(SUP) + rets /* later this should be done in assembly, but using C for now */ tlb_miss: @@ -144,10 +139,28 @@ rete handle_critical: + breakpoint + mov r12, 0 + rcall panic rete +tlb_critical: + breakpoint + rete + +handle_bus_data_fetch_error: + breakpoint + rete + +handle_bus_instruction_fetch_error: + breakpoint + rete + handle_mni: + PUSH_TRAPFRAME(NMI) + mov r12, sp rcall intr_handle_mni + POP_TRAPFRAME(NMI) rete handle_illegal_opcode: @@ -155,6 +168,7 @@ mfsr r12, AT32_SYS_ECR mov r11, sp rcall trap_handle_illegal_opcode + POP_TRAPFRAME(EX) rete handle_address_fault: @@ -166,6 +180,7 @@ rete handle_protection_fault: + breakpoint PUSH_TRAPFRAME(EX) mfsr r12, AT32_SYS_ECR mov r11, sp @@ -174,16 +189,20 @@ rete handle_dtlb_modified: + PUSH_TRAPFRAME(EX) + mfsr r12, AT32_SYS_ECR + mov r11, sp rcall trap_handle_dtlb_modified + POP_TRAPFRAME(EX) rete handle_breakpoint: - PUSH_TRAPFRAME(EX) + PUSH_TRAPFRAME(DBG) mov r12, AT32_SYS_ECR mov r11, sp rcall trap_handle_breakpoint - POP_TRAPFRAME(EX) - rete + POP_TRAPFRAME(DBG) + retd IRQ(0) IRQ(1) ==== //depot/projects/avr32/src/sys/avr32/avr32/genassym.c#3 (text+ko) ==== @@ -40,3 +40,7 @@ ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_PCB_SIZE, sizeof(struct trapframe)); +ASSYM(TD_KPTE, offsetof(struct thread, td_md.md_kpte)); +ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack)); +ASSYM(PMAP_ASID, offsetof(struct pmap, pm_asid)); +ASSYM(PMAP_PD, offsetof(struct pmap, pm_pd)); ==== //depot/projects/avr32/src/sys/avr32/avr32/intr.c#4 (text+ko) ==== @@ -66,7 +66,7 @@ } void -intr_handle_mni(void) +intr_handle_mni(struct trapframe *tf) { avr32_impl(); } ==== //depot/projects/avr32/src/sys/avr32/avr32/locore.S#2 (text+ko) ==== @@ -25,6 +25,7 @@ * $FreeBSD: $ */ +#include "opt_ddb.h" #include <sys/syscall.h> #include <machine/asm.h> #include <machine/reg_sys.h> @@ -49,6 +50,12 @@ cp.w r2, r3 brlo 1b +#ifdef DDB + /* Set first frame pointer */ + mov lr, 0 + mov r7, 0 +#endif + /* Store tags address */ lddpc r0, uboot_tags_addr st.w r0[0], r11 @@ -85,8 +92,8 @@ proc0_stack_ptr: .long proc0_stack_end -proc0_stack: +GLOBAL(proc0_stack) .space KSTACK_SIZE .align 4 -GLOBAL(proc0_stack_end) +proc0_stack_end: ==== //depot/projects/avr32/src/sys/avr32/avr32/machdep.c#6 (text+ko) ==== @@ -78,7 +78,7 @@ struct pcpu __pcpu; struct pcpu *pcpup = &__pcpu; struct pcb proc0_pcb; -extern vm_offset_t proc0_stack_end; +extern vm_offset_t proc0_stack; extern uint64_t clock_cpu_frequency; vm_offset_t 
phys_avail[10]; @@ -111,7 +111,7 @@ avr32_init_proc0() { proc_linkup(&proc0, &thread0); - thread0.td_kstack = proc0_stack_end; + thread0.td_kstack = proc0_stack; thread0.td_kstack_pages = KSTACK_PAGES - 1; thread0.td_pcb = &proc0_pcb; thread0.td_frame = &thread0.td_pcb->pcb_regs; @@ -272,7 +272,7 @@ void makectx(struct trapframe *tf, struct pcb *pcb) { - avr32_impl(); + bcopy(&tf->regs, &pcb->pcb_regs, sizeof(struct trapframe)); } u_int32_t ==== //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#4 (text+ko) ==== @@ -44,8 +44,6 @@ static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); static void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(void); -static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte); - static struct pmap kernel_pmap_store; @@ -62,7 +60,9 @@ static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; -pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va) { +pt_entry_t +*pmap_pte(pmap_t pmap, vm_offset_t va) +{ pt_entry_t *pdaddr; pdaddr = (pt_entry_t *)pmap->pm_pd[pd_index_from_va(va)]; @@ -72,31 +72,33 @@ return NULL; } -void pmap_bootstrap(void) { +void +pmap_bootstrap(void) +{ pt_entry_t *pagetables; int i, j; - // phys_avail should be set in uboot_parse_targs + /* phys_avail should be set in uboot_parse_targs */ virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_KERNEL_ADDRESS; - // Setup kernel pmap + /* Setup kernel pmap */ kernel_pmap = &kernel_pmap_store; PMAP_LOCK_INIT(kernel_pmap); kernel_pmap->pm_active = ~0; kernel_pmap->pm_asid = 0; kernel_pmap->pm_asid_generation = 0; - // Setup kernel page dir and table + /* Setup kernel page dir and table */ kernel_pmap->pm_pd = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE); pagetables = (pt_entry_t *)pmap_steal_memory(PAGE_SIZE * NKPT); for (i = 0, j = (virtual_avail >> PD_SHIFT); i < NKPT; i++, j++) { kernel_pmap->pm_pd[j] = (pd_entry_t)(pagetables + (i * NPTEPG)); } - // Enable paging + /* Enable paging */ tlb_flush(); - sysreg_write(PTBR, (uint32_t)kernel_pmap); + sysreg_write(PTBR, (uint32_t)kernel_pmap->pm_pd); sysreg_write(MMUCR, bit_offset(SYS, MMUCR, S) | bit_offset(SYS, MMUCR, E) | @@ -104,7 +106,9 @@ nop(); nop(); nop(); nop(); nop(); nop(); nop(); nop(); } -vm_offset_t pmap_steal_memory(vm_size_t size) { +vm_offset_t +pmap_steal_memory(vm_size_t size) +{ vm_size_t avail_size; vm_offset_t ret; @@ -121,7 +125,9 @@ return ret; } -void pmap_init(void) { +void +pmap_init(void) +{ pvzone = uma_zcreate("PV_ENTRY", sizeof(struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count; @@ -133,7 +139,9 @@ * The pmap_pinit0() function initializes the physical map pm, associated * with process 0, the first process created in the system. */ -void pmap_pinit0(pmap_t pmap) { +void +pmap_pinit0(pmap_t pmap) +{ PMAP_LOCK_INIT(pmap); pmap->pm_pd = kernel_pmap->pm_pd; pmap->pm_active = 0; @@ -148,9 +156,10 @@ * The pmap_pinit() function initializes the preallocated and zeroed structure pmap, * such as one in a vmspace structure. 
*/ -int pmap_pinit(pmap_t pmap) { +int +pmap_pinit(pmap_t pmap) +{ vm_page_t ptdpg; - PMAP_LOCK_INIT(pmap); /* allocate the page directory page */ @@ -158,12 +167,12 @@ VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO); - pmap->pm_pd = (pd_entry_t *)AVR32_PHYS_TO_P2(VM_PAGE_TO_PHYS(ptdpg)); - if ((ptdpg->flags & PG_ZERO) == 0) { - bzero(pmap->pm_pd, PAGE_SIZE); + pmap->pm_pd = (pd_entry_t *)AVR32_PHYS_TO_P1(VM_PAGE_TO_PHYS(ptdpg)); + if ((ptdpg->flags & PG_ZERO) == 0) { + bzero(pmap->pm_pd, PAGE_SIZE); } - pmap->pm_active = 0; + pmap->pm_active = 0; pmap->pm_asid = 0; pmap->pm_asid_generation = 0; TAILQ_INIT(&pmap->pm_pvlist); @@ -172,36 +181,57 @@ return(1); } -void pmap_activate(struct thread *td) { - avr32_impl(); +void +pmap_activate(struct thread *td) +{ + struct proc *p; + pmap_t pmap; + + p = td->td_proc; + pmap = vmspace_pmap(p->p_vmspace); + + pmap_asid_alloc(pmap); + PCPU_SET(curpmap, pmap); } -boolean_t pmap_is_modified(vm_page_t m) { +boolean_t +pmap_is_modified(vm_page_t m) +{ avr32_impl(); return (0); } -void pmap_clear_modify(vm_page_t m) { +void +pmap_clear_modify(vm_page_t m) +{ avr32_impl(); } -int pmap_ts_referenced(vm_page_t m) { +int +pmap_ts_referenced(vm_page_t m) +{ avr32_impl(); return (0); } -void pmap_clear_reference(vm_page_t m) { +void +pmap_clear_reference(vm_page_t m) +{ avr32_impl(); } -void pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) { +void +pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) +{ avr32_impl(); } /* * Add page to kernel pmap */ -void pmap_kenter(vm_offset_t va, vm_paddr_t pa) { +void +pmap_kenter(vm_offset_t va, vm_paddr_t pa) +{ pt_entry_t *ent; ent = pmap_pte(kernel_pmap, va); @@ -211,8 +241,6 @@ *ent = PTE_CACHEABLE | PTE_PERM_READ | PTE_PERM_WRITE; pfn_set(*ent, pa); - //printf("pmap_kenter: va: 0x%08X\tpa: 0x%08X\tent: 0x%08X\n", va, pa, *ent); - /* No need to do any tlb inserts, will just get a miss exception * when the page is needed */ } @@ -220,52 +248,70 @@ /* * Remove page from kernel pmap */ -void pmap_kremove(vm_offset_t va) { -/* pt_entry_t *ent; +void +pmap_kremove(vm_offset_t va) +{ + pt_entry_t *ent; ent = pmap_pte(kernel_pmap, va); *ent = 0; - cpu_tlb_flush();*/ + tlb_remove_entry(kernel_pmap, va); } /* * Extract the physical page address associated kernel virtual address. */ -vm_paddr_t pmap_kextract(vm_offset_t va) { +vm_paddr_t +pmap_kextract(vm_offset_t va) +{ return pmap_extract(kernel_pmap, va); } /* * Return whether or not the specified virtual address is elgible for prefault. 
*/ -boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { +boolean_t +pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) +{ avr32_impl(); return (0); } -void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { +void +pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) +{ avr32_impl(); } -void pmap_copy_page(vm_page_t src, vm_page_t dst) { +void +pmap_copy_page(vm_page_t src, vm_page_t dst) +{ avr32_impl(); } -void pmap_zero_page(vm_page_t m) { +void +pmap_zero_page(vm_page_t m) +{ vm_paddr_t phys = VM_PAGE_TO_PHYS(m); bzero((caddr_t)AVR32_PHYS_TO_P2(phys), PAGE_SIZE); } -void pmap_zero_page_area(vm_page_t m, int off, int size) { +void +pmap_zero_page_area(vm_page_t m, int off, int size) +{ avr32_impl(); } -void pmap_zero_page_idle(vm_page_t m) { +void +pmap_zero_page_idle(vm_page_t m) +{ avr32_impl(); } -void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { +void +pmap_qenter(vm_offset_t va, vm_page_t *m, int count) +{ int i; for (i = 0; i < count; i++) { @@ -274,14 +320,18 @@ } } -void pmap_qremove(vm_offset_t va, int count) { +void +pmap_qremove(vm_offset_t va, int count) +{ while (count-- > 0) { pmap_kremove(va); va += PAGE_SIZE; } } -void pmap_page_init(vm_page_t m) { +void +pmap_page_init(vm_page_t m) +{ TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; m->md.pv_flags = 0; @@ -290,7 +340,9 @@ /* * The pmap_growkernel() function grows the kernel virtual address space to the virtual address addr. */ -void pmap_growkernel(vm_offset_t addr) { +void +pmap_growkernel(vm_offset_t addr) +{ // Not really sure what to do here, need to look better into it, but the // kernel should have all the pages tables needed to grow within the P3 segment } @@ -299,11 +351,11 @@ * The pmap_map() function maps a range of physical addresses into kernel * virtual address (KVA) space, from start to end, with protection bits prot. */ -vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { +vm_offset_t +pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) +{ vm_offset_t va, sva; - //printf("pmap_map: virt: 0x%08x\tstart: 0x%08x\tend: 0x%08x\n", *virt, start, end); - va = sva = *virt; while (start < end) { pmap_kenter(va, start); @@ -321,18 +373,19 @@ * If wired is TRUE, then increment the wired count for the page as soon as * the mapping is inserted into pmap. */ -void pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, vm_prot_t prot, boolean_t wired) { +void +pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, + vm_prot_t prot, boolean_t wired) +{ vm_offset_t pa, opa; pt_entry_t *pte; pt_entry_t origpte, newpte; vm_page_t mpte, om; - //printf("pmap_enter 0x%08X: va: 0x%08x\taccess: 0x%08x\tm: 0x%08x\tprot: 0x%08X\twired: %d\n", pmap, va, access, m, prot, wired); vm_page_lock_queues(); PMAP_LOCK(pmap); - mpte = NULL; if (va < VM_MAXUSER_ADDRESS) { mpte = pmap_allocpte(pmap, va, M_WAITOK); @@ -448,7 +501,7 @@ *pte = newpte; } } - pmap_update_page(pmap, va, newpte); + tlb_update_entry(pmap, va, newpte); vm_page_unlock_queues(); PMAP_UNLOCK(pmap); @@ -457,7 +510,9 @@ /* * Same as pmap_enter, but does not have to validate inputs. 
*/ -void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) +{ avr32_impl(); } @@ -473,39 +528,56 @@ * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ -void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { +void +pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, + vm_page_t m_start, vm_prot_t prot) +{ avr32_impl(); } -void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { +void +pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) +{ avr32_impl(); } -void pmap_remove_all(vm_page_t m) { +void +pmap_remove_all(vm_page_t m) +{ avr32_impl(); } -void pmap_remove_pages(pmap_t pmap) { +void +pmap_remove_pages(pmap_t pmap) +{ avr32_impl(); } -void pmap_remove_write(vm_page_t m) { +void +pmap_remove_write(vm_page_t m) +{ avr32_impl(); } -vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { +vm_paddr_t +pmap_extract(pmap_t pmap, vm_offset_t va) +{ pt_entry_t *ent; ent = pmap_pte(pmap, va); return pfn_get(*ent); } -vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { +vm_page_t +pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) +{ avr32_impl(); return (0); } -boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { +boolean_t +pmap_page_exists_quick(pmap_t pmap, vm_page_t m) +{ avr32_impl(); return (0); } @@ -513,7 +585,9 @@ /* * Set the physical protection on the specified range of this map as requested. */ -void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t pr) { +void +pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t pr) +{ avr32_impl(); } @@ -521,7 +595,10 @@ * Increase the starting virtual address of the given mapping if a * different alignment might result in more superpage mappings. */ -void pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size) { +void +pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, + vm_offset_t *addr, vm_size_t size) +{ // Not sure what to do here, unimplemented in ARM avr32_debug("pmap_align_superpage: Needs implementing?\n"); } @@ -529,7 +606,9 @@ /* * Return the number of managed mappings to the given physical page that are wired */ -int pmap_page_wired_mappings(vm_page_t m) { +int +pmap_page_wired_mappings(vm_page_t m) +{ avr32_impl(); return (0); } @@ -539,7 +618,9 @@ * addr in the physical map pmap is resident in physical memory. It is the * machine-dependent interface used by the mincore(2) system call. */ -int pmap_mincore(pmap_t pmap, vm_offset_t addr) { +int +pmap_mincore(pmap_t pmap, vm_offset_t addr) +{ avr32_impl(); return (0); } @@ -549,11 +630,16 @@ * processor address space. Note that some shortcuts * are taken, but the code works. */ -void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { +void +pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, + vm_pindex_t pindex, vm_size_t size) +{ avr32_impl(); } -static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) { +static vm_page_t +_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) +{ vm_offset_t ptepa; vm_page_t m; int req; @@ -592,13 +678,16 @@ * isn't already there. 
*/ pmap->pm_stats.resident_count++; - ptepa = VM_PAGE_TO_PHYS(m); + ptepa = VM_PAGE_TO_PHYS(m); + printf("****: page: %x\n", ptepa); pmap->pm_pd[ptepindex] = (pd_entry_t)ptepa; - + avr32_impl(); return m; } -static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) { +static vm_page_t +pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) +{ unsigned int pdindex; pd_entry_t pd; vm_page_t page; @@ -620,7 +709,9 @@ return page; } -static int page_is_managed(vm_offset_t pa) { +static int +page_is_managed(vm_offset_t pa) +{ vm_page_t m; m = PHYS_TO_VM_PAGE(pa); @@ -630,7 +721,10 @@ return 0; } -static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m, boolean_t wired) { +static void +pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m, + boolean_t wired) +{ pv_entry_t pv; pv = get_pv_entry(); @@ -649,7 +743,9 @@ m->md.pv_list_count++; } -static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va) { +static void +pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va) +{ pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); @@ -679,7 +775,9 @@ /* * free the pv_entry back to the free list */ -static void free_pv_entry(pv_entry_t pv) { +static void +free_pv_entry(pv_entry_t pv) +{ pv_entry_count--; uma_zfree(pvzone, pv); } @@ -690,7 +788,9 @@ * the memory allocation is performed bypassing the malloc code * because of the possibility of allocations at interrupt time. */ -static pv_entry_t get_pv_entry(void) { +static pv_entry_t +get_pv_entry(void) +{ pv_entry_count++; if (pv_entry_count > pv_entry_high_water) { wakeup(&vm_pages_needed); @@ -698,41 +798,37 @@ return uma_zalloc(pvzone, M_NOWAIT); } -static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte) { - uint32_t tlbehi, mmucr; - - if (pmap->pm_asid_generation != PCPU_GET(asid_generation)) { - return; - } - - // Search tlb and update entry if found - tlbehi = va | pmap->pm_asid; - sysreg_write(TLBEHI, tlbehi); - - __builtin_tlbs(); - mmucr = sysreg_read(MMUCR); - - if (!(mmucr & bit_offset(SYS, MMUCR, N))) { - sysreg_write(TLBELO, (pte & bit_mask(SYS, TLBELO, SZ)) | PTE_DIRTY | PTE_SIZE_4K); - __builtin_tlbw(); - cpu_sync_pipeline(); +void +pmap_asid_alloc(pmap_t pmap) +{ + if (pmap->pm_asid_generation < PCPU_GET(asid_generation)) { + if (PCPU_GET(asid_next) == ASID_MAX) { + PCPU_INC(asid_generation); + PCPU_SET(asid_next, 0); + } + pmap->pm_asid = PCPU_GET(asid_next); + pmap->pm_asid_generation = PCPU_GET(asid_generation); + PCPU_INC(asid_next); } } - - /** * Called when we need to update the TLB */ -static int tlb_at; +static int tlb_at = KSTACK_PAGES; void pmap_tlb_miss(uint32_t ecr, uint32_t tlbear, uint32_t tlbehi) { - pmap_t pmap = (pmap_t)sysreg_read(PTBR); + pd_entry_t* pd = (pd_entry_t *)sysreg_read(PTBR); pt_entry_t *ent; + register_t mmucr; + + ent = (pt_entry_t *)pd[pd_index_from_va(tlbear)]; + if (ent) { + ent += pt_index_from_va(tlbear); + } - ent = pmap_pte(pmap, tlbear); if (!ent || !*ent) { printf("\nTLB miss: %x\n", ecr); - printf("pmap: %x\n", sysreg_read(PTBR)); + printf("pd: %x\n", sysreg_read(PTBR)); printf("TLBEAR: %x\n", tlbear); printf("TLBEHI: %x\n", tlbehi); printf("PC: %x\n", sysreg_read(RAR_EX)); @@ -741,33 +837,26 @@ panic("pmap_tlb_miss: address not in pmap\n"); } - // Generate ASID? 
- if (pmap->pm_asid_generation < PCPU_GET(asid_generation)) { - if (PCPU_GET(asid_next) == ASID_MAX) { - PCPU_INC(asid_generation); - PCPU_SET(asid_next, 0); - } - pmap->pm_asid = PCPU_GET(asid_next); - pmap->pm_asid_generation = PCPU_GET(asid_generation); - PCPU_INC(asid_next); - } + mmucr = sysreg_read(MMUCR); + mmucr &= ~bit_mask(SYS, MMUCR, DRP); + mmucr |= tlb_at << bit_shift(SYS, MMUCR, DRP); - // Insert into TLB - sysreg_write(TLBEHI, (tlbehi & bit_mask(SYS, TLBEHI, VPN)) | - bit_offset(SYS, TLBEHI, V) | - pmap->pm_asid); - sysreg_write(TLBELO, (*ent & ~bit_mask(SYS, TLBELO, SZ)) | PTE_DIRTY | PTE_SIZE_4K); - sysreg_write(MMUCR, bit_offset(SYS, MMUCR, S) | - bit_offset(SYS, MMUCR, E) | - bit_offset(SYS, MMUCR, I) | - (tlb_at << bit_shift(SYS, MMUCR, DRP))); + /* Insert into TLB */ + sysreg_write(TLBEHI, (tlbear & bit_mask(SYS, TLBEHI, VPN)) | + bit_offset(SYS, TLBEHI, V) | + (bit_mask(SYS, TLBEHI, ASID) & tlbehi)); + sysreg_write(TLBELO, (*ent & ~bit_mask(SYS, TLBELO, SZ)) | PTE_DIRTY | + PTE_SIZE_4K); + sysreg_write(MMUCR, mmucr); + nop(); - // Sync everything + /* Write and sync pipeline */ __builtin_tlbw(); cpu_sync_pipeline(); tlb_at++; if (tlb_at == TLB_SIZE) { - tlb_at = 0; + tlb_at = KSTACK_PAGES; } } + ==== //depot/projects/avr32/src/sys/avr32/avr32/support.S#4 (text+ko) ==== @@ -69,12 +69,14 @@ * r10: len */ ENTRY(bcopy) - mov r9, r10 - sub r9, 1 + cp r10, 0 + breq 2f + sub r10, 1 1: ld.ub r8, r12++ st.b r11++, r8 - sub r9, 1 + sub r10, 1 brge 1b +2: retal r12 /** @@ -83,11 +85,11 @@ * r11: len */ ENTRY(bzero) - mov r9, r12 - mov r8, 0 - sub r11, 1 >>> TRUNCATED FOR MAIL (1000 lines) <<<
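As context for the kstack/TLB issue mentioned in the change description: the reworked pmap_tlb_miss() above keeps a software replacement pointer (tlb_at) that now starts at KSTACK_PAGES and wraps back to KSTACK_PAGES instead of 0, so the lowest TLB slots are never chosen as victims by the miss handler. Below is a minimal user-space sketch of just that round-robin victim-selection policy; the TLB_SIZE and KSTACK_PAGES values and the tlb_entry type are illustrative stand-ins, not the real AVR32 MMU interface (which programs the DRP field of MMUCR and issues tlbw).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_SIZE	32	/* illustrative: number of TLB entries */
#define KSTACK_PAGES	2	/* illustrative: reserved low slots */

struct tlb_entry {
	uint32_t ehi;		/* virtual page number | ASID */
	uint32_t elo;		/* physical frame | permission bits */
};

static struct tlb_entry tlb[TLB_SIZE];
static int tlb_at = KSTACK_PAGES;	/* replacement pointer, starts past reserved slots */

/*
 * Install a translation in the current victim slot and advance the
 * round-robin pointer, wrapping back to KSTACK_PAGES (not 0) so the
 * reserved slots are never evicted.
 */
static void
tlb_insert(uint32_t ehi, uint32_t elo)
{
	tlb[tlb_at].ehi = ehi;
	tlb[tlb_at].elo = elo;

	tlb_at++;
	if (tlb_at == TLB_SIZE)
		tlb_at = KSTACK_PAGES;
}

int
main(void)
{
	int i;

	/* Insert twice as many translations as there are slots. */
	for (i = 0; i < 2 * TLB_SIZE; i++)
		tlb_insert((uint32_t)i << 12, (uint32_t)i);

	/* Slots 0 .. KSTACK_PAGES-1 are still empty; the rest rotated. */
	for (i = 0; i < TLB_SIZE; i++)
		printf("slot %2d: ehi=%08" PRIx32 " elo=%08" PRIx32 "\n",
		    i, tlb[i].ehi, tlb[i].elo);
	return (0);
}

In the kernel code the victim index is written into the DRP field of MMUCR before the tlbw instruction; whether reserving the first KSTACK_PAGES slots is enough to keep the old thread's kstack mapped across cpu_switch is exactly the open question raised in the change description.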