Date: Tue, 3 Jun 2003 00:04:04 -0700 (PDT)
From: Juli Mallett <jmallett@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 32460 for review
Message-ID: <200306030704.h53744WG046470@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=32460

Change 32460 by jmallett@jmallett_dalek on 2003/06/03 00:03:33

	Simplify into a single level page table, which would really like
	to be in tlb.c and have clean hooks here.  Remove a lot of stuff
	related to pagetables being done like on the Alpha.  Catch up
	with the PTE / TLB format for MIPS.  Put in a function which,
	again, belongs in tlb.c, for servicing a TLBMod exception.  This
	would work but for TLBUpdate being seemingly broken.  Marking
	newly entered pages as dirty gets a bit further.  Various hooks
	into ASID stuff.

Affected files ...

.. //depot/projects/mips/sys/mips/mips/pmap.c#15 edit

Differences ...

==== //depot/projects/mips/sys/mips/mips/pmap.c#15 (text+ko) ====

@@ -74,81 +74,14 @@
  */

 /*
- * Notes for alpha pmap.
- *
- * On alpha, pm_pdeobj will hold lev1, lev2 and lev3 page tables.
- * Indices from 0 to NUSERLEV3MAPS-1 will map user lev3 page tables,
- * indices from NUSERLEV3MAPS to NUSERLEV3MAPS+NUSERLEV2MAPS-1 will
- * map user lev2 page tables and index NUSERLEV3MAPS+NUSERLEV2MAPS
- * will map the lev1 page table.  The lev1 table will self map at
- * address VADDR(PTLEV1I,0,0).
- *
- * The vm_object kptobj holds the kernel page tables on i386 (62 or 63
- * of them, depending on whether the system is SMP).  On alpha, kptobj
- * will hold the lev3 and lev2 page tables for K1SEG.  Indices 0 to
- * NKLEV3MAPS-1 will map kernel lev3 page tables and indices
- * NKLEV3MAPS to NKLEV3MAPS+NKLEV2MAPS will map lev2 page tables.  (XXX
- * should the kernel Lev1map be inserted into this object?).
- *
- * pvtmmap is not needed for alpha since K0SEG maps all of physical
- * memory.
- *
- *
- * alpha virtual memory map:
- *
- *
- *	Address			Lev1 index
- *
- *			---------------------------------
- * 0000000000000000	|			|	0
- *			|			|
- *			|			|
- *			|			|
- *			|			|
- *			---			---
- *			    User space (USEG)
- *			---			---
- *			|			|
- *			|			|
- *			|			|
- *			|			|
- * 000003ffffffffff	|			|	511=UMAXLEV1I
- *			---------------------------------
- * fffffc0000000000	|			|	512=K0SEGLEV1I
- *			| Kernel code/data/bss	|
- *			|			|
- *			|			|
- *			|			|
- *			---			---
- *				K0SEG
- *			---			---
- *			|			|
- *			| 1-1 physical/virtual	|
- *			|			|
- *			|			|
- * fffffdffffffffff	|			|
- *			---------------------------------
- * fffffe0000000000	|			|	768=K1SEGLEV1I
- *			| Kernel dynamic data	|
- *			|			|
- *			|			|
- *			|			|
- *			---			---
- *				K1SEG
- *			---			---
- *			|			|
- *			| mapped by ptes	|
- *			|			|
- *			|			|
- * fffffff7ffffffff	|			|
- *			---------------------------------
- * fffffffe00000000	|			|	1023=PTLEV1I
- *			| PTmap (pte self map)	|
- * ffffffffffffffff	|			|
- *			---------------------------------
- *
- * On the MIPS, K0SEG here is KSEG0, and K1SEG is XKSEG.
- *
+ * Notes on MIPS pmap:
+ *	o) Unlike the Alpha, we don't put pages directly into the TLB.
+ *	   We use a PFN (which can be hidden by using PTE_TO_PA or
+ *	   PA_TO_PTE) which is the TLB's idea of a page, more or less.
+ *	   Don't be confused by PageMask, though, that just defines
+ *	   how big of an area each mapping masks.  The addresses we
+ *	   take and put into the TLB are physical addresses, converted
+ *	   to a PFN.
  * XXX Should we use XKPHYS instead of KSEG0 ?
  */

@@ -182,22 +115,12 @@
 #include <machine/md_var.h>

 /*
- * Map MIPS_PG definitions to PG ones.
+ * The joy of indexing.
+ *
+ * User addresses don't have the bits set that XKSEG has, best way to
+ * index the page table is to remove those bits, and get a page number.
  */
-#define	PG_D		MIPS3_PG_D
-#define	PG_G		MIPS3_PG_G
-#define	PG_V		MIPS3_PG_V
-/* SW bits. */
-#define	PG_W		MIPS3_PG_WIRED
-#define	PG_MANAGED	MIPS3_PG_M
-#define	PG_RO		MIPS3_PG_RO
-#define	PG_PROT		(PG_D | PG_RO)
-
-/*
- * Handy macros for PTE<->PFN.
- */
-#define	MIPS_PTE_TO_PFN(pte)	((pte) >> 32)
-#define	MIPS_PTE_FROM_PFN(pfn)	((pfn) << 32)
+#define	pmap_index(va)	(((va) & ~VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT)

 #ifndef PMAP_SHPGPERPROC
 #define PMAP_SHPGPERPROC 200
@@ -221,21 +144,13 @@
 #endif

 /*
- * Some macros for manipulating virtual addresses
- */
-#define	MIPS_L1SIZE	(1L << MIPS_L1SHIFT)
-#define	MIPS_L2SIZE	(1L << MIPS_L2SHIFT)
-
-#define	mips_l1trunc(va)	((va) & ~(MIPS_L1SIZE-1))
-#define	mips_l2trunc(va)	((va) & ~(MIPS_L2SIZE-1))
-
-/*
  * Get PDEs and PTEs for user/kernel address space
  */
+#define	pmap_pte_ro(pte)	((*(pte) & PG_RO) != 0)
 #define	pmap_pte_w(pte)		((*(pte) & PG_W) != 0)
-#define	pmap_pte_managed(pte)	((*(pte) & PG_MANAGED) != 0)
+#define	pmap_pte_managed(pte)	((*(pte) & PG_M) != 0)
 #define	pmap_pte_v(pte)		((*(pte) & PG_V) != 0)
-#define	pmap_pte_pa(pte)	ptob(MIPS_PTE_TO_PFN(*(pte)))
+#define	pmap_pte_pa(pte)	MIPS_PTE_TO_PA(*(pte))
 #define	pmap_pte_prot(pte)	(*(pte) & PG_PROT)

 #define	pmap_pte_set_w(pte, v)	((v)?(*pte |= PG_W):(*pte &= ~PG_W))
@@ -259,14 +174,10 @@
  */
 #define	PTMASK		((1 << MIPS_PTSHIFT) - 1)

-#define	pmap_lev1_index(va)	(((va) >> MIPS_L1SHIFT) & PTMASK)
-#define	pmap_lev2_index(va)	(((va) >> MIPS_L2SHIFT) & PTMASK)
-#define	pmap_lev3_index(va)	(((va) >> MIPS_L3SHIFT) & PTMASK)
-
 /*
  * Given a physical address, construct a pte
  */
-#define	pmap_phys_to_pte(pa)	MIPS_PTE_FROM_PFN(btop(pa))
+#define	pmap_phys_to_pte(pa)	MIPS_PA_TO_PFN(pa)

 /*
  * Given a page frame number, construct a k0seg va
@@ -276,46 +187,10 @@
 /*
  * Given a pte, construct a k0seg va
  */
-#define	pmap_k0seg_to_pte(va)	MIPS_PTE_FROM_PFN(pmap_k0seg_to_pfn(va))
+#define	pmap_k0seg_to_pte(va)	MIPS_PA_TO_PFN(pmap_k0seg_to_pfn(va))

-/*
- * Lev1map:
- *
- *	Kernel level 1 page table.  This maps all kernel level 2
- *	page table pages, and is used as a template for all user
- *	pmap level 1 page tables.  When a new user level 1 page
- *	table is allocated, all Lev1map PTEs for kernel addresses
- *	are copied to the new map.
- *
- * Lev2map:
- *
- *	Initial set of kernel level 2 page table pages.  These
- *	map the kernel level 3 page table pages.  As kernel
- *	level 3 page table pages are added, more level 2 page
- *	table pages may be added to map them.  These pages are
- *	never freed.
- *
- * Lev3map:
- *
- *	Initial set of kernel level 3 page table pages.  These
- *	map pages in K1SEG.  More level 3 page table pages may
- *	be added at run-time if additional K1SEG address space
- *	is required.  These pages are never freed.
- *
- * Lev2mapsize:
- *
- *	Number of entries in the initial Lev2map.
- *
- * Lev3mapsize:
- *
- *	Number of entries in the initial Lev3map.
- *
- * NOTE: When mappings are inserted into the kernel pmap, all
- * level 2 and level 3 page table pages must already be allocated
- * and mapped into the parent page table.
- */
-pt_entry_t	*Lev1map, *Lev2map, *Lev3map;
-vm_size_t	Lev2mapsize, Lev3mapsize;
+pt_entry_t *kptmap;
+vm_size_t kptsize;

 /*
  * Statically allocated kernel pmap
@@ -330,7 +205,6 @@
 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

 static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
-static int nklev3, nklev2;
 vm_offset_t kernel_vm_end;

 struct msgbuf *msgbufp;
@@ -353,80 +227,37 @@
 static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(void);
-static vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va,
-    vm_page_t m, vm_page_t mpte);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t* ptq, vm_offset_t sva);
 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
 static int pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
		vm_page_t mpte, vm_page_t m);
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
-static int pmap_release_free_page(pmap_t pmap, vm_page_t p);
-static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
-static vm_page_t pmap_page_lookup(vm_object_t object, vm_pindex_t pindex);
-static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
-#ifdef SMP
-static void pmap_invalidate_page_action(void *arg);
-static void pmap_invalidate_all_action(void *arg);
-#endif
-
-
 /*
- * Routine:	pmap_lev1pte
- * Function:
- *	Extract the level 1 page table entry associated
- *	with the given map/virtual_address pair.
+ * Routine:	pmap_pte
+ * Function:
+ *	Extract the page table entry associated with
+ *	the given map/virtual address pair.
  */
 static PMAP_INLINE pt_entry_t*
-pmap_lev1pte(pmap_t pmap, vm_offset_t va)
+pmap_pte(pmap_t pmap, vm_offset_t va)
 {
-	if (!pmap)
-		return 0;
-	return &pmap->pm_lev1[pmap_lev1_index(va)];
+	if (pmap == NULL || pmap->pm_lev1 == NULL)
+		return NULL;
+	return &pmap->pm_lev1[pmap_index(va)];
 }

-/*
- * Routine:	pmap_lev2pte
- * Function:
- *	Extract the level 2 page table entry associated
- *	with the given map/virtual_address pair.
- */
-static PMAP_INLINE pt_entry_t*
-pmap_lev2pte(pmap_t pmap, vm_offset_t va)
-{
-	pt_entry_t* l1pte;
-	pt_entry_t* l2map;
-
-	l1pte = pmap_lev1pte(pmap, va);
-	if (!pmap_pte_v(l1pte))
-		return 0;
-	l2map = (pt_entry_t*) MIPS_PHYS_TO_KSEG0(pmap_pte_pa(l1pte));
-	return &l2map[pmap_lev2_index(va)];
-}
-
 /*
- * Routine:	pmap_lev3pte
- * Function:
- *	Extract the level 3 page table entry associated
- *	with the given map/virtual_address pair.
+ * Routine:	pmap_steal_memory
+ * Function:
+ *	Steal memory from the phys_avail[] array, early
+ *	in the bootup process.  It returns zeroed memory
+ *	and looks around for a contiguous segment big
+ *	enough to fill the request, mapped into direct
+ *	memory.
  */
-static PMAP_INLINE pt_entry_t*
-pmap_lev3pte(pmap_t pmap, vm_offset_t va)
-{
-	pt_entry_t* l2pte;
-	pt_entry_t* l3map;
-
-	l2pte = pmap_lev2pte(pmap, va);
-	if (!l2pte || !pmap_pte_v(l2pte))
-		return 0;
-
-	l3map = (pt_entry_t*) MIPS_PHYS_TO_KSEG0(pmap_pte_pa(l2pte));
-	return &l3map[pmap_lev3_index(va)];
-}
-
 vm_offset_t
 pmap_steal_memory(vm_size_t size)
 {
@@ -459,95 +290,81 @@

 /*
  * Bootstrap the system enough to run with virtual memory.
+ *
+ * This sets up the ASID generator, message buffer, and page
+ * table.  XXX Probably want to move page table and related to
+ * a TLB-specific file.  It also sets up some very important
+ * values for MI VM code to run.
  */
 void
 pmap_bootstrap(void)
 {
-	pt_entry_t newpte;
+	pt_entry_t *pte;
 	int i;

 	/*
	 * Setup ASIDs. PCPU_GET(next_asid) and PCPU_GET(current_asidgen) are set
	 * up already.
	 */
-	pmap_maxasid = ASID_BITS * 8;
+	pmap_maxasid = MIPS3_TLB_NUM_ASIDS;

 	/*
	 * Steal the message buffer from the beginning of memory.
	 */
 	msgbufp = (struct msgbuf *) pmap_steal_memory(MSGBUF_SIZE);
+	msgbufinit(msgbufp, MSGBUF_SIZE);

 	/*
-	 * Allocate a level 1 map for the kernel.
+	 * Set up kernel page table.
 	 */
-	Lev1map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE);
-
-	/*
-	 * Allocate a level 2 map for the kernel
-	 */
-	Lev2map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE);
-	Lev2mapsize = PAGE_SIZE;
+	kptsize = physsz >> PAGE_SHIFT;
+	printf("Kernel page table indexes %ld %dK pages.\n",
+	    kptsize, PAGE_SIZE / 1024);
+	kptmap = (pt_entry_t *)
+	    pmap_steal_memory(kptsize * sizeof (pt_entry_t));

-	/*
-	 * Allocate some level 3 maps for the kernel
-	 */
-	Lev3map = (pt_entry_t*) pmap_steal_memory(PAGE_SIZE*NKPT);
-	Lev3mapsize = NKPT * PAGE_SIZE;
-
-	/* Map all of the level 2 maps */
-	for (i = 0; i < howmany(Lev2mapsize, PAGE_SIZE); i++) {
-		unsigned long pfn =
-			pmap_k0seg_to_pfn((vm_offset_t) Lev2map) + i;
-		newpte = MIPS_PTE_FROM_PFN(pfn);
-		newpte |= PG_V | PG_W;
-		Lev1map[K1SEGLEV1I + i] = newpte;
-	}
-
-	/*
-	 * Level 1 self mapping.
-	 *
-	 * Don't use ASID since the self-mapping is different for each
-	 * address space.
-	 */
-	newpte = pmap_k0seg_to_pte((vm_offset_t) Lev1map);
-	newpte |= PG_V | PG_G;
-	Lev1map[PTLEV1I] = newpte;
-
-	/* Map all of the level 3 maps */
-	for (i = 0; i < howmany(Lev3mapsize, PAGE_SIZE); i++) {
-		unsigned long pfn =
-			pmap_k0seg_to_pfn((vm_offset_t) Lev3map) + i;
-		newpte = MIPS_PTE_FROM_PFN(pfn);
-		newpte |= PG_V | PG_W;
-		Lev2map[i] = newpte;
-	}
-
 	avail_start = phys_avail[0];
 	for (i = 0; phys_avail[i+2]; i+= 2) ;
 	avail_end = phys_avail[i+1];

 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
-	virtual_end = VPTBASE;
+	virtual_end = virtual_avail + (avail_end - avail_start);
 	kernel_vm_end = virtual_end;

 	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
-	kernel_pmap->pm_lev1 = Lev1map;
+	kernel_pmap->pm_lev1 = kptmap;
 	kernel_pmap->pm_active = ~0;
 	kernel_pmap->pm_asid = 0;
 	kernel_pmap->pm_asidgen = 1;
 	TAILQ_INIT(&kernel_pmap->pm_pvlist);
-	nklev3 = NKPT;
-	nklev2 = 1;

 	/*
	 * Initialize list of pmaps.
	 */
 	LIST_INIT(&allpmaps);
 	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
+
+	/*
+	 * Lock in the current ASID, and set the global bit on each PTE.
+	 */
+	mips_wr_entryhi(kernel_pmap->pm_asid);
+	for (i = 0; i < kptsize; i++) {
+		pte = &kptmap[i];
+		*pte = PG_G;
+	}
+
+	/*
+	 * Clear the TLB.
+	 */
+	MIPS_TBIAP();
 }

+/*
+ * Perform a small allocation for UMA, used early in the boot process
+ * and possibly at other times.
+ */
 void *
 uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
@@ -583,6 +400,9 @@
 	return (va);
 }

+/*
+ * A free function for the above.
+ */
 void
 uma_small_free(void *mem, int size, u_int8_t flags)
 {
@@ -663,11 +483,6 @@
 		pmap->pm_asidgen = 0;
 }

-struct pmap_invalidate_page_arg {
-	pmap_t pmap;
-	vm_offset_t va;
-};
-
 static void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
@@ -760,7 +575,10 @@
  * Routine:	pmap_kextract
  * Function:
  *	Extract the physical page address associated
- *	kernel virtual address.
+ *	kernel virtual address.  If this is a direct-
+ *	mapped piece of memory, just decode that,
+ *	otherwise call pmap_extract which is pretty
+ *	fast.
  */
 vm_offset_t
 pmap_kextract(vm_offset_t va)
@@ -770,8 +588,7 @@
 	if (va >= MIPS_KSEG0_START && va <= MIPS_KSEG0_END)
 		pa = MIPS_KSEG0_TO_PHYS(va);
 	else
-		pa = ptob(MIPS_PTE_TO_PFN(*vtopte(va)))
-		    | (va & PAGE_MASK);
+		pa = pmap_extract(kernel_pmap, va);
 	return pa;
 }
@@ -786,9 +603,9 @@
 	register pmap_t pmap;
 	vm_offset_t va;
 {
-	pt_entry_t* pte = pmap_lev3pte(pmap, va);
+	pt_entry_t* pte = pmap_pte(pmap, va);
 	if (pte)
-		return ptob(MIPS_PTE_TO_PFN(*pte));
+		return MIPS_PTE_TO_PA(*pte) | (va & PAGE_MASK);
 	else
 		return 0;
 }
@@ -817,10 +634,10 @@
 		pt_entry_t npte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m[i])) | PG_V;
 		pt_entry_t opte;
-		pte = vtopte(tva);
+		pte = pmap_pte(kernel_pmap, tva);
 		opte = *pte;
 		*pte = npte;
-		if (opte)
+		if (opte & PG_V)
 			pmap_invalidate_page(kernel_pmap, tva);
 	}
 }
@@ -838,7 +655,7 @@
 	register pt_entry_t *pte;

 	for (i = 0; i < count; i++) {
-		pte = vtopte(va);
+		pte = pmap_pte(kernel_pmap, va);
 		*pte = 0;
 		pmap_invalidate_page(kernel_pmap, va);
 		va += PAGE_SIZE;
@@ -857,11 +674,12 @@
 	pt_entry_t npte, opte;

 	npte = pmap_phys_to_pte(pa) | PG_V;
-	pte = vtopte(va);
+	pte = pmap_pte(kernel_pmap, va);
 	opte = *pte;
 	*pte = npte;
-	if (opte)
+	if (opte & PG_V)
 		pmap_invalidate_page(kernel_pmap, va);
+	MachTLBUpdate(va & ~PAGE_MASK, npte);
 }

 /*
@@ -872,7 +690,7 @@
 {
 	register pt_entry_t *pte;

-	pte = vtopte(va);
+	pte = pmap_pte(kernel_pmap, va);
 	*pte = 0;
 	pmap_invalidate_page(kernel_pmap, va);
 }
@@ -895,22 +713,6 @@
 	return MIPS_PHYS_TO_KSEG0(start);
 }

-
-static vm_page_t
-pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
-{
-	vm_page_t m;
-retry:
-	m = vm_page_lookup(object, pindex);
-	if (m != NULL) {
-		vm_page_lock_queues();
-		if (vm_page_sleep_if_busy(m, FALSE, "pplookp"))
-			goto retry;
-		vm_page_unlock_queues();
-	}
-	return m;
-}
-
 #ifndef KSTACK_MAX_PAGES
 #define KSTACK_MAX_PAGES 32
 #endif
@@ -947,10 +749,10 @@
 		panic("pmap_new_thread: kstack allocation failed");

 	/* Set the first page to be the unmapped guard page. */
-	ptek = vtopte(ks);
+	ptek = pmap_pte(kernel_pmap, ks);
 	oldpte = *ptek;
 	*ptek = 0;
-	if (oldpte)
+	if (oldpte & PG_V)
 		pmap_invalidate_page(kernel_pmap, ks);
 	/* move to the next page, which is where the real stack starts. */
 	ks += PAGE_SIZE;
@@ -962,7 +764,7 @@
 	if (ks == NULL)
 		panic("pmap_new_thread: kstack allocation failed");
 	td->td_kstack = ks;
-	ptek = vtopte(ks);
+	ptek = pmap_pte(kernel_pmap, ks);
 #endif
 	/*
	 * Knowing the number of pages allocated is useful when you
@@ -987,7 +789,7 @@
 		oldpte = ptek[i];
 		ptek[i] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m))
			| PG_V;
-		if (oldpte)
+		if (oldpte & PG_V)
 			pmap_invalidate_page(kernel_pmap,
					     ks + i * PAGE_SIZE);
 		vm_page_lock_queues();
@@ -1016,7 +818,7 @@
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
 	ks = td->td_kstack;
-	ptek = vtopte(ks);
+	ptek = pmap_pte(kernel_pmap, ks);
 	for (i = 0; i < pages; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
@@ -1139,119 +941,12 @@
 	}
 }

-/***************************************************
- * Page table page management routines.....
- ***************************************************/
-
-/*
- * This routine unholds page table pages, and if the hold count
- * drops to zero, then it decrements the wire count.
- */
-static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
-{
-
-	while (vm_page_sleep_if_busy(m, FALSE, "pmuwpt"))
-		vm_page_lock_queues();
-
-	if (m->hold_count == 0) {
-		vm_offset_t pteva;
-		pt_entry_t* pte;
-
-		/*
-		 * unmap the page table page
-		 */
-		if (m->pindex >= NUSERLEV3MAPS) {
-			/* Level 2 page table */
-			pte = pmap_lev1pte(pmap, va);
-			pteva = (vm_offset_t) PTlev2 + ptob(m->pindex - NUSERLEV3MAPS);
-		} else {
-			/* Level 3 page table */
-			pte = pmap_lev2pte(pmap, va);
-			pteva = (vm_offset_t) PTmap + ptob(m->pindex);
-		}
-
-		*pte = 0;
-
-		if (m->pindex < NUSERLEV3MAPS) {
-			/* unhold the level 2 page table */
-			vm_page_t lev2pg;
-			lev2pg = vm_page_lookup(pmap->pm_pteobj,
-			    NUSERLEV3MAPS + pmap_lev1_index(va));
-			while (vm_page_sleep_if_busy(lev2pg, FALSE, "pulook"))
-				vm_page_lock_queues();
-			vm_page_unhold(lev2pg);
-			if (lev2pg->hold_count == 0)
-				_pmap_unwire_pte_hold(pmap, va, lev2pg);
-		}
-
-		--pmap->pm_stats.resident_count;
-		/*
-		 * Do a invltlb to make the invalidated mapping
-		 * take effect immediately.
-		 */
-		pmap_invalidate_page(pmap, pteva);
-
-		if (pmap->pm_ptphint == m)
-			pmap->pm_ptphint = NULL;
-
-		/*
-		 * If the page is finally unwired, simply free it.
-		 */
-		--m->wire_count;
-		if (m->wire_count == 0) {
-			vm_page_busy(m);
-			vm_page_free_zero(m);
-			--cnt.v_wire_count;
-		}
-		return 1;
-	}
-	return 0;
-}
-
-static PMAP_INLINE int
-pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
-{
-	vm_page_unhold(m);
-	if (m->hold_count == 0)
-		return _pmap_unwire_pte_hold(pmap, va, m);
-	else
-		return 0;
-}
-
-/*
- * After removing a page table entry, this routine is used to
- * conditionally free the page, and manage the hold/wire counts.
- */
-static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
-{
-	unsigned ptepindex;
-
-	if (va >= VM_MAXUSER_ADDRESS)
-		return 0;
-
-	if (mpte == NULL) {
-		ptepindex = (va >> MIPS_L2SHIFT);
-		if (pmap->pm_ptphint &&
-		    (pmap->pm_ptphint->pindex == ptepindex)) {
-			mpte = pmap->pm_ptphint;
-		} else {
-			while ((mpte = vm_page_lookup(pmap->pm_pteobj, ptepindex)) != NULL &&
-			    vm_page_sleep_if_busy(mpte, FALSE, "pulook"))
-				vm_page_lock_queues();
-			pmap->pm_ptphint = mpte;
-		}
-	}
-
-	return pmap_unwire_pte_hold(pmap, va, mpte);
-}
-
 void
 pmap_pinit0(pmap)
 	struct pmap *pmap;
 {
-	pmap->pm_lev1 = Lev1map;
+	pmap->pm_lev1 = kptmap;
 	pmap->pm_ptphint = NULL;
 	pmap->pm_active = 0;
 	pmap->pm_asid = 0;
@@ -1319,194 +1014,8 @@
 pmap_pinit2(pmap)
 	struct pmap *pmap;
 {
-	bcopy(PTlev1 + K1SEGLEV1I, pmap->pm_lev1 + K1SEGLEV1I, nklev2 * sizeof (pt_entry_t));
-}
-
-static int
-pmap_release_free_page(pmap_t pmap, vm_page_t p)
-{
-	pt_entry_t* pte;
-	pt_entry_t* l2map;
-
-	if (p->pindex >= NUSERLEV3MAPS + NUSERLEV2MAPS)
-		/* level 1 page table */
-		pte = &pmap->pm_lev1[PTLEV1I];
-	else if (p->pindex >= NUSERLEV3MAPS)
-		/* level 2 page table */
-		pte = &pmap->pm_lev1[p->pindex - NUSERLEV3MAPS];
-	else {
-		/* level 3 page table */
-		pte = &pmap->pm_lev1[p->pindex >> MIPS_PTSHIFT];
-		l2map = (pt_entry_t*) MIPS_PHYS_TO_KSEG0(pmap_pte_pa(pte));
-		pte = &l2map[p->pindex & ((1 << MIPS_PTSHIFT) - 1)];
-	}
-
-	/*
-	 * This code optimizes the case of freeing non-busy
-	 * page-table pages.  Those pages are zero now, and
-	 * might as well be placed directly into the zero queue.
-	 */
-	vm_page_lock_queues();
-	if (vm_page_sleep_if_busy(p, FALSE, "pmaprl"))
-		return 0;
-
-	vm_page_busy(p);
-
-	/*
-	 * Remove the page table page from the processes address space.
-	 */
-	*pte = 0;
-	pmap->pm_stats.resident_count--;
-
-#ifdef PMAP_DEBUG
-	if (p->hold_count) {
-		panic("pmap_release: freeing held page table page");
-	}
-#endif
-	/*
-	 * Level1 pages need to have the kernel
-	 * stuff cleared, so they can go into the zero queue also.
-	 */
-	if (p->pindex == NUSERLEV3MAPS + NUSERLEV2MAPS)
-		bzero(pmap->pm_lev1 + K1SEGLEV1I, nklev2 * sizeof (pt_entry_t));
-
-	if (pmap->pm_ptphint == p)
-		pmap->pm_ptphint = NULL;
-
-#ifdef PMAP_DEBUG
-	{
-		u_long *lp = (u_long*) MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(p));
-		u_long *ep = (u_long*) ((char*) lp + PAGE_SIZE);
-		for (; lp < ep; lp++)
-			if (*lp != 0)
-				panic("pmap_release_free_page: page not zero");
-	}
-#endif
-
-	p->wire_count--;
-	cnt.v_wire_count--;
-	vm_page_free_zero(p);
-	vm_page_unlock_queues();
-	return 1;
-}
-
-/*
- * this routine is called if the page table page is not
- * mapped correctly.
- */
-static vm_page_t
-_pmap_allocpte(pmap, ptepindex)
-	pmap_t pmap;
-	unsigned ptepindex;
-{
-	pt_entry_t* pte;
-	vm_offset_t ptepa;
-	vm_page_t m;
-
-	/*
-	 * Find or fabricate a new pagetable page
-	 */
-	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
-
-	KASSERT(m->queue == PQ_NONE,
-	    ("_pmap_allocpte: %p->queue != PQ_NONE", m));
-
-	/*
-	 * Increment the hold count for the page table page
-	 * (denoting a new mapping.)
-	 */
-	m->hold_count++;
-
-	/*
-	 * Map the pagetable page into the process address space, if
-	 * it isn't already there.
-	 */
-
-	pmap->pm_stats.resident_count++;
-
-	ptepa = VM_PAGE_TO_PHYS(m);
-
-	if (ptepindex >= NUSERLEV3MAPS) {
-		pte = &pmap->pm_lev1[ptepindex - NUSERLEV3MAPS];
-	} else {
-		int l1index = ptepindex >> MIPS_PTSHIFT;
-		pt_entry_t* l1pte = &pmap->pm_lev1[l1index];
-		pt_entry_t* l2map;
-		if (!pmap_pte_v(l1pte))
-			_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index);
-		else {
-			vm_page_t l2page =
-				pmap_page_lookup(pmap->pm_pteobj,
-				    NUSERLEV3MAPS + l1index);
-			l2page->hold_count++;
-		}
-		l2map = (pt_entry_t*) MIPS_PHYS_TO_KSEG0(pmap_pte_pa(l1pte));
-		pte = &l2map[ptepindex & ((1 << MIPS_PTSHIFT) - 1)];
-	}
-
-	*pte = pmap_phys_to_pte(ptepa) | PG_V;
-
-	/*
-	 * Set the page table hint
-	 */
-	pmap->pm_ptphint = m;
-
-	if ((m->flags & PG_ZERO) == 0)
-		bzero((caddr_t) MIPS_PHYS_TO_KSEG0(ptepa), PAGE_SIZE);
-
-	vm_page_lock_queues();
-	m->valid = VM_PAGE_BITS_ALL;
-	vm_page_flag_clear(m, PG_ZERO);
-	vm_page_wakeup(m);
-	vm_page_unlock_queues();
-
-	return m;
-}
-
-static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va)
-{
-	unsigned ptepindex;
-	pt_entry_t* lev2pte;
-	vm_page_t m;
-
-	/*
-	 * Calculate pagetable page index
-	 */
-	ptepindex = va >> (PAGE_SHIFT + MIPS_PTSHIFT);
-
-	/*
-	 * Get the level2 entry
-	 */
-	lev2pte = pmap_lev2pte(pmap, va);
-
-	/*
-	 * If the page table page is mapped, we just increment the
-	 * hold count, and activate it.
-	 */
-	if (lev2pte && pmap_pte_v(lev2pte)) {
-		/*
-		 * In order to get the page table page, try the
-		 * hint first.
-		 */
-		if (pmap->pm_ptphint &&
-		    (pmap->pm_ptphint->pindex == ptepindex)) {
-			m = pmap->pm_ptphint;
-		} else {
-			m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
-			pmap->pm_ptphint = m;
-		}
-		m->hold_count++;
-		return m;
-	}
-	/*
-	 * Here if the pte page isn't mapped, or if it has been deallocated.
-	 */
-	return _pmap_allocpte(pmap, ptepindex);
 }

-
 /***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/
@@ -1537,7 +1046,7 @@
 			continue;
 		}
 		while (1) {
-			if (!pmap_release_free_page(pmap, p) &&
+			if (/*!pmap_release_free_page(pmap, p) &&*/
 			    (object->generation != curgeneration))
 				goto retry;
 		}
@@ -1553,13 +1062,13 @@
 			continue;
 		}
 		while (1) {
-			if (!pmap_release_free_page(pmap, p) &&
+			if (/*!pmap_release_free_page(pmap, p) &&*/
 			    (object->generation != curgeneration))
 				goto retry;
 		}
 	}

-	if (lev1pg && !pmap_release_free_page(pmap, lev1pg))
+	if (lev1pg/* && !pmap_release_free_page(pmap, lev1pg)*/)
 		goto retry;
 	mtx_lock_spin(&allpmaps_lock);
 	LIST_REMOVE(pmap, pm_list);
@@ -1572,93 +1081,7 @@
 void
 pmap_growkernel(vm_offset_t addr)
 {
-	/* XXX come back to this */
-	struct pmap *pmap;
-	pt_entry_t* pte;
-	pt_entry_t newlev1, newlev2;
-	vm_offset_t pa;
-	vm_page_t nkpg;
-
-	critical_enter();
-	if (kernel_vm_end == 0) {
-		kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
-
-		/* Count the level 2 page tables */
-		nklev2 = 0;
-		nklev3 = 0;
-		while (pmap_pte_v(pmap_lev1pte(kernel_pmap, kernel_vm_end))) {
-			nklev2++;
-			nklev3 += (1L << MIPS_PTSHIFT);
-			kernel_vm_end += MIPS_L1SIZE;
-		}
-
-		/* Count the level 3 page tables in the last level 2 page table */
-		kernel_vm_end -= MIPS_L1SIZE;
-		nklev3 -= (1 << MIPS_PTSHIFT);
-		while (pmap_pte_v(pmap_lev2pte(kernel_pmap, kernel_vm_end))) {
-			nklev3++;
-			kernel_vm_end += MIPS_L2SIZE;
-		}
-	}

>>> TRUNCATED FOR MAIL (1000 lines) <<<
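
The TLBMod service function the change description mentions falls in the
portion of the diff truncated above.  For illustration only, here is a
minimal sketch of the general shape such a handler takes against the new
single-level table.  kptmap, pmap_index(), pmap_pte_v(), pmap_pte_ro(),
PG_D, PAGE_MASK, and MachTLBUpdate() are taken from the diff; the function
name tlb_service_mod() and its return convention are assumptions, not code
from this change.

	/*
	 * Illustrative sketch only; not code from this change.  On a
	 * store to a page whose TLB entry lacks the dirty (D) bit, the
	 * CPU raises a TLBMod exception.  The handler sets D in the
	 * page table entry and refreshes the TLB's copy.
	 */
	static int
	tlb_service_mod(vm_offset_t va)		/* hypothetical name */
	{
		pt_entry_t *pte;

		/* One-level lookup: strip the XKSEG bits, index by page number. */
		pte = &kptmap[pmap_index(va)];

		/* A write to an invalid or read-only page is a real fault. */
		if (!pmap_pte_v(pte) || pmap_pte_ro(pte))
			return (-1);	/* leave it to the usual fault path */

		/* Mark the page dirty and push the updated PTE into the TLB. */
		*pte |= PG_D;
		MachTLBUpdate(va & ~PAGE_MASK, *pte);
		return (0);
	}

As the description notes, marking newly entered pages as dirty up front
means no TLBMod exception is taken for them at all, which is why doing so
gets further while TLBUpdate seems broken.
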
