From owner-p4-projects@FreeBSD.ORG Tue Mar 4 11:33:23 2008
Return-Path: <owner-p4-projects@freebsd.org>
Delivered-To: p4-projects@freebsd.org
Date: Tue, 4 Mar 2008 11:33:22 GMT
Message-Id: <200803041133.m24BXMQ2088921@repoman.freebsd.org>
From: "Randall R. Stewart" <rrs@cisco.com>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 136802 for review
List-Id: p4 projects tree changes

http://perforce.freebsd.org/chv.cgi?CH=136802

Change 136802 by rrs@rrs-mips2-jnpr on 2008/03/04 11:32:45

	PG_XXX -> PTE_XXX

Affected files ...

.. //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#26 edit

Differences ...

==== //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#26 (text+ko) ====

@@ -122,12 +122,12 @@
 #define	pmap_pde(m, v)		(&((m)->pm_segtab[(vm_offset_t)(v) >> SEGSHIFT]))
 #define	segtab_pde(m, v)	(m[(vm_offset_t)(v) >> SEGSHIFT])
 
-#define	pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
+#define	pmap_pte_w(pte)		((*(int *)pte & PTE_W) != 0)
 #define	pmap_pde_v(pte)		((*(int *)pte) != 0)
-#define	pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
-#define	pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
+#define	pmap_pte_m(pte)		((*(int *)pte & PTE_M) != 0)
+#define	pmap_pte_v(pte)		((*(int *)pte & PTE_V) != 0)
 
-#define	pmap_pte_set_w(pte, v)	((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
+#define	pmap_pte_set_w(pte, v)	((v)?(*(int *)pte |= PTE_W):(*(int *)pte &= ~PTE_W))
 #define	pmap_pte_set_prot(pte, v)	((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
 
 #define	MIPS_SEGSIZE		(1L << SEGSHIFT)
@@ -350,8 +350,8 @@
 	 */
 	if (memory_larger_than_512meg) {
 		for (i = 0; i < MAXCPU; i++) {
-			sysmap_lmem[i].CMAP1 = PG_G;
-			sysmap_lmem[i].CMAP2 = PG_G;
+			sysmap_lmem[i].CMAP1 = PTE_G;
+			sysmap_lmem[i].CMAP2 = PTE_G;
 			sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
 			virtual_avail += PAGE_SIZE;
 			sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
@@ -396,7 +396,7 @@
 	 * in the tlb.
 	 */
 	for (i = 0, pte = pgtab; i < (nkpt * NPTEPG); i++, pte++)
-		*pte = PG_G;
+		*pte = PTE_G;
 	printf("Va=0x%x Ve=%x\n", virtual_avail, virtual_end);
 
 	/*
@@ -484,7 +484,7 @@
 static int
 pmap_nw_modified(pt_entry_t pte)
 {
-	if ((pte & (PG_M | PG_RO)) == (PG_M | PG_RO))
+	if ((pte & (PTE_M | PTE_RO)) == (PTE_M | PTE_RO))
 		return (1);
 	else
 		return (0);
@@ -672,7 +672,7 @@
 	pte = *pmap_pte(pmap, va);
 	if (pte != 0 && pmap_pte_v(&pte) &&
-	    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
+	    ((pte & PTE_RW) || (prot & VM_PROT_WRITE) == 0)) {
 		m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pte));
 		vm_page_hold(m);
 	}
@@ -694,12 +694,12 @@
 	register pt_entry_t *pte;
 	pt_entry_t npte, opte;
 
-	npte = mips_paddr_to_tlbpfn(pa) | PG_RW | PG_V | PG_G | PG_W;
+	npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W;
 
 	if (is_cacheable_mem(pa))
-		npte |= PG_CACHE;
+		npte |= PTE_CACHE;
 	else
-		npte |= PG_UNCACHED;
+		npte |= PTE_UNCACHED;
 
 	pte = pmap_pte(kernel_pmap, va);
 	opte = *pte;
@@ -717,7 +717,7 @@
 	register pt_entry_t *pte;
 
 	pte = pmap_pte(kernel_pmap, va);
-	*pte = PG_G;
+	*pte = PTE_G;
 	pmap_invalidate_page(kernel_pmap, va);
 }
@@ -876,7 +876,7 @@
 	fp->state = TRUE;
 	kva = fp->kva;
 
-	npte = mips_paddr_to_tlbpfn(pa) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+	npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 	pte = pmap_pte(kernel_pmap, kva);
 	*pte = npte;
@@ -904,7 +904,7 @@
 	kva = fp->kva;
 
 	pte = pmap_pte(kernel_pmap, kva);
-	*pte = PG_G;
+	*pte = PTE_G;
 	pmap_TLB_invalidate_kernel(kva);
 
 	fp->state = FALSE;
@@ -1029,7 +1029,7 @@
 #if 0
 	/* I think we can just delete these, now that PG_BUSY is gone */
 	vm_page_lock_queues();
-	vm_page_flag_clear(ptdpg, PG_BUSY);	/* not usually mapped */
+	vm_page_flag_clear(ptdpg, PTE_BUSY);	/* not usually mapped */
 #endif
 	ptdpg->valid = VM_PAGE_BITS_ALL;
@@ -1295,7 +1295,7 @@
 		 * produce a global bit to store in the tlb.
 		 */
 		for (i = 0; i < NPTEPG; i++, pte++)
-			*pte = PG_G;
+			*pte = PTE_G;
 
 		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
 		    ~(PAGE_SIZE * NPTEPG - 1);
@@ -1432,9 +1432,9 @@
 	oldpte = loadandclear((u_int *)ptq);
 	if (is_kernel_pmap(pmap))
-		*ptq = PG_G;
+		*ptq = PTE_G;
 
-	if (oldpte & PG_W)
+	if (oldpte & PTE_W)
 		pmap->pm_stats.wired_count -= 1;
 
 	pmap->pm_stats.resident_count -= 1;
@@ -1442,7 +1442,7 @@
 	if (page_is_managed(pa)) {
 		m = PHYS_TO_VM_PAGE(pa);
-		if (oldpte & PG_M) {
+		if (oldpte & PTE_M) {
 #if defined(PMAP_DIAGNOSTIC)
 			if (pmap_nw_modified(oldpte)) {
 				printf(
@@ -1572,15 +1572,15 @@
 		tpte = loadandclear((u_int *)pte);
 		if (is_kernel_pmap(pv->pv_pmap))
-			*pte = PG_G;
+			*pte = PTE_G;
 
-		if (tpte & PG_W)
+		if (tpte & PTE_W)
 			pv->pv_pmap->pm_stats.wired_count--;
 
 		/*
 		 * Update the vm_page_t clean and reference bits.
 		 */
-		if (tpte & PG_M) {
+		if (tpte & PTE_M) {
 #if defined(PMAP_DIAGNOSTIC)
 			if (pmap_nw_modified(tpte)) {
 				printf(
@@ -1656,14 +1656,14 @@
 					vm_page_flag_set(m, PG_REFERENCED);
 					m->md.pv_flags &= ~PV_TABLE_REF;
 				}
-				if (pbits & PG_M) {
+				if (pbits & PTE_M) {
 					if (pmap_track_modified(sva)) {
 						vm_page_dirty(m);
 						m->md.pv_flags &= ~PV_TABLE_MOD;
 					}
 				}
 			}
-			pbits = (pbits & ~PG_M) | PG_RO;
+			pbits = (pbits & ~PTE_M) | PTE_RO;
 
 			if (pbits != *pte) {
 				if (!atomic_cmpset_int((u_int *)pte, obits, pbits))
@@ -1736,16 +1736,16 @@
 	/*
 	 * Mapping has not changed, must be protection or wiring change.
 	 */
-	if ((origpte & PG_V) && (opa == pa)) {
+	if ((origpte & PTE_V) && (opa == pa)) {
 		/*
 		 * Wiring change, just update stats. We don't worry about
 		 * wiring PT pages as they remain resident as long as there
 		 * are valid mappings in them. Hence, if a user page is
 		 * wired, the PT page will be also.
 		 */
-		if (wired && ((origpte & PG_W) == 0))
+		if (wired && ((origpte & PTE_W) == 0))
			pmap->pm_stats.wired_count++;
-		else if (!wired && (origpte & PG_W))
+		else if (!wired && (origpte & PTE_W))
 			pmap->pm_stats.wired_count--;
 
 #if defined(PMAP_DIAGNOSTIC)
@@ -1776,7 +1776,7 @@
 	 * handle validating new mapping.
 	 */
 	if (opa) {
-		if (origpte & PG_W)
+		if (origpte & PTE_W)
 			pmap->pm_stats.wired_count--;
 
 		if (page_is_managed(opa)) {
@@ -1812,18 +1812,18 @@
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 */
-	newpte = mips_paddr_to_tlbpfn(pa) | rw | PG_V;
+	newpte = mips_paddr_to_tlbpfn(pa) | rw | PTE_V;
 
 	if (is_cacheable_mem(pa))
-		newpte |= PG_CACHE;
+		newpte |= PTE_CACHE;
 	else
-		newpte |= PG_UNCACHED;
+		newpte |= PTE_UNCACHED;
 
 	if (wired)
-		newpte |= PG_W;
+		newpte |= PTE_W;
 
 	if (is_kernel_pmap(pmap)) {
-		newpte |= PG_G;
+		newpte |= PTE_G;
 	}
 
 	/*
@@ -1831,7 +1831,7 @@
 	 * update the pte.
 	 */
 	if (origpte != newpte) {
-		if (origpte & PG_V) {
+		if (origpte & PTE_V) {
 			*pte = newpte;
 			if (page_is_managed(opa) && (opa != pa)) {
 				if (om->md.pv_flags & PV_TABLE_REF)
@@ -1839,8 +1839,8 @@
 					om->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
 			}
-			if (origpte & PG_M) {
-				KASSERT((origpte & PG_RW),
+			if (origpte & PTE_M) {
+				KASSERT((origpte & PTE_RW),
 				    ("pmap_enter: modified page not writable:"
 				    " va: 0x%x, pte: 0x%lx", va, origpte));
 				if ((page_is_managed(opa)) &&
@@ -1966,17 +1966,17 @@
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	*pte = mips_paddr_to_tlbpfn(pa) | PG_V;
+	*pte = mips_paddr_to_tlbpfn(pa) | PTE_V;
 	if (is_cacheable_mem(pa))
-		*pte |= PG_CACHE;
+		*pte |= PTE_CACHE;
 	else
-		*pte |= PG_UNCACHED;
+		*pte |= PTE_UNCACHED;
 
 	if (is_kernel_pmap(pmap))
-		*pte |= PG_G;
+		*pte |= PTE_G;
 	else {
-		*pte |= PG_RO;
+		*pte |= PTE_RO;
 		/*
 		 * Sync I & D caches. Do this only if the the target pmap
 		 * belongs to the current process. Otherwise, an
@@ -2020,7 +2020,7 @@
 		cpu = PCPU_GET(cpuid);
 		sysm = &sysmap_lmem[cpu];
 		/* Since this is for the debugger, no locks or any other fun */
-		sysm->CMAP1 = mips_paddr_to_tlbpfn(pa) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+		sysm->CMAP1 = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
 		sysm->valid1 = 1;
 		va = (vm_offset_t)sysm->CADDR1;
@@ -2187,7 +2187,7 @@
 		sysm = &sysmap_lmem[cpu];
 		PMAP_LGMEM_LOCK(sysm);
 		sched_pin();
-		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
 		sysm->valid1 = 1;
 		bzero(sysm->CADDR1, PAGE_SIZE);
@@ -2241,7 +2241,7 @@
 		sysm = &sysmap_lmem[cpu];
 		PMAP_LGMEM_LOCK(sysm);
 		sched_pin();
-		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
 		sysm->valid1 = 1;
 		bzero((char *)sysm->CADDR1 + off, size);
@@ -2279,7 +2279,7 @@
 		sysm = &sysmap_lmem[cpu];
 		PMAP_LGMEM_LOCK(sysm);
 		sched_pin();
-		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
 		sysm->valid1 = 1;
 		bzero(sysm->CADDR1, PAGE_SIZE);
@@ -2350,21 +2350,21 @@
 		if (phy_src < MIPS_KSEG0_LARGEST_PHYS) {
 			/* one side needs mapping - dest */
 			va_src = MIPS_PHYS_TO_CACHED(phy_src);
-			sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+			sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
 			sysm->valid2 = 2;
 			va_dst = (vm_offset_t)sysm->CADDR2;
 		} else if (phy_dst < MIPS_KSEG0_LARGEST_PHYS) {
 			/* one side needs mapping - src */
 			va_dst = MIPS_PHYS_TO_CACHED(phy_dst);
-			sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+			sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
 			va_src = (vm_offset_t)sysm->CADDR1;
 			sysm->valid1 = 1;
 		} else {
 			/* all need mapping */
-			sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
-			sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+			sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+			sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
 			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
 			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
 			sysm->valid1 = sysm->valid2 = 1;
@@ -2464,11 +2464,11 @@
 		/*
 		 * We cannot remove wired pages from a process' mapping at this time
 		 */
-		if (tpte & PG_W) {
+		if (tpte & PTE_W) {
 			npv = TAILQ_NEXT(pv, pv_plist);
 			continue;
 		}
-		*pte = is_kernel_pmap(pmap) ? PG_G : 0;
+		*pte = is_kernel_pmap(pmap) ? PTE_G : 0;
 
 		m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(tpte));
 
@@ -2480,7 +2480,7 @@
 		/*
 		 * Update the vm_page_t clean and reference bits.
 		 */
-		if (tpte & PG_M) {
+		if (tpte & PTE_M) {
 			vm_page_dirty(m);
 		}
 		npv = TAILQ_NEXT(pv, pv_plist);
@@ -2521,7 +2521,7 @@
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-		if (bit & PG_M) {
+		if (bit & PTE_M) {
 			if (!pmap_track_modified(pv->pv_va))
 				continue;
 		}
@@ -2563,7 +2563,7 @@
 		/*
 		 * don't write protect pager mappings
 		 */
-		if (!setem && (bit == PG_RW)) {
+		if (!setem && (bit == PTE_RW)) {
 			if (!pmap_track_modified(pv->pv_va))
 				continue;
 		}
@@ -2584,12 +2584,12 @@
 			vm_offset_t pbits = *(vm_offset_t *)pte;
 
 			if (pbits & bit) {
-				if (bit == PG_RW) {
-					if (pbits & PG_M) {
+				if (bit == PTE_RW) {
+					if (pbits & PTE_M) {
 						vm_page_dirty(m);
 					}
-					*(int *)pte = (pbits & ~(PG_M | PG_RW)) |
-					    PG_RO;
+					*(int *)pte = (pbits & ~(PTE_M | PTE_RW)) |
+					    PTE_RO;
 				} else {
 					*(int *)pte = pbits & ~bit;
 				}
@@ -2598,7 +2598,7 @@
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-	if (!setem && bit == PG_RW)
+	if (!setem && bit == PTE_RW)
 		vm_page_flag_clear(m, PG_WRITEABLE);
 }
@@ -2687,7 +2687,7 @@
 	if (m->md.pv_flags & PV_TABLE_MOD)
 		return TRUE;
 	else
-		return pmap_testbit(m, PG_M);
+		return pmap_testbit(m, PTE_M);
 }
 
 /* N/C */
@@ -2724,7 +2724,7 @@
 		return;
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (m->md.pv_flags & PV_TABLE_MOD) {
-		pmap_changebit(m, PG_M, FALSE);
+		pmap_changebit(m, PTE_M, FALSE);
 		m->md.pv_flags &= ~PV_TABLE_MOD;
 	}
 }
@@ -2801,7 +2801,7 @@
 	/*
 	 * Modified by us
 	 */
-	if (pte & PG_M)
+	if (pte & PTE_M)
 		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 	/*
 	 * Modified by someone
@@ -2883,6 +2883,9 @@
 	    p->p_vmspace);
 	index = 0;
 	pmap = vmspace_pmap(p->p_vmspace);
+	printf("pmap asid:%x generation:%x\n",
+	    pmap->pm_asid[0].asid,
+	    pmap->pm_asid[0].gen);
 	for (i = 0; i < NUSERPGTBLS; i++) {
 		pd_entry_t *pde;
 		pt_entry_t *pte;
@@ -3041,19 +3044,19 @@
 	int rw = 0;
 
 	if (!(prot & VM_PROT_WRITE))
-		rw = PG_ROPAGE;
+		rw = PTE_ROPAGE;
 	else {
 		if (va >= VM_MIN_KERNEL_ADDRESS) {
 			/*
 			 * Don't bother to trap on kernel writes, just
 			 * record page as dirty.
 			 */
-			rw = PG_RWPAGE;
+			rw = PTE_RWPAGE;
 			vm_page_dirty(m);
 		} else if ((m->md.pv_flags & PV_TABLE_MOD) || m->dirty)
-			rw = PG_RWPAGE;
+			rw = PTE_RWPAGE;
 		else
-			rw = PG_CWPAGE;
+			rw = PTE_CWPAGE;
 	}
 	return rw;
 }
@@ -3100,18 +3103,18 @@
 	while (tlbno <= last) {
 		MachTLBRead(tlbno, &tlb);
-		if (tlb.tlb_lo0 & PG_V || tlb.tlb_lo1 & PG_V) {
+		if (tlb.tlb_lo0 & PTE_V || tlb.tlb_lo1 & PTE_V) {
 			printf("TLB %2d vad 0x%08x ", tlbno, (tlb.tlb_hi & 0xffffff00));
 		} else {
 			printf("TLB*%2d vad 0x%08x ", tlbno, (tlb.tlb_hi & 0xffffff00));
 		}
 		printf("0=0x%08x ", pfn_to_vad(tlb.tlb_lo0));
-		printf("%c", tlb.tlb_lo0 & PG_M ? 'M' : ' ');
-		printf("%c", tlb.tlb_lo0 & PG_G ? 'G' : ' ');
+		printf("%c", tlb.tlb_lo0 & PTE_M ? 'M' : ' ');
+		printf("%c", tlb.tlb_lo0 & PTE_G ? 'G' : ' ');
 		printf(" atr %x ", (tlb.tlb_lo0 >> 3) & 7);
 		printf("1=0x%08x ", pfn_to_vad(tlb.tlb_lo1));
-		printf("%c", tlb.tlb_lo1 & PG_M ? 'M' : ' ');
-		printf("%c", tlb.tlb_lo1 & PG_G ? 'G' : ' ');
+		printf("%c", tlb.tlb_lo1 & PTE_M ? 'M' : ' ');
+		printf("%c", tlb.tlb_lo1 & PTE_G ? 'G' : ' ');
 		printf(" atr %x ", (tlb.tlb_lo1 >> 3) & 7);
 		printf(" sz=%x pid=%x\n", tlb.tlb_mask,
 		    (tlb.tlb_hi & 0x000000ff)
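
Note for readers skimming the rename: the PTE_* names above are one-for-one replacements for the old PG_* page-table-entry flag macros, so the logic of every hunk is unchanged. The short C sketch below shows how such flags compose when a wired, global kernel mapping is built, mirroring the npte construction in the @@ -694 hunk. It is an illustration only: the bit values, the PFN shift, and the paddr_to_pfn() helper are hypothetical placeholders, not the real definitions from the MIPS pte.h header (which this diff does not touch).

    /*
     * Illustrative sketch only -- flag values and the PFN helper are
     * hypothetical placeholders, not the real pte.h definitions.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_G        0x00000001u   /* placeholder: global (valid in every ASID) */
    #define PTE_V        0x00000002u   /* placeholder: valid */
    #define PTE_RW       0x00000004u   /* placeholder: software read-write */
    #define PTE_W        0x00000008u   /* placeholder: software wired */
    #define PTE_CACHE    0x00000010u   /* placeholder: cacheable attribute */
    #define PTE_UNCACHED 0x00000020u   /* placeholder: uncached attribute */

    /* Stand-in for mips_paddr_to_tlbpfn(): move the page number into an assumed PFN field. */
    static uint32_t
    paddr_to_pfn(uint32_t pa)
    {
            return ((pa >> 12) << 6);   /* 12 = page shift, 6 = assumed PFN field offset */
    }

    /* Build a wired, global kernel PTE the way the @@ -694 hunk composes npte after the rename. */
    static uint32_t
    make_kernel_pte(uint32_t pa, int cacheable)
    {
            uint32_t npte;

            npte = paddr_to_pfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W;
            npte |= cacheable ? PTE_CACHE : PTE_UNCACHED;
            return (npte);
    }

    int
    main(void)
    {
            uint32_t pte = make_kernel_pte(0x00800000u, 1);

            printf("pte=0x%08x wired=%d valid=%d\n", (unsigned)pte,
                (pte & PTE_W) != 0, (pte & PTE_V) != 0);
            return (0);
    }

Because the change is purely a rename, testing and clearing the flags works exactly as before; only the spelling of the constants differs.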