From owner-p4-projects@FreeBSD.ORG Sun Apr 9 17:20:29 2006
Return-Path:
X-Original-To: p4-projects@freebsd.org
Delivered-To: p4-projects@freebsd.org
Received: by hub.freebsd.org (Postfix, from userid 32767)
    id 4A7EB16A405; Sun, 9 Apr 2006 17:20:29 +0000 (UTC)
X-Original-To: perforce@freebsd.org
Delivered-To: perforce@freebsd.org
Received: from mx1.FreeBSD.org (mx1.freebsd.org [216.136.204.125])
    by hub.freebsd.org (Postfix) with ESMTP id 292E016A401
    for ; Sun, 9 Apr 2006 17:20:29 +0000 (UTC)
    (envelope-from cognet@freebsd.org)
Received: from repoman.freebsd.org (repoman.freebsd.org [216.136.204.115])
    by mx1.FreeBSD.org (Postfix) with ESMTP id DA3EC43D66
    for ; Sun, 9 Apr 2006 17:20:28 +0000 (GMT)
    (envelope-from cognet@freebsd.org)
Received: from repoman.freebsd.org (localhost [127.0.0.1])
    by repoman.freebsd.org (8.13.1/8.13.1) with ESMTP id k39HKSKg032722
    for ; Sun, 9 Apr 2006 17:20:28 GMT
    (envelope-from cognet@freebsd.org)
Received: (from perforce@localhost)
    by repoman.freebsd.org (8.13.1/8.13.1/Submit) id k39HKS1Q032716
    for perforce@freebsd.org; Sun, 9 Apr 2006 17:20:28 GMT
    (envelope-from cognet@freebsd.org)
Date: Sun, 9 Apr 2006 17:20:28 GMT
Message-Id: <200604091720.k39HKS1Q032716@repoman.freebsd.org>
X-Authentication-Warning: repoman.freebsd.org: perforce set sender to cognet@freebsd.org using -f
From: Olivier Houchard
To: Perforce Change Reviews
Cc:
Subject: PERFORCE change 94855 for review
X-BeenThere: p4-projects@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: p4 projects tree changes
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Sun, 09 Apr 2006 17:20:29 -0000

http://perforce.freebsd.org/chv.cgi?CH=94855

Change 94855 by cognet@cognet on 2006/04/09 17:19:52

    MFi36: enable promotion of kmem_object pages.

Affected files ...

.. //depot/projects/superpages/src/sys/arm/arm/pmap.c#8 edit

Differences ...

==== //depot/projects/superpages/src/sys/arm/arm/pmap.c#8 (text+ko) ====

@@ -493,7 +493,7 @@
     * we have a write-back cache, then we assume setting
     * only C will make those pages write-through.
     */
-   if (1 || cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
+   if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
        pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
        pte_l2_l_cache_mode_pt = L2_B|L2_C;
        pte_l2_s_cache_mode_pt = L2_B|L2_C;
@@ -1565,12 +1565,6 @@
            /* There's no way we can do it. */
            return;
        }
-       if (((ptep[i] & pte_l2_s_cache_mask) ==
-           pte_l2_s_cache_mode_pt ||
-           (ptep[i - 1] & pte_l2_s_cache_mask) ==
-           pte_l2_s_cache_mode_pt) && pte_l2_s_cache_mode_pt
-           != pte_l2_s_cache_mode)
-           panic("fuck");
    }
 #ifdef SP_DEBUG
    printf("promoting small %x\n", va);
@@ -1595,6 +1589,7 @@
    vm_offset_t va0;
    vm_paddr_t pa;
    struct l2_dtable *l2;
+   struct l1_ttable *l1;
    uint16_t l1idx;
    int i;
@@ -1623,11 +1618,6 @@
            pa = (pt[i] & L2_L_FRAME) + (i & 0xf) * PAGE_SIZE;
        else
            pa = pt[i] & L2_S_FRAME;
-       if (((pt[i - 1] & pte_l2_s_cache_mask) == pte_l2_s_cache_mode_pt
-           ||
-           (pt[i] & pte_l2_s_cache_mask) == pte_l2_s_cache_mode_pt) &&
-           pte_l2_s_cache_mode_pt != pte_l2_s_cache_mode)
-           panic("fuck2");
        if ((pt[i - 1] & L2_TYPE_MASK) == L2_TYPE_L)
            pa2 = (pt[i - 1] & L2_L_FRAME) +
                ((i - 1) & 0xf) * PAGE_SIZE;
@@ -1646,10 +1636,13 @@
    if (*pt & L2_S_PROT_W)
        pa |= L1_S_PROT_W;
    *pd = L1_S_PROTO | pa | pte_l1_s_cache_mode | L1_S_DOM(pmap->pm_domain);
-#if 0
-   bzero(pt, 0x100 * sizeof(*pt));
-#endif
    pmap_free_l2_bucket(pmap, &l2->l2_bucket[L2_BUCKET(l1idx)], 0x100);
+   if (pmap == kernel_pmap) {
+       SLIST_FOREACH(l1, &l1_list, l1_link) {
+           l1->l1_kva[l1idx] = *pd;
+           PTE_SYNC(&l1->l1_kva[l1idx]);
+       }
+   }
 }
 
 static void
@@ -1658,6 +1651,7 @@
    pd_entry_t *pd;
    pt_entry_t *pt;
    struct l2_bucket *l2b;
+   struct l1_ttable *l1;
    vm_offset_t va0;
    uint16_t l1idx;
    uint16_t demote_size;
@@ -1684,6 +1678,12 @@
        pt = &l2b->l2b_kva[l2pte_index(va0)];
        *pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
        l2b->l2b_occupancy += 0x100;
+       if (pmap == kernel_pmap) {
+           SLIST_FOREACH(l1, &l1_list, l1_link) {
+               l1->l1_kva[l1idx] = *pd;
+               PTE_SYNC(&l1->l1_kva[l1idx]);
+           }
+       }
    } else {
        va0 = va & L2_L_FRAME;
        l2b = pmap_get_l2_bucket(pmap, va0);
@@ -3099,6 +3099,8 @@
            if ((pv->pv_va & L1_S_ADDR_MASK) >= sva &&
                ((pv->pv_va & L1_S_ADDR_MASK) + L1_S_SIZE)
                < eva) {
+               struct l1_ttable *l1;
+
                pmap_destroy_pv_range(pmap,
                    *pd & L1_S_ADDR_MASK,
                    pv->pv_va & L1_S_ADDR_MASK,
@@ -3114,6 +3116,18 @@
                }
                *pd = 0;
                npv = TAILQ_FIRST(&pmap->pm_pvlist);
+               if (pmap == pmap_kernel()) {
+                   SLIST_FOREACH(l1, &l1_list,
+                       l1_link) {
+                       l1->l1_kva[
+                           L1_IDX(pv->pv_va &
+                           L1_S_ADDR_MASK)] = 0;
+                       PTE_SYNC(
+                           &l1->l1_kva[L1_IDX(
+                           pv->pv_va &
+                           L1_S_ADDR_MASK)]);
+                   }
+               }
                continue;
            } else
                pmap_demote(pmap, pv->pv_va);
@@ -3577,6 +3591,8 @@
        if (l1pte_section_p(pm->pm_l1->l1_kva[L1_IDX(sva)])) {
            if ((sva & L1_S_OFFSET) == 0 &&
                sva + L1_S_SIZE < eva) {
+               struct l1_ttable *l1;
+
                /* Change the whole 1MB superpage. */
                pd = &pm->pm_l1->l1_kva[L1_IDX(sva)];
                if (*pd & L1_S_PROT_W) {
@@ -3597,6 +3613,13 @@
                        vm_page_dirty(m);
                    }
                }
+               if (pm == pmap_kernel()) {
+                   SLIST_FOREACH(l1, &l1_list, l1_link) {
+                       l1->l1_kva[L1_IDX(sva)] =
+                           *pd;
+                       PTE_SYNC(&l1->l1_kva[L1_IDX(sva)]);
+                   }
+               }
                sva += L1_S_SIZE;
                continue;
            }
@@ -4282,6 +4305,7 @@
        if (l2b == NULL) {
            pd_entry_t *pd = &pm->pm_l1->l1_kva[L1_IDX(sva)];
            if (l1pte_section_p(*pd)) {
+               struct l1_ttable *l1;
                /* We can just remove the superpage. */
                if ((sva == (sva & L1_S_ADDR_MASK)) &&
                    (sva + L1_S_SIZE < eva)) {
@@ -4291,6 +4315,14 @@
                    *pd = 0;
                    flushall = 1;
                    sva += L1_S_SIZE;
+                   if (pm == pmap_kernel()) {
+                       SLIST_FOREACH(l1, &l1_list,
+                           l1_link) {
+                           l1->l1_kva[L1_IDX(sva)] =
+                               0;
+                           PTE_SYNC(&l1->l1_kva[L1_IDX(sva)]);
+                       }
+                   }
                    continue;
                } else {
                    pmap_demote(pm, sva);
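
[Editor's note, not part of the original mail] For readers skimming the diff: every hunk that updates an L1 entry for the kernel pmap repeats the same idiom, mirroring the new descriptor into every L1 table on l1_list and flushing it with PTE_SYNC, since the kernel address space must stay consistent across all L1 tables. A minimal sketch of that idiom, factored into a hypothetical helper (the name pmap_kern_l1_sync and its placement are illustrative only, not part of change 94855), could look like this:

/*
 * Illustrative sketch only -- not part of change 94855.
 * Propagate a kernel L1 descriptor (section, coarse, or 0 on removal)
 * for virtual address va into every L1 table on l1_list, as the hunks
 * above do inline whenever the pmap is the kernel pmap.
 */
static void
pmap_kern_l1_sync(vm_offset_t va, pd_entry_t pde)
{
    struct l1_ttable *l1;

    SLIST_FOREACH(l1, &l1_list, l1_link) {
        l1->l1_kva[L1_IDX(va)] = pde;
        PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
    }
}

Each call site in the diff would then reduce to something like
"if (pm == pmap_kernel()) pmap_kern_l1_sync(sva, *pd);", passing 0 as the
descriptor on the removal paths.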