From: Grzegorz Bernacki <gber@FreeBSD.org>
Date: Thu, 23 May 2013 12:24:46 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r250931 - head/sys/arm/arm

Author: gber
Date: Thu May 23 12:24:46 2013
New Revision: 250931
URL: http://svnweb.freebsd.org/changeset/base/250931

Log:
  Rework and organize the pmap_enter_locked() function.

  The pmap_enter_locked() implementation was ambiguous and confusing.
  Rearrange it so that each part of the mapping creation is separated.
  Avoid walking through redundant conditions. Extract the vector_page
  specific PTE setup from the normal PTE setup.

  Submitted by:	Zbigniew Bodek
  Sponsored by:	The FreeBSD Foundation, Semihalf

Modified:
  head/sys/arm/arm/pmap-v6.c

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Thu May 23 12:23:18 2013	(r250930)
+++ head/sys/arm/arm/pmap-v6.c	Thu May 23 12:24:46 2013	(r250931)
@@ -2723,38 +2723,54 @@ do_l2b_alloc:
 	is_exec = is_refd = 0;
 
 	if (opte) {
-		/*
-		 * There is already a mapping at this address.
-		 * If the physical address is different, lookup the
-		 * vm_page.
-		 */
-		if (l2pte_pa(opte) != pa)
-			om = PHYS_TO_VM_PAGE(l2pte_pa(opte));
-		else
-			om = m;
-	} else
-		om = NULL;
-
-	if ((prot & (VM_PROT_ALL)) || !m) {
-		/*
-		 * - The access type indicates that we don't need
-		 *   to do referenced emulation.
-		 * OR
-		 * - The physical page has already been referenced
-		 *   so no need to re-do referenced emulation here.
-		 */
-		npte |= L2_S_REF;
+		if (l2pte_pa(opte) == pa) {
+			/*
+			 * We're changing the attrs of an existing mapping.
+			 */
+			if (m != NULL)
+				pmap_modify_pv(m, pmap, va,
+				    PVF_WRITE | PVF_WIRED, nflags);
+			is_exec |= PTE_BEEN_EXECD(opte);
+			is_refd |= PTE_BEEN_REFD(opte);
+			goto validate;
+		}
+		if ((om = PHYS_TO_VM_PAGE(l2pte_pa(opte)))) {
+			/*
+			 * Replacing an existing mapping with a new one.
+			 * It is part of our managed memory so we
+			 * must remove it from the PV list
+			 */
+			if ((pve = pmap_remove_pv(om, pmap, va))) {
+				is_exec |= PTE_BEEN_EXECD(opte);
+				is_refd |= PTE_BEEN_REFD(opte);
+
+				if (m && ((m->oflags & VPO_UNMANAGED)))
+					pmap_free_pv_entry(pmap, pve);
+			}
+		}
-		if (m != NULL &&
-		    (m->oflags & VPO_UNMANAGED) == 0)
-			vm_page_aflag_set(m, PGA_REFERENCED);
 	} else {
 		/*
-		 * Need to do page referenced emulation.
+		 * Keep the stats up to date
 		 */
-		npte &= ~L2_S_REF;
+		l2b->l2b_occupancy++;
+		pmap->pm_stats.resident_count++;
 	}
 
+	/*
+	 * Enter on the PV list if part of our managed memory.
+	 */
+	if ((m && !(m->oflags & VPO_UNMANAGED))) {
+		if ((!pve) && (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
+			panic("pmap_enter: no pv entries");
+
+		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+		    ("pmap_enter: managed mapping within the clean submap"));
+		KASSERT(pve != NULL, ("No pv"));
+		pmap_enter_pv(m, pve, pmap, va, nflags);
+	}
+
+validate:
 	/* Make the new PTE valid */
 	npte |= L2_S_PROTO;
 #ifdef SMP
@@ -2763,78 +2779,48 @@ do_l2b_alloc:
 	/* Set defaults first - kernel read access */
 	npte |= L2_APX;
 	npte |= L2_S_PROT_R;
+	/* Set "referenced" flag */
+	npte |= L2_S_REF;
 
 	/* Now tune APs as desired */
 	if (user)
 		npte |= L2_S_PROT_U;
-
-	if (prot & VM_PROT_WRITE) {
-		npte &= ~(L2_APX);
-
-		if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0) {
-			vm_page_aflag_set(m, PGA_WRITEABLE);
+	/*
+	 * If this is not a vector_page
+	 * then continue setting mapping parameters
+	 */
+	if (m != NULL) {
+		if (prot & (VM_PROT_ALL)) {
+			if ((m->oflags & VPO_UNMANAGED) == 0)
+				vm_page_aflag_set(m, PGA_REFERENCED);
+		} else {
 			/*
-			 * The access type and permissions indicate
-			 * that the page will be written as soon as returned
-			 * from fault service.
-			 * Mark it dirty from the outset.
+			 * Need to do page referenced emulation.
 			 */
-			if ((access & VM_PROT_WRITE) != 0)
-				vm_page_dirty(m);
+			npte &= ~L2_S_REF;
 		}
-	}
-
-	if (!(prot & VM_PROT_EXECUTE) && m)
-		npte |= L2_XN;
 
-	if (m && (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE))
-		npte |= pte_l2_s_cache_mode;
+		if (prot & VM_PROT_WRITE) {
+			/* Write enable */
+			npte &= ~(L2_APX);
 
-	if (m && m == om) {
-		/*
-		 * We're changing the attrs of an existing mapping.
-		 */
-		pmap_modify_pv(m, pmap, va, PVF_WRITE | PVF_WIRED, nflags);
-		is_exec |= PTE_BEEN_EXECD(opte);
-		is_refd |= PTE_BEEN_REFD(opte);
-	} else {
-		/*
-		 * New mapping, or changing the backing page
-		 * of an existing mapping.
-		 */
-		if (om) {
-			/*
-			 * Replacing an existing mapping with a new one.
-			 * It is part of our managed memory so we
-			 * must remove it from the PV list
-			 */
-			if ((pve = pmap_remove_pv(om, pmap, va))) {
-				is_exec |= PTE_BEEN_EXECD(opte);
-				is_refd |= PTE_BEEN_REFD(opte);
-
-				if (m && ((m->oflags & VPO_UNMANAGED)))
-					pmap_free_pv_entry(pmap, pve);
+			if ((m->oflags & VPO_UNMANAGED) == 0) {
+				vm_page_aflag_set(m, PGA_WRITEABLE);
+				/*
+				 * The access type and permissions indicate
+				 * that the page will be written as soon as
+				 * returned from fault service.
+				 * Mark it dirty from the outset.
+				 */
+				if ((access & VM_PROT_WRITE) != 0)
+					vm_page_dirty(m);
 			}
 		}
+		if (!(prot & VM_PROT_EXECUTE))
+			npte |= L2_XN;
 
-		if ((m && !(m->oflags & VPO_UNMANAGED))) {
-			if ((!pve) &&
-			    (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
-				panic("pmap_enter: no pv entries");
-
-			KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
-			    ("pmap_enter: managed mapping within the clean submap"));
-			KASSERT(pve != NULL, ("No pv"));
-			pmap_enter_pv(m, pve, pmap, va, nflags);
-		}
-	}
-
-	/*
-	 * Keep the stats up to date
-	 */
-	if (opte == 0) {
-		l2b->l2b_occupancy++;
-		pmap->pm_stats.resident_count++;
+		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			npte |= pte_l2_s_cache_mode;
 	}
 
 	CTR5(KTR_PMAP,"enter: pmap:%p va:%x prot:%x pte:%x->%x",
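
As a reading aid for the rework, here is a minimal stand-alone C sketch
of the PTE tuning that the new code gathers in one place after the
validate: label. It is only a model under stated assumptions, not the
kernel code: the bit values below and the names tune_pte(), managed,
and any_access are invented for illustration, while the real L2_* bits
live in sys/arm/include/pte.h. What it demonstrates is the commit's
order of operations: set the defaults first (kernel read-only plus the
"referenced" flag), then strip or add bits per request, and skip all
per-page tuning when no vm_page backs the mapping (the vector_page
case, per the diff's own comment).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in bit values for illustration only; the real ARMv6 L2 PTE
 * bits are defined in sys/arm/include/pte.h and differ from these.
 */
#define	L2_S_REF	0x01u	/* "referenced" flag */
#define	L2_APX		0x02u	/* read-only unless cleared */
#define	L2_XN		0x04u	/* execute never */
#define	L2_S_PROT_R	0x08u	/* kernel read access */
#define	L2_S_PROT_U	0x10u	/* user access */

/*
 * Model of the reworked flag tuning: defaults first, then per-request
 * adjustments.  A NULL vm_page (the vector_page case) is modeled by
 * managed == false and skips the per-page tuning entirely.
 */
static uint32_t
tune_pte(bool user, bool managed, bool any_access, bool want_write,
    bool want_exec)
{
	uint32_t npte = L2_APX | L2_S_PROT_R | L2_S_REF;

	if (user)
		npte |= L2_S_PROT_U;
	if (!managed)
		return (npte);		/* vector_page: defaults only */

	if (!any_access)
		npte &= ~L2_S_REF;	/* arm referenced emulation */
	if (want_write)
		npte &= ~L2_APX;	/* write enable */
	if (!want_exec)
		npte |= L2_XN;		/* forbid instruction fetch */
	return (npte);
}

int
main(void)
{
	printf("user rw-:    %#x\n",
	    (unsigned)tune_pte(true, true, true, true, false));
	printf("kernel r-x:  %#x\n",
	    (unsigned)tune_pte(false, true, true, false, true));
	printf("no access:   %#x\n",
	    (unsigned)tune_pte(false, true, false, false, false));
	printf("vector_page: %#x\n",
	    (unsigned)tune_pte(false, false, true, false, true));
	return (0);
}

The defaults-then-strip ordering is what lets the reworked kernel code
handle the vector_page mapping with a single m != NULL guard instead of
repeating the check in every branch, which is the separation the log
message describes.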