From owner-svn-src-projects@FreeBSD.ORG  Sat Sep 28 00:45:00 2013
Message-Id: <201309280045.r8S0j0oI072593@svn.freebsd.org>
From: "Cherry G. Mathew" <cherry@FreeBSD.org>
Date: Sat, 28 Sep 2013 00:45:00 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r255920 - projects/amd64_xen_pv/sys/amd64/xen

Author: cherry
Date: Sat Sep 28 00:44:59 2013
New Revision: 255920
URL: http://svnweb.freebsd.org/changeset/base/255920

Log:
  Add pmap_advise() and pmap_protect() implementations for xen.
  These are now mandatory with the sync to r255918 to boot up to
  single user mode.

  Approved by:  gibbs (implicit)
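For context (this sketch is not part of the commit): pmap_protect() is the
machine-dependent hook reached from mprotect(2) via vm_map_protect(), and
pmap_advise() is reached from madvise(2) via vm_map_madvise() for the
MADV_DONTNEED and MADV_FREE cases. A minimal userland program exercising both
paths, assuming only the standard FreeBSD mmap/mprotect/madvise interfaces:

/*
 * Illustrative only: touches a private anonymous page, then revokes
 * write access (pmap_protect() path) and advises the range
 * (pmap_advise() path).
 */
#include <sys/mman.h>

#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        size_t len = (size_t)getpagesize();
        char *p;

        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
            -1, 0);
        if (p == MAP_FAILED)
                err(1, "mmap");

        memset(p, 0xa5, len);   /* dirty the page; hardware sets PG_M */

        /* Write permission revoked: vm_map_protect() -> pmap_protect(). */
        if (mprotect(p, len, PROT_READ) == -1)
                err(1, "mprotect");

        /* Advice on the range: vm_map_madvise() -> pmap_advise(). */
        if (madvise(p, len, MADV_DONTNEED) == -1)
                err(1, "madvise");

        munmap(p, len);
        return (0);
}

Run under the amd64_xen_pv kernel, something like this is a quick smoke test
of the single-page case handled by the code below.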
Modified:
  projects/amd64_xen_pv/sys/amd64/xen/pmap.c

Modified: projects/amd64_xen_pv/sys/amd64/xen/pmap.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/pmap.c  Sat Sep 28 00:26:03 2013  (r255919)
+++ projects/amd64_xen_pv/sys/amd64/xen/pmap.c  Sat Sep 28 00:44:59 2013  (r255920)
@@ -125,6 +125,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1724,13 +1725,116 @@ pmap_map(vm_offset_t *virt, vm_paddr_t s
         return (sva);
 }
 
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+
+
 void
 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
-        /*
-         * XXX: TODO - ignore for now - we need to revisit this as
-         * soon as kdb is up.
-         */
+        vm_offset_t addr;
+        vm_offset_t va_next;
+
+        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+                pmap_remove(pmap, sva, eva);
+                return;
+        }
+
+        if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
+            (VM_PROT_WRITE|VM_PROT_EXECUTE))
+                return;
+
+        PMAP_LOCK(pmap);
+
+        /* XXX: unify va range operations over ptes across functions */
+
+        char tbuf[tsz]; /* Safe to do this on the stack since tsz is
+                         * effectively const.
+                         */
+
+        mmu_map_t tptr = tbuf;
+
+        struct mmu_map_mbackend mb = {
+                ptmb_mappedalloc,
+                ptmb_mappedfree,
+                ptmb_ptov,
+                ptmb_vtop
+        };
+        mmu_map_t_init(tptr, &mb);
+
+        for (addr = sva; addr < eva; addr = va_next) {
+                pt_entry_t *pte;
+
+                if (!mmu_map_inspect_va(pmap, tptr, addr)) {
+                        if (mmu_map_pdpt(tptr) == NULL) {
+                                va_next = (addr + NBPML4) & ~PML4MASK;
+                                if (va_next < addr) /* Overflow */
+                                        va_next = eva;
+                                continue;
+                        }
+
+                        if (mmu_map_pdt(tptr) == NULL) {
+                                va_next = (addr + NBPDP) & ~PDPMASK;
+                                if (va_next < addr) /* Overflow */
+                                        va_next = eva;
+                                continue;
+                        }
+
+
+                        if (mmu_map_pt(tptr) == NULL) {
+                                va_next = (addr + NBPDR) & ~PDRMASK;
+                                if (va_next < addr)
+                                        va_next = eva;
+                                continue;
+                        }
+
+                        panic("%s: All backing tables non-NULL,"
+                            "yet hierarchy can't be inspected at va = 0x%lx\n",
+                            __func__, addr);
+                }
+
+                va_next = (addr + NBPDR) & ~PDRMASK;
+                if (va_next < addr)
+                        va_next = eva;
+
+                pte = mmu_map_pt(tptr) + pt_index(addr);
+
+                while (addr < va_next) {
+                        pt_entry_t ptetemp;
+                        ptetemp = *pte;
+
+                        if ((ptetemp & PG_V) == 0) {
+                                goto nextpte; /* continue */
+                        }
+
+                        if ((prot & VM_PROT_WRITE) == 0) {
+                                if ((ptetemp & (PG_MANAGED | PG_M | PG_RW)) ==
+                                    (PG_MANAGED | PG_M | PG_RW)) {
+                                        vm_page_t m = MACH_TO_VM_PAGE(ptetemp & PG_FRAME);
+                                        vm_page_dirty(m);
+                                }
+                                ptetemp &= ~(PG_RW | PG_M);
+                        }
+
+                        if ((prot & VM_PROT_EXECUTE) == 0)
+                                ptetemp |= pg_nx;
+
+                        if (ptetemp != *pte) {
+                                PT_SET_VA_MA(pte, ptetemp, true);
+                        }
+
+                nextpte:
+                        addr += PAGE_SIZE;
+                        pte++;
+                }
+        }
+
+        mmu_map_t_fini(tptr);
+
+        PMAP_UNLOCK(pmap);
+
 }
 
 void
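As a reading aid (also not part of the diff): once the mmu_map traversal and
the hypervisor-mediated PT_SET_VA_MA() update are stripped away, the per-PTE
edit performed by the pmap_protect() loop above reduces to the sketch below.
It assumes the usual amd64 pt_entry_t/PG_*/pg_nx definitions from the pmap
headers; the vm_page_dirty() bookkeeping for managed, dirty, writable pages
is omitted, and the helper name is illustrative only.

/*
 * Model of the protection change applied to each valid PTE:
 * dropping VM_PROT_WRITE clears PG_RW and PG_M, dropping
 * VM_PROT_EXECUTE sets the no-execute bit.  The real loop only
 * issues the PTE write when the value actually changes.
 */
static inline pt_entry_t
pmap_protect_pte_model(pt_entry_t pte, vm_prot_t prot)
{

        if ((pte & PG_V) == 0)
                return (pte);
        if ((prot & VM_PROT_WRITE) == 0)
                pte &= ~(PG_RW | PG_M);
        if ((prot & VM_PROT_EXECUTE) == 0)
                pte |= pg_nx;
        return (pte);
}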
@@ -2218,7 +2322,96 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 void
 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 {
-        KASSERT(0, ("XXX: %s: TODO\n", __func__));
+        vm_offset_t addr;
+        vm_offset_t va_next;
+        vm_page_t m;
+        boolean_t anychanged, pv_lists_locked;
+
+        if (advice != MADV_DONTNEED && advice != MADV_FREE)
+                return;
+        pv_lists_locked = FALSE;
+
+        anychanged = FALSE;
+        PMAP_LOCK(pmap);
+        /* XXX: unify va range operations over ptes across functions */
+
+        char tbuf[tsz]; /* Safe to do this on the stack since tsz is
+                         * effectively const.
+                         */
+
+        mmu_map_t tptr = tbuf;
+
+        struct mmu_map_mbackend mb = {
+                ptmb_mappedalloc,
+                ptmb_mappedfree,
+                ptmb_ptov,
+                ptmb_vtop
+        };
+        mmu_map_t_init(tptr, &mb);
+
+        for (addr = sva; addr < eva; addr = va_next) {
+                pt_entry_t *pte;
+
+                if (!mmu_map_inspect_va(pmap, tptr, addr)) {
+                        if (mmu_map_pdpt(tptr) == NULL) {
+                                va_next = (addr + NBPML4) & ~PML4MASK;
+                                if (va_next < addr) /* Overflow */
+                                        va_next = eva;
+                                continue;
+                        }
+
+                        if (mmu_map_pdt(tptr) == NULL) {
+                                va_next = (addr + NBPDP) & ~PDPMASK;
+                                if (va_next < addr) /* Overflow */
+                                        va_next = eva;
+                                continue;
+                        }
+
+
+                        if (mmu_map_pt(tptr) == NULL) {
+                                va_next = (addr + NBPDR) & ~PDRMASK;
+                                if (va_next < addr)
+                                        va_next = eva;
+                                continue;
+
+                        }
+                }
+                va_next = (addr + NBPDR) & ~PDRMASK;
+                if (va_next > eva)
+                        va_next = eva;
+
+                for (pte = mmu_map_pt(tptr) + pt_index(sva); sva != va_next; pte++,
+                    sva += PAGE_SIZE) {
+                        if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED |
+                            PG_V))
+                                continue;
+                        else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+                                if (advice == MADV_DONTNEED) {
+                                        /*
+                                         * Future calls to pmap_is_modified()
+                                         * can be avoided by making the page
+                                         * dirty now.
+                                         */
+                                        m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
+                                        vm_page_dirty(m);
+                                }
+                                /* XXX: This is not atomic */
+                                PT_SET_VA_MA(pte, *pte & ~(PG_M | PG_A), true);
+                        } else if ((*pte & PG_A) != 0)
+                                /* XXX: This is not atomic */
+                                PT_SET_VA_MA(pte, *pte & ~(PG_A), true);
+                        else
+                                continue;
+                        if ((*pte & PG_G) != 0)
+                                pmap_invalidate_page(pmap, sva);
+                        else
+                                anychanged = TRUE;
+                }
+        }
+        if (anychanged)
+                pmap_invalidate_all(pmap);
+        PMAP_UNLOCK(pmap);
+        mmu_map_t_fini(tptr);
 }
 
 void
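Similarly (sketch only, same assumptions as above, TLB invalidation
bookkeeping omitted): the per-PTE decision in pmap_advise() reduces to
clearing the dirty/accessed tracking bits, with MADV_DONTNEED transferring
the hardware dirty state to the vm_page before PG_M is lost.  The helper
name and the dirtyp out-parameter are illustrative only.

/*
 * Model of the advice applied to each managed, valid PTE.  *dirtyp
 * tells the caller to vm_page_dirty() the backing page before the
 * modified bit is cleared (the MADV_DONTNEED case).
 */
static inline pt_entry_t
pmap_advise_pte_model(pt_entry_t pte, int advice, boolean_t *dirtyp)
{

        *dirtyp = FALSE;
        if ((pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
                return (pte);
        if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
                if (advice == MADV_DONTNEED)
                        *dirtyp = TRUE;
                return (pte & ~(PG_M | PG_A));
        }
        if ((pte & PG_A) != 0)
                return (pte & ~PG_A);
        return (pte);
}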