Date: Mon, 26 Aug 2013 18:21:04 +0000 (UTC)
From: Mark Murray <markm@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r254926 - in projects/random_number_generator: . share/man/man5 share/man/man9 share/misc sys/arm/arm sys/arm/include sys/dev/amdtemp sys/dev/xen/netback sys/vm tools/build/mk tools/bui...
Message-ID: <201308261821.r7QIL4pe038660@svn.freebsd.org>
Author: markm
Date: Mon Aug 26 18:21:04 2013
New Revision: 254926
URL: http://svnweb.freebsd.org/changeset/base/254926

Log:
  MFC

Added:
  projects/random_number_generator/tools/build/options/WITHOUT_ICONV
     - copied unchanged from r254924, head/tools/build/options/WITHOUT_ICONV
  projects/random_number_generator/tools/build/options/WITH_LIBICONV_COMPAT
     - copied unchanged from r254924, head/tools/build/options/WITH_LIBICONV_COMPAT
  projects/random_number_generator/tools/build/options/WITH_USB_GADGET_EXAMPLES
     - copied unchanged from r254924, head/tools/build/options/WITH_USB_GADGET_EXAMPLES
Deleted:
  projects/random_number_generator/tools/build/options/WITH_ICONV
Modified:
  projects/random_number_generator/ObsoleteFiles.inc
  projects/random_number_generator/share/man/man5/src.conf.5
  projects/random_number_generator/share/man/man9/Makefile
  projects/random_number_generator/share/misc/committers-src.dot
  projects/random_number_generator/sys/arm/arm/pmap-v6.c
  projects/random_number_generator/sys/arm/include/param.h
  projects/random_number_generator/sys/arm/include/pmap.h
  projects/random_number_generator/sys/arm/include/pte.h
  projects/random_number_generator/sys/arm/include/vmparam.h
  projects/random_number_generator/sys/dev/amdtemp/amdtemp.c
  projects/random_number_generator/sys/dev/xen/netback/netback.c
  projects/random_number_generator/sys/vm/vm_page.c
  projects/random_number_generator/tools/build/mk/OptionalObsoleteFiles.inc
  projects/random_number_generator/usr.bin/kdump/kdump.c
  projects/random_number_generator/usr.bin/kdump/mksubr
  projects/random_number_generator/usr.sbin/mfiutil/mfi_drive.c
  projects/random_number_generator/usr.sbin/mfiutil/mfiutil.8
  projects/random_number_generator/usr.sbin/mfiutil/mfiutil.c

Directory Properties:
  projects/random_number_generator/   (props changed)
  projects/random_number_generator/sys/   (props changed)

Modified: projects/random_number_generator/ObsoleteFiles.inc
==============================================================================
--- projects/random_number_generator/ObsoleteFiles.inc    Mon Aug 26 18:16:05 2013    (r254925)
+++ projects/random_number_generator/ObsoleteFiles.inc    Mon Aug 26 18:21:04 2013    (r254926)
@@ -38,6 +38,14 @@
 #   xargs -n1 | sort | uniq -d;
 # done
 
+# 20130822: bind 9.9.3-P2 import
+OLD_LIBS+=usr/lib/liblwres.so.80
+# 20130814: vm_page_busy(9)
+OLD_FILES+=usr/share/man/man9/vm_page_flash.9.gz
+OLD_FILES+=usr/share/man/man9/vm_page_io.9.gz
+OLD_FILES+=usr/share/man/man9/vm_page_io_finish.9.gz
+OLD_FILES+=usr/share/man/man9/vm_page_io_start.9.gz
+OLD_FILES+=usr/share/man/man9/vm_page_wakeup.9.gz
 # 20130710: libkvm version bump
 OLD_LIBS+=lib/libkvm.so.5
 OLD_LIBS+=usr/lib32/libkvm.so.5
@@ -114,6 +122,7 @@ OLD_FILES+=usr/include/clang/3.2/xmmintr
 OLD_FILES+=usr/include/clang/3.2/xopintrin.h
 OLD_DIRS+=usr/include/clang/3.2
 # 20130404: legacy ATA stack removed
+OLD_FILES+=etc/periodic/daily/405.status-ata-raid
 OLD_FILES+=rescue/atacontrol
 OLD_FILES+=sbin/atacontrol
 OLD_FILES+=usr/share/man/man8/atacontrol.8.gz
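The OLD_FILES and OLD_LIBS entries above are consumed by the top-level
Makefile's cleanup targets after an installworld.  A usage sketch (not part
of this commit):

    # cd /usr/src
    # make check-old        # list stale files and libraries first
    # make delete-old       # removes OLD_FILES, e.g. the vm_page_io_*.9 pages
    # make delete-old-libs  # removes OLD_LIBS, e.g. usr/lib/liblwres.so.80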
.\" from FreeBSD: head/tools/build/options/makeman 253304 2013-07-12 23:08:44Z bapt .\" $FreeBSD$ -.Dd July 16, 2013 +.Dd August 26, 2013 .Dt SRC.CONF 5 .Os .Sh NAME @@ -245,9 +245,6 @@ Set to not build the BSD licensed versio .It Va WITH_BSD_GREP .\" from FreeBSD: head/tools/build/options/WITH_BSD_GREP 222273 2011-05-25 01:04:12Z obrien Install BSD-licensed grep as '[ef]grep' instead of GNU grep. -.It Va WITH_BSD_PATCH -.\" from FreeBSD: head/tools/build/options/WITH_BSD_PATCH 246074 2013-01-29 17:03:18Z gabor -Install BSD-licensed patch as 'patch' instead of GNU patch. .It Va WITHOUT_BSNMP .\" from FreeBSD: head/tools/build/options/WITHOUT_BSNMP 183306 2008-09-23 16:15:42Z sam Set to not build or install @@ -506,6 +503,9 @@ When set, it also enforces the following .It .Va WITHOUT_GNU_SUPPORT .El +.It Va WITH_GNU_PATCH +.\" from FreeBSD: head/tools/build/options/WITH_GNU_PATCH 253689 2013-07-26 21:25:18Z pfg +Install GNU-licensed patch as 'patch' instead of BSD patch. .It Va WITHOUT_GNU_SUPPORT .\" from FreeBSD: head/tools/build/options/WITHOUT_GNU_SUPPORT 156932 2006-03-21 07:50:50Z ru Set to build some programs without optional GNU support. @@ -538,9 +538,15 @@ Set to build Hesiod support. .It Va WITHOUT_HTML .\" from FreeBSD: head/tools/build/options/WITHOUT_HTML 156932 2006-03-21 07:50:50Z ru Set to not build HTML docs. -.It Va WITH_ICONV -.\" from FreeBSD: head/tools/build/options/WITH_ICONV 219020 2011-02-25 00:10:26Z gabor -Set to build iconv as part of libc. +.It Va WITHOUT_ICONV +.\" from FreeBSD: head/tools/build/options/WITHOUT_ICONV 254919 2013-08-26 17:15:56Z antoine +Set to not build iconv as part of libc. +When set, it also enforces the following options: +.Pp +.Bl -item -compact +.It +.Va WITHOUT_LIBICONV_COMPAT +.El .It Va WITHOUT_INET .\" from FreeBSD: head/tools/build/options/WITHOUT_INET 221266 2011-04-30 17:58:28Z bz Set to not build programs and libraries related to IPv4 networking. @@ -701,6 +707,9 @@ runtime linker. .It Va WITHOUT_LIBCPLUSPLUS .\" from FreeBSD: head/tools/build/options/WITHOUT_LIBCPLUSPLUS 246262 2013-02-02 22:42:46Z dim Set to avoid building libcxxrt and libc++. +.It Va WITH_LIBICONV_COMPAT +.\" from FreeBSD: head/tools/build/options/WITH_LIBICONV_COMPAT 254919 2013-08-26 17:15:56Z antoine +Set to build libiconv API and link time compatibility. .It Va WITHOUT_LIBPTHREAD .\" from FreeBSD: head/tools/build/options/WITHOUT_LIBPTHREAD 188848 2009-02-20 11:09:55Z mtm Set to not build the @@ -1129,6 +1138,9 @@ When set, it also enforces the following .It Va WITHOUT_USB .\" from FreeBSD: head/tools/build/options/WITHOUT_USB 156932 2006-03-21 07:50:50Z ru Set to not build USB-related programs and libraries. +.It Va WITH_USB_GADGET_EXAMPLES +.\" from FreeBSD: head/tools/build/options/WITH_USB_GADGET_EXAMPLES 254919 2013-08-26 17:15:56Z antoine +Set to build USB gadget kernel modules. 
Modified: projects/random_number_generator/share/man/man9/Makefile
==============================================================================
--- projects/random_number_generator/share/man/man9/Makefile    Mon Aug 26 18:16:05 2013    (r254925)
+++ projects/random_number_generator/share/man/man9/Makefile    Mon Aug 26 18:21:04 2013    (r254926)
@@ -324,6 +324,7 @@ MAN=    accept_filter.9 \
     vm_map_wire.9 \
     vm_page_alloc.9 \
     vm_page_bits.9 \
+    vm_page_busy.9 \
     vm_page_cache.9 \
     vm_page_deactivate.9 \
     vm_page_dontneed.9 \

Modified: projects/random_number_generator/share/misc/committers-src.dot
==============================================================================
--- projects/random_number_generator/share/misc/committers-src.dot    Mon Aug 26 18:16:05 2013    (r254925)
+++ projects/random_number_generator/share/misc/committers-src.dot    Mon Aug 26 18:21:04 2013    (r254926)
@@ -187,6 +187,7 @@ jkim [label="Jung-uk Kim\njkim@FreeBSD.o
 jkoshy [label="A. Joseph Koshy\njkoshy@FreeBSD.org\n1998/05/13"]
 jlh [label="Jeremie Le Hen\njlh@FreeBSD.org\n2012/04/22"]
 jls [label="Jordan Sissel\njls@FreeBSD.org\n2006/12/06"]
+jmg [label="John-Mark Gurney\njmg@FreeBSD.org\n1997/02/13"]
 joerg [label="Joerg Wunsch\njoerg@FreeBSD.org\n1993/11/14"]
 jon [label="Jonathan Chen\njon@FreeBSD.org\n2000/10/17"]
 jonathan [label="Jonathan Anderson\njonathan@FreeBSD.org\n2010/10/07"]
@@ -495,6 +496,7 @@ jlemon -> brooks
 
 joerg -> brian
 joerg -> eik
+joerg -> jmg
 joerg -> le
 joerg -> netchild
 joerg -> schweikh

Modified: projects/random_number_generator/sys/arm/arm/pmap-v6.c
==============================================================================
--- projects/random_number_generator/sys/arm/arm/pmap-v6.c    Mon Aug 26 18:16:05 2013    (r254925)
+++ projects/random_number_generator/sys/arm/arm/pmap-v6.c    Mon Aug 26 18:21:04 2013    (r254926)
@@ -171,6 +171,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_extern.h>
+#include <vm/vm_reserv.h>
 
 #include <machine/md_var.h>
 #include <machine/cpu.h>
@@ -201,6 +202,8 @@ int pmap_debug_level = 0;
 #define PV_STAT(x) do { } while (0)
 #endif
 
+#define pa_to_pvh(pa)    (&pv_table[pa_index(pa)])
+
 #ifdef ARM_L2_PIPT
 #define pmap_l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range((pa), (size))
 #define pmap_l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range((pa), (size))
@@ -215,10 +218,16 @@ extern struct pv_addr systempage;
 /*
  * Internal function prototypes
  */
+static PMAP_INLINE
+struct pv_entry *pmap_find_pv(struct md_page *, pmap_t, vm_offset_t);
 static void    pmap_free_pv_chunk(struct pv_chunk *pc);
 static void    pmap_free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t pmap_get_pv_entry(pmap_t pmap, boolean_t try);
 static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
+static boolean_t pmap_pv_insert_section(pmap_t, vm_offset_t,
+    vm_paddr_t);
+static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
+static int    pmap_pvh_wired_mappings(struct md_page *, int);
 
 static void    pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
     vm_page_t, vm_prot_t, boolean_t, int);
@@ -226,6 +235,14 @@ static vm_paddr_t    pmap_extract_locked(pm
 static void    pmap_alloc_l1(pmap_t);
 static void    pmap_free_l1(pmap_t);
 
+static void    pmap_map_section(pmap_t, vm_offset_t, vm_offset_t,
+    vm_prot_t, boolean_t);
+static void    pmap_promote_section(pmap_t, vm_offset_t);
+static boolean_t pmap_demote_section(pmap_t, vm_offset_t);
+static boolean_t pmap_enter_section(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t);
+static void    pmap_remove_section(pmap_t, vm_offset_t);
+
 static int    pmap_clearbit(struct vm_page *, u_int);
 
 static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
@@ -403,6 +420,7 @@ int pmap_needs_pte_sync;
  */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
 static int pv_entry_count, pv_entry_max, pv_entry_high_water;
+static struct md_page *pv_table;
 static int shpgperproc = PMAP_SHPGPERPROC;
 
 struct pv_chunk *pv_chunkbase;        /* KVA block for pv_chunks */
@@ -433,6 +451,11 @@ static const uint32_t pc_freemask[_NPCM]
 
 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
 
+/* Superpages utilization enabled = 1 / disabled = 0 */
+static int sp_enabled = 0;
+SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN, &sp_enabled, 0,
+    "Are large page mappings enabled?");
+
 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
     &pv_entry_count, 0, "Current number of pv entries");
 
@@ -891,7 +914,9 @@ static int
 pmap_clearbit(struct vm_page *m, u_int maskbits)
 {
     struct l2_bucket *l2b;
-    struct pv_entry *pv;
+    struct pv_entry *pv, *pve, *next_pv;
+    struct md_page *pvh;
+    pd_entry_t *pl1pd;
     pt_entry_t *ptep, npte, opte;
    pmap_t pmap;
     vm_offset_t va;
@@ -899,7 +924,79 @@ pmap_clearbit(struct vm_page *m, u_int m
     int count = 0;
 
     rw_wlock(&pvh_global_lock);
+    if ((m->flags & PG_FICTITIOUS) != 0)
+        goto small_mappings;
+    pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+    TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
+        va = pv->pv_va;
+        pmap = PV_PMAP(pv);
+        PMAP_LOCK(pmap);
+        pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
+        KASSERT((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO,
+            ("pmap_clearbit: valid section mapping expected"));
+        if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_WRITE))
+            (void)pmap_demote_section(pmap, va);
+        else if ((maskbits & PVF_REF) && L1_S_REFERENCED(*pl1pd)) {
+            if (pmap_demote_section(pmap, va)) {
+                if ((pv->pv_flags & PVF_WIRED) == 0) {
+                    /*
+                     * Remove the mapping to a single page
+                     * so that a subsequent access may
+                     * repromote. Since the underlying
+                     * l2_bucket is fully populated, this
+                     * removal never frees an entire
+                     * l2_bucket.
+                     */
+                    va += (VM_PAGE_TO_PHYS(m) &
+                        L1_S_OFFSET);
+                    l2b = pmap_get_l2_bucket(pmap, va);
+                    KASSERT(l2b != NULL,
+                        ("pmap_clearbit: no l2 bucket for "
+                         "va 0x%#x, pmap 0x%p", va, pmap));
+                    ptep = &l2b->l2b_kva[l2pte_index(va)];
+                    *ptep = 0;
+                    PTE_SYNC(ptep);
+                    pmap_free_l2_bucket(pmap, l2b, 1);
+                    pve = pmap_remove_pv(m, pmap, va);
+                    KASSERT(pve != NULL, ("pmap_clearbit: "
+                        "no PV entry for managed mapping"));
+                    pmap_free_pv_entry(pmap, pve);
+
+                }
+            }
+        } else if ((maskbits & PVF_MOD) && L1_S_WRITABLE(*pl1pd)) {
+            if (pmap_demote_section(pmap, va)) {
+                if ((pv->pv_flags & PVF_WIRED) == 0) {
+                    /*
+                     * Write protect the mapping to a
+                     * single page so that a subsequent
+                     * write access may repromote.
+                     */
+                    va += (VM_PAGE_TO_PHYS(m) &
+                        L1_S_OFFSET);
+                    l2b = pmap_get_l2_bucket(pmap, va);
+                    KASSERT(l2b != NULL,
+                        ("pmap_clearbit: no l2 bucket for "
+                         "va 0x%#x, pmap 0x%p", va, pmap));
+                    ptep = &l2b->l2b_kva[l2pte_index(va)];
+                    if ((*ptep & L2_S_PROTO) != 0) {
+                        pve = pmap_find_pv(&m->md,
+                            pmap, va);
+                        KASSERT(pve != NULL,
+                            ("pmap_clearbit: no PV "
+                            "entry for managed mapping"));
+                        pve->pv_flags &= ~PVF_WRITE;
+                        *ptep &= ~L2_APX;
+                        PTE_SYNC(ptep);
+                    }
+                }
+            }
+        }
+        PMAP_UNLOCK(pmap);
+    }
+
+small_mappings:
     if (TAILQ_EMPTY(&m->md.pv_list)) {
         rw_wunlock(&pvh_global_lock);
         return (0);
@@ -917,6 +1014,8 @@ pmap_clearbit(struct vm_page *m, u_int m
         PMAP_LOCK(pmap);
 
         l2b = pmap_get_l2_bucket(pmap, va);
+        KASSERT(l2b != NULL, ("pmap_clearbit: no l2 bucket for "
+            "va 0x%#x, pmap 0x%p", va, pmap));
 
         ptep = &l2b->l2b_kva[l2pte_index(va)];
         npte = opte = *ptep;
@@ -999,14 +1098,15 @@ pmap_enter_pv(struct vm_page *m, struct 
  * => caller should hold lock on vm_page
  */
 static PMAP_INLINE struct pv_entry *
-pmap_find_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va)
+pmap_find_pv(struct md_page *md, pmap_t pmap, vm_offset_t va)
 {
     struct pv_entry *pv;
 
     rw_assert(&pvh_global_lock, RA_WLOCKED);
-    TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
-        if (pmap == PV_PMAP(pv) && va == pv->pv_va)
-            break;
+    TAILQ_FOREACH(pv, &md->pv_list, pv_list)
+        if (pmap == PV_PMAP(pv) && va == pv->pv_va)
+            break;
+
     return (pv);
 }
@@ -1075,7 +1175,7 @@ pmap_remove_pv(struct vm_page *m, pmap_t
 
     rw_assert(&pvh_global_lock, RA_WLOCKED);
     PMAP_ASSERT_LOCKED(pmap);
-    pve = pmap_find_pv(m, pmap, va);    /* find corresponding pve */
+    pve = pmap_find_pv(&m->md, pmap, va);    /* find corresponding pve */
     if (pve != NULL) {
         TAILQ_REMOVE(&m->md.pv_list, pve, pv_list);
         if (pve->pv_flags & PVF_WIRED)
@@ -1106,7 +1206,7 @@ pmap_modify_pv(struct vm_page *m, pmap_t
     PMAP_ASSERT_LOCKED(pmap);
     rw_assert(&pvh_global_lock, RA_WLOCKED);
 
-    if ((npv = pmap_find_pv(m, pmap, va)) == NULL)
+    if ((npv = pmap_find_pv(&m->md, pmap, va)) == NULL)
         return (0);
 
     /*
@@ -1143,6 +1243,7 @@ pmap_pinit0(struct pmap *pmap)
     bcopy(kernel_pmap, pmap, sizeof(*pmap));
     bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
     PMAP_LOCK_INIT(pmap);
+    TAILQ_INIT(&pmap->pm_pvchunk);
 }
 
 /*
@@ -1206,6 +1307,8 @@ pmap_ptelist_init(vm_offset_t *head, voi
 void
 pmap_init(void)
 {
+    vm_size_t s;
+    int i, pv_npg;
 
     PDEBUG(1, printf("pmap_init: phys_start = %08x\n", PHYSADDR));
 
@@ -1215,6 +1318,32 @@ pmap_init(void)
         NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 
     /*
+     * Are large page mappings supported and enabled?
+     */
+    TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
+    if (sp_enabled) {
+        KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
+            ("pmap_init: can't assign to pagesizes[1]"));
+        pagesizes[1] = NBPDR;
+    }
+
+    /*
+     * Calculate the size of the pv head table for superpages.
+     */
+    for (i = 0; phys_avail[i + 1]; i += 2);
+    pv_npg = round_1mpage(phys_avail[(i - 2) + 1]) / NBPDR;
+
+    /*
+     * Allocate memory for the pv head table for superpages.
+     */
+    s = (vm_size_t)(pv_npg * sizeof(struct md_page));
+    s = round_page(s);
+    pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+        M_WAITOK | M_ZERO);
+    for (i = 0; i < pv_npg; i++)
+        TAILQ_INIT(&pv_table[i].pv_list);
+
+    /*
     * Initialize the address space for the pv chunks.
     */
@@ -1243,6 +1372,25 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_
 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
     "Page share factor per proc");
 
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, section, CTLFLAG_RD, 0,
+    "1MB page mapping counters");
+
+static u_long pmap_section_demotions;
+SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, demotions, CTLFLAG_RD,
+    &pmap_section_demotions, 0, "1MB page demotions");
+
+static u_long pmap_section_mappings;
+SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, mappings, CTLFLAG_RD,
+    &pmap_section_mappings, 0, "1MB page mappings");
+
+static u_long pmap_section_p_failures;
+SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, p_failures, CTLFLAG_RD,
+    &pmap_section_p_failures, 0, "1MB page promotion failures");
+
+static u_long pmap_section_promotions;
+SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, promotions, CTLFLAG_RD,
+    &pmap_section_promotions, 0, "1MB page promotions");
+
 int
 pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype, int user)
 {
@@ -1257,7 +1405,47 @@ pmap_fault_fixup(pmap_t pmap, vm_offset_
     l1idx = L1_IDX(va);
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
-
     /*
+     * Check and possibly fix-up L1 section mapping
+     * only when superpage mappings are enabled to speed up.
+     */
+    if (sp_enabled) {
+        pl1pd = &pmap->pm_l1->l1_kva[l1idx];
+        l1pd = *pl1pd;
+        if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
+            /* Catch an access to the vectors section */
+            if (l1idx == L1_IDX(vector_page))
+                goto out;
+            /*
+             * Stay away from the kernel mappings.
+             * None of them should fault from L1 entry.
+             */
+            if (pmap == pmap_kernel())
+                goto out;
+            /*
+             * Catch a forbidden userland access
+             */
+            if (user && !(l1pd & L1_S_PROT_U))
+                goto out;
+            /*
+             * Superpage is always either mapped read only
+             * or it is modified and permitted to be written
+             * by default. Therefore, process only reference
+             * flag fault and demote page in case of write fault.
+             */
+            if ((ftype & VM_PROT_WRITE) && !L1_S_WRITABLE(l1pd) &&
+                L1_S_REFERENCED(l1pd)) {
+                (void)pmap_demote_section(pmap, va);
+                goto out;
+            } else if (!L1_S_REFERENCED(l1pd)) {
+                /* Mark the page "referenced" */
+                *pl1pd = l1pd | L1_S_REF;
+                PTE_SYNC(pl1pd);
+                goto l1_section_out;
+            } else
+                goto out;
+        }
+    }
+    /*
     * If there is no l2_dtable for this address, then the process
     * has no business accessing it.
@@ -1310,7 +1498,7 @@ pmap_fault_fixup(pmap_t pmap, vm_offset_
     }
 
     /* Get the current flags for this page. */
-    pv = pmap_find_pv(m, pmap, va);
+    pv = pmap_find_pv(&m->md, pmap, va);
     if (pv == NULL) {
         goto out;
     }
@@ -1345,7 +1533,7 @@ pmap_fault_fixup(pmap_t pmap, vm_offset_
         if ((m = PHYS_TO_VM_PAGE(pa)) == NULL)
             goto out;
         /* Get the current flags for this page. */
-        pv = pmap_find_pv(m, pmap, va);
+        pv = pmap_find_pv(&m->md, pmap, va);
         if (pv == NULL)
             goto out;
 
@@ -1410,6 +1598,7 @@ pmap_fault_fixup(pmap_t pmap, vm_offset_
     }
 #endif
 
+l1_section_out:
     cpu_tlb_flushID_SE(va);
     cpu_cpwait();
 
@@ -1976,6 +2165,24 @@ pmap_growkernel(vm_offset_t addr)
     kernel_vm_end = pmap_curmaxkvaddr;
 }
 
+/*
+ * Returns TRUE if the given page is mapped individually or as part of
+ * a 1MB section.  Otherwise, returns FALSE.
+ */
+boolean_t
+pmap_page_is_mapped(vm_page_t m)
+{
+    boolean_t rv;
+
+    if ((m->oflags & VPO_UNMANAGED) != 0)
+        return (FALSE);
+    rw_wlock(&pvh_global_lock);
+    rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+        ((m->flags & PG_FICTITIOUS) == 0 &&
+        !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+    rw_wunlock(&pvh_global_lock);
+    return (rv);
+}
 
 /*
  * Remove all pages from specified address space
@@ -1990,9 +2197,12 @@ pmap_remove_pages(pmap_t pmap)
 {
     struct pv_entry *pv;
     struct l2_bucket *l2b = NULL;
-    vm_page_t m;
-    pt_entry_t *ptep;
     struct pv_chunk *pc, *npc;
+    struct md_page *pvh;
+    pd_entry_t *pl1pd, l1pd;
+    pt_entry_t *ptep;
+    vm_page_t m, mt;
+    vm_offset_t va;
     uint32_t inuse, bitmask;
     int allfree, bit, field, idx;
 
@@ -2008,33 +2218,63 @@ pmap_remove_pages(pmap_t pmap)
                 bitmask = 1ul << bit;
                 idx = field * sizeof(inuse) * NBBY + bit;
                 pv = &pc->pc_pventry[idx];
+                va = pv->pv_va;
                 inuse &= ~bitmask;
                 if (pv->pv_flags & PVF_WIRED) {
                     /* Cannot remove wired pages now. */
                     allfree = 0;
                     continue;
                 }
-                l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
-                KASSERT(l2b != NULL,
-                    ("No L2 bucket in pmap_remove_pages"));
-                ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-                m = PHYS_TO_VM_PAGE(*ptep & L2_ADDR_MASK);
-                KASSERT((vm_offset_t)m >= KERNBASE,
-                    ("Trying to access non-existent page "
-                     "va %x pte %x", pv->pv_va, *ptep));
-                *ptep = 0;
-                PTE_SYNC(ptep);
+                pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
+                l1pd = *pl1pd;
+                l2b = pmap_get_l2_bucket(pmap, va);
+                if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
+                    pvh = pa_to_pvh(l1pd & L1_S_FRAME);
+                    TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+                    if (TAILQ_EMPTY(&pvh->pv_list)) {
+                        m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
+                        KASSERT((vm_offset_t)m >= KERNBASE,
+                            ("Trying to access non-existent page "
+                             "va %x l1pd %x", trunc_1mpage(va), l1pd));
+                        for (mt = m; mt < &m[L2_PTE_NUM_TOTAL]; mt++) {
+                            if (TAILQ_EMPTY(&mt->md.pv_list))
+                                vm_page_aflag_clear(mt, PGA_WRITEABLE);
+                        }
+                    }
+                    if (l2b != NULL) {
+                        KASSERT(l2b->l2b_occupancy == L2_PTE_NUM_TOTAL,
+                            ("pmap_remove_pages: l2_bucket occupancy error"));
+                        pmap_free_l2_bucket(pmap, l2b, L2_PTE_NUM_TOTAL);
+                    }
+                    pmap->pm_stats.resident_count -= L2_PTE_NUM_TOTAL;
+                    *pl1pd = 0;
+                    PTE_SYNC(pl1pd);
+                } else {
+                    KASSERT(l2b != NULL,
+                        ("No L2 bucket in pmap_remove_pages"));
+                    ptep = &l2b->l2b_kva[l2pte_index(va)];
+                    m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
+                    KASSERT((vm_offset_t)m >= KERNBASE,
+                        ("Trying to access non-existent page "
+                         "va %x pte %x", va, *ptep));
+                    TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+                    if (TAILQ_EMPTY(&m->md.pv_list) &&
+                        (m->flags & PG_FICTITIOUS) == 0) {
+                        pvh = pa_to_pvh(l2pte_pa(*ptep));
+                        if (TAILQ_EMPTY(&pvh->pv_list))
+                            vm_page_aflag_clear(m, PGA_WRITEABLE);
+                    }
+                    *ptep = 0;
+                    PTE_SYNC(ptep);
+                    pmap_free_l2_bucket(pmap, l2b, 1);
+                    pmap->pm_stats.resident_count--;
+                }
 
                 /* Mark free */
                 PV_STAT(pv_entry_frees++);
                 PV_STAT(pv_entry_spare++);
                 pv_entry_count--;
-                pmap->pm_stats.resident_count--;
                 pc->pc_map[field] |= bitmask;
-                TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-                if (TAILQ_EMPTY(&m->md.pv_list))
-                    vm_page_aflag_clear(m, PGA_WRITEABLE);
-                pmap_free_l2_bucket(pmap, l2b, 1);
             }
         }
         if (allfree) {
@@ -2063,7 +2303,8 @@ pmap_kenter_supersection(vm_offset_t va,
 {
     pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
         (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
-        VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
+        VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) |
+        L1_S_DOM(PMAP_DOMAIN_KERNEL);
     struct l1_ttable *l1;
     vm_offset_t va0, va_end;
 
@@ -2092,7 +2333,8 @@ void
 pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
 {
     pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
-        VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
+        VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) | L1_S_REF |
+        L1_S_DOM(PMAP_DOMAIN_KERNEL);
     struct l1_ttable *l1;
 
     KASSERT(((va | pa) & L1_S_OFFSET) == 0,
@@ -2327,9 +2569,11 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 
     if (!pmap_get_pde_pte(pmap, addr, &pdep, &ptep))
         return (FALSE);
-    KASSERT(ptep != NULL, ("Valid mapping but no pte ?"));
-    if (*ptep == 0)
-        return (TRUE);
+    KASSERT((pdep != NULL && (l1pte_section_p(*pdep) || ptep != NULL)),
+        ("Valid mapping but no pte ?"));
+    if (*pdep != 0 && !l1pte_section_p(*pdep))
+        if (*ptep == 0)
+            return (TRUE);
     return (FALSE);
 }
 
@@ -2398,6 +2642,7 @@ pmap_get_pde_pte(pmap_t pmap, vm_offset_
 void
 pmap_remove_all(vm_page_t m)
 {
+    struct md_page *pvh;
     pv_entry_t pv;
     pmap_t pmap;
     pt_entry_t *ptep;
@@ -2406,12 +2651,23 @@ pmap_remove_all(vm_page_t m)
     pmap_t curpmap;
     u_int is_exec = 0;
 
-    KASSERT((m->flags & PG_FICTITIOUS) == 0,
-        ("pmap_remove_all: page %p is fictitious", m));
-
-    if (TAILQ_EMPTY(&m->md.pv_list))
-        return;
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+        ("pmap_remove_all: page %p is not managed", m));
     rw_wlock(&pvh_global_lock);
+    if ((m->flags & PG_FICTITIOUS) != 0)
+        goto small_mappings;
+    pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+    while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
+        pmap = PV_PMAP(pv);
+        PMAP_LOCK(pmap);
+        pd_entry_t *pl1pd;
+        pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(pv->pv_va)];
+        KASSERT((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO,
+            ("pmap_remove_all: valid section mapping expected"));
+        (void)pmap_demote_section(pmap, pv->pv_va);
+        PMAP_UNLOCK(pmap);
+    }
+small_mappings:
     curpmap = vmspace_pmap(curproc->p_vmspace);
     while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
         pmap = PV_PMAP(pv);
@@ -2513,6 +2769,9 @@ void
 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
     struct l2_bucket *l2b;
+    struct md_page *pvh;
+    struct pv_entry *pve;
+    pd_entry_t *pl1pd, l1pd;
     pt_entry_t *ptep, pte;
     vm_offset_t next_bucket;
     u_int is_exec, is_refd;
@@ -2544,9 +2803,47 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 
     while (sva < eva) {
         next_bucket = L2_NEXT_BUCKET(sva);
+        /*
+         * Check for large page.
+         */
+        pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
+        l1pd = *pl1pd;
+        if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
+            KASSERT(pmap != pmap_kernel(),
+                ("pmap_protect: trying to modify "
+                "kernel section protections"));
+            /*
+             * Are we protecting the entire large page? If not,
+             * demote the mapping and fall through.
+             */
+            if (sva + L1_S_SIZE == L2_NEXT_BUCKET(sva) &&
+                eva >= L2_NEXT_BUCKET(sva)) {
+                l1pd &= ~(L1_S_PROT_MASK | L1_S_XN);
+                if (!(prot & VM_PROT_EXECUTE))
+                    *pl1pd |= L1_S_XN;
+                /*
+                 * At this point we are always setting
+                 * write-protect bit.
+                 */
+                l1pd |= L1_S_APX;
+                /* All managed superpages are user pages. */
+                l1pd |= L1_S_PROT_U;
+                *pl1pd = l1pd;
+                PTE_SYNC(pl1pd);
+                pvh = pa_to_pvh(l1pd & L1_S_FRAME);
+                pve = pmap_find_pv(pvh, pmap,
+                    trunc_1mpage(sva));
+                pve->pv_flags &= ~PVF_WRITE;
+                sva = next_bucket;
+                continue;
+            } else if (!pmap_demote_section(pmap, sva)) {
+                /* The large page mapping was destroyed. */
+                sva = next_bucket;
+                continue;
+            }
+        }
         if (next_bucket > eva)
             next_bucket = eva;
-
         l2b = pmap_get_l2_bucket(pmap, sva);
         if (l2b == NULL) {
             sva = next_bucket;
@@ -2632,6 +2929,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset
     struct l2_bucket *l2b = NULL;
     struct vm_page *om;
     struct pv_entry *pve = NULL;
+    pd_entry_t *pl1pd, l1pd;
     pt_entry_t *ptep, npte, opte;
     u_int nflags;
     u_int is_exec, is_refd;
@@ -2650,6 +2948,10 @@ pmap_enter_locked(pmap_t pmap, vm_offset
         pa = VM_PAGE_TO_PHYS(m);
     }
 
+    pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
+    if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
+        panic("pmap_enter_locked: attempt pmap_enter_on 1MB page");
+
     user = 0;
     /*
     * Make sure userland mappings get the right permissions
@@ -2824,9 +3126,6 @@ validate:
             * L1 entry to avoid taking another
             * page/domain fault.
             */
-            pd_entry_t *pl1pd, l1pd;
-
-            pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
             l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) |
                 L1_C_PROTO;
             if (*pl1pd != l1pd) {
@@ -2844,6 +3143,14 @@ validate:
 
     if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
         cpu_icache_sync_range(va, PAGE_SIZE);
+    /*
+     * If both the l2b_occupancy and the reservation are fully
+     * populated, then attempt promotion.
+     */
+    if ((l2b->l2b_occupancy == L2_PTE_NUM_TOTAL) &&
+        sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+        vm_reserv_level_iffullpop(m) == 0)
+        pmap_promote_section(pmap, va);
 }
 
 /*
@@ -2862,6 +3169,7 @@ void
 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
+    vm_offset_t va;
     vm_page_t m;
     vm_pindex_t diff, psize;
     vm_prot_t access;
@@ -2874,8 +3182,15 @@ pmap_enter_object(pmap_t pmap, vm_offset
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
-        pmap_enter_locked(pmap, start + ptoa(diff), access, m, prot,
-            FALSE, M_NOWAIT);
+        va = start + ptoa(diff);
+        if ((va & L1_S_OFFSET) == 0 && L2_NEXT_BUCKET(va) <= end &&
+            (VM_PAGE_TO_PHYS(m) & L1_S_OFFSET) == 0 &&
+            sp_enabled && vm_reserv_level_iffullpop(m) == 0 &&
+            pmap_enter_section(pmap, va, m, prot))
+            m = &m[L1_S_SIZE / PAGE_SIZE - 1];
+        else
+            pmap_enter_locked(pmap, va, access, m, prot,
+                FALSE, M_NOWAIT);
         m = TAILQ_NEXT(m, listq);
     }
     PMAP_UNLOCK(pmap);
@@ -2915,11 +3230,32 @@ void
 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 {
     struct l2_bucket *l2b;
+    struct md_page *pvh;
+    struct pv_entry *pve;
+    pd_entry_t *pl1pd, l1pd;
     pt_entry_t *ptep, pte;
     vm_page_t m;
 
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
+    pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
+    l1pd = *pl1pd;
+    if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
+        m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
+        KASSERT((m != NULL) && ((m->oflags & VPO_UNMANAGED) == 0),
+            ("pmap_change_wiring: unmanaged superpage should not "
+             "be changed"));
+        KASSERT(pmap != pmap_kernel(),
+            ("pmap_change_wiring: managed kernel superpage "
+             "should not exist"));
+        pvh = pa_to_pvh(l1pd & L1_S_FRAME);
+        pve = pmap_find_pv(pvh, pmap, trunc_1mpage(va));
+        if (!wired != ((pve->pv_flags & PVF_WIRED) == 0)) {
+            if (!pmap_demote_section(pmap, va))
+                panic("pmap_change_wiring: demotion failed");
+        } else
+            goto out;
+    }
     l2b = pmap_get_l2_bucket(pmap, va);
     KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
     ptep = &l2b->l2b_kva[l2pte_index(va)];
@@ -2928,6 +3264,7 @@ pmap_change_wiring(pmap_t pmap, vm_offse
     if (m != NULL)
         pmap_modify_pv(m, pmap, va, PVF_WIRED,
             wired == TRUE ? PVF_WIRED : 0);
+out:
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
 }
@@ -3034,10 +3371,6 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 retry:
     l1pd = pmap->pm_l1->l1_kva[l1idx];
     if (l1pte_section_p(l1pd)) {
-        /*
-         * These should only happen for pmap_kernel()
-         */
-        KASSERT(pmap == pmap_kernel(), ("huh"));
         /* XXX: what to do about the bits > 32 ? */
         if (l1pd & L1_S_SUPERSEC)
             pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
@@ -3122,6 +3455,520 @@ pmap_pinit(pmap_t pmap)
 
 
 /***************************************************
+ * Superpage management routines.
+ ***************************************************/
+
+static PMAP_INLINE struct pv_entry *
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+    pv_entry_t pv;
+
+    rw_assert(&pvh_global_lock, RA_WLOCKED);
+
+    pv = pmap_find_pv(pvh, pmap, va);
+    if (pv != NULL)
+        TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+
+    return (pv);
+}
+
+static void
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+    pv_entry_t pv;
+
+    pv = pmap_pvh_remove(pvh, pmap, va);
+    KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
+    pmap_free_pv_entry(pmap, pv);
+}
+
+static boolean_t
+pmap_pv_insert_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
+{
+    struct md_page *pvh;
+    pv_entry_t pv;
+
+    rw_assert(&pvh_global_lock, RA_WLOCKED);
+    if (pv_entry_count < pv_entry_high_water &&
+        (pv = pmap_get_pv_entry(pmap, TRUE)) != NULL) {
+        pv->pv_va = va;
+        pvh = pa_to_pvh(pa);
+        TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
+        return (TRUE);
+    } else
+        return (FALSE);
+}
+
+/*
+ * Create the pv entries for each of the pages within a superpage.
+ */
+static void
+pmap_pv_demote_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
+{
+    struct md_page *pvh;
+    pv_entry_t pve, pv;
+    vm_offset_t va_last;
+    vm_page_t m;
+
+    rw_assert(&pvh_global_lock, RA_WLOCKED);
+    KASSERT((pa & L1_S_OFFSET) == 0,
+        ("pmap_pv_demote_section: pa is not 1mpage aligned"));
+
+    /*
+     * Transfer the 1mpage's pv entry for this mapping to the first
+     * page's pv list.
+     */
+    pvh = pa_to_pvh(pa);
+    va = trunc_1mpage(va);
+    pv = pmap_pvh_remove(pvh, pmap, va);
+    KASSERT(pv != NULL, ("pmap_pv_demote_section: pv not found"));
+    m = PHYS_TO_VM_PAGE(pa);
+    TAILQ_INSERT_HEAD(&m->md.pv_list, pv, pv_list);
+    /* Instantiate the remaining pv entries. */
+    va_last = L2_NEXT_BUCKET(va) - PAGE_SIZE;
+    do {
+        m++;
+        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+            ("pmap_pv_demote_section: page %p is not managed", m));
+        va += PAGE_SIZE;
+        pve = pmap_get_pv_entry(pmap, FALSE);
+        pmap_enter_pv(m, pve, pmap, va, pv->pv_flags);
+    } while (va < va_last);
+}
+
+static void
+pmap_pv_promote_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
+{
+    struct md_page *pvh;
+    pv_entry_t pv;
+    vm_offset_t va_last;
+    vm_page_t m;
+
+    rw_assert(&pvh_global_lock, RA_WLOCKED);
+    KASSERT((pa & L1_S_OFFSET) == 0,
+        ("pmap_pv_promote_section: pa is not 1mpage aligned"));
+
+    /*
+     * Transfer the first page's pv entry for this mapping to the
+     * 1mpage's pv list.  Aside from avoiding the cost of a call
+     * to get_pv_entry(), a transfer avoids the possibility that
+     * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim()
+     * removes one of the mappings that is being promoted.
+     */
+    m = PHYS_TO_VM_PAGE(pa);
+    va = trunc_1mpage(va);
+    pv = pmap_pvh_remove(&m->md, pmap, va);
+    KASSERT(pv != NULL, ("pmap_pv_promote_section: pv not found"));
+    pvh = pa_to_pvh(pa);
+    TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
+    /* Free the remaining pv entries in the newly mapped section pages */
+    va_last = L2_NEXT_BUCKET(va) - PAGE_SIZE;
+    do {
+        m++;
+        va += PAGE_SIZE;
+        /*
+         * Don't care the flags, first pv contains sufficient
+         * information for all of the pages so nothing is really lost.
+         */
+        pmap_pvh_free(&m->md, pmap, va);
+    } while (va < va_last);
+}
+
+/*
+ * Tries to create a 1MB page mapping.  Returns TRUE if successful and
+ * FALSE otherwise.  Fails if (1) page is unmanaged, kernel pmap or vectors
+ * page, (2) a mapping already exists at the specified virtual address, or
+ * (3) a pv entry cannot be allocated without reclaiming another pv entry.
+ */
+static boolean_t
+pmap_enter_section(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+    pd_entry_t *pl1pd;
+    vm_offset_t pa;
+    struct l2_bucket *l2b;
+
+    rw_assert(&pvh_global_lock, RA_WLOCKED);
+    PMAP_ASSERT_LOCKED(pmap);
+
+    /* Skip kernel, vectors page and unmanaged mappings */
+    if ((pmap == pmap_kernel()) || (L1_IDX(va) == L1_IDX(vector_page)) ||
+        ((m->oflags & VPO_UNMANAGED) != 0)) {
+        CTR2(KTR_PMAP, "pmap_enter_section: failure for va %#lx"
+            " in pmap %p", va, pmap);
+        return (FALSE);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
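The pmap-v6.c changes above export superpage activity through the new
read-only sysctls under vm.pmap.section, gated by the vm.pmap.sp_enabled
tunable.  A minimal userland sketch (not part of this commit; assumes a
kernel built with this change) that dumps the counters:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Counter nodes added by this revision (all u_long, read-only). */
        const char *nodes[] = {
            "vm.pmap.section.mappings",
            "vm.pmap.section.promotions",
            "vm.pmap.section.p_failures",
            "vm.pmap.section.demotions",
        };
        u_long val;
        size_t len;
        int sp_enabled;
        u_int i;

        /* Is the superpage machinery enabled at all? */
        len = sizeof(sp_enabled);
        if (sysctlbyname("vm.pmap.sp_enabled", &sp_enabled, &len,
            NULL, 0) != 0) {
            perror("vm.pmap.sp_enabled");  /* kernel without this change? */
            return (1);
        }
        printf("sp_enabled: %d\n", sp_enabled);
        for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
            len = sizeof(val);
            if (sysctlbyname(nodes[i], &val, &len, NULL, 0) == 0)
                printf("%s: %lu\n", nodes[i], val);
        }
        return (0);
    }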