Date: Mon, 30 Jan 2006 22:25:51 GMT
From: Peter Wemm <peter@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 90730 for review
Message-ID: <200601302225.k0UMPpTa062717@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=90730

Change 90730 by peter@peter_melody on 2006/01/30 22:25:01

	Move the pv entry zone out of kvm and into the direct map area,
	like most of the other zones.  Convert it to a regular freeable
	zone.  Turn the vm.pmap.* tunables into real working sysctls.
	These now set a pmap-internal limit on pv entries, rather than
	having the zone set the limit.  i386 uses pvzone_obj to enable
	preallocation of kva; we can use the direct map instead.

	XXX perhaps remove the "high water" mark soft limit and make
	pv_entry_max the soft limit instead.

Affected files ...

.. //depot/projects/hammer/sys/amd64/amd64/pmap.c#131 edit

Differences ...

==== //depot/projects/hammer/sys/amd64/amd64/pmap.c#131 (text+ko) ====

@@ -183,8 +183,8 @@
  * Data for the pv entry allocation mechanism
  */
 static uma_zone_t pvzone;
-static struct vm_object pvzone_obj;
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
+static int shpgperproc = PMAP_SHPGPERPROC;
 
 /*
  * All those kernel PT submaps that BSD is so fond of
@@ -563,7 +563,6 @@
 void
 pmap_init(void)
 {
-	int shpgperproc = PMAP_SHPGPERPROC;
 
 	/*
 	 * Initialize the address space (zone) for the pv entries.  Set a
@@ -571,13 +570,43 @@
 	 * numbers of pv entries.
 	 */
 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
-	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
 	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
-	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+}
+
+SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
+static int
+pmap_pventry_proc(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+
+	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+	if (error == 0 && req->newptr) {
+		shpgperproc = (pv_entry_max - cnt.v_page_count) / maxproc;
+		pv_entry_high_water = 9 * (pv_entry_max / 10);
+	}
+	return (error);
+}
+SYSCTL_PROC(_vm_pmap, OID_AUTO, pv_entry_max, CTLTYPE_INT|CTLFLAG_RW,
+    &pv_entry_max, 0, pmap_pventry_proc, "IU", "Max number of PV entries");
+
+static int
+pmap_shpgperproc_proc(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+
+	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+	if (error == 0 && req->newptr) {
+		pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
+		pv_entry_high_water = 9 * (pv_entry_max / 10);
+	}
+	return (error);
 }
+SYSCTL_PROC(_vm_pmap, OID_AUTO, shpgperproc, CTLTYPE_INT|CTLFLAG_RW,
+    &shpgperproc, 0, pmap_shpgperproc_proc, "IU", "Page share factor per proc");
 
 
 /***************************************************
@@ -1452,8 +1481,9 @@
 	 * mappings to active pages.
 	 */
 	if (ratecheck(&lastprint, &printinterval))
-		printf("Approaching the limit on PV entries, "
-		    "increase the vm.pmap.shpgperproc tunable.\n");
+		printf("Approaching the limit on PV entries, consider "
+		    "increasing sysctl vm.pmap.shpgperproc or "
+		    "vm.pmap.pv_entry_max\n");
 	vpq = &vm_page_queues[PQ_INACTIVE];
 retry:
 	TAILQ_FOREACH(m, &vpq->pl, pageq) {
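For anyone who wants to poke at the new knobs from userland, here is a
minimal illustrative sketch (not part of this change) using
sysctlbyname(3).  It reads vm.pmap.pv_entry_max and bumps it by an
arbitrary 10%, assuming the OIDs added above are present and the
process is privileged:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <err.h>
	#include <stdio.h>

	int
	main(void)
	{
		int max;
		size_t len = sizeof(max);

		/* Read the current limit on pv entries. */
		if (sysctlbyname("vm.pmap.pv_entry_max", &max, &len,
		    NULL, 0) == -1)
			err(1, "sysctlbyname(read)");
		printf("pv_entry_max = %d\n", max);

		/*
		 * Raise the limit by 10%; the pmap sysctl handler
		 * recomputes shpgperproc and the high water mark.
		 */
		max += max / 10;
		if (sysctlbyname("vm.pmap.pv_entry_max", NULL, NULL,
		    &max, sizeof(max)) == -1)
			err(1, "sysctlbyname(write)");
		return (0);
	}

The same can of course be done with sysctl(8), e.g.
"sysctl vm.pmap.pv_entry_max=<value>".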