From: Alan Cox <alc@freebsd.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 91094 for review
Date: Sat, 4 Feb 2006 19:13:44 GMT
Message-Id: <200602041913.k14JDiMg040232@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=91094

Change 91094 by alc@alc_home on 2006/02/04 19:13:32

        Preallocate memory for the reservation structures at initialization
        time.  Use a simple free list to manage them.  The motivation is that
        reservation allocation must not fail in certain circumstances, such as
        when a reservation is being preempted under low memory conditions.
        This change also permits the use of superpages within the kmem object
        on i386 because there is no longer the possibility of recursion within
        UMA.

Affected files ...

.. //depot/projects/superpages/src/sys/vm/vm_page.c#19 edit
.. //depot/projects/superpages/src/sys/vm/vm_reserve.c#15 edit
.. //depot/projects/superpages/src/sys/vm/vm_reserve.h#3 edit

Differences ...

==== //depot/projects/superpages/src/sys/vm/vm_page.c#19 (text+ko) ====

@@ -171,7 +171,7 @@
 vm_page_startup(vm_offset_t vaddr)
 {
         vm_offset_t mapped;
-        vm_size_t npages;
+        vm_size_t npages, nreservations;
         vm_paddr_t page_range;
         vm_paddr_t new_end;
         int i;
@@ -234,8 +234,22 @@
             VM_PROT_READ | VM_PROT_WRITE);
         bzero((void *)mapped, end - new_end);
         uma_startup((void *)mapped, boot_pages);
+        end = new_end;
 
         /*
+         * Initialize the reservation structures.
+         */
+        nreservations = 0;
+        for (i = 0; i < SP_LEVELS; i++)
+                nreservations += total >> (SP_ORDER(i) + PAGE_SHIFT);
+        new_end = end - (nreservations * sizeof(struct vm_reserve));
+        new_end = trunc_page(new_end);
+        mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);
+        bzero((void *)mapped, end - new_end);
+        preempt_init((void *)mapped, nreservations);
+        end = new_end;
+
+        /*
          * Compute the number of pages of memory that will be available for
          * use (taking into account the overhead of a page structure per
          * page).
@@ -243,8 +257,8 @@
         first_page = phys_avail[0] / PAGE_SIZE;
         page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
         npages = (total - (page_range * sizeof(struct vm_page)) -
-            (end - new_end)) / PAGE_SIZE;
-        end = new_end;
+            round_page(boot_pages * UMA_SLAB_SIZE) -
+            round_page(nreservations * sizeof(struct vm_reserve))) / PAGE_SIZE;
 
         /*
          * Reserve an unmapped guard page to trap access to vm_page_array[-1].
@@ -283,11 +297,6 @@
                         pa += PAGE_SIZE;
                 }
         }
-
-        /*
-         * Initialize the reservation structures.
-         */
-        preempt_init();
 
         return (vaddr);
 }
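The sizing logic above is dense, so here is a stand-alone sketch that replays
the arithmetic with made-up numbers.  Every constant in it is an illustrative
assumption: SP_LEVELS, SP_ORDER(), PAGE_SHIFT, the 1 GB total, and the 64-byte
stand-in for sizeof(struct vm_reserve) are placeholders, not the values used
on the superpages branch.

/*
 * Stand-alone sketch of the sizing and carve-out arithmetic performed in
 * vm_page_startup() above.  All constants are illustrative stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT      12                      /* 4 KB base pages (assumption) */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define SP_LEVELS       2                       /* reservation levels (assumption) */
#define SP_ORDER(l)     (((l) + 1) * 9)         /* log2 pages per chunk (assumption) */
#define trunc_page(x)   ((x) & ~(PAGE_SIZE - 1))

int
main(void)
{
        size_t total = 1UL << 30;               /* managed memory: 1 GB (assumption) */
        size_t reserve_size = 64;               /* sizeof(struct vm_reserve) stand-in */
        size_t end = total;                     /* top of the boot-time map (simplified) */
        size_t nreservations = 0, new_end;
        int i;

        /* One vm_reserve per possible chunk at each superpage level. */
        for (i = 0; i < SP_LEVELS; i++)
                nreservations += total >> (SP_ORDER(i) + PAGE_SHIFT);

        /* Carve the array out of the top of the map, page-aligned. */
        new_end = trunc_page(end - nreservations * reserve_size);

        printf("%zu reservation structures, %zu bytes withheld\n",
            nreservations, end - new_end);
        return (0);
}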
==== //depot/projects/superpages/src/sys/vm/vm_reserve.c#15 (text+ko) ====

@@ -61,17 +61,16 @@
 static reservation_t new_reserve(int level, vm_object_t object);
 static void reserve_procreate(reservation_t, vm_page_t);
 
-static uma_zone_t sp_zone;
-
 /*
  * Reservations in rres[level], with level=-1..SP_LEVELS-2, are those that,
  * if broken, will yield chunks of size at most SP_MAGN(level).
  * Fully populated reservations don't go here
  */
 TAILQ_HEAD(sp_list, vm_reserve);
-struct sp_list rres_store[SP_LEVELS];
-struct sp_list *rres = &rres_store[1];  /* just to shift indexes */
-struct mtx rres_mtx;
+static struct sp_list rres_free;
+static struct sp_list rres_store[SP_LEVELS];
+static struct sp_list *rres = &rres_store[1];   /* just to shift indexes */
+static struct mtx rres_mtx;
 
 /*
  * Determine the size of the reservation to create for the given
@@ -127,7 +126,9 @@
                 if (child != NULL && child->popfrom == child->popto) {
                         KASSERT(child->refcnt == 0,
                             ("update_maxavail: child->refcnt != 0"));
-                        uma_zfree(sp_zone, child);
+                        mtx_lock(&rres_mtx);
+                        TAILQ_INSERT_HEAD(&rres_free, child, next);
+                        mtx_unlock(&rres_mtx);
                         child = res->child[i] = NULL;
                 }
                 if (child == NULL)
@@ -227,8 +228,11 @@
                 m->reserv = res->child[IDX(m, res)];
                 KASSERT(res->refcnt > 0,
                     ("reserve_lazy_update: refcnt <= 0"));
-                if (--res->refcnt == 0)
-                        uma_zfree(sp_zone, res);
+                if (--res->refcnt == 0) {
+                        mtx_lock(&rres_mtx);
+                        TAILQ_INSERT_HEAD(&rres_free, res, next);
+                        mtx_unlock(&rres_mtx);
+                }
         }
         return (res);
 }
@@ -236,9 +240,15 @@
 static reservation_t
 new_reserve(int level, vm_object_t object)
 {
+        reservation_t sp;
+
         KASSERT(level > -1, ("level > -1"));
-        reservation_t sp = uma_zalloc(sp_zone, M_NOWAIT | M_ZERO);
+        mtx_lock(&rres_mtx);
+        sp = TAILQ_FIRST(&rres_free);
+        TAILQ_REMOVE(&rres_free, sp, next);
+        mtx_unlock(&rres_mtx);
         KASSERT(sp,("new_reservation, uma_zalloc failed"));
+        bzero(sp, sizeof(*sp));
         sp->level = level;
         sp->order = SP_ORDER(level);
         sp->rql = NO_RESQ;
@@ -401,14 +411,15 @@
 }
 
 void
-preempt_init(void)
+preempt_init(reservation_t reservation, int nreservations)
 {
         int i;
 
-        sp_zone = uma_zcreate("VM RESERVE", sizeof(struct vm_reserve),
-            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
-        KASSERT(sp_zone,("preempt_init: unable to initialize sp_zone"));
-
+        TAILQ_INIT(&rres_free);
+        for (i = 0; i < nreservations; i++) {
+                TAILQ_INSERT_TAIL(&rres_free, reservation, next);
+                reservation++;
+        }
         for (i = -1; i < SP_LEVELS - 1; i++)
                 TAILQ_INIT(&rres[i]);
         mtx_init(&rres_mtx, "reservation queues", NULL, MTX_DEF);
@@ -628,7 +639,9 @@
         else if (sp->refcnt == 0) {     /* empty: free */
                 KASSERT(sp->maxavail == 1 << sp->order, ("preempt_move maxavail=%d\n", sp->maxavail));
                 buddy_free(sp->first_page, sp->order);
-                uma_zfree(sp_zone, sp);
+                mtx_lock(&rres_mtx);
+                TAILQ_INSERT_HEAD(&rres_free, sp, next);
+                mtx_unlock(&rres_mtx);
         } else {
                 KASSERT(sp->popto != sp->popfrom,("preempt_move: popto = popfrom"));
                 sp->rql = logf(sp->maxavail);
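The pattern that replaces the UMA zone is a mutex-protected TAILQ of
boot-time-allocated structures.  Below is a minimal userland sketch of that
pattern, with struct vm_reserve reduced to its list linkage and a pthread
mutex standing in for rres_mtx; reserve_get() and reserve_put() are
hypothetical names for the open-coded lock/unlink and lock/insert sequences in
the diff.  One deliberate difference: the sketch asserts that the list is
non-empty before unlinking, whereas the new_reserve() hunk unlinks first and
still carries the old "uma_zalloc failed" assertion text.

/*
 * Userland sketch of the preallocated free list that replaces the UMA
 * zone in vm_reserve.c above.  Illustrative only.
 */
#include <sys/queue.h>
#include <assert.h>
#include <pthread.h>
#include <string.h>

struct vm_reserve {
        TAILQ_ENTRY(vm_reserve) next;
        /* level, order, refcnt, and the other fields elided */
};

static TAILQ_HEAD(, vm_reserve) rres_free = TAILQ_HEAD_INITIALIZER(rres_free);
static pthread_mutex_t rres_mtx = PTHREAD_MUTEX_INITIALIZER;

/* As in preempt_init(): thread the preallocated array onto the free list. */
static void
preempt_init(struct vm_reserve *reservation, int nreservations)
{
        int i;

        for (i = 0; i < nreservations; i++)
                TAILQ_INSERT_TAIL(&rres_free, &reservation[i], next);
}

/*
 * Because the list is sized at boot for the worst case, a get can only
 * fail if the sizing is wrong; there is no recursion into the allocator.
 */
static struct vm_reserve *
reserve_get(void)
{
        struct vm_reserve *sp;

        pthread_mutex_lock(&rres_mtx);
        sp = TAILQ_FIRST(&rres_free);
        assert(sp != NULL);             /* the free list must never run dry */
        TAILQ_REMOVE(&rres_free, sp, next);
        pthread_mutex_unlock(&rres_mtx);
        memset(sp, 0, sizeof(*sp));     /* mirrors the bzero() in new_reserve() */
        return (sp);
}

static void
reserve_put(struct vm_reserve *sp)
{
        pthread_mutex_lock(&rres_mtx);
        TAILQ_INSERT_HEAD(&rres_free, sp, next);
        pthread_mutex_unlock(&rres_mtx);
}

int
main(void)
{
        struct vm_reserve pool[8];
        struct vm_reserve *sp;

        preempt_init(pool, 8);
        sp = reserve_get();
        reserve_put(sp);
        return (0);
}

Frees push onto the head and allocations pop from the head, so the list
behaves as a LIFO and a recently freed structure is reused while it is still
cache-warm.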
==== //depot/projects/superpages/src/sys/vm/vm_reserve.h#3 (text+ko) ====

@@ -77,5 +77,5 @@
 
 void reserve_populate(vm_page_t, reservation_t);
 void reserve_unpopulate(vm_page_t);
-void preempt_init(void) ;
+void preempt_init(reservation_t reservation, int nreservations);
 void preempt_destroy(vm_page_t m);