Date: Thu, 22 Mar 2018 16:25:30 -0500
From: Justin Hibbits <jrh29@alumni.cwru.edu>
To: Jeff Roberson <jeff@freebsd.org>
Cc: src-committers <src-committers@freebsd.org>, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: Re: svn commit: r331369 - head/sys/vm
Message-ID: <CAHSQbTBumEe2-=5DMG8NG2c3C6Agf-eUok7=ti_XGE_Fp7E=MQ@mail.gmail.com>
In-Reply-To: <201803221921.w2MJLBi7058560@repo.freebsd.org>
References: <201803221921.w2MJLBi7058560@repo.freebsd.org>
This broke gcc builds.

On Thu, Mar 22, 2018 at 2:21 PM, Jeff Roberson <jeff@freebsd.org> wrote:
> Author: jeff
> Date: Thu Mar 22 19:21:11 2018
> New Revision: 331369
> URL: https://svnweb.freebsd.org/changeset/base/331369
>
> Log:
>   Lock reservations with a dedicated lock in each reservation.  Protect the
>   vmd_free_count with atomics.
>
>   This allows us to allocate and free from reservations without the free lock
>   except where a superpage is allocated from the physical layer, which is
>   roughly 1/512 of the operations on amd64.
>
>   Use the counter api to eliminate cache conention on counters.
>
>   Reviewed by:  markj
>   Tested by:    pho
>   Sponsored by: Netflix, Dell/EMC Isilon
>   Differential Revision:  https://reviews.freebsd.org/D14707
>
> Modified:
>   head/sys/vm/vm_page.c
>   head/sys/vm/vm_pagequeue.h
>   head/sys/vm/vm_reserv.c
>   head/sys/vm/vm_reserv.h
>
> Modified: head/sys/vm/vm_page.c
> ==============================================================================
> --- head/sys/vm/vm_page.c       Thu Mar 22 19:11:43 2018        (r331368)
> +++ head/sys/vm/vm_page.c       Thu Mar 22 19:21:11 2018        (r331369)
> @@ -177,7 +177,6 @@ static uma_zone_t fakepg_zone;
>  static void vm_page_alloc_check(vm_page_t m);
>  static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
>  static void vm_page_enqueue(uint8_t queue, vm_page_t m);
> -static void vm_page_free_phys(struct vm_domain *vmd, vm_page_t m);
>  static void vm_page_init(void *dummy);
>  static int vm_page_insert_after(vm_page_t m, vm_object_t object,
>      vm_pindex_t pindex, vm_page_t mpred);
> @@ -1677,10 +1676,10 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
>   * for the request class and false otherwise.
>   */
>  int
> -vm_domain_available(struct vm_domain *vmd, int req, int npages)
> +vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
>  {
> +       u_int limit, old, new;
>
> -       vm_domain_free_assert_locked(vmd);
>         req = req & VM_ALLOC_CLASS_MASK;
>
>         /*
> @@ -1688,15 +1687,34 @@ vm_domain_available(struct vm_domain *vmd, int req, in
>          */
>         if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
>                 req = VM_ALLOC_SYSTEM;
> +       if (req == VM_ALLOC_INTERRUPT)
> +               limit = 0;
> +       else if (req == VM_ALLOC_SYSTEM)
> +               limit = vmd->vmd_interrupt_free_min;
> +       else
> +               limit = vmd->vmd_free_reserved;
>
> -       if (vmd->vmd_free_count >= npages + vmd->vmd_free_reserved ||
> -           (req == VM_ALLOC_SYSTEM &&
> -           vmd->vmd_free_count >= npages + vmd->vmd_interrupt_free_min) ||
> -           (req == VM_ALLOC_INTERRUPT &&
> -           vmd->vmd_free_count >= npages))
> -               return (1);
> +       /*
> +        * Attempt to reserve the pages.  Fail if we're below the limit.
> +        */
> +       limit += npages;
> +       old = vmd->vmd_free_count;
> +       do {
> +               if (old < limit)
> +                       return (0);
> +               new = old - npages;
> +       } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
>
> -       return (0);
> +       /* Wake the page daemon if we've crossed the threshold. */
> +       if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
> +               pagedaemon_wakeup(vmd->vmd_domain);
> +
> +       /* Only update bitsets on transitions. */
> +       if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
> +           (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
> +               vm_domain_set(vmd);
> +
> +       return (1);
>  }
>
>  vm_page_t
> @@ -1723,44 +1741,34 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pind
>  again:
>         m = NULL;
>  #if VM_NRESERVLEVEL > 0
> +       /*
> +        * Can we allocate the page from a reservation?
> +        */
>         if (vm_object_reserv(object) &&
> -           (m = vm_reserv_extend(req, object, pindex, domain, mpred))
> -           != NULL) {
> +           ((m = vm_reserv_extend(req, object, pindex, domain, mpred)) != NULL ||
> +           (m = vm_reserv_alloc_page(req, object, pindex, domain, mpred)) != NULL)) {
>                 domain = vm_phys_domain(m);
>                 vmd = VM_DOMAIN(domain);
>                 goto found;
>         }
>  #endif
>         vmd = VM_DOMAIN(domain);
> -       vm_domain_free_lock(vmd);
> -       if (vm_domain_available(vmd, req, 1)) {
> +       if (vm_domain_allocate(vmd, req, 1)) {
>                 /*
> -                * Can we allocate the page from a reservation?
> +                * If not, allocate it from the free page queues.
>                  */
> +               vm_domain_free_lock(vmd);
> +               m = vm_phys_alloc_pages(domain, object != NULL ?
> +                   VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
> +               vm_domain_free_unlock(vmd);
> +               if (m == NULL) {
> +                       vm_domain_freecnt_inc(vmd, 1);
>  #if VM_NRESERVLEVEL > 0
> -               if (!vm_object_reserv(object) ||
> -                   (m = vm_reserv_alloc_page(object, pindex,
> -                   domain, mpred)) == NULL)
> +                       if (vm_reserv_reclaim_inactive(domain))
> +                               goto again;
>  #endif
> -               {
> -                       /*
> -                        * If not, allocate it from the free page queues.
> -                        */
> -                       m = vm_phys_alloc_pages(domain, object != NULL ?
> -                           VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
> -#if VM_NRESERVLEVEL > 0
> -                       if (m == NULL && vm_reserv_reclaim_inactive(domain)) {
> -                               m = vm_phys_alloc_pages(domain,
> -                                   object != NULL ?
> -                                   VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
> -                                   0);
> -                       }
> -#endif
>                 }
>         }
> -       if (m != NULL)
> -               vm_domain_freecnt_dec(vmd, 1);
> -       vm_domain_free_unlock(vmd);
>         if (m == NULL) {
>                 /*
>                  * Not allocatable, give up.
> @@ -1775,9 +1783,7 @@ again:
>          */
>         KASSERT(m != NULL, ("missing page"));
>
> -#if VM_NRESERVLEVEL > 0
>  found:
> -#endif

'found' is now declared, but unused on powerpc64.

- Justin
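For readers following the thread, here is a minimal, self-contained sketch (not the kernel code itself) of the lockless counter update the commit introduces for vmd_free_count. It uses C11 atomics in place of FreeBSD's atomic_fcmpset_int(), which has the same reload-on-failure behavior; the names free_count, try_allocate, npages and limit are made up for illustration.

```c
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int free_count;	/* stand-in for vmd->vmd_free_count */

/*
 * Reserve npages from free_count without taking a lock: read the current
 * value, bail out if the allocation would dip below the reserve limit, and
 * retry the compare-and-swap until it lands.  On failure,
 * atomic_compare_exchange_weak() refreshes 'old' with the current value,
 * just as atomic_fcmpset_int() does.
 */
bool
try_allocate(unsigned int npages, unsigned int limit)
{
	unsigned int old, new;

	old = atomic_load(&free_count);
	do {
		if (old < limit + npages)
			return (false);		/* below the reserve, fail */
		new = old - npages;
	} while (!atomic_compare_exchange_weak(&free_count, &old, new));
	return (true);
}
```

And a minimal reproduction of the warning behind the build break, assuming a platform where VM_NRESERVLEVEL is 0 (as on powerpc64 here): the commit drops the #if/#endif around the "found:" label, but the only goto that reaches it is still compiled conditionally, so gcc's -Wunused-label fires, which is fatal in a -Werror kernel build. The function below is hypothetical; only the #if pattern mirrors the diff.

```c
#define	VM_NRESERVLEVEL	0	/* hypothetical: mimic a platform without reservations */

int
alloc_sketch(void)
{
	int m;

#if VM_NRESERVLEVEL > 0
	m = 1;			/* "reservation" path; the label's only user */
	goto found;
#endif
	m = 2;			/* free-queue fallback path */
found:	/* gcc: "label 'found' defined but not used" -> error under -Werror */
	return (m);
}
```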