Date: Tue, 26 Jun 2018 18:29:56 +0000 (UTC)
From: Alan Cox <alc@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r335674 - head/sys/vm
Message-ID: <201806261829.w5QITuqI082667@repo.freebsd.org>
Author: alc
Date: Tue Jun 26 18:29:56 2018
New Revision: 335674
URL: https://svnweb.freebsd.org/changeset/base/335674

Log:
  Update the physical page selection strategy used by vm_page_import() so
  that it does not cause rapid fragmentation of the free physical memory.

  Reviewed by:	jeff, markj (an earlier version)
  Differential Revision:	https://reviews.freebsd.org/D15976

Modified:
  head/sys/vm/vm_page.c
  head/sys/vm/vm_phys.c
  head/sys/vm/vm_phys.h

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Tue Jun 26 18:07:16 2018	(r335673)
+++ head/sys/vm/vm_page.c	Tue Jun 26 18:29:56 2018	(r335674)
@@ -2235,24 +2235,16 @@ static int
 vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
-	vm_page_t m;
-	int i, j, n;
+	int i;
 
 	vmd = arg;
 	/* Only import if we can bring in a full bucket. */
 	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
-	n = 64;	/* Starting stride, arbitrary. */
 	vm_domain_free_lock(vmd);
-	for (i = 0; i < cnt; i+=n) {
-		n = vm_phys_alloc_npages(domain, VM_FREELIST_DEFAULT, &m,
-		    MIN(n, cnt-i));
-		if (n == 0)
-			break;
-		for (j = 0; j < n; j++)
-			store[i+j] = m++;
-	}
+	i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt,
+	    (vm_page_t *)store);
 	vm_domain_free_unlock(vmd);
 	if (cnt != i)
 		vm_domain_freecnt_inc(vmd, cnt - i);

Modified: head/sys/vm/vm_phys.c
==============================================================================
--- head/sys/vm/vm_phys.c	Tue Jun 26 18:07:16 2018	(r335673)
+++ head/sys/vm/vm_phys.c	Tue Jun 26 18:29:56 2018	(r335674)
@@ -605,6 +605,76 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_f
 }
 
 /*
+ * Tries to allocate the specified number of pages from the specified pool
+ * within the specified domain.  Returns the actual number of allocated pages
+ * and a pointer to each page through the array ma[].
+ *
+ * The returned pages may not be physically contiguous.  However, in contrast
+ * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
+ * calling this function once to allocate the desired number of pages will
+ * avoid wasted time in vm_phys_split_pages().
+ *
+ * The free page queues for the specified domain must be locked.
+ */
+int
+vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
+{
+	struct vm_freelist *alt, *fl;
+	vm_page_t m;
+	int avail, end, flind, freelist, i, need, oind, pind;
+
+	KASSERT(domain >= 0 && domain < vm_ndomains,
+	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
+	KASSERT(pool < VM_NFREEPOOL,
+	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
+	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
+	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
+	i = 0;
+	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+		flind = vm_freelist_to_flind[freelist];
+		if (flind < 0)
+			continue;
+		fl = vm_phys_free_queues[domain][flind][pool];
+		for (oind = 0; oind < VM_NFREEORDER; oind++) {
+			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
+				vm_freelist_rem(fl, m, oind);
+				avail = 1 << oind;
+				need = imin(npages - i, avail);
+				for (end = i + need; i < end;)
+					ma[i++] = m++;
+				if (need < avail) {
+					vm_phys_free_contig(m, avail - need);
+					return (npages);
+				} else if (i == npages)
+					return (npages);
+			}
+		}
+		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
+			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
+				alt = vm_phys_free_queues[domain][flind][pind];
+				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
+				    NULL) {
+					vm_freelist_rem(alt, m, oind);
+					vm_phys_set_pool(pool, m, oind);
+					avail = 1 << oind;
+					need = imin(npages - i, avail);
+					for (end = i + need; i < end;)
+						ma[i++] = m++;
+					if (need < avail) {
+						vm_phys_free_contig(m, avail -
+						    need);
+						return (npages);
+					} else if (i == npages)
+						return (npages);
+				}
+			}
+		}
+	}
+	return (i);
+}
+
+/*
  * Allocate a contiguous, power of two-sized set of physical pages
  * from the free lists.
  *
@@ -622,26 +692,6 @@ vm_phys_alloc_pages(int domain, int pool, int order)
 		return (m);
 	}
 	return (NULL);
-}
-
-int
-vm_phys_alloc_npages(int domain, int pool, vm_page_t *mp, int cnt)
-{
-	vm_page_t m;
-	int order, freelist;
-
-	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
-		for (order = fls(cnt) -1; order >= 0; order--) {
-			m = vm_phys_alloc_freelist_pages(domain, freelist,
-			    pool, order);
-			if (m != NULL) {
-				*mp = m;
-				return (1 << order);
-			}
-		}
-	}
-	*mp = NULL;
-	return (0);
 }
 
 /*

Modified: head/sys/vm/vm_phys.h
==============================================================================
--- head/sys/vm/vm_phys.h	Tue Jun 26 18:07:16 2018	(r335673)
+++ head/sys/vm/vm_phys.h	Tue Jun 26 18:29:56 2018	(r335674)
@@ -77,8 +77,8 @@ vm_page_t vm_phys_alloc_contig(int domain, u_long npag
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist,
     int pool, int order);
+int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
 vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
-int vm_phys_alloc_npages(int domain, int pool, vm_page_t *m, int cnt);
 int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
     vm_memattr_t memattr);
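For readers skimming the diff, here is a minimal, hypothetical sketch of the new calling convention. It only restates the vm_page_import() pattern from the vm_page.c hunk above and would sit next to that function in sys/vm/vm_page.c, where struct vm_domain, vm_domain_allocate(), the domain free lock, and VM_FREEPOOL_DEFAULT are already in scope; the fill_bucket() wrapper name is illustrative and not part of this commit.

/*
 * Illustrative wrapper: fill up to 'cnt' page pointers from the given
 * domain's default free pool with a single call, instead of looping over
 * power-of-two strides as the old import code did.
 */
static int
fill_bucket(struct vm_domain *vmd, vm_page_t *store, int cnt)
{
        int i;

        /* Reserve the whole batch up front, as vm_page_import() does. */
        if (!vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
                return (0);
        vm_domain_free_lock(vmd);
        /* One call hands back up to 'cnt' pages through store[]. */
        i = vm_phys_alloc_npages(vmd->vmd_domain, VM_FREEPOOL_DEFAULT,
            cnt, store);
        vm_domain_free_unlock(vmd);
        /* Return any unused part of the reservation. */
        if (cnt != i)
                vm_domain_freecnt_inc(vmd, cnt - i);
        return (i);
}

As the body of vm_phys_alloc_npages() above shows, at most one free block is left partially consumed per call: when a block supplies the final pages, its unused tail is given back as a single contiguous run via vm_phys_free_contig() rather than being chipped into small pieces, which is what avoids the rapid fragmentation mentioned in the log message.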