Date: Fri, 5 Jul 2019 16:49:34 +0000 (UTC)
From: Doug Moore <dougm@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r349767 - head/sys/vm
Message-ID: <201907051649.x65GnYaN092041@repo.freebsd.org>
Author: dougm
Date: Fri Jul  5 16:49:34 2019
New Revision: 349767
URL: https://svnweb.freebsd.org/changeset/base/349767

Log:
  Based on work posted at https://reviews.freebsd.org/D13484, change
  swap_pager_swapoff_object and swp_pager_force_pagein so that they can
  page in multiple pages at a time to a swap device, rather than doing
  one I/O operation for each page.

  Tested by:    pho
  Submitted by: ota_j.email.ne.jp (Yoshihiro Ota)
  Reviewed by:  alc, markj, kib
  Approved by:  kib, markj (mentors)
  MFC after:    1 week
  Differential Revision:  https://reviews.freebsd.org/D20635

Modified:
  head/sys/vm/swap_pager.c

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c    Fri Jul  5 16:43:41 2019    (r349766)
+++ head/sys/vm/swap_pager.c    Fri Jul  5 16:49:34 2019    (r349767)
@@ -1662,6 +1662,7 @@ swp_pager_force_dirty(vm_page_t m)
         vm_page_unlock(m);
 #endif
         vm_page_xunbusy(m);
+        swap_pager_unswapped(m);
 }
 
 static void
@@ -1673,69 +1674,116 @@ swp_pager_force_launder(vm_page_t m)
         vm_page_launder(m);
         vm_page_unlock(m);
         vm_page_xunbusy(m);
+        swap_pager_unswapped(m);
 }
 
 /*
- * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
+ * SWP_PAGER_FORCE_PAGEIN() - force swap blocks to be paged in
  *
- * This routine dissociates the page at the given index within an object
- * from its backing store, paging it in if it does not reside in memory.
- * If the page is paged in, it is marked dirty and placed in the laundry
- * queue.  The page is marked dirty because it no longer has backing
- * store.  It is placed in the laundry queue because it has not been
- * accessed recently.  Otherwise, it would already reside in memory.
- *
- * We also attempt to swap in all other pages in the swap block.
- * However, we only guarantee that the one at the specified index is
- * paged in.
- *
- * XXX - The code to page the whole block in doesn't work, so we
- * revert to the one-by-one behavior for now.  Sigh.
+ * This routine dissociates pages starting at the given index within an
+ * object from their backing store, paging them in if they do not reside
+ * in memory.  Pages that are paged in are marked dirty and placed in the
+ * laundry queue.  Pages are marked dirty because they no longer have
+ * backing store.  They are placed in the laundry queue because they have
+ * not been accessed recently.  Otherwise, they would already reside in
+ * memory.
  */
 static void
-swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
+swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex, int npages)
 {
-        vm_page_t m;
+        vm_page_t ma[npages];
+        int i, j;
 
-        vm_object_pip_add(object, 1);
-        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
-        if (m->valid == VM_PAGE_BITS_ALL) {
-                vm_object_pip_wakeup(object);
-                swp_pager_force_dirty(m);
-                vm_pager_page_unswapped(m);
-                return;
+        KASSERT(npages > 0, ("%s: No pages", __func__));
+        KASSERT(npages <= MAXPHYS / PAGE_SIZE,
+            ("%s: Too many pages: %d", __func__, npages));
+        vm_object_pip_add(object, npages);
+        vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
+        for (i = j = 0;; i++) {
+                /* Count nonresident pages, to page-in all at once. */
+                if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
+                        continue;
+                if (j < i) {
+                        /* Page-in nonresident pages. Mark for laundering. */
+                        if (swap_pager_getpages(object, &ma[j], i - j, NULL,
+                            NULL) != VM_PAGER_OK)
+                                panic("%s: read from swap failed", __func__);
+                        do {
+                                swp_pager_force_launder(ma[j]);
+                        } while (++j < i);
+                }
+                if (i == npages)
+                        break;
+                /* Mark dirty a resident page. */
+                swp_pager_force_dirty(ma[j++]);
         }
-
-        if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
-                panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
-        vm_object_pip_wakeup(object);
-        swp_pager_force_launder(m);
-        vm_pager_page_unswapped(m);
+        vm_object_pip_wakeupn(object, npages);
 }
 
 /*
  * swap_pager_swapoff_object:
  *
  * Page in all of the pages that have been paged out for an object
- * from a given swap device.
+ * to a swap device.
  */
 static void
 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
 {
         struct swblk *sb;
-        vm_pindex_t pi;
+        vm_pindex_t pi, s_pindex;
+        daddr_t blk, n_blks, s_blk;
         int i;
 
+        n_blks = 0;
         for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
             &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
-                pi = sb->p + SWAP_META_PAGES;
                 for (i = 0; i < SWAP_META_PAGES; i++) {
-                        if (sb->d[i] == SWAPBLK_NONE)
+                        blk = sb->d[i];
+                        if (!swp_pager_isondev(blk, sp))
+                                blk = SWAPBLK_NONE;
+
+                        /*
+                         * If there are no blocks/pages accumulated, start a new
+                         * accumulation here.
+                         */
+                        if (n_blks == 0) {
+                                if (blk != SWAPBLK_NONE) {
+                                        s_blk = blk;
+                                        s_pindex = sb->p + i;
+                                        n_blks = 1;
+                                }
                                 continue;
-                        if (swp_pager_isondev(sb->d[i], sp))
-                                swp_pager_force_pagein(object, sb->p + i);
+                        }
+
+                        /*
+                         * If the accumulation can be extended without breaking
+                         * the sequence of consecutive blocks and pages that
+                         * swp_pager_force_pagein() depends on, do so.
+                         */
+                        if (n_blks < MAXPHYS / PAGE_SIZE &&
+                            s_blk + n_blks == blk &&
+                            s_pindex + n_blks == sb->p + i) {
+                                ++n_blks;
+                                continue;
+                        }
+
+                        /*
+                         * The sequence of consecutive blocks and pages cannot
+                         * be extended, so page them all in here.  Then,
+                         * because doing so involves releasing and reacquiring
+                         * a lock that protects the swap block pctrie, do not
+                         * rely on the current swap block.  Break this loop and
+                         * re-fetch the same pindex from the pctrie again.
+                         */
+                        swp_pager_force_pagein(object, s_pindex, n_blks);
+                        n_blks = 0;
+                        break;
                 }
+                if (i == SWAP_META_PAGES)
+                        pi = sb->p + SWAP_META_PAGES;
         }
+        if (n_blks > 0)
+                swp_pager_force_pagein(object, s_pindex, n_blks);
 }
 
 /*
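
The heart of the change is the run accumulation in swap_pager_swapoff_object():
consecutive swap blocks that back consecutive page indices are coalesced into a
single multi-page swap_pager_getpages() request, capped at MAXPHYS / PAGE_SIZE
pages, instead of one request per page. As a rough illustration only, here is a
minimal userland C sketch of the same coalescing loop; page_in_run(), BLK_NONE,
and MAX_RUN are hypothetical stand-ins for swp_pager_force_pagein(),
SWAPBLK_NONE, and MAXPHYS / PAGE_SIZE, and a flat array stands in for the swblk
pctrie:

#include <stdio.h>

#define BLK_NONE        (-1)    /* hypothetical stand-in for SWAPBLK_NONE */
#define MAX_RUN         32      /* hypothetical stand-in for MAXPHYS / PAGE_SIZE */

/*
 * Hypothetical stand-in for swp_pager_force_pagein(); in the kernel this
 * issues one swap_pager_getpages() I/O covering the whole run.
 */
static void
page_in_run(long s_blk, long s_pindex, long n_blks)
{
        printf("one I/O: %ld page(s) at block %ld, pindex %ld\n",
            n_blks, s_blk, s_pindex);
}

int
main(void)
{
        /* Swap block per page index; BLK_NONE means "not on this device". */
        long d[] = { 100, 101, 102, BLK_NONE, 200, 201, 500 };
        long npages = sizeof(d) / sizeof(d[0]);
        long blk, s_blk = BLK_NONE, s_pindex = 0, n_blks = 0;

        for (long i = 0; i < npages; i++) {
                blk = d[i];
                if (n_blks == 0) {
                        /* Nothing accumulated; start a run if possible. */
                        if (blk != BLK_NONE) {
                                s_blk = blk;
                                s_pindex = i;
                                n_blks = 1;
                        }
                        continue;
                }
                /* Extend only while blocks and pages stay consecutive. */
                if (n_blks < MAX_RUN && s_blk + n_blks == blk &&
                    s_pindex + n_blks == i) {
                        n_blks++;
                        continue;
                }
                /* Run broken: flush it, then revisit this index. */
                page_in_run(s_blk, s_pindex, n_blks);
                n_blks = 0;
                i--;
        }
        if (n_blks > 0)
                page_in_run(s_blk, s_pindex, n_blks);
        return (0);
}

Run against the sample array, this coalesces pages 0-2 (blocks 100-102) into
one read, pages 4-5 (blocks 200-201) into a second, and page 6 into a third.
The sketch simply revisits the index that broke a run; the kernel loop cannot,
because swp_pager_force_pagein() releases and reacquires the lock protecting
the pctrie, so it instead breaks out and looks up the same pindex again. That
is also why the commit moves the unswap step into swp_pager_force_dirty() and
swp_pager_force_launder() (the added swap_pager_unswapped() calls): pages
already handled are dissociated from their swap blocks, so the repeated lookup
makes forward progress.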