Date: Thu, 3 Dec 2015 17:21:10 +0000 (UTC)
From: "Conrad E. Meyer" <cem@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r291704 - head/sys/vm
Message-ID: <201512031721.tB3HLAXe075169@repo.freebsd.org>
Author: cem
Date: Thu Dec 3 17:21:10 2015
New Revision: 291704
URL: https://svnweb.freebsd.org/changeset/base/291704

Log:
  Pull vm_object_scan_all_shadowed out of vm_object_backing_scan

  These two functions were largely unrelated, they just used the same
  loop logic to walk through a backing object's memq.  Pull out the
  all_shadowed test as its own function and eliminate
  OBSC_TEST_ALL_SHADOWED.  Rename vm_object_backing_scan to
  vm_object_collapse_scan.

  No functional change.

  Sponsored by:	EMC / Isilon Storage Division
  Differential Revision:	https://reviews.freebsd.org/D4335

Modified:
  head/sys/vm/vm_object.c

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Thu Dec 3 16:54:45 2015	(r291703)
+++ head/sys/vm/vm_object.c	Thu Dec 3 17:21:10 2015	(r291704)
@@ -1419,12 +1419,11 @@ retry:
 		VM_OBJECT_WLOCK(new_object);
 	}
 
-#define	OBSC_TEST_ALL_SHADOWED	0x0001
 #define	OBSC_COLLAPSE_NOWAIT	0x0002
 #define	OBSC_COLLAPSE_WAIT	0x0004
 
 static vm_page_t
-vm_object_backing_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
+vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
     int op)
 {
 	vm_object_t backing_object;
@@ -1452,192 +1451,185 @@ vm_object_backing_scan_wait(vm_object_t
 }
 
 static bool
-vm_object_backing_scan(vm_object_t object, int op)
+vm_object_scan_all_shadowed(vm_object_t object)
 {
 	vm_object_t backing_object;
-	vm_page_t next, p, pp;
+	vm_page_t p, pp;
 	vm_pindex_t backing_offset_index, new_pindex;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
 
 	backing_object = object->backing_object;
-	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
 
 	/*
-	 * Initial conditions
+	 * Initial conditions:
+	 *
+	 * We do not want to have to test for the existence of cache or swap
+	 * pages in the backing object.  XXX but with the new swapper this
+	 * would be pretty easy to do.
 	 */
-	if (op & OBSC_TEST_ALL_SHADOWED) {
+	if (backing_object->type != OBJT_DEFAULT)
+		return (false);
+
+	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
+
+	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL;
+	    p = TAILQ_NEXT(p, listq)) {
+		new_pindex = p->pindex - backing_offset_index;
+
+		/*
+		 * Ignore pages outside the parent object's range and outside
+		 * the parent object's mapping of the backing object.
+		 */
+		if (p->pindex < backing_offset_index ||
+		    new_pindex >= object->size)
+			continue;
+
 		/*
-		 * We do not want to have to test for the existence of cache
-		 * or swap pages in the backing object.  XXX but with the
-		 * new swapper this would be pretty easy to do.
+		 * See if the parent has the page or if the parent's object
+		 * pager has the page.  If the parent has the page but the page
+		 * is not valid, the parent's object pager must have the page.
 		 *
-		 * XXX what about anonymous MAP_SHARED memory that hasn't
-		 * been ZFOD faulted yet?  If we do not test for this, the
-		 * shadow test may succeed! XXX
+		 * If this fails, the parent does not completely shadow the
+		 * object and we might as well give up now.
 		 */
-		if (backing_object->type != OBJT_DEFAULT) {
+		pp = vm_page_lookup(object, new_pindex);
+		if ((pp == NULL || pp->valid == 0) &&
+		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
 			return (false);
-		}
 	}
-	if (op & OBSC_COLLAPSE_WAIT) {
+	return (true);
+}
+
+static bool
+vm_object_collapse_scan(vm_object_t object, int op)
+{
+	vm_object_t backing_object;
+	vm_page_t next, p, pp;
+	vm_pindex_t backing_offset_index, new_pindex;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
+
+	backing_object = object->backing_object;
+	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
+
+	/*
+	 * Initial conditions
+	 */
+	if ((op & OBSC_COLLAPSE_WAIT) != 0)
 		vm_object_set_flag(backing_object, OBJ_DEAD);
-	}
 
 	/*
 	 * Our scan
 	 */
-	p = TAILQ_FIRST(&backing_object->memq);
-	while (p) {
+	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
 		next = TAILQ_NEXT(p, listq);
 		new_pindex = p->pindex - backing_offset_index;
 
-		if (op & OBSC_TEST_ALL_SHADOWED) {
-			/*
-			 * Ignore pages outside the parent object's range
-			 * and outside the parent object's mapping of the
-			 * backing object.
-			 *
-			 * Note that we do not busy the backing object's
-			 * page.
-			 */
-			if (p->pindex < backing_offset_index ||
-			    new_pindex >= object->size) {
-				p = next;
-				continue;
-			}
-
-			/*
-			 * See if the parent has the page or if the parent's
-			 * object pager has the page.  If the parent has the
-			 * page but the page is not valid, the parent's
-			 * object pager must have the page.
-			 *
-			 * If this fails, the parent does not completely shadow
-			 * the object and we might as well give up now.
-			 */
-
-			pp = vm_page_lookup(object, new_pindex);
-			if ((pp == NULL || pp->valid == 0) &&
-			    !vm_pager_has_page(object, new_pindex, NULL, NULL))
-				return (false);
-		}
 		/*
 		 * Check for busy page
 		 */
-		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
-			if (vm_page_busied(p)) {
-				p = vm_object_backing_scan_wait(object, p,
-				    next, op);
-				continue;
-			}
-
-			KASSERT(p->object == backing_object,
-			    ("vm_object_backing_scan: object mismatch"));
-
-			if (p->pindex < backing_offset_index ||
-			    new_pindex >= object->size) {
-				if (backing_object->type == OBJT_SWAP)
-					swap_pager_freespace(backing_object,
-					    p->pindex, 1);
+		if (vm_page_busied(p)) {
+			next = vm_object_collapse_scan_wait(object, p, next, op);
+			continue;
+		}
 
-				/*
-				 * Page is out of the parent object's range, we
-				 * can simply destroy it.
-				 */
-				vm_page_lock(p);
-				KASSERT(!pmap_page_is_mapped(p),
-				    ("freeing mapped page %p", p));
-				if (p->wire_count == 0)
-					vm_page_free(p);
-				else
-					vm_page_remove(p);
-				vm_page_unlock(p);
-				p = next;
-				continue;
-			}
+		KASSERT(p->object == backing_object,
+		    ("vm_object_collapse_scan: object mismatch"));
 
-			pp = vm_page_lookup(object, new_pindex);
-			if (pp != NULL && vm_page_busied(pp)) {
-				/*
-				 * The page in the parent is busy and
-				 * possibly not (yet) valid.  Until
-				 * its state is finalized by the busy
-				 * bit owner, we can't tell whether it
-				 * shadows the original page.
-				 * Therefore, we must either skip it
-				 * and the original (backing_object)
-				 * page or wait for its state to be
-				 * finalized.
-				 *
-				 * This is due to a race with vm_fault()
-				 * where we must unbusy the original
-				 * (backing_obj) page before we can
-				 * (re)lock the parent.  Hence we can
-				 * get here.
-				 */
-				p = vm_object_backing_scan_wait(object, pp,
-				    next, op);
-				continue;
-			}
-
-			KASSERT(pp == NULL || pp->valid != 0,
-			    ("unbusy invalid page %p", pp));
+		if (p->pindex < backing_offset_index ||
+		    new_pindex >= object->size) {
+			if (backing_object->type == OBJT_SWAP)
+				swap_pager_freespace(backing_object, p->pindex,
+				    1);
 
-			if (pp != NULL || vm_pager_has_page(object,
-			    new_pindex, NULL, NULL)) {
-				/*
-				 * The page already exists in the
-				 * parent OR swap exists for this
-				 * location in the parent.  Leave the
-				 * parent's page alone.  Destroy the
-				 * original page from the backing
-				 * object.
-				 */
-				if (backing_object->type == OBJT_SWAP)
-					swap_pager_freespace(backing_object,
-					    p->pindex, 1);
-				vm_page_lock(p);
-				KASSERT(!pmap_page_is_mapped(p),
-				    ("freeing mapped page %p", p));
-				if (p->wire_count == 0)
-					vm_page_free(p);
-				else
-					vm_page_remove(p);
-				vm_page_unlock(p);
-				p = next;
-				continue;
-			}
+			/*
+			 * Page is out of the parent object's range, we can
+			 * simply destroy it.
+			 */
+			vm_page_lock(p);
+			KASSERT(!pmap_page_is_mapped(p),
+			    ("freeing mapped page %p", p));
+			if (p->wire_count == 0)
+				vm_page_free(p);
+			else
+				vm_page_remove(p);
+			vm_page_unlock(p);
+			continue;
+		}
 
+		pp = vm_page_lookup(object, new_pindex);
+		if (pp != NULL && vm_page_busied(pp)) {
 			/*
-			 * Page does not exist in parent, rename the
-			 * page from the backing object to the main object.
+			 * The page in the parent is busy and possibly not
+			 * (yet) valid.  Until its state is finalized by the
+			 * busy bit owner, we can't tell whether it shadows the
+			 * original page.  Therefore, we must either skip it
+			 * and the original (backing_object) page or wait for
+			 * its state to be finalized.
 			 *
-			 * If the page was mapped to a process, it can remain
-			 * mapped through the rename.
-			 * vm_page_rename() will handle dirty and cache.
+			 * This is due to a race with vm_fault() where we must
+			 * unbusy the original (backing_obj) page before we can
+			 * (re)lock the parent.  Hence we can get here.
 			 */
-			if (vm_page_rename(p, object, new_pindex)) {
-				p = vm_object_backing_scan_wait(object, NULL,
-				    next, op);
-				continue;
-			}
+			next = vm_object_collapse_scan_wait(object, pp, next,
+			    op);
+			continue;
+		}
 
-			/* Use the old pindex to free the right page. */
-			if (backing_object->type == OBJT_SWAP)
-				swap_pager_freespace(backing_object,
-				    new_pindex + backing_offset_index, 1);
+		KASSERT(pp == NULL || pp->valid != 0,
+		    ("unbusy invalid page %p", pp));
 
-#if VM_NRESERVLEVEL > 0
+		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
+		    NULL)) {
 			/*
-			 * Rename the reservation.
+			 * The page already exists in the parent OR swap exists
+			 * for this location in the parent.  Leave the parent's
+			 * page alone.  Destroy the original page from the
+			 * backing object.
 			 */
-			vm_reserv_rename(p, object, backing_object,
-			    backing_offset_index);
-#endif
+			if (backing_object->type == OBJT_SWAP)
+				swap_pager_freespace(backing_object, p->pindex,
+				    1);
+			vm_page_lock(p);
+			KASSERT(!pmap_page_is_mapped(p),
+			    ("freeing mapped page %p", p));
+			if (p->wire_count == 0)
+				vm_page_free(p);
+			else
+				vm_page_remove(p);
+			vm_page_unlock(p);
+			continue;
+		}
+
+		/*
+		 * Page does not exist in parent, rename the page from the
+		 * backing object to the main object.
+		 *
+		 * If the page was mapped to a process, it can remain mapped
+		 * through the rename.  vm_page_rename() will handle dirty and
+		 * cache.
+		 */
+		if (vm_page_rename(p, object, new_pindex)) {
+			next = vm_object_collapse_scan_wait(object, NULL, next,
+			    op);
+			continue;
		}
-		p = next;
+
+		/* Use the old pindex to free the right page. */
+		if (backing_object->type == OBJT_SWAP)
+			swap_pager_freespace(backing_object,
+			    new_pindex + backing_offset_index, 1);
+
+#if VM_NRESERVLEVEL > 0
+		/*
+		 * Rename the reservation.
+		 */
+		vm_reserv_rename(p, object, backing_object,
+		    backing_offset_index);
+#endif
 	}
 	return (true);
 }
@@ -1659,7 +1651,7 @@ vm_object_qcollapse(vm_object_t object)
 	if (backing_object->ref_count != 1)
 		return;
 
-	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
+	vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT);
 }
 
 /*
@@ -1717,15 +1709,15 @@ vm_object_collapse(vm_object_t object)
 		 * all the resident pages in the entire backing object.
 		 *
 		 * This is ignoring pager-backed pages such as swap pages.
-		 * vm_object_backing_scan fails the shadowing test in this
+		 * vm_object_collapse_scan fails the shadowing test in this
 		 * case.
 		 */
 		if (backing_object->ref_count == 1) {
 			/*
 			 * If there is exactly one reference to the backing
-			 * object, we can collapse it into the parent.  
+			 * object, we can collapse it into the parent.
 			 */
-			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
+			vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT);
 
 #if VM_NRESERVLEVEL > 0
 			/*
@@ -1806,8 +1798,7 @@ vm_object_collapse(vm_object_t object)
 			 * there is nothing we can do so we give up.
 			 */
 			if (object->resident_page_count != object->size &&
-			    !vm_object_backing_scan(object,
-			    OBSC_TEST_ALL_SHADOWED)) {
+			    !vm_object_scan_all_shadowed(object)) {
 				VM_OBJECT_WUNLOCK(backing_object);
 				break;
 			}
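For readers who want the shape of the change without wading through the hunks, the sketch below is a simplified, self-contained illustration of the refactoring pattern only: the function names, the toy "struct page" list, and the printf bodies are hypothetical stand-ins, not the kernel's vm_object.c code. The old vm_object_backing_scan() multiplexed two unrelated jobs on its op argument; after this revision the pure shadowing predicate and the collapse walk are separate functions that merely happen to share the same loop over the backing object's memq.

/*
 * Toy illustration of the refactoring pattern in r291704.
 * Hypothetical stand-ins only; not the actual vm_object.c code.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
        int pindex;
        bool shadowed;          /* stand-in for "the parent covers this page" */
        struct page *next;      /* stand-in for the memq TAILQ linkage */
};

/* Pure predicate, analogous to the extracted vm_object_scan_all_shadowed(). */
static bool
scan_all_shadowed(const struct page *memq)
{
        const struct page *p;

        for (p = memq; p != NULL; p = p->next)
                if (!p->shadowed)
                        return (false);
        return (true);
}

/* Mutating walk, analogous to the renamed vm_object_collapse_scan(). */
static void
collapse_scan(struct page *memq)
{
        struct page *p, *next;

        for (p = memq; p != NULL; p = next) {
                next = p->next;
                printf("would migrate or free page %d\n", p->pindex);
        }
}

int
main(void)
{
        struct page p1 = { 1, true, NULL };
        struct page p0 = { 0, true, &p1 };

        /* Before, one flag-driven call did both jobs; now each is explicit. */
        if (scan_all_shadowed(&p0))
                printf("backing object is fully shadowed\n");
        collapse_scan(&p0);
        return (0);
}

The benefit mirrors the real change: the predicate can bail out as soon as one page is not covered, and the collapse walk no longer carries the OBSC_TEST_ALL_SHADOWED branches inside its loop.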