Date: Fri, 26 Jul 2013 23:22:05 +0000 (UTC)
From: Jeff Roberson <jeff@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r253697 - head/sys/vm
Message-ID: <201307262322.r6QNM51H095113@svn.freebsd.org>
Author: jeff
Date: Fri Jul 26 23:22:05 2013
New Revision: 253697
URL: http://svnweb.freebsd.org/changeset/base/253697

Log:
  Improve page LRU quality and simplify the logic.
   - Don't short-circuit aging tests for unmapped objects.  This biases
     against unmapped file pages and transient mappings.
   - Always honor PGA_REFERENCED.  We can now use this after soft busying
     to lazily restart the LRU.
   - Don't transition directly from active to cached bypassing the
     inactive queue.  This frees recently used data much too early.
   - Rename actcount to act_delta to be more consistent with use and
     meaning.

  Reviewed by:	kib, alc
  Sponsored by:	EMC / Isilon Storage Division

Modified:
  head/sys/vm/vm_pageout.c

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c	Fri Jul 26 22:53:17 2013	(r253696)
+++ head/sys/vm/vm_pageout.c	Fri Jul 26 23:22:05 2013	(r253697)
@@ -708,7 +708,7 @@ vm_pageout_object_deactivate_pages(pmap_
 {
 	vm_object_t backing_object, object;
 	vm_page_t p;
-	int actcount, remove_mode;
+	int act_delta, remove_mode;
 
 	VM_OBJECT_ASSERT_LOCKED(first_object);
 	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
@@ -739,17 +739,17 @@ vm_pageout_object_deactivate_pages(pmap_
 				vm_page_unlock(p);
 				continue;
 			}
-			actcount = pmap_ts_referenced(p);
+			act_delta = pmap_ts_referenced(p);
 			if ((p->aflags & PGA_REFERENCED) != 0) {
-				if (actcount == 0)
-					actcount = 1;
+				if (act_delta == 0)
+					act_delta = 1;
 				vm_page_aflag_clear(p, PGA_REFERENCED);
 			}
-			if (p->queue != PQ_ACTIVE && actcount != 0) {
+			if (p->queue != PQ_ACTIVE && act_delta != 0) {
 				vm_page_activate(p);
-				p->act_count += actcount;
+				p->act_count += act_delta;
 			} else if (p->queue == PQ_ACTIVE) {
-				if (actcount == 0) {
+				if (act_delta == 0) {
 					p->act_count -= min(p->act_count,
 					    ACT_DECLINE);
 					if (!remove_mode && p->act_count == 0) {
@@ -869,7 +869,7 @@ vm_pageout_scan(int pass)
 	int page_shortage, maxscan, pcount;
 	int addl_page_shortage;
 	vm_object_t object;
-	int actcount;
+	int act_delta;
 	int vnodes_skipped = 0;
 	int maxlaunder;
 	boolean_t queues_locked;
@@ -989,44 +989,40 @@ vm_pageout_scan(int pass)
 		queues_locked = FALSE;
 
 		/*
-		 * If the object is not being used, we ignore previous
+		 * We bump the activation count if the page has been
+		 * referenced while in the inactive queue.  This makes
+		 * it less likely that the page will be added back to the
+		 * inactive queue prematurely again.  Here we check the
+		 * page tables (or emulated bits, if any), given the upper
+		 * level VM system not knowing anything about existing
 		 * references.
 		 */
-		if (object->ref_count == 0) {
+		act_delta = 0;
+		if ((m->aflags & PGA_REFERENCED) != 0) {
 			vm_page_aflag_clear(m, PGA_REFERENCED);
+			act_delta = 1;
+		}
+		if (object->ref_count != 0) {
+			act_delta += pmap_ts_referenced(m);
+		} else {
 			KASSERT(!pmap_page_is_mapped(m),
 			    ("vm_pageout_scan: page %p is mapped", m));
-
-		/*
-		 * Otherwise, if the page has been referenced while in the
-		 * inactive queue, we bump the "activation count" upwards,
-		 * making it less likely that the page will be added back to
-		 * the inactive queue prematurely again.  Here we check the
-		 * page tables (or emulated bits, if any), given the upper
-		 * level VM system not knowing anything about existing
-		 * references.
-		 */
-		} else if ((m->aflags & PGA_REFERENCED) == 0 &&
-		    (actcount = pmap_ts_referenced(m)) != 0) {
-			vm_page_activate(m);
-			VM_OBJECT_WUNLOCK(object);
-			m->act_count += actcount + ACT_ADVANCE;
-			vm_page_unlock(m);
-			goto relock_queues;
 		}
 
 		/*
 		 * If the upper level VM system knows about any page
-		 * references, we activate the page.  We also set the
-		 * "activation count" higher than normal so that we will less
-		 * likely place pages back onto the inactive queue again.
+		 * references, we reactivate the page or requeue it.
 		 */
-		if ((m->aflags & PGA_REFERENCED) != 0) {
-			vm_page_aflag_clear(m, PGA_REFERENCED);
-			actcount = pmap_ts_referenced(m);
-			vm_page_activate(m);
+		if (act_delta != 0) {
+			if (object->ref_count) {
+				vm_page_activate(m);
+				m->act_count += act_delta + ACT_ADVANCE;
+			} else {
+				vm_pagequeue_lock(pq);
+				queues_locked = TRUE;
+				vm_page_requeue_locked(m);
+			}
 			VM_OBJECT_WUNLOCK(object);
-			m->act_count += actcount + ACT_ADVANCE + 1;
 			vm_page_unlock(m);
 			goto relock_queues;
 		}
@@ -1324,50 +1320,40 @@ relock_queues:
 		/*
 		 * Check to see "how much" the page has been used.
 		 */
-		actcount = 0;
-		if (object->ref_count != 0) {
-			if (m->aflags & PGA_REFERENCED) {
-				actcount += 1;
-			}
-			actcount += pmap_ts_referenced(m);
-			if (actcount) {
-				m->act_count += ACT_ADVANCE + actcount;
-				if (m->act_count > ACT_MAX)
-					m->act_count = ACT_MAX;
-			}
+		act_delta = 0;
+		if (m->aflags & PGA_REFERENCED) {
+			vm_page_aflag_clear(m, PGA_REFERENCED);
+			act_delta += 1;
 		}
+		if (object->ref_count != 0)
+			act_delta += pmap_ts_referenced(m);
 
 		/*
-		 * Since we have "tested" this bit, we need to clear it now.
+		 * Advance or decay the act_count based on recent usage.
 		 */
-		vm_page_aflag_clear(m, PGA_REFERENCED);
+		if (act_delta) {
+			m->act_count += ACT_ADVANCE + act_delta;
+			if (m->act_count > ACT_MAX)
+				m->act_count = ACT_MAX;
+		} else {
+			m->act_count -= min(m->act_count, ACT_DECLINE);
+			act_delta = m->act_count;
+		}
 
 		/*
-		 * Only if an object is currently being used, do we use the
-		 * page activation count stats.
+		 * Move this page to the tail of the active or inactive
+		 * queue depending on usage.
 		 */
-		if (actcount != 0 && object->ref_count != 0)
+		if (act_delta == 0) {
+			KASSERT(object->ref_count != 0 ||
+			    !pmap_page_is_mapped(m),
+			    ("vm_pageout_scan: page %p is mapped", m));
+			/* Dequeue to avoid later lock recursion. */
+			vm_page_dequeue_locked(m);
+			vm_page_deactivate(m);
+			page_shortage--;
+		} else
 			vm_page_requeue_locked(m);
-		else {
-			m->act_count -= min(m->act_count, ACT_DECLINE);
-			if (object->ref_count == 0 ||
-			    m->act_count == 0) {
-				page_shortage--;
-				/* Dequeue to avoid later lock recursion. */
-				vm_page_dequeue_locked(m);
-				if (object->ref_count == 0) {
-					KASSERT(!pmap_page_is_mapped(m),
-					    ("vm_pageout_scan: page %p is mapped", m));
-					if (m->dirty == 0)
-						vm_page_cache(m);
-					else
-						vm_page_deactivate(m);
-				} else {
-					vm_page_deactivate(m);
-				}
-			} else
-				vm_page_requeue_locked(m);
-		}
 		vm_page_unlock(m);
 		VM_OBJECT_WUNLOCK(object);
 		m = next;
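To make the new accounting easier to follow, here is a minimal userland sketch of the act_delta logic that the last hunk applies to the active-queue scan. It is only an illustration, not the kernel code: struct fake_page and its referenced/hw_refs/mapped fields stand in for struct vm_page, PGA_REFERENCED, pmap_ts_referenced() and object->ref_count, and the ACT_* values below are illustrative placeholders rather than the kernel's tunables; only the control flow mirrors the committed change.

/*
 * Illustrative sketch (not kernel code): fold the software reference flag
 * and the hardware reference count into a single act_delta, then advance
 * or decay act_count before deciding whether the page stays active.
 */
#include <stdio.h>

#define	ACT_DECLINE	1	/* illustrative values only */
#define	ACT_ADVANCE	3
#define	ACT_MAX		64

/* Stand-in for struct vm_page; only the fields this sketch needs. */
struct fake_page {
	int	referenced;	/* models the PGA_REFERENCED aflag */
	int	hw_refs;	/* models the pmap_ts_referenced() result */
	int	mapped;		/* models object->ref_count != 0 */
	int	act_count;
};

/* Return 1 to keep the page on the active queue, 0 to deactivate it. */
static int
scan_active_page(struct fake_page *p)
{
	int act_delta = 0;

	/* Always honor the software reference flag, then clear it. */
	if (p->referenced) {
		p->referenced = 0;
		act_delta = 1;
	}
	/* Consult the "page tables" only while the object is still used. */
	if (p->mapped)
		act_delta += p->hw_refs;

	/* Advance or decay act_count based on recent usage. */
	if (act_delta != 0) {
		p->act_count += ACT_ADVANCE + act_delta;
		if (p->act_count > ACT_MAX)
			p->act_count = ACT_MAX;
	} else {
		p->act_count -= (p->act_count < ACT_DECLINE ?
		    p->act_count : ACT_DECLINE);
		act_delta = p->act_count;
	}
	return (act_delta != 0);
}

int
main(void)
{
	struct fake_page p = { .referenced = 1, .hw_refs = 2, .mapped = 1 };
	int pass;

	/* One referenced pass, then let act_count decay toward zero. */
	for (pass = 1; pass <= 8; pass++) {
		printf("pass %d: %s (act_count %d)\n", pass,
		    scan_active_page(&p) ? "keep active" : "deactivate",
		    p.act_count);
		p.hw_refs = 0;
	}
	return (0);
}

The inactive-queue hunk follows the same pattern, except that a referenced page whose object has no remaining references is requeued at the tail of the inactive queue instead of being activated; in the active-queue hunk sketched above, idle pages are likewise deactivated rather than moved straight to the cache, matching the log entry about not bypassing the inactive queue.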