Date:      Wed, 5 Sep 2012 16:35:38 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
Subject:   svn commit: r240142 - in stable/9/sys: sys vm
Message-ID: <201209051635.q85GZcN5068978@svn.freebsd.org>
Author: kib
Date: Wed Sep  5 16:35:37 2012
New Revision: 240142
URL: http://svn.freebsd.org/changeset/base/240142

Log:
  MFC r238212:
  Drop page queues mutex on each iteration of vm_pageout_scan over the
  inactive queue, unless busy page is found.

  MFC r238258:
  Avoid vm page queues lock leak after r238212.

Modified:
  stable/9/sys/sys/vmmeter.h
  stable/9/sys/vm/vm_pageout.c
Directory Properties:
  stable/9/sys/   (props changed)

Modified: stable/9/sys/sys/vmmeter.h
==============================================================================
--- stable/9/sys/sys/vmmeter.h	Wed Sep  5 16:01:08 2012	(r240141)
+++ stable/9/sys/sys/vmmeter.h	Wed Sep  5 16:35:37 2012	(r240142)
@@ -79,7 +79,7 @@ struct vmmeter {
 	u_int v_pdpages;	/* (q) pages analyzed by daemon */
 
 	u_int v_tcached;	/* (p) total pages cached */
-	u_int v_dfree;		/* (q) pages freed by daemon */
+	u_int v_dfree;		/* (p) pages freed by daemon */
 	u_int v_pfree;		/* (p) pages freed by exiting processes */
 	u_int v_tfree;		/* (p) total pages freed */
 /*

Modified: stable/9/sys/vm/vm_pageout.c
==============================================================================
--- stable/9/sys/vm/vm_pageout.c	Wed Sep  5 16:01:08 2012	(r240141)
+++ stable/9/sys/vm/vm_pageout.c	Wed Sep  5 16:35:37 2012	(r240142)
@@ -743,6 +743,7 @@ vm_pageout_scan(int pass)
 	int actcount;
 	int vnodes_skipped = 0;
 	int maxlaunder;
+	boolean_t queues_locked;
 
 	/*
 	 * Decrease registered cache sizes.
@@ -784,6 +785,7 @@ vm_pageout_scan(int pass)
 	if (pass)
 		maxlaunder = 10000;
 	vm_page_lock_queues();
+	queues_locked = TRUE;
 rescan0:
 	addl_page_shortage = addl_page_shortage_init;
 	maxscan = cnt.v_inactive_count;
@@ -791,6 +793,8 @@ rescan0:
 
 	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
 	     m = next) {
+		KASSERT(queues_locked, ("unlocked queues"));
+		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 
 		cnt.v_pdpages++;
@@ -850,6 +854,16 @@ rescan0:
 		}
 
 		/*
+		 * We unlock vm_page_queue_mtx, invalidating the
+		 * 'next' pointer.  Use our marker to remember our
+		 * place.
+		 */
+		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
+		    m, &marker, pageq);
+		vm_page_unlock_queues();
+		queues_locked = FALSE;
+
+		/*
 		 * If the object is not being used, we ignore previous
 		 * references.
 		 */
@@ -873,7 +887,7 @@ rescan0:
 			vm_page_unlock(m);
 			m->act_count += actcount + ACT_ADVANCE;
 			VM_OBJECT_UNLOCK(object);
-			continue;
+			goto relock_queues;
 		}
 
 		/*
@@ -889,7 +903,7 @@ rescan0:
 			vm_page_unlock(m);
 			m->act_count += actcount + ACT_ADVANCE + 1;
 			VM_OBJECT_UNLOCK(object);
-			continue;
+			goto relock_queues;
 		}
 
 		/*
@@ -924,7 +938,7 @@ rescan0:
 			 * Invalid pages can be easily freed
 			 */
 			vm_page_free(m);
-			cnt.v_dfree++;
+			PCPU_INC(cnt.v_dfree);
 			--page_shortage;
 		} else if (m->dirty == 0) {
 			/*
@@ -947,6 +961,8 @@ rescan0:
 			 * the thrash point for a heavily loaded machine.
 			 */
 			m->flags |= PG_WINATCFLS;
+			vm_page_lock_queues();
+			queues_locked = TRUE;
 			vm_page_requeue(m);
 		} else if (maxlaunder > 0) {
 			/*
@@ -976,21 +992,13 @@ rescan0:
 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
 				vm_page_unlock(m);
 				VM_OBJECT_UNLOCK(object);
+				vm_page_lock_queues();
+				queues_locked = TRUE;
 				vm_page_requeue(m);
-				continue;
+				goto relock_queues;
 			}
 
 			/*
-			 * Following operations may unlock
-			 * vm_page_queue_mtx, invalidating the 'next'
-			 * pointer.  To prevent an inordinate number
-			 * of restarts we use our marker to remember
-			 * our place.
-			 *
-			 */
-			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
-			    m, &marker, pageq);
-
-			/*
 			 * The object is already known NOT to be dead.   It
 			 * is possible for the vget() to block the whole
 			 * pageout daemon, but the new low-memory handling
@@ -1014,7 +1022,6 @@ rescan0:
 			 * of time.
 			 */
 			if (object->type == OBJT_VNODE) {
-				vm_page_unlock_queues();
 				vm_page_unlock(m);
 				vp = object->handle;
 				if (vp->v_type == VREG &&
@@ -1023,7 +1030,6 @@ rescan0:
 					++pageout_lock_miss;
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
-					vm_page_lock_queues();
 					goto unlock_and_continue;
 				}
 				KASSERT(mp != NULL,
@@ -1034,7 +1040,6 @@ rescan0:
 				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
 				    curthread)) {
 					VM_OBJECT_LOCK(object);
-					vm_page_lock_queues();
 					++pageout_lock_miss;
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
@@ -1044,6 +1049,7 @@ rescan0:
 				VM_OBJECT_LOCK(object);
 				vm_page_lock(m);
 				vm_page_lock_queues();
+				queues_locked = TRUE;
 				/*
 				 * The page might have been moved to another
 				 * queue during potential blocking in vget()
@@ -1081,6 +1087,8 @@ rescan0:
 						vnodes_skipped++;
 					goto unlock_and_continue;
 				}
+				vm_page_unlock_queues();
+				queues_locked = FALSE;
 			}
 
 			/*
@@ -1093,32 +1101,37 @@ rescan0:
 			 * the (future) cleaned page.  Otherwise we could wind
 			 * up laundering or cleaning too many pages.
 			 */
-			vm_page_unlock_queues();
 			if (vm_pageout_clean(m) != 0) {
 				--page_shortage;
 				--maxlaunder;
 			}
-			vm_page_lock_queues();
unlock_and_continue:
 			vm_page_lock_assert(m, MA_NOTOWNED);
 			VM_OBJECT_UNLOCK(object);
 			if (mp != NULL) {
-				vm_page_unlock_queues();
+				if (queues_locked) {
+					vm_page_unlock_queues();
+					queues_locked = FALSE;
+				}
 				if (vp != NULL)
 					vput(vp);
 				VFS_UNLOCK_GIANT(vfslocked);
 				vm_object_deallocate(object);
 				vn_finished_write(mp);
 			}
-			next = TAILQ_NEXT(&marker, pageq);
-			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
-			    &marker, pageq);
 			vm_page_lock_assert(m, MA_NOTOWNED);
-			continue;
+			goto relock_queues;
 		}
 		vm_page_unlock(m);
 		VM_OBJECT_UNLOCK(object);
+relock_queues:
+		if (!queues_locked) {
+			vm_page_lock_queues();
+			queues_locked = TRUE;
+		}
+		next = TAILQ_NEXT(&marker, pageq);
+		TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
+		    &marker, pageq);
 	}

 	/*