From: Kris Kennaway
Date: Tue, 20 Nov 2007 23:41:31 +0100
To: Kris Kennaway
Cc: Attilio Rao, freebsd-stable@freebsd.org, Alexey Popov
Subject: Re: 2 x quad-core system is slower that 2 x dual core on FreeBSD
Message-ID: <4743629B.9090408@FreeBSD.org>
In-Reply-To: <474339E9.4080301@FreeBSD.org>
List-Id: Production branch of FreeBSD source code

Kris Kennaway wrote:
> Kris Kennaway wrote:
>> In the meantime there is unfortunately not a lot that can be done,
>> AFAICT.  There is one hack that I will send you later, but it is not
>> likely to help much.  I will also think about how to track down the
>> cause of the contention further (the profiling trace only shows that
>> it comes mostly from vget/vput but doesn't show where these are
>> called from).
>
> Actually this patch might help.  It doesn't replace lockmgr, but it
> does fix a silly thundering-herd behaviour.  It probably needs some
> adjustment to apply cleanly (it is about 7 months old), and I
> apparently stopped using it because I ran into deadlocks.  It might be
> stable enough to at least see how much it helps.
>
> Set the vfs.lookup_shared=1 sysctl to enable the other half of the
> patch.
>
> Kris

Try this one instead; it applies to HEAD.  You'll need to enter the
paths manually, though, because of how p4 mangles diffs.
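Two practical notes, both just rough sketches that I haven't re-verified
against this exact diff: patch(1) won't parse the p4 "====" file headers,
so run it from the top of the kernel source tree and type each path when
it asks "File to patch:", e.g.

  cd /usr/src/sys
  patch < lockmgr.diff
  # enter kern/kern_lock.c, kern/subr_lock.c, kern/vfs_default.c,
  # sys/lockmgr.h and ufs/ffs/ffs_vnops.c as it prompts for them

The lookup half is an ordinary sysctl:

  sysctl vfs.lookup_shared=1

(or set it in /boot/loader.conf if you want it at boot).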
Kris

[attachment: lockmgr.diff (text/plain, inline)]

==== //depot/user/kris/contention/sys/kern/kern_lock.c#10 - /zoo/kris/contention/kern/kern_lock.c ====

@@ -109,7 +109,6 @@
 #define LK_ALL	(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
 	LK_SHARE_NONZERO | LK_WAIT_NONZERO)
 
-static int	 acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
 static int	 acquiredrain(struct lock *lkp, int extflags) ;
 
 static __inline void
@@ -126,61 +125,17 @@
 	COUNT(td, -decr);
 	if (lkp->lk_sharecount == decr) {
-		lkp->lk_flags &= ~LK_SHARE_NONZERO;
-		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
-			wakeup(lkp);
-		}
+		if (lkp->lk_exclusivewait != 0)
+			wakeup_one(&lkp->lk_exclusivewait);
 		lkp->lk_sharecount = 0;
 	} else {
 		lkp->lk_sharecount -= decr;
+		if (lkp->lk_sharecount == 1 && lkp->lk_flags & LK_WANT_UPGRADE)
+			wakeup(&lkp->lk_flags);
 	}
 }
 
-static int
-acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
-{
-	struct lock *lkp = *lkpp;
-	int error;
-	CTR3(KTR_LOCK,
-	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
-	    lkp, extflags, wanted);
-	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
-		return EBUSY;
-	error = 0;
-	if ((lkp->lk_flags & wanted) != 0)
-		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);
-
-	while ((lkp->lk_flags & wanted) != 0) {
-		CTR2(KTR_LOCK,
-		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
-		    lkp, lkp->lk_flags);
-		lkp->lk_flags |= LK_WAIT_NONZERO;
-		lkp->lk_waitcount++;
-		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
-		    lkp->lk_wmesg,
-		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
-		lkp->lk_waitcount--;
-		if (lkp->lk_waitcount == 0)
-			lkp->lk_flags &= ~LK_WAIT_NONZERO;
-		if (error)
-			break;
-		if (extflags & LK_SLEEPFAIL) {
-			error = ENOLCK;
-			break;
-		}
-		if (lkp->lk_newlock != NULL) {
-			mtx_lock(lkp->lk_newlock->lk_interlock);
-			mtx_unlock(lkp->lk_interlock);
-			if (lkp->lk_waitcount == 0)
-				wakeup((void *)(&lkp->lk_newlock));
-			*lkpp = lkp = lkp->lk_newlock;
-		}
-	}
-	mtx_assert(lkp->lk_interlock, MA_OWNED);
-	return (error);
-}
-
 /*
  * Set, change, or release a lock.
  *
@@ -189,16 +144,16 @@
  * accepted shared locks and shared-to-exclusive upgrades to go away.
  */
 int
-_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
-	 struct thread *td, char *file, int line)
-
+lockmgr(lkp, flags, interlkp, td)
+	struct lock *lkp;
+	u_int flags;
+	struct mtx *interlkp;
+	struct thread *td;
 {
 	int error;
 	struct thread *thr;
-	int extflags, lockflags;
-	int contested = 0;
-	uint64_t waitstart = 0;
-
+	int extflags;
+
 	error = 0;
 	if (td == NULL)
 		thr = LK_KERNPROC;
@@ -226,7 +181,7 @@
 
 	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
-		    &lkp->lk_interlock->lock_object,
+		    &lkp->lk_interlock->mtx_object,
 		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);
 
 	if (panicstr != NULL) {
@@ -253,16 +208,30 @@
 		 * lock itself ).
 		 */
 		if (lkp->lk_lockholder != thr) {
-			lockflags = LK_HAVE_EXCL;
-			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
-				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
-			error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
-			if (error)
+
+			while (lkp->lk_exclusivecount != 0 /* ||
+			    (!(td->td_pflags & TDP_DEADLKTREAT) &&
+			    (lkp->lk_flags & LK_WANT_UPGRADE)) */) {
+				lkp->lk_sharewait++;
+				error = msleep(&lkp->lk_sharewait, lkp->lk_interlock,
+				    lkp->lk_prio,lkp->lk_wmesg,
+				    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
+
+				if (error)
+					break;
+				if (extflags & LK_SLEEPFAIL) {
+					error = ENOLCK;
+					break;
+				}
+
+				lkp->lk_sharewait--;
+			}
+
+			if (error != 0) {
+				shareunlock(td,lkp,0);
 				break;
+			}
 			sharelock(td, lkp, 1);
-			if (lkp->lk_sharecount == 1)
-				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
-
 #if defined(DEBUG_LOCKS)
 			stack_save(&lkp->lk_stack);
 #endif
@@ -273,8 +242,6 @@
 		 * An alternative would be to fail with EDEADLK.
 		 */
 		sharelock(td, lkp, 1);
-		if (lkp->lk_sharecount == 1)
-			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
 		/* FALLTHROUGH downgrade */
 
 	case LK_DOWNGRADE:
@@ -285,10 +252,9 @@
 		sharelock(td, lkp, lkp->lk_exclusivecount);
 		COUNT(td, -lkp->lk_exclusivecount);
 		lkp->lk_exclusivecount = 0;
-		lkp->lk_flags &= ~LK_HAVE_EXCL;
 		lkp->lk_lockholder = LK_NOPROC;
-		if (lkp->lk_waitcount)
-			wakeup((void *)lkp);
+		if (lkp->lk_sharewait)
+			wakeup((void *)&lkp->lk_sharewait);
 		break;
 
 	case LK_EXCLUPGRADE:
@@ -317,15 +283,13 @@
 			panic("lockmgr: upgrade exclusive lock");
 		if (lkp->lk_sharecount <= 0)
 			panic("lockmgr: upgrade without shared");
-		shareunlock(td, lkp, 1);
-		if (lkp->lk_sharecount == 0)
-			lock_profile_release_lock(&lkp->lk_object);
 		/*
 		 * If we are just polling, check to see if we will block.
 		 */
 		if ((extflags & LK_NOWAIT) &&
 		    ((lkp->lk_flags & LK_WANT_UPGRADE) || lkp->lk_sharecount > 1)) {
+			shareunlock(td, lkp, 1);
 			error = EBUSY;
 			break;
 		}
@@ -336,34 +300,43 @@
 			 * drop to zero, then take exclusive lock.
 			 */
 			lkp->lk_flags |= LK_WANT_UPGRADE;
-			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
+
+			while(lkp->lk_sharecount != 1) {
+
+				error = msleep(&lkp->lk_flags, lkp->lk_interlock,
+				    lkp->lk_prio,lkp->lk_wmesg,
+				    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
+
+				if (error)
+					break;
+				if (extflags & LK_SLEEPFAIL) {
+					error = ENOLCK;
+					break;
+				}
+			}
+
 			lkp->lk_flags &= ~LK_WANT_UPGRADE;
 
 			if (error) {
-				if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
-					wakeup((void *)lkp);
+				shareunlock(td, lkp, 1);
+				if (lkp->lk_sharewait)
+					wakeup(&lkp->lk_sharewait);
 				break;
 			}
+
 			if (lkp->lk_exclusivecount != 0)
 				panic("lockmgr: non-zero exclusive count");
-			lkp->lk_flags |= LK_HAVE_EXCL;
 			lkp->lk_lockholder = thr;
 			lkp->lk_exclusivecount = 1;
+			lkp->lk_sharecount = 0;
 			COUNT(td, 1);
-			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
 			stack_save(&lkp->lk_stack);
 #endif
 			break;
 		}
-		/*
-		 * Someone else has requested upgrade. Release our shared
-		 * lock, awaken upgrade requestor if we are the last shared
-		 * lock, then request an exclusive lock.
-		 */
-		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
-			LK_WAIT_NONZERO)
-			wakeup((void *)lkp);
+
+		shareunlock(td, lkp, 1);
 		/* FALLTHROUGH exclusive request */
 
 	case LK_EXCLUSIVE:
@@ -379,38 +352,52 @@
 				break;
 			}
 		}
-		/*
-		 * If we are just polling, check to see if we will sleep.
-		 */
-		if ((extflags & LK_NOWAIT) &&
-		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
-			error = EBUSY;
-			break;
+
+
+		if (lkp->lk_exclusivecount != 0 || lkp->lk_sharecount != 0) {
+
+
+			/*
+			 * If we are just polling, check to see if we will sleep.
+			 */
+			if (extflags & LK_NOWAIT) {
+				error = EBUSY;
+				break;
+			}
+
+			lkp->lk_exclusivewait++;
+
+			while(lkp->lk_exclusivecount != 0 || lkp->lk_sharecount != 0) {
+				error = msleep(&lkp->lk_exclusivewait, lkp->lk_interlock,
+				    lkp->lk_prio,lkp->lk_wmesg,
+				    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
+
+				if (error)
+					break;
+				if (extflags & LK_SLEEPFAIL) {
+					error = ENOLCK;
+					break;
+				}
+
+			}
+			lkp->lk_exclusivewait--;
+
+			if(error) {
+				if (lkp->lk_exclusivewait != 0)
+					wakeup(&lkp->lk_exclusivewait);
+				else if (lkp->lk_sharewait != 0)
+					wakeup(&lkp->lk_sharewait);
+
+				break;
+			}
 		}
-		/*
-		 * Try to acquire the want_exclusive flag.
-		 */
-		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
-		if (error)
-			break;
-		lkp->lk_flags |= LK_WANT_EXCL;
-		/*
-		 * Wait for shared locks and upgrades to finish.
-		 */
-		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
-		lkp->lk_flags &= ~LK_WANT_EXCL;
-		if (error) {
-			if (lkp->lk_flags & LK_WAIT_NONZERO)
-				wakeup((void *)lkp);
-			break;
-		}
-		lkp->lk_flags |= LK_HAVE_EXCL;
+
+
 		lkp->lk_lockholder = thr;
 		if (lkp->lk_exclusivecount != 0)
 			panic("lockmgr: non-zero exclusive count");
 		lkp->lk_exclusivecount = 1;
 		COUNT(td, 1);
-		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
 		stack_save(&lkp->lk_stack);
 #endif
@@ -427,23 +414,21 @@
 			if (lkp->lk_lockholder != LK_KERNPROC)
 				COUNT(td, -1);
 			if (lkp->lk_exclusivecount == 1) {
-				lkp->lk_flags &= ~LK_HAVE_EXCL;
 				lkp->lk_lockholder = LK_NOPROC;
 				lkp->lk_exclusivecount = 0;
-				lock_profile_release_lock(&lkp->lk_object);
+				if (lkp->lk_sharewait)
+					wakeup(&lkp->lk_sharewait);
+				else if (lkp->lk_exclusivewait)
+					wakeup_one(&lkp->lk_exclusivewait);
 			} else {
 				lkp->lk_exclusivecount--;
 			}
-		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
+		} else if (lkp->lk_sharecount != 0)
 			shareunlock(td, lkp, 1);
-		else {
-			printf("lockmgr: thread %p unlocking unheld lock\n",
-			    thr);
-			kdb_backtrace();
-		}
-
-		if (lkp->lk_flags & LK_WAIT_NONZERO)
-			wakeup((void *)lkp);
+		else
+			panic("lockmgr: thread %p, not holding a lock",
+			    thr);
+
 		break;
 
 	case LK_DRAIN:
@@ -459,7 +444,7 @@
 		error = acquiredrain(lkp, extflags);
 		if (error)
 			break;
-		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
+		lkp->lk_flags |= LK_DRAINING;
 		lkp->lk_lockholder = thr;
 		lkp->lk_exclusivecount = 1;
 		COUNT(td, 1);
@@ -475,8 +460,10 @@
 		/* NOTREACHED */
 	}
 	if ((lkp->lk_flags & LK_WAITDRAIN) &&
-	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
-		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
+	    (lkp->lk_sharewait == 0) &&
+	    (lkp->lk_exclusivewait == 0) &&
+	    (lkp->lk_sharecount== 0) &&
+	    (lkp->lk_exclusivecount== 0)) {
 		lkp->lk_flags &= ~LK_WAITDRAIN;
 		wakeup((void *)&lkp->lk_flags);
 	}
@@ -488,10 +475,14 @@
 acquiredrain(struct lock *lkp, int extflags)
 {
 	int error;
-	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
-		return EBUSY;
-	}
-	while (lkp->lk_flags & LK_ALL) {
+
+	while ((lkp->lk_sharecount != 0) ||
+	    (lkp->lk_sharewait != 0) ||
+	    (lkp->lk_exclusivecount != 0) ||
+	    (lkp->lk_exclusivewait != 0)) {
+
+		if (extflags & LK_NOWAIT) return EBUSY;
+
 		lkp->lk_flags |= LK_WAITDRAIN;
 		error = msleep(&lkp->lk_flags, lkp->lk_interlock,
 		    lkp->lk_prio, lkp->lk_wmesg,
@@ -505,34 +496,7 @@
 	return 0;
 }
 
-/*
- * Transfer any waiting processes from one lock to another.
- */
-void
-transferlockers(from, to)
-	struct lock *from;
-	struct lock *to;
-{
-	KASSERT(from != to, ("lock transfer to self"));
-	KASSERT((from->lk_flags&LK_WAITDRAIN) == 0, ("transfer draining lock"));
-
-	mtx_lock(from->lk_interlock);
-	if (from->lk_waitcount == 0) {
-		mtx_unlock(from->lk_interlock);
-		return;
-	}
-	from->lk_newlock = to;
-	wakeup((void *)from);
-	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
-	    "lkxfer", 0);
-	from->lk_newlock = NULL;
-	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
-	KASSERT(from->lk_waitcount == 0, ("active lock"));
-	mtx_unlock(from->lk_interlock);
-}
-
-
 /*
  * Initialize a lock; required before use.
  */
@@ -550,12 +514,13 @@
 	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
 	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
 	lkp->lk_sharecount = 0;
-	lkp->lk_waitcount = 0;
+	lkp->lk_sharewait = 0;
 	lkp->lk_exclusivecount = 0;
+	lkp->lk_exclusivewait = 0;
 	lkp->lk_prio = prio;
 	lkp->lk_timo = timo;
+	lkp->lk_wmesg = wmesg;
 	lkp->lk_lockholder = LK_NOPROC;
-	lkp->lk_newlock = NULL;
 #ifdef DEBUG_LOCKS
 	stack_zero(&lkp->lk_stack);
 #endif
@@ -614,7 +579,8 @@
 	int count;
 
 	mtx_lock(lkp->lk_interlock);
-	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
+	count = lkp->lk_exclusivecount +
+	    lkp->lk_sharecount;
 	mtx_unlock(lkp->lk_interlock);
 	return (count);
 }
@@ -629,7 +595,9 @@
 	int count;
 
 	mtx_lock(lkp->lk_interlock);
-	count = lkp->lk_waitcount;
+	count = lkp->lk_exclusivewait +
+	    lkp->lk_sharewait +
+	    (lkp->lk_flags & LK_WANT_UPGRADE) ? 1 : 0;
 	mtx_unlock(lkp->lk_interlock);
 	return (count);
 }
@@ -646,12 +614,14 @@
 	if (lkp->lk_sharecount)
 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
 		    lkp->lk_sharecount);
-	else if (lkp->lk_flags & LK_HAVE_EXCL)
+	else if (lkp->lk_exclusivecount)
 		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
 		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder,
 		    lkp->lk_lockholder->td_proc->p_pid);
-	if (lkp->lk_waitcount > 0)
-		printf(" with %d pending", lkp->lk_waitcount);
+	if (lkp->lk_sharewait > 0)
+		printf(" with %d pending readers", lkp->lk_sharewait);
+	if (lkp->lk_exclusivewait > 0)
+		printf(" with %d pending writers", lkp->lk_exclusivewait);
 #ifdef DEBUG_LOCKS
 	stack_print(&lkp->lk_stack);
 #endif
@@ -671,24 +641,9 @@
 	lkp = td->td_wchan;
 
 	/* Simple test to see if wchan points to a lockmgr lock. */
-	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
-	    lkp->lk_wmesg == td->td_wmesg)
-		goto ok;
+	if (lkp->lk_wmesg != td->td_wmesg)
+		return (0);
 
-	/*
-	 * If this thread is doing a DRAIN, then it would be asleep on
-	 * &lkp->lk_flags rather than lkp.
-	 */
-	lkp = (struct lock *)((char *)td->td_wchan -
-	    offsetof(struct lock, lk_flags));
-	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
-	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
-		goto ok;
-
-	/* Doen't seem to be a lockmgr lock. */
-	return (0);
-
-ok:
 	/* Ok, we think we have a lockmgr lock, so output some details.
 	 */
 	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
 	if (lkp->lk_sharecount) {
@@ -701,7 +656,7 @@
 	return (1);
 }
 
-void
+static void
 db_show_lockmgr(struct lock_object *lock)
 {
 	struct thread *td;
@@ -709,18 +664,20 @@
 
 	lkp = (struct lock *)lock;
 
-	db_printf(" lock type: %s\n", lkp->lk_wmesg);
-	db_printf(" state: ");
+	db_printf("lock type: %s\n", lkp->lk_wmesg);
+	db_printf("state: ");
 	if (lkp->lk_sharecount)
 		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
-	else if (lkp->lk_flags & LK_HAVE_EXCL) {
+	else if (lkp->lk_exclusivecount != 0) {
 		td = lkp->lk_lockholder;
 		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
 		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
 		    td->td_proc->p_pid, td->td_proc->p_comm);
 	} else
 		db_printf("UNLOCKED\n");
-	if (lkp->lk_waitcount > 0)
-		db_printf(" waiters: %d\n", lkp->lk_waitcount);
+	if (lkp->lk_sharewait > 0)
+		db_printf("waiters shared: %d\n", lkp->lk_sharewait);
+	if (lkp->lk_exclusivewait > 0)
+		db_printf("waiters exclusive: %d\n", lkp->lk_exclusivewait);
 }
 #endif

==== //depot/user/kris/contention/sys/kern/subr_lock.c#14 - /zoo/kris/contention/kern/subr_lock.c ====

@@ -59,7 +59,6 @@
 	&lock_class_sx,
 	&lock_class_rm,
 	&lock_class_rw,
-	&lock_class_lockmgr,
 };
 
 #ifdef LOCK_PROFILING

==== //depot/user/kris/contention/sys/kern/vfs_default.c#6 - /zoo/kris/contention/kern/vfs_default.c ====

@@ -263,7 +263,7 @@
 {
 	struct vnode *vp = ap->a_vp;
 
-	return (_lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line));
+	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
 }
 
 /* See above. */

==== //depot/user/kris/contention/sys/sys/lockmgr.h#6 - /zoo/kris/contention/sys/lockmgr.h ====

@@ -51,23 +51,23 @@
  * can be gained.
  */
 struct lock {
-	struct lock_object lk_object;	/* common lock properties */
+	struct lock_object lk_object;		/* common lock properties */
 	struct mtx *lk_interlock;	/* lock on remaining fields */
 	u_int	lk_flags;		/* see below */
 	int	lk_sharecount;		/* # of accepted shared locks */
-	int	lk_waitcount;		/* # of processes sleeping for lock */
-	short	lk_exclusivecount;	/* # of recursive exclusive locks */
+	int	lk_sharewait;		/* # waiting for shared locks */
+	int	lk_exclusivecount;	/* # of recursive exclusive locks */
+	int	lk_exclusivewait;	/* # of recursive exclusive locks */
+	short	lk_prio;		/* priority at which to sleep */
+	const	char *lk_wmesg;		/* resource sleeping (for tsleep) */
 	int	lk_timo;		/* maximum sleep time (for tsleep) */
 	struct	thread *lk_lockholder;	/* thread of exclusive lock holder */
-	struct	lock *lk_newlock;	/* lock taking over this lock */
-
+
 #ifdef	DEBUG_LOCKS
 	struct stack lk_stack;
 #endif
 };
-
-#define	lk_wmesg	lk_object.lo_name

 /*
  * Lock request types:
  *   LK_SHARED - get one of many possible shared locks. If a process
@@ -202,15 +202,13 @@
 	    int timo, int flags);
 void	 lockdestroy(struct lock *);
-int	 _lockmgr(struct lock *, u_int flags,
-	    struct mtx *, struct thread *p, char *file, int line);
+int	 lockmgr(struct lock *, u_int flags,
+	    struct mtx *, struct thread *p);
 void	 transferlockers(struct lock *, struct lock *);
 void	 lockmgr_printinfo(struct lock *);
 int	 lockstatus(struct lock *, struct thread *);
 int	 lockcount(struct lock *);
 int	 lockwaiters(struct lock *);
-
-#define lockmgr(lock, flags, mtx, td) _lockmgr((lock), (flags), (mtx), (td), __FILE__, __LINE__)
 #ifdef DDB
 int	 lockmgr_chain(struct thread *td, struct thread **ownerp);
 #endif

==== //depot/user/kris/contention/sys/ufs/ffs/ffs_vnops.c#13 - /zoo/kris/contention/ufs/ffs/ffs_vnops.c ====

@@ -371,7 +371,7 @@
 			flags |= LK_INTERLOCK;
 		}
 		lkp = vp->v_vnlock;
-		result = _lockmgr(lkp, flags, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line);
+		result = lockmgr(lkp, flags, VI_MTX(vp), ap->a_td);
 		if (lkp == vp->v_vnlock || result != 0)
 			break;
 		/*
@@ -382,7 +382,7 @@
 		 * right lock. Release it, and try to get the
 		 * new lock.
 		 */
-		(void) _lockmgr(lkp, LK_RELEASE, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line);
+		(void) lockmgr(lkp, LK_RELEASE, VI_MTX(vp), ap->a_td);
 		if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
 			flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
 		flags &= ~LK_INTERLOCK;