Date: Tue, 2 Jun 2009 13:03:35 +0000 (UTC)
From: Attilio Rao <attilio@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r193307 - in head/sys: kern sys ufs/ffs
Message-ID: <200906021303.n52D3ZwD016385@svn.freebsd.org>
Author: attilio
Date: Tue Jun  2 13:03:35 2009
New Revision: 193307
URL: http://svn.freebsd.org/changeset/base/193307

Log:
  Handle lock recursion differently by always checking against LO_RECURSABLE
  instead of each lock class's own flag.

  Tested by:	pho

Modified:
  head/sys/kern/kern_lock.c
  head/sys/kern/kern_rwlock.c
  head/sys/kern/kern_sx.c
  head/sys/sys/vnode.h
  head/sys/ufs/ffs/ffs_softdep.c

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Tue Jun  2 12:35:04 2009	(r193306)
+++ head/sys/kern/kern_lock.c	Tue Jun  2 13:03:35 2009	(r193307)
@@ -51,8 +51,7 @@ __FBSDID("$FreeBSD$");
 #include <ddb/ddb.h>
 #endif
 
-CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
-    (LK_CANRECURSE | LK_NOSHARE));
+CTASSERT((LK_NOSHARE & LO_CLASSFLAGS) == LK_NOSHARE);
 
 #define	SQ_EXCLUSIVE_QUEUE	0
 #define	SQ_SHARED_QUEUE		1
@@ -316,7 +315,9 @@ lockinit(struct lock *lk, int pri, const
 
 	MPASS((flags & ~LK_INIT_MASK) == 0);
 
-	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
+	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
+	if (flags & LK_CANRECURSE)
+		iflags |= LO_RECURSABLE;
 	if ((flags & LK_NODUP) == 0)
 		iflags |= LO_DUPOK;
 	if (flags & LK_NOPROFILE)
@@ -325,7 +326,7 @@ lockinit(struct lock *lk, int pri, const
 		iflags |= LO_WITNESS;
 	if (flags & LK_QUIET)
 		iflags |= LO_QUIET;
-	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);
+	iflags |= flags & LK_NOSHARE;
 
 	lk->lk_lock = LK_UNLOCKED;
 	lk->lk_recurse = 0;
@@ -530,7 +531,7 @@ __lockmgr_args(struct lock *lk, u_int fl
 		 */
 		if (lockmgr_xlocked(lk)) {
 			if ((flags & LK_CANRECURSE) == 0 &&
-			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {
+			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
 
 				/*
 				 * If the lock is expected to not panic just

Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c	Tue Jun  2 12:35:04 2009	(r193306)
+++ head/sys/kern/kern_rwlock.c	Tue Jun  2 13:03:35 2009	(r193307)
@@ -51,8 +51,6 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/cpu.h>
 
-CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);
-
 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
 #define	ADAPTIVE_RWLOCKS
 #endif
@@ -177,16 +175,17 @@ rw_init_flags(struct rwlock *rw, const c
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
 	    RW_RECURSE)) == 0);
 
-	flags = LO_UPGRADABLE | LO_RECURSABLE;
+	flags = LO_UPGRADABLE;
 	if (opts & RW_DUPOK)
 		flags |= LO_DUPOK;
 	if (opts & RW_NOPROFILE)
 		flags |= LO_NOPROFILE;
 	if (!(opts & RW_NOWITNESS))
 		flags |= LO_WITNESS;
+	if (opts & RW_RECURSE)
+		flags |= LO_RECURSABLE;
 	if (opts & RW_QUIET)
 		flags |= LO_QUIET;
-	flags |= opts & RW_RECURSE;
 
 	rw->rw_lock = RW_UNLOCKED;
 	rw->rw_recurse = 0;
@@ -249,7 +248,8 @@ _rw_try_wlock(struct rwlock *rw, const c
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
-	if (rw_wlocked(rw) && (rw->lock_object.lo_flags & RW_RECURSE) != 0) {
+	if (rw_wlocked(rw) &&
+	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
 		rw->rw_recurse++;
 		rval = 1;
 	} else
@@ -646,7 +646,7 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
 #endif
 
 	if (rw_wlocked(rw)) {
-		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
+		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
 		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
 		    __func__, rw->lock_object.lo_name, file, line));
 		rw->rw_recurse++;

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Tue Jun  2 12:35:04 2009	(r193306)
+++ head/sys/kern/kern_sx.c	Tue Jun  2 13:03:35 2009	(r193307)
@@ -66,8 +66,7 @@ __FBSDID("$FreeBSD$");
 #define	ADAPTIVE_SX
 #endif
 
-CTASSERT(((SX_NOADAPTIVE | SX_RECURSE) & LO_CLASSFLAGS) ==
-    (SX_NOADAPTIVE | SX_RECURSE));
+CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
 
 /* Handy macros for sleep queues. */
 #define	SQ_EXCLUSIVE_QUEUE	0
@@ -207,17 +206,19 @@ sx_init_flags(struct sx *sx, const char
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
 
-	flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
+	flags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)
 		flags |= LO_DUPOK;
 	if (opts & SX_NOPROFILE)
 		flags |= LO_NOPROFILE;
 	if (!(opts & SX_NOWITNESS))
 		flags |= LO_WITNESS;
+	if (opts & SX_RECURSE)
+		flags |= LO_RECURSABLE;
 	if (opts & SX_QUIET)
 		flags |= LO_QUIET;
 
-	flags |= opts & (SX_NOADAPTIVE | SX_RECURSE);
+	flags |= opts & SX_NOADAPTIVE;
 	sx->sx_lock = SX_LOCK_UNLOCKED;
 	sx->sx_recurse = 0;
 	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
@@ -305,7 +306,8 @@ _sx_try_xlock(struct sx *sx, const char
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
-	if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
+	if (sx_xlocked(sx) &&
+	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
 		sx->sx_recurse++;
 		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
 		rval = 1;
@@ -479,7 +481,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
 
 	/* If we already hold an exclusive lock, then recurse. */
 	if (sx_xlocked(sx)) {
-		KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
+		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
 	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
 		    sx->lock_object.lo_name, file, line));
 		sx->sx_recurse++;

Modified: head/sys/sys/vnode.h
==============================================================================
--- head/sys/sys/vnode.h	Tue Jun  2 12:35:04 2009	(r193306)
+++ head/sys/sys/vnode.h	Tue Jun  2 13:03:35 2009	(r193307)
@@ -419,7 +419,7 @@ extern struct vattr va_null;	/* predefi
 #define	VI_MTX(vp)	(&(vp)->v_interlock)
 
 #define	VN_LOCK_AREC(vp)						\
-	((vp)->v_vnlock->lock_object.lo_flags |= LK_CANRECURSE)
+	((vp)->v_vnlock->lock_object.lo_flags |= LO_RECURSABLE)
 #define	VN_LOCK_ASHARE(vp)						\
 	((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)

Modified: head/sys/ufs/ffs/ffs_softdep.c
==============================================================================
--- head/sys/ufs/ffs/ffs_softdep.c	Tue Jun  2 12:35:04 2009	(r193306)
+++ head/sys/ufs/ffs/ffs_softdep.c	Tue Jun  2 13:03:35 2009	(r193307)
@@ -556,8 +556,8 @@ MTX_SYSINIT(softdep_lock, &lk, "Softdep 
 #define ACQUIRE_LOCK(lk)		mtx_lock(lk)
 #define FREE_LOCK(lk)			mtx_unlock(lk)
 
-#define	BUF_AREC(bp)	((bp)->b_lock.lock_object.lo_flags |= LK_CANRECURSE)
-#define	BUF_NOREC(bp)	((bp)->b_lock.lock_object.lo_flags &= ~LK_CANRECURSE)
+#define	BUF_AREC(bp)	((bp)->b_lock.lock_object.lo_flags |= LO_RECURSABLE)
+#define	BUF_NOREC(bp)	((bp)->b_lock.lock_object.lo_flags &= ~LO_RECURSABLE)
 
 /*
  * Worklist queue management.
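[Editor's note] The pattern of the change, summarized: each lock class used to keep its
own recursion option (LK_CANRECURSE, RW_RECURSE, SX_RECURSE) in lock_object.lo_flags and
test that class flag when recursing; after r193307 the init routines translate the class
option into the generic LO_RECURSABLE bit, and every recursion check tests only
LO_RECURSABLE. Below is a minimal userland sketch of that pattern. It is not kernel code:
the structures, flag values and *_model() helpers are simplified stand-ins patterned after
the real definitions, for illustration only.

/*
 * Minimal userland model of the flag translation introduced in r193307.
 * All names and values here are simplified stand-ins, not the real
 * FreeBSD definitions.
 */
#include <assert.h>
#include <stdio.h>

/* Generic lock_object flag shared by every lock class (stand-in value). */
#define	LO_RECURSABLE	0x00080000

/* Class-specific init option passed by sx(9) consumers (stand-in value). */
#define	SX_RECURSE	0x00000008

struct lock_object {
	unsigned int	lo_flags;
};

struct sx_model {
	struct lock_object	lock_object;
	int			sx_recurse;	/* recursion depth */
	int			sx_xlocked;	/* 1 if exclusively owned */
};

/*
 * Init-time translation: the class option (SX_RECURSE) is converted into
 * the generic LO_RECURSABLE bit instead of being stored verbatim in
 * lo_flags, mirroring what the commit does for lockmgr, rwlock and sx.
 */
static void
sx_init_model(struct sx_model *sx, unsigned int opts)
{
	unsigned int flags = 0;

	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	sx->lock_object.lo_flags = flags;
	sx->sx_recurse = 0;
	sx->sx_xlocked = 0;
}

/*
 * Acquire-time check: recursion is permitted only if the generic
 * LO_RECURSABLE bit is set; the class flag is never consulted again.
 */
static int
sx_try_xlock_model(struct sx_model *sx)
{
	if (sx->sx_xlocked) {
		if ((sx->lock_object.lo_flags & LO_RECURSABLE) == 0)
			return (0);	/* the kernel would refuse/panic here */
		sx->sx_recurse++;
		return (1);
	}
	sx->sx_xlocked = 1;
	return (1);
}

int
main(void)
{
	struct sx_model rec, norec;

	sx_init_model(&rec, SX_RECURSE);
	sx_init_model(&norec, 0);

	assert(sx_try_xlock_model(&rec) == 1);
	assert(sx_try_xlock_model(&rec) == 1);		/* recursion permitted */
	assert(sx_try_xlock_model(&norec) == 1);
	assert(sx_try_xlock_model(&norec) == 0);	/* recursion refused */

	printf("recursable depth: %d, non-recursable depth: %d\n",
	    rec.sx_recurse, norec.sx_recurse);
	return (0);
}

Consumers keep passing the class option as before (e.g. SX_RECURSE to sx_init_flags());
code that toggles recursion after initialization, such as VN_LOCK_AREC() and
BUF_AREC()/BUF_NOREC() in this commit, now sets or clears the generic LO_RECURSABLE bit
instead of a class flag.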