Date: Mon, 28 Mar 2005 22:27:10 GMT
From: John Baldwin <jhb@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 74002 for review
Message-ID: <200503282227.j2SMRA3W079631@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=74002

Change 74002 by jhb@jhb_twclab on 2005/03/28 22:26:38

	Fix the mutex code and panic_thread stuff to be happy with the
	fixed atomic_foo_ptr() prototypes.

Affected files ...

.. //depot/projects/smpng/sys/kern/kern_fork.c#91 edit
.. //depot/projects/smpng/sys/kern/kern_mutex.c#92 edit
.. //depot/projects/smpng/sys/kern/kern_shutdown.c#55 edit
.. //depot/projects/smpng/sys/kern/sched_4bsd.c#46 edit
.. //depot/projects/smpng/sys/kern/sched_ule.c#53 edit
.. //depot/projects/smpng/sys/kern/subr_witness.c#128 edit
.. //depot/projects/smpng/sys/sys/_mutex.h#13 edit
.. //depot/projects/smpng/sys/sys/mutex.h#49 edit

Differences ...

==== //depot/projects/smpng/sys/kern/kern_fork.c#91 (text+ko) ====

@@ -762,7 +762,7 @@
 	td->td_oncpu = PCPU_GET(cpuid);
 	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
-	sched_lock.mtx_lock = (uintptr_t)td;
+	sched_lock.mtx_lock = td;
 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 	cpu_critical_fork_exit();
 	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",

==== //depot/projects/smpng/sys/kern/kern_mutex.c#92 (text+ko) ====

@@ -86,10 +86,10 @@
 /*
  * Internal utility macros.
  */
-#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
+#define	mtx_unowned(m)	((m)->mtx_lock == (void *)MTX_UNOWNED)
 
 #define	mtx_owner(m)	(mtx_unowned((m)) ? NULL \
-	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
+	: (struct thread *)((uintptr_t)(m)->mtx_lock & ~MTX_FLAGMASK))
 
 /*
  * Lock classes for sleep and spin mutexes.
@@ -473,7 +473,7 @@
 		atomic_add_int(&m->mtx_contest_holding, 1);
 #endif
 		turnstile_lock(&m->mtx_object);
-		v = m->mtx_lock;
+		v = (uintptr_t)m->mtx_lock;
 
 		/*
 		 * Check if the lock has been released while spinning for
@@ -519,7 +519,7 @@
 		 * If the current owner of the lock is executing on another
 		 * CPU, spin instead of blocking.
 		 */
-		owner = (struct thread *)(v & MTX_FLAGMASK);
+		owner = (struct thread *)(v & ~MTX_FLAGMASK);
 #ifdef ADAPTIVE_GIANT
 		if (TD_IS_RUNNING(owner)) {
 #else
@@ -600,7 +600,7 @@
 	if (apic_hack)
 		APIC_IPI_SPINWAIT_ENTER();
 #endif
-	while (m->mtx_lock != MTX_UNOWNED) {
+	while (m->mtx_lock != (void *)MTX_UNOWNED) {
 		if (i++ < 10000000) {
 			cpu_spinwait();
 			continue;
@@ -609,7 +609,7 @@
 			DELAY(1);
 		else if (!kdb_active) {
 			printf("spin lock %s held by %p for > 5 seconds\n",
-			    m->mtx_object.lo_name, (void *)m->mtx_lock);
+			    m->mtx_object.lo_name, m->mtx_lock);
 #ifdef WITNESS
 			witness_display_spinlock(&m->mtx_object,
 			    mtx_owner(m));
@@ -683,7 +683,7 @@
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
 	} else {
-		m->mtx_lock = MTX_CONTESTED;
+		m->mtx_lock = (void *)MTX_CONTESTED;
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
 			    m);
@@ -861,7 +861,7 @@
 	if (opts & MTX_DUPOK)
 		lock->lo_flags |= LO_DUPOK;
 
-	m->mtx_lock = MTX_UNOWNED;
+	m->mtx_lock = (void *)MTX_UNOWNED;
 
 	LOCK_LOG_INIT(lock, opts);
 
@@ -883,7 +883,7 @@
 	if (!mtx_owned(m))
 		MPASS(mtx_unowned(m));
 	else {
-		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
+		MPASS(((uintptr_t)m->mtx_lock & (MTX_FLAGMASK)) == 0);
 
 		/* Tell witness this isn't locked to make it happy. */
 		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
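The kern_mutex.c hunks above all follow from one encoding decision:
mtx_lock is now a single pointer-sized word that carries the owning
thread pointer with the MTX_RECURSED/MTX_CONTESTED flags packed into
its two low bits, and MTX_FLAGMASK now names the flag bits themselves,
so the owner is recovered with "& ~MTX_FLAGMASK" where the old code
used "& MTX_FLAGMASK".  A minimal stand-alone sketch of that encoding
(the demo_* names are illustrative, not the kernel's):

	#include <stdint.h>

	struct demo_thread;		/* stand-in for struct thread */

	/* Flag bits live in the two low bits of the lock word. */
	#define	DEMO_RECURSED	0x1
	#define	DEMO_CONTESTED	0x2
	#define	DEMO_FLAGMASK	(DEMO_RECURSED | DEMO_CONTESTED)

	/*
	 * Thread structures are at least 4-byte aligned, so the two
	 * low bits of a valid thread pointer are always zero and are
	 * free to carry flags.  Stripping the flags recovers the owner.
	 */
	static inline struct demo_thread *
	demo_owner(volatile void *lock_word)
	{

		return ((struct demo_thread *)
		    ((uintptr_t)lock_word & ~DEMO_FLAGMASK));
	}

	/* Keeping only the low bits recovers the flags. */
	static inline uintptr_t
	demo_flags(volatile void *lock_word)
	{

		return ((uintptr_t)lock_word & DEMO_FLAGMASK);
	}
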
==== //depot/projects/smpng/sys/kern/kern_shutdown.c#55 (text+ko) ====

@@ -475,7 +475,7 @@
 }
 
 #ifdef SMP
-static struct thread *panic_thread = NULL;
+static volatile void *panic_thread = NULL;
 #endif
 
 /*
@@ -500,8 +500,8 @@
 	 * panic_thread if we are spinning in case the panic on the first
 	 * CPU is canceled.
 	 */
-	if (panic_thread != curthread)
-		while (atomic_cmpset_ptr(&panic_thread, NULL, curthread) == 0)
+	if (panic_thread != td)
+		while (atomic_cmpset_ptr(&panic_thread, NULL, td) == 0)
 			while (panic_thread != NULL)
 				cpu_spinwait();
 #endif

==== //depot/projects/smpng/sys/kern/sched_4bsd.c#46 (text+ko) ====

@@ -961,7 +961,7 @@
 
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_lock = (uintptr_t)td;
+	sched_lock.mtx_lock = td;
 	td->td_oncpu = PCPU_GET(cpuid);
 }

==== //depot/projects/smpng/sys/kern/sched_ule.c#53 (text+ko) ====

@@ -1393,7 +1393,7 @@
 	newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_lock = (uintptr_t)td;
+	sched_lock.mtx_lock = td;
 	td->td_oncpu = PCPU_GET(cpuid);
 }

==== //depot/projects/smpng/sys/kern/subr_witness.c#128 (text+ko) ====

@@ -407,7 +407,7 @@
 	  LO_INITIALIZED,		/* mtx_object.lo_flags */
 	  { NULL, NULL },		/* mtx_object.lo_list */
 	  NULL },			/* mtx_object.lo_witness */
-	MTX_UNOWNED, 0			/* mtx_lock, mtx_recurse */
+	(void *)MTX_UNOWNED, 0		/* mtx_lock, mtx_recurse */
 };

==== //depot/projects/smpng/sys/sys/_mutex.h#13 (text+ko) ====

@@ -36,8 +36,8 @@
  */
 struct mtx {
 	struct lock_object	mtx_object;	/* Common lock properties. */
-	volatile uintptr_t	mtx_lock;	/* Owner and flags. */
-	volatile u_int		mtx_recurse;	/* Number of recursive holds. */
+	volatile void		*mtx_lock;	/* Owner and flags. */
+	u_int			mtx_recurse;	/* Number of recursive holds. */
 
 #ifdef MUTEX_PROFILING
 	/*

==== //depot/projects/smpng/sys/sys/mutex.h#49 (text+ko) ====

@@ -71,7 +71,7 @@
 #define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
 #define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
 #define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
-#define	MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)
+#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED)
 
 #endif	/* _KERNEL */
 
@@ -124,19 +124,19 @@
  * here, if they are not already defined in the machine-dependent mutex.h
  */
 
-/* Actually obtain mtx_lock */
+/* Try to obtain mtx_lock once. */
 #ifndef _obtain_lock
 #define	_obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
 #endif
 
-/* Actually release mtx_lock */
+/* Try to release mtx_lock if it is unrecursed and uncontested. */
 #ifndef _release_lock
 #define	_release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
 #endif
 
-/* Actually release mtx_lock quickly, assuming we own it. */
+/* Release mtx_lock quickly, assuming we own it. */
 #ifndef _release_lock_quick
 #define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
 #endif
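The _obtain_lock()/_release_lock() macros above are each a single
compare-and-set on the pointer-typed lock word: acquisition succeeds
only if the word still holds the MTX_UNOWNED cookie, and release
succeeds only if the word is exactly the owning thread pointer (no low
flag bits set, hence no recursion and no waiters).  A rough userland
approximation, with C11 atomics standing in for the kernel's
atomic_cmpset_{acq,rel}_ptr() and illustrative demo_* names throughout:

	#include <stdatomic.h>
	#include <stdint.h>

	#define	DEMO_UNOWNED	((void *)(uintptr_t)0x4) /* free-lock cookie */

	struct demo_lock {
		_Atomic(void *)	lock_word; /* owner pointer or DEMO_UNOWNED */
	};

	/* One CAS attempt: free cookie -> owned by tid (acquire). */
	static inline int
	demo_obtain_lock(struct demo_lock *lp, void *tid)
	{
		void *expected = DEMO_UNOWNED;

		return (atomic_compare_exchange_strong_explicit(
		    &lp->lock_word, &expected, tid,
		    memory_order_acquire, memory_order_relaxed));
	}

	/*
	 * One CAS attempt: owned by tid -> free cookie (release).
	 * Fails if flag bits were set, i.e. the lock is recursed or
	 * contested and needs the slow unlock path.
	 */
	static inline int
	demo_release_lock(struct demo_lock *lp, void *tid)
	{
		void *expected = tid;

		return (atomic_compare_exchange_strong_explicit(
		    &lp->lock_word, &expected, DEMO_UNOWNED,
		    memory_order_release, memory_order_relaxed));
	}
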
@@ -169,7 +169,7 @@
 									\
 	critical_enter();						\
 	if (!_obtain_lock((mp), _tid)) {				\
-		if ((mp)->mtx_lock == (uintptr_t)_tid)			\
+		if ((mp)->mtx_lock == _tid)				\
 			(mp)->mtx_recurse++;				\
 		else							\
 			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
@@ -180,11 +180,12 @@
 	struct thread *_tid = (tid);					\
 									\
 	critical_enter();						\
-	if ((mp)->mtx_lock == (uintptr_t)_tid)				\
+	if ((mp)->mtx_lock == _tid)					\
 		(mp)->mtx_recurse++;					\
 	else {								\
-		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
-		(mp)->mtx_lock = (uintptr_t)_tid;			\
+		KASSERT((mp)->mtx_lock == (void *)MTX_UNOWNED,		\
+		    ("corrupt spinlock"));				\
+		(mp)->mtx_lock = _tid;					\
 	}								\
 } while (0)
 #endif /* SMP */
@@ -225,7 +226,7 @@
 	if (mtx_recursed((mp)))						\
 		(mp)->mtx_recurse--;					\
 	else								\
-		(mp)->mtx_lock = MTX_UNOWNED;				\
+		(mp)->mtx_lock = (void *)MTX_UNOWNED;			\
 	critical_exit();						\
 } while (0)
 #endif /* SMP */
@@ -320,7 +321,7 @@
 
 #define	mtx_initialized(m)	((m)->mtx_object.lo_flags & LO_INITIALIZED)
 
-#define	mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)
+#define	mtx_owned(m)	(((uintptr_t)(m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread)
 
 #define	mtx_recursed(m)	((m)->mtx_recurse != 0)
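The kern_shutdown.c hunk earlier in the diff is the usual "first CPU
into panic wins" gate: losers spin until panic_thread clears (a
canceled panic on the winning CPU) and then retry the compare-and-set.
A hedged userland sketch of the same pattern, again using C11 atomics
and made-up names in place of atomic_cmpset_ptr() and cpu_spinwait():

	#include <stdatomic.h>
	#include <stddef.h>

	static _Atomic(void *) demo_panic_thread = NULL;

	/*
	 * The first thread to install its pointer wins and proceeds
	 * with the panic; everyone else spins until the slot clears
	 * and then retries.  A repeat call by the winner (a recursive
	 * panic) returns immediately.
	 */
	static void
	demo_panic_gate(void *td)
	{
		void *expected;

		if (atomic_load(&demo_panic_thread) == td)
			return;
		for (;;) {
			expected = NULL;
			if (atomic_compare_exchange_strong(
			    &demo_panic_thread, &expected, td))
				return;
			while (atomic_load(&demo_panic_thread) != NULL)
				;	/* cpu_spinwait() would go here */
		}
	}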