Date: Wed, 30 Mar 2005 15:17:41 GMT
From: John Baldwin <jhb@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 74108 for review
Message-ID: <200503301517.j2UFHfQp041996@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=74108

Change 74108 by jhb@jhb_slimer on 2005/03/30 15:17:00

	Revert the changes to atomic_ptr ops.  They need to operate on
	uintptr_t's instead because gcc is applying the volatile to what
	mtx_lock points to rather than mtx_lock's value when mtx_lock is
	a void *.

Affected files ...

.. //depot/projects/smpng/sys/alpha/include/atomic.h#15 edit
.. //depot/projects/smpng/sys/amd64/include/atomic.h#11 edit
.. //depot/projects/smpng/sys/ia64/include/atomic.h#6 edit
.. //depot/projects/smpng/sys/kern/kern_fork.c#92 edit
.. //depot/projects/smpng/sys/kern/kern_mutex.c#93 edit
.. //depot/projects/smpng/sys/kern/sched_4bsd.c#47 edit
.. //depot/projects/smpng/sys/kern/sched_ule.c#54 edit
.. //depot/projects/smpng/sys/kern/subr_witness.c#129 edit
.. //depot/projects/smpng/sys/powerpc/include/atomic.h#9 edit
.. //depot/projects/smpng/sys/sparc64/include/atomic.h#9 edit
.. //depot/projects/smpng/sys/sys/_mutex.h#14 edit
.. //depot/projects/smpng/sys/sys/mutex.h#50 edit

Differences ...

==== //depot/projects/smpng/sys/alpha/include/atomic.h#15 (text+ko) ====

@@ -381,7 +381,7 @@
 #define	atomic_cmpset_long	atomic_cmpset_64
 
 static __inline int
-atomic_cmpset_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
 {
 
 	return (atomic_cmpset_long((volatile u_long *)dst, (u_long)exp,
@@ -430,7 +430,7 @@
 #define	atomic_cmpset_rel_long	atomic_cmpset_rel_64
 
 static __inline int
-atomic_cmpset_acq_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
 {
 
 	return (atomic_cmpset_acq_long((volatile u_long *)dst, (u_long)exp,
@@ -438,7 +438,7 @@
 }
 
 static __inline int
-atomic_cmpset_rel_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
 {
 
 	return (atomic_cmpset_rel_long((volatile u_long *)dst, (u_long)exp,
@@ -446,32 +446,32 @@
 }
 
 static __inline void *
-atomic_load_acq_ptr(volatile void **p)
+atomic_load_acq_ptr(volatile void *p)
 {
 	return (void *)atomic_load_acq_long((volatile u_long *)p);
 }
 
 static __inline void
-atomic_store_rel_ptr(volatile void **p, void *v)
+atomic_store_rel_ptr(volatile void *p, void *v)
 {
 	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
 }
 
 #define	ATOMIC_PTR(NAME)				\
 static __inline void					\
-atomic_##NAME##_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_long((volatile u_long *)p, v);	\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_acq_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_acq_long((volatile u_long *)p, v);\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_rel_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_rel_long((volatile u_long *)p, v);\
 }

==== //depot/projects/smpng/sys/amd64/include/atomic.h#11 (text+ko) ====

@@ -356,26 +356,26 @@
 }
 
 static __inline void
-atomic_store_rel_ptr(volatile void **p, void *v)
+atomic_store_rel_ptr(volatile void *p, void *v)
 {
 	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
 }
 
 #define	ATOMIC_PTR(NAME)				\
 static __inline void					\
-atomic_##NAME##_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_long((volatile u_long *)p, v);	\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_acq_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_acq_long((volatile u_long *)p, v);\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_rel_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_rel_long((volatile u_long *)p, v);\
 }

==== //depot/projects/smpng/sys/ia64/include/atomic.h#6 (text+ko) ====

@@ -301,7 +301,7 @@
 #define	atomic_cmpset_rel_long	atomic_cmpset_rel_64
 
 static __inline int
-atomic_cmpset_acq_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
 {
 	int ret;
 	ret = atomic_cmpset_acq_long((volatile u_long *)dst, (u_long)exp,
@@ -310,7 +310,7 @@
 }
 
 static __inline int
-atomic_cmpset_rel_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
 {
 	int ret;
 	ret = atomic_cmpset_rel_long((volatile u_long *)dst, (u_long)exp,
@@ -321,32 +321,32 @@
 #define	atomic_cmpset_ptr	atomic_cmpset_acq_ptr
 
 static __inline void *
-atomic_load_acq_ptr(volatile void **p)
+atomic_load_acq_ptr(volatile void *p)
 {
 	return ((void *)atomic_load_acq_long((volatile u_long *)p));
 }
 
 static __inline void
-atomic_store_rel_ptr(volatile void **p, void *v)
+atomic_store_rel_ptr(volatile void *p, void *v)
 {
 	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
 }
 
 #define	ATOMIC_PTR(NAME)				\
 static __inline void					\
-atomic_##NAME##_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_long((volatile u_long *)p, v);	\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_acq_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_acq_long((volatile u_long *)p, v);	\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_rel_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_rel_long((volatile u_long *)p, v);	\
 }

==== //depot/projects/smpng/sys/kern/kern_fork.c#92 (text+ko) ====

@@ -762,7 +762,7 @@
 	td->td_oncpu = PCPU_GET(cpuid);
 	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
-	sched_lock.mtx_lock = td;
+	sched_lock.mtx_lock = (uintptr_t)td;
 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 	cpu_critical_fork_exit();
 	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",

==== //depot/projects/smpng/sys/kern/kern_mutex.c#93 (text+ko) ====

@@ -86,10 +86,10 @@
 /*
  * Internal utility macros.
  */
-#define mtx_unowned(m)	((m)->mtx_lock == (void *)MTX_UNOWNED)
+#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
 
 #define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
-	: (struct thread *)((uintptr_t)(m)->mtx_lock & ~MTX_FLAGMASK))
+	: (struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
 
 /*
  * Lock classes for sleep and spin mutexes.
@@ -473,7 +473,7 @@
 		atomic_add_int(&m->mtx_contest_holding, 1);
 #endif
 		turnstile_lock(&m->mtx_object);
-		v = (uintptr_t)m->mtx_lock;
+		v = m->mtx_lock;
 
 		/*
 		 * Check if the lock has been released while spinning for
@@ -609,7 +609,7 @@
 			DELAY(1);
 		else if (!kdb_active) {
 			printf("spin lock %s held by %p for > 5 seconds\n",
-			    m->mtx_object.lo_name, m->mtx_lock);
+			    m->mtx_object.lo_name, (void *)m->mtx_lock);
 #ifdef WITNESS
 			witness_display_spinlock(&m->mtx_object,
 			    mtx_owner(m));
@@ -683,7 +683,7 @@
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
 	} else {
-		m->mtx_lock = (void *)MTX_CONTESTED;
+		m->mtx_lock = MTX_CONTESTED;
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
 			    m);
@@ -861,7 +861,7 @@
 	if (opts & MTX_DUPOK)
 		lock->lo_flags |= LO_DUPOK;
 
-	m->mtx_lock = (void *)MTX_UNOWNED;
+	m->mtx_lock = MTX_UNOWNED;
 
 	LOCK_LOG_INIT(lock, opts);
 
@@ -883,7 +883,7 @@
 	if (!mtx_owned(m))
 		MPASS(mtx_unowned(m));
 	else {
-		MPASS(((uintptr_t)m->mtx_lock & (MTX_FLAGMASK)) == 0);
+		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
 
 		/* Tell witness this isn't locked to make it happy. */
 		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,

==== //depot/projects/smpng/sys/kern/sched_4bsd.c#47 (text+ko) ====

@@ -961,7 +961,7 @@
 
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_lock = td;
+	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
 }
 

==== //depot/projects/smpng/sys/kern/sched_ule.c#54 (text+ko) ====

@@ -1393,7 +1393,7 @@
 	newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_lock = td;
+	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
 }
 

==== //depot/projects/smpng/sys/kern/subr_witness.c#129 (text+ko) ====

@@ -407,7 +407,7 @@
 	  LO_INITIALIZED,	/* mtx_object.lo_flags */
 	  { NULL, NULL },	/* mtx_object.lo_list */
 	  NULL },		/* mtx_object.lo_witness */
-	(void *)MTX_UNOWNED, 0	/* mtx_lock, mtx_recurse */
+	MTX_UNOWNED, 0		/* mtx_lock, mtx_recurse */
 };
 
 /*

==== //depot/projects/smpng/sys/powerpc/include/atomic.h#9 (text+ko) ====

@@ -406,7 +406,7 @@
 #endif	/* 0 */
 
 static __inline int
-atomic_cmpset_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
 {
 
 	return (atomic_cmpset_32((volatile uint32_t *)dst, (uint32_t)exp,
@@ -437,7 +437,7 @@
 #define	atomic_cmpset_rel_long	atomic_cmpset_rel_32
 
 static __inline int
-atomic_cmpset_acq_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
 {
 
 	return (atomic_cmpset_acq_32((volatile uint32_t *)dst,
@@ -445,7 +445,7 @@
 }
 
 static __inline int
-atomic_cmpset_rel_ptr(volatile void **dst, void *exp, void *src)
+atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
 {
 
 	return (atomic_cmpset_rel_32((volatile uint32_t *)dst,
@@ -453,14 +453,14 @@
 }
 
 static __inline void *
-atomic_load_acq_ptr(volatile void **p)
+atomic_load_acq_ptr(volatile void *p)
 {
 
 	return (void *)atomic_load_acq_32((volatile uint32_t *)p);
 }
 
 static __inline void
-atomic_store_rel_ptr(volatile void **p, void *v)
+atomic_store_rel_ptr(volatile void *p, void *v)
 {
 
 	atomic_store_rel_32((volatile uint32_t *)p, (uint32_t)v);
@@ -468,19 +468,19 @@
 
 #define	ATOMIC_PTR(NAME)				\
 static __inline void					\
-atomic_##NAME##_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_32((volatile uint32_t *)p, v);	\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_acq_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_acq_32((volatile uint32_t *)p, v);	\
 }							\
 							\
 static __inline void					\
-atomic_##NAME##_rel_ptr(volatile void **p, uintptr_t v)	\
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
 {							\
 	atomic_##NAME##_rel_32((volatile uint32_t *)p, v);	\
 }

==== //depot/projects/smpng/sys/sparc64/include/atomic.h#9 (text+ko) ====

@@ -278,7 +278,7 @@
 ATOMIC_GEN(long, u_long *, u_long, u_long, 64);
 ATOMIC_GEN(64, uint64_t *, uint64_t, uint64_t, 64);
 
-ATOMIC_GEN(ptr, void **, void *, uintptr_t, 64);
+ATOMIC_GEN(ptr, void *, void *, uintptr_t, 64);
 
 #undef ATOMIC_GEN
 #undef atomic_cas

==== //depot/projects/smpng/sys/sys/_mutex.h#14 (text+ko) ====

@@ -36,7 +36,7 @@
  */
 struct mtx {
 	struct lock_object	mtx_object;	/* Common lock properties. */
-	volatile void *		mtx_lock;	/* Owner and flags. */
+	volatile uintptr_t	mtx_lock;	/* Owner and flags. */
 	u_int			mtx_recurse;	/* Number of recursive holds. */
 
 #ifdef MUTEX_PROFILING

==== //depot/projects/smpng/sys/sys/mutex.h#50 (text+ko) ====

@@ -169,7 +169,7 @@
 									\
 	critical_enter();						\
 	if (!_obtain_lock((mp), _tid)) {				\
-		if ((mp)->mtx_lock == _tid)				\
+		if ((mp)->mtx_lock == (uintptr_t)_tid)			\
			(mp)->mtx_recurse++;				\
 		else							\
 			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
@@ -180,12 +180,11 @@
 	struct thread *_tid = (tid);					\
 									\
 	critical_enter();						\
-	if ((mp)->mtx_lock == _tid)					\
+	if ((mp)->mtx_lock == (uintptr_t)_tid)				\
 		(mp)->mtx_recurse++;					\
 	else {								\
-		KASSERT((mp)->mtx_lock == (void *)MTX_UNOWNED,		\
-		    ("corrupt spinlock"));				\
-		(mp)->mtx_lock = _tid;					\
+		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
+		(mp)->mtx_lock = (uintptr_t)_tid;			\
 	}								\
 } while (0)
 #endif /* SMP */
@@ -226,7 +225,7 @@
 	if (mtx_recursed((mp)))						\
 		(mp)->mtx_recurse--;					\
 	else								\
-		(mp)->mtx_lock = (void *)MTX_UNOWNED;			\
+		(mp)->mtx_lock = MTX_UNOWNED;				\
 	critical_exit();						\
 } while (0)
 #endif /* SMP */
@@ -321,7 +320,7 @@
 
 #define mtx_initialized(m)	((m)->mtx_object.lo_flags & LO_INITIALIZED)
 
-#define mtx_owned(m)	(((uintptr_t)(m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)
+#define mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)
 
 #define mtx_recursed(m)	((m)->mtx_recurse != 0)
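For anyone unfamiliar with the pitfall the change description refers to: in a
C declaration, "volatile void *p" qualifies the data p points at, not p
itself, so the compiler is free to cache the pointer value in a register
across accesses.  A lock word that other CPUs can change needs the qualifier
on the object itself, which "volatile uintptr_t" provides.  A minimal
standalone sketch of the difference (hypothetical struct and function names,
not code from the tree):

	#include <stdint.h>

	struct bad_mtx {
		/* Pointer TO volatile storage; the mtx_lock member
		 * itself is NOT volatile, so loads of it may legally
		 * be hoisted out of a loop and cached. */
		volatile void *mtx_lock;
	};

	struct good_mtx {
		/* The lock word itself is volatile: every access in
		 * the source is a real load or store at runtime. */
		volatile uintptr_t mtx_lock;
	};

	/* With good_mtx, each iteration re-reads mtx_lock from memory;
	 * with the bad_mtx layout a compiler could turn this into an
	 * infinite loop on a single cached value. */
	static void
	spin_while_owned(struct good_mtx *m, uintptr_t unowned)
	{
		while (m->mtx_lock != unowned)
			;	/* spin */
	}

Note that "void * volatile" would also make the pointer itself volatile, but
an integer lock word has the further advantage that the flag bits can be
masked directly (as in the mtx_owned() and mtx_unowned() macros above)
instead of casting through uintptr_t at every use.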