Date: Tue, 9 Nov 2004 14:43:07 GMT
From: John Baldwin <jhb@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 64688 for review
Message-ID: <200411091443.iA9Eh7OJ020152@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=64688

Change 64688 by jhb@jhb_slimer on 2004/11/09 14:42:52

	Revert most of the changes here to get a much simpler overall
	diff that uses the 386 versions with no membars for all UP
	kernels and only uses hard barriers for SMP kernels.

Affected files ...

.. //depot/projects/smpng/sys/i386/include/atomic.h#17 edit

Differences ...

==== //depot/projects/smpng/sys/i386/include/atomic.h#17 (text+ko) ====

@@ -69,7 +69,7 @@
 
 int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
 
-#define ATOMIC_STORE_LOAD(TYPE, LOP) \
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
 u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
 void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -172,15 +172,16 @@
 
 #if defined(__GNUC__) || defined(__INTEL_COMPILER)
 
-#if defined(I386_CPU)
+#if !defined(SMP)
 
 /*
  * We assume that a = b will do atomic loads and stores.  However, on a
  * PentiumPro or higher, reads may pass writes, so for that case we have
- * to use a serializing instruction (i.e. with LOCK) to do the load.  For
- * the 386 case we can use a simple read since 386s don't support SMP.
+ * to use a serializing instruction (i.e. with LOCK) to do the load in
+ * SMP kernels.  For UP kernels, however, the cache of the single processor
+ * is always consistent, so we don't need any memory barriers.
  */
-#define ATOMIC_STORE_LOAD(TYPE, LOP) \
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
 static __inline u_##TYPE \
 atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
 { \
@@ -195,9 +196,9 @@
 } \
 struct __hack
 
-#else /* !defined(I386_CPU) */
+#else /* defined(SMP) */
 
-#define ATOMIC_STORE_LOAD(TYPE, LOP) \
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
 static __inline u_##TYPE \
 atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
 { \
@@ -211,21 +212,26 @@
 	return (res); \
 } \
 \
+/* \
+ * The XCHG instruction asserts LOCK automagically. \
+ */ \
 static __inline void \
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 { \
-	__asm __volatile("" : : : "memory"); \
-	*p = v; \
+	__asm __volatile(SOP \
+	: "+m" (*p),	/* 0 */ \
+	  "+r" (v)	/* 1 */ \
+	: : "memory"); \
 } \
 struct __hack
 
-#endif /* defined(I386_CPU) */
+#endif /* !defined(SMP) */
 
 #else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */
 
 extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);
 
-#define ATOMIC_STORE_LOAD(TYPE, LOP) \
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
 extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
 extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -253,10 +259,10 @@
 ATOMIC_ASM(add, long, "addl %1,%0", "ir", v);
 ATOMIC_ASM(subtract, long, "subl %1,%0", "ir", v);
 
-ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1");
-ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1");
-ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1");
-ATOMIC_STORE_LOAD(long, "cmpxchgl %0,%1");
+ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
+ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
+ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
+ATOMIC_STORE_LOAD(long, "cmpxchgl %0,%1",  "xchgl %1,%0");
 
 #undef ATOMIC_ASM
 #undef ATOMIC_STORE_LOAD
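For readers following along, here is a minimal standalone sketch of what the
two atomic_store_rel_int variants expand to after this change, assuming GCC
inline assembly on i386.  The function bodies are lifted from the diff above;
the ILLUSTRATION_SMP macro, the flag variable, and the main() driver are
hypothetical additions for illustration only and are not part of the header.

/*
 * store_rel_sketch.c - sketch of the UP vs. SMP atomic_store_rel_int
 * expansions from the diff above, assuming GCC on i386.  Build with
 * -DILLUSTRATION_SMP (a made-up flag standing in for the kernel's SMP
 * option) to get the xchgl variant.
 */
#include <stdio.h>

typedef unsigned int u_int;

#ifdef ILLUSTRATION_SMP
/*
 * SMP case: XCHG with a memory operand asserts LOCK implicitly, so the
 * store is a hard barrier and all earlier stores are globally visible
 * before *p changes.
 */
static __inline void
atomic_store_rel_int(volatile u_int *p, u_int v)
{
	__asm __volatile("xchgl %1,%0"
	    : "+m" (*p),	/* 0 */
	      "+r" (v)		/* 1 */
	    : : "memory");
}
#else
/*
 * UP case: the single processor's cache is always consistent with
 * itself, so a plain store suffices; the empty asm with a "memory"
 * clobber is only a compiler barrier, not a hardware one.
 */
static __inline void
atomic_store_rel_int(volatile u_int *p, u_int v)
{
	__asm __volatile("" : : : "memory");
	*p = v;
}
#endif

static volatile u_int flag;

int
main(void)
{
	atomic_store_rel_int(&flag, 1);
	printf("flag = %u\n", flag);
	return (0);
}

This shows the point of the revert: UP kernels pay only for a compiler
barrier on the release store, while SMP kernels get the hardware barrier
from the implicit LOCK of xchgl rather than from an explicit lock prefix.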