Date:        Tue, 12 Sep 2000 23:13:25 +0100 (BST)
From:        Doug Rabson <dfr@nlsystems.com>
To:          John Baldwin <jhb@pike.osd.bsdi.com>
Cc:          alpha@freebsd.org
Subject:     Re: Mutex's aren't recursing
Message-ID:  <Pine.BSF.4.21.0009122312550.86297-100000@salmon.nlsystems.com>
In-Reply-To: <200009122143.OAA08606@pike.osd.bsdi.com>
On Tue, 12 Sep 2000, John Baldwin wrote:

> Doug Rabson wrote:
> > I sent you some mail yesterday about this.  I got the constraints
> > wrong for the inline assembler in atomic_cmpset.  I disassembled
> > some of the code in interrupt.c which is trying to enter the mutex.
> > You can clearly see that it is misusing t1 as both an input and
> > output to the inline.
>
> Thanks!  I'll try this out.  Also, I just discovered that swtch.s
> didn't import sched_lock, so cpu_switch was modifying a private copy
> of sched_lock instead of the global variable (I think).  I'll have
> some feedback on this in just a sec.

BTW, this is my current version of atomic.h:

Index: atomic.h
===================================================================
RCS file: /home/ncvs/src/sys/alpha/include/atomic.h,v
retrieving revision 1.3
diff -u -r1.3 atomic.h
--- atomic.h	2000/09/06 11:20:53	1.3
+++ atomic.h	2000/09/12 20:46:23
@@ -44,15 +44,149 @@
 void	atomic_add_16(volatile u_int16_t *, u_int16_t);
 void	atomic_subtract_16(volatile u_int16_t *, u_int16_t);
 
-void	atomic_set_32(volatile u_int32_t *, u_int32_t);
-void	atomic_clear_32(volatile u_int32_t *, u_int32_t);
-void	atomic_add_32(volatile u_int32_t *, u_int32_t);
-void	atomic_subtract_32(volatile u_int32_t *, u_int32_t);
-
-void	atomic_set_64(volatile u_int64_t *, u_int64_t);
-void	atomic_clear_64(volatile u_int64_t *, u_int64_t);
-void	atomic_add_64(volatile u_int64_t *, u_int64_t);
-void	atomic_subtract_64(volatile u_int64_t *, u_int64_t);
+static __inline void atomic_set_32(volatile u_int32_t *p, u_int32_t v)
+{
+	u_int32_t temp;
+
+	__asm __volatile (
+		"1:\tldl_l %0, %2\n\t"		/* load old value */
+		"bis %0, %3, %0\n\t"		/* calculate new value */
+		"stl_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
+
+static __inline void atomic_clear_32(volatile u_int32_t *p, u_int32_t v)
+{
+	u_int32_t temp;
+
+	__asm __volatile (
+		"1:\tldl_l %0, %2\n\t"		/* load old value */
+		"bic %0, %3, %0\n\t"		/* calculate new value */
+		"stl_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
+
+static __inline void atomic_add_32(volatile u_int32_t *p, u_int32_t v)
+{
+	u_int32_t temp;
+
+	__asm __volatile (
+		"1:\tldl_l %0, %2\n\t"		/* load old value */
+		"addl %0, %3, %0\n\t"		/* calculate new value */
+		"stl_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
+
+static __inline void atomic_subtract_32(volatile u_int32_t *p, u_int32_t v)
+{
+	u_int32_t temp;
+
+	__asm __volatile (
+		"1:\tldl_l %0, %2\n\t"		/* load old value */
+		"subl %0, %3, %0\n\t"		/* calculate new value */
+		"stl_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
+
+static __inline void atomic_set_64(volatile u_int64_t *p, u_int64_t v)
+{
+	u_int64_t temp;
+
+	__asm __volatile (
+		"1:\tldq_l %0, %2\n\t"		/* load old value */
+		"bis %0, %3, %0\n\t"		/* calculate new value */
+		"stq_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
+
+static __inline void atomic_clear_64(volatile u_int64_t *p, u_int64_t v)
+{
+	u_int64_t temp;
+
+	__asm __volatile (
+		"1:\tldq_l %0, %2\n\t"		/* load old value */
+		"bic %0, %3, %0\n\t"		/* calculate new value */
+		"stq_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
+
+static __inline void atomic_add_64(volatile u_int64_t *p, u_int64_t v)
+{
+	u_int64_t temp;
+
+	__asm __volatile (
+		"1:\tldq_l %0, %2\n\t"		/* load old value */
+		"addq %0, %3, %0\n\t"		/* calculate new value */
+		"stq_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
+
+static __inline void atomic_subtract_64(volatile u_int64_t *p, u_int64_t v)
+{
+	u_int64_t temp;
+
+	__asm __volatile (
+		"1:\tldq_l %0, %2\n\t"		/* load old value */
+		"subq %0, %3, %0\n\t"		/* calculate new value */
+		"stq_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 2f\n\t"		/* spin if failed */
+		"mb\n\t"			/* drain to memory */
+		".section .text3,\"ax\"\n"	/* improve branch prediction */
+		"2:\tbr 1b\n"			/* try again */
+		".previous\n"
+		: "=&r" (temp), "=m" (*p)
+		: "m" (*p), "r" (v)
+		: "memory");
+}
 
 #define atomic_set_char		atomic_set_8
 #define atomic_clear_char	atomic_clear_8
@@ -82,20 +216,20 @@
 static __inline u_int32_t
 atomic_cmpset_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
 {
-	u_int32_t ret, temp;
+	u_int32_t ret;
 
 	__asm __volatile (
-		"1:\tldl_l %1, %5\n\t"		/* load old value */
-		"cmpeq %1, %3, %0\n\t"		/* compare */
+		"1:\tldl_l %0, %4\n\t"		/* load old value */
+		"cmpeq %0, %2, %0\n\t"		/* compare */
 		"beq %0, 2f\n\t"		/* exit if not equal */
-		"mov %4, %1\n\t"		/* value to store */
-		"stl_c %1, %2\n\t"		/* attempt to store */
-		"beq %1, 3f\n\t"		/* if it failed, spin */
+		"mov %3, %0\n\t"		/* value to store */
+		"stl_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 3f\n\t"		/* if it failed, spin */
 		"2:\n"				/* done */
 		".section .text3,\"ax\"\n"	/* improve branch prediction */
 		"3:\tbr 1b\n"			/* try again */
 		".previous\n"
-		: "=&r" (ret), "=r" (temp), "=m" (*p)
+		: "=&r" (ret), "=m" (*p)
 		: "r" (cmpval), "r" (newval), "m" (*p)
 		: "memory");
 
@@ -110,20 +244,20 @@
 static __inline u_int64_t
 atomic_cmpset_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
 {
-	u_int64_t ret, temp;
+	u_int64_t ret;
 
 	__asm __volatile (
-		"1:\tldq_l %1, %5\n\t"		/* load old value */
-		"cmpeq %1, %3, %0\n\t"		/* compare */
+		"1:\tldq_l %0, %4\n\t"		/* load old value */
+		"cmpeq %0, %2, %0\n\t"		/* compare */
 		"beq %0, 2f\n\t"		/* exit if not equal */
-		"mov %4, %1\n\t"		/* value to store */
-		"stq_c %1, %2\n\t"		/* attempt to store */
-		"beq %1, 3f\n\t"		/* if it failed, spin */
+		"mov %3, %0\n\t"		/* value to store */
+		"stq_c %0, %1\n\t"		/* attempt to store */
+		"beq %0, 3f\n\t"		/* if it failed, spin */
 		"2:\n"				/* done */
 		".section .text3,\"ax\"\n"	/* improve branch prediction */
 		"3:\tbr 1b\n"			/* try again */
 		".previous\n"
-		: "=&r" (ret), "=r" (temp), "=m" (*p)
+		: "=&r" (ret), "=m" (*p)
 		: "r" (cmpval), "r" (newval), "m" (*p)
 		: "memory");
 

-- 
Doug Rabson				Mail:  dfr@nlsystems.com
Nonlinear Systems Ltd.			Phone: +44 20 8348 3944