From owner-svn-src-projects@FreeBSD.ORG  Wed Nov  2 21:02:42 2011
Return-Path: <owner-svn-src-projects@FreeBSD.ORG>
Delivered-To: svn-src-projects@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34])
	by hub.freebsd.org (Postfix) with ESMTP id DAE87106566C;
	Wed, 2 Nov 2011 21:02:42 +0000 (UTC)
	(envelope-from cognet@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c])
	by mx1.freebsd.org (Postfix) with ESMTP id CA2488FC0A;
	Wed, 2 Nov 2011 21:02:42 +0000 (UTC)
Received: from svn.freebsd.org (localhost [127.0.0.1])
	by svn.freebsd.org (8.14.4/8.14.4) with ESMTP id pA2L2g8v038261;
	Wed, 2 Nov 2011 21:02:42 GMT
	(envelope-from cognet@svn.freebsd.org)
Received: (from cognet@localhost)
	by svn.freebsd.org (8.14.4/8.14.4/Submit) id pA2L2g4M038259;
	Wed, 2 Nov 2011 21:02:42 GMT
	(envelope-from cognet@svn.freebsd.org)
Message-Id: <201111022102.pA2L2g4M038259@svn.freebsd.org>
From: Olivier Houchard <cognet@FreeBSD.org>
Date: Wed, 2 Nov 2011 21:02:42 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
X-SVN-Group: projects
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc:
Subject: svn commit: r227035 - projects/armv6/sys/arm/include
X-BeenThere: svn-src-projects@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: "SVN commit messages for the src projects tree"
X-List-Received-Date: Wed, 02 Nov 2011 21:02:42 -0000

Author: cognet
Date: Wed Nov  2 21:02:42 2011
New Revision: 227035
URL: http://svn.freebsd.org/changeset/base/227035

Log:
  Crude implementation of the atomic operations for armv6/v7

Modified:
  projects/armv6/sys/arm/include/atomic.h

Modified: projects/armv6/sys/arm/include/atomic.h
==============================================================================
--- projects/armv6/sys/arm/include/atomic.h	Wed Nov  2 20:58:47 2011	(r227034)
+++ projects/armv6/sys/arm/include/atomic.h	Wed Nov  2 21:02:42 2011	(r227035)
@@ -45,11 +45,13 @@
 
 #ifndef _KERNEL
 #include
+#else
+#include
 #endif
 
-#define mb()
-#define wmb()
-#define rmb()
+#define mb()
+#define wmb()
+#define rmb()
 
 #ifndef I32_bit
 #define I32_bit (1 << 7)	/* IRQ disable */
@@ -58,6 +60,118 @@
 #define F32_bit (1 << 6)	/* FIQ disable */
 #endif
 
+/* XXX: Rethink for userland later as those won't be defined */
+#if defined(ARM_ARCH_6) || defined(ARM_ARCH_7)
+
+static __inline void
+atomic_set_32(volatile uint32_t *address, uint32_t setmask)
+{
+	uint32_t tmp = 0, tmp2 = 0;
+
+	__asm __volatile("1: ldrex %0, [%2]\n"
+	    "orr %0, %0, %3\n"
+	    "strex %1, %0, [%2]\n"
+	    "cmp %1, #0\n"
+	    "bne 1b\n"
+	    : "=&r" (tmp), "+r" (tmp2)
+	    , "+r" (address), "+r" (setmask) : : "memory");
+
+}
+
+static __inline void
+atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
+{
+	uint32_t tmp = 0, tmp2 = 0;
+
+	__asm __volatile("1: ldrex %0, [%2]\n"
+	    "bic %0, %0, %3\n"
+	    "strex %1, %0, [%2]\n"
+	    "cmp %1, #0\n"
+	    "bne 1b\n"
+	    : "=&r" (tmp), "+r" (tmp2)
+	    ,"+r" (address), "+r" (setmask) : : "memory");
+}
+
+static __inline u_int32_t
+atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
+{
+	uint32_t ret;
+
+	__asm __volatile("1: ldrex %0, [%1]\n"
+	    "cmp %0, %2\n"
+	    "movne %0, #0\n"
+	    "bne 2f\n"
+	    "strex %0, %3, [%1]\n"
+	    "cmp %0, #0\n"
+	    "bne 1b\n"
+	    "moveq %0, #1\n"
+	    "2:"
+	    : "=&r" (ret)
+	    ,"+r" (p), "+r" (cmpval), "+r" (newval) : : "memory");
+	return (ret);
+}
+
+static __inline void
+atomic_add_32(volatile u_int32_t *p, u_int32_t val)
+{
+	uint32_t tmp = 0, tmp2 = 0;
+
+	__asm __volatile("1: ldrex %0, [%2]\n"
+	    "add %0, %0, %3\n"
+	    "strex %1, %0, [%2]\n"
+	    "cmp %1, #0\n"
+	    "bne 1b\n"
+	    : "=&r" (tmp), "+r" (tmp2)
+	    ,"+r" (p), "+r" (val) : : "memory");
+}
+
+static __inline void
+atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
+{
+	uint32_t tmp = 0, tmp2 = 0;
+
+	__asm __volatile("1: ldrex %0, [%2]\n"
+	    "sub %0, %0, %3\n"
+	    "strex %1, %0, [%2]\n"
+	    "cmp %1, #0\n"
+	    "bne 1b\n"
+	    : "=&r" (tmp), "+r" (tmp2)
+	    ,"+r" (p), "+r" (val) : : "memory");
+}
+
+static __inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
+{
+	uint32_t tmp = 0, tmp2 = 0, ret = 0;
+
+	__asm __volatile("1: ldrex %0, [%3]\n"
+	    "add %1, %0, %4\n"
+	    "strex %2, %1, [%3]\n"
+	    "cmp %2, #0\n"
+	    "bne 1b\n"
+	    : "+r" (ret), "=&r" (tmp), "+r" (tmp2)
+	    ,"+r" (p), "+r" (val) : : "memory");
+	return (ret);
+}
+
+static __inline uint32_t
+atomic_readandclear_32(volatile u_int32_t *p)
+{
+	uint32_t ret, tmp = 0, tmp2 = 0;
+
+	__asm __volatile("1: ldrex %0, [%3]\n"
+	    "mov %1, #0\n"
+	    "strex %2, %1, [%3]\n"
+	    "cmp %2, #0\n"
+	    "bne 1b\n"
+	    : "=r" (ret), "=&r" (tmp), "+r" (tmp2)
+	    ,"+r" (p) : : "memory");
+	return (ret);
+}
+
+
+#else /* < armv6 */
+
 #define __with_interrupts_disabled(expr) \
 do { \
 	u_int cpsr_save, tmp; \
@@ -288,6 +402,20 @@ atomic_fetchadd_32(volatile uint32_t *p,
 
 #endif /* _KERNEL */
 
+
+static __inline uint32_t
+atomic_readandclear_32(volatile u_int32_t *p)
+{
+
+	return (__swp(0, p));
+}
+
+#undef __with_interrupts_disabled
+
+#endif /* _LOCORE */
+
+#endif /* Arch >= v6 */
+
 static __inline int
 atomic_load_32(volatile uint32_t *v)
 {
@@ -301,17 +429,6 @@ atomic_store_32(volatile uint32_t *dst,
 	*dst = src;
 }
 
-static __inline uint32_t
-atomic_readandclear_32(volatile u_int32_t *p)
-{
-
-	return (__swp(0, p));
-}
-
-#undef __with_interrupts_disabled
-
-#endif /* _LOCORE */
-
 #define atomic_add_long(p, v) \
 	atomic_add_32((volatile u_int *)(p), (u_int)(v))
 #define atomic_add_acq_long		atomic_add_long
@@ -385,4 +502,5 @@ atomic_readandclear_32(volatile u_int32_
 #define	atomic_load_acq_32	atomic_load_32
 #define	atomic_store_rel_32	atomic_store_32
 
+
 #endif /* _MACHINE_ATOMIC_H_ */
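For readers who want to see how the new ldrex/strex-based primitives would be
consumed, here is a minimal, illustrative sketch (not part of this commit): a
trivial test-and-set lock built from atomic_cmpset_32() and released with
atomic_store_32().  The toy_spinlock structure and toy_spin_*() names are made
up for the example, and a real lock would additionally need the mb()/wmb()/rmb()
barriers (still empty stubs in this revision) to order accesses around the
lock word.

#include <sys/types.h>
#include <machine/atomic.h>

/* Illustrative only; toy_spinlock and toy_spin_*() are hypothetical names. */
struct toy_spinlock {
	volatile uint32_t locked;		/* 0 = free, 1 = held */
};

static __inline int
toy_spin_trylock(struct toy_spinlock *sl)
{

	/* Succeeds only if the lock word moves 0 -> 1 atomically. */
	return (atomic_cmpset_32(&sl->locked, 0, 1));
}

static __inline void
toy_spin_lock(struct toy_spinlock *sl)
{

	while (atomic_cmpset_32(&sl->locked, 0, 1) == 0)
		;				/* spin until the CAS succeeds */
}

static __inline void
toy_spin_unlock(struct toy_spinlock *sl)
{

	atomic_store_32(&sl->locked, 0);	/* drop the lock word back to 0 */
}

Counters work the same way: atomic_add_32(&cnt, 1) bumps a shared counter, and
atomic_fetchadd_32(&cnt, 1) does the same while returning the previous value.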