#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
#include <asm/xchg.h>
+/*
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ */
#define xchg(ptr, x) \
({ \
+ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
- sizeof(*(ptr))); \
+ smp_mb(); \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ smp_mb(); \
+ __ret; \
})
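
Illustration only, not part of the patch: as the comment further down notes, xchg() can be used to implement critical sections, and that is exactly where the full ordering provided by the two smp_mb() calls matters. A minimal C11 sketch of a test-and-set lock built on a fully ordered exchange; tas_lock(), tas_unlock() and lock_word are invented names for the sketch.

#include <stdatomic.h>

static atomic_int lock_word;

static void tas_lock(void)
{
        /*
         * The seq_cst exchange plays the role of smp_mb(); __xchg(); smp_mb():
         * accesses inside the critical section cannot be reordered before the
         * point where the lock is observed to be taken.
         */
        while (atomic_exchange_explicit(&lock_word, 1, memory_order_seq_cst))
                ;       /* previous value was 1: someone else holds the lock */
}

static void tas_unlock(void)
{
        atomic_store_explicit(&lock_word, 0, memory_order_release);
}
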
#define cmpxchg(ptr, o, n) \
({ \
+ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr)));\
+ smp_mb(); \
+ __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ (unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\
+ smp_mb(); \
+ __ret; \
})
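
The shape of both wrappers, a full barrier, then the barrier-free primitive, then another full barrier, is the generic way of turning a relaxed read-modify-write into a fully ordered one. A rough C11 analogue, for illustration only (fully_ordered_xchg and v are invented names, and the mapping to C11 fences is approximate):

#include <stdatomic.h>

static atomic_long v;

static long fully_ordered_xchg(long newval)
{
        long old;

        atomic_thread_fence(memory_order_seq_cst);      /* leading barrier  */
        old = atomic_exchange_explicit(&v, newval, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* trailing barrier */
        return old;
}
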
#define cmpxchg64(ptr, o, n) \
* Atomic exchange.
* Since it can be used to implement critical sections
* it must clobber "memory" (also for interrupts in UP).
- *
- * The leading and the trailing memory barriers guarantee that these
- * operations are fully ordered.
- *
*/
static inline unsigned long
{
unsigned long ret, tmp, addr64;
- smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" insbl %1,%4,%1\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
- smp_mb();
return ret;
}
{
unsigned long ret, tmp, addr64;
- smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" inswl %1,%4,%1\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
- smp_mb();
return ret;
}
{
unsigned long dummy;
- smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%4\n"
" bis $31,%3,%1\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
- smp_mb();
return val;
}
{
unsigned long dummy;
- smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%4\n"
" bis $31,%3,%1\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
- smp_mb();
return val;
}
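
For readers unfamiliar with the byte/word variants above: Alpha's locked loads and conditional stores work on 32/64-bit granules, so ____xchg(_u8) and ____xchg(_u16) operate on the aligned quadword containing the value ("andnot %4,7,%3") and splice the new byte or word into it ("insbl"/"inswl") inside the elided load-locked/store-conditional loop. A portable, illustration-only sketch of the same idea using a 64-bit compare-and-swap loop; xchg_u8_emulated is an invented name and the pointer cast glosses over aliasing rules:

#include <stdatomic.h>
#include <stdint.h>

static uint8_t xchg_u8_emulated(uint8_t *addr, uint8_t newval)
{
        /* aligned containing quadword, as computed by "andnot %4,7,%3" */
        _Atomic uint64_t *word = (_Atomic uint64_t *)((uintptr_t)addr & ~(uintptr_t)7);
        unsigned int shift = ((uintptr_t)addr & 7) * 8;  /* little-endian, as on Alpha */
        uint64_t mask = (uint64_t)0xff << shift;
        uint64_t old_word, new_word;

        old_word = atomic_load_explicit(word, memory_order_relaxed);
        do {
                /* splice the new byte into its slot, as insbl/mskbl/or do */
                new_word = (old_word & ~mask) | ((uint64_t)newval << shift);
        } while (!atomic_compare_exchange_weak(word, &old_word, new_word));

        return (uint8_t)(old_word >> shift);    /* the byte's previous value */
}
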
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
- *
- * The leading and the trailing memory barriers guarantee that these
- * operations are fully ordered.
- *
- * The trailing memory barrier is placed in SMP unconditionally, in
- * order to guarantee that dependency ordering is preserved when a
- * dependency is headed by an unsuccessful operation.
*/
static inline unsigned long
{
unsigned long prev, tmp, cmp, addr64;
- smp_mb();
__asm__ __volatile__(
" andnot %5,7,%4\n"
" insbl %1,%5,%1\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
- smp_mb();
return prev;
}
{
unsigned long prev, tmp, cmp, addr64;
- smp_mb();
__asm__ __volatile__(
" andnot %5,7,%4\n"
" inswl %1,%5,%1\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
- smp_mb();
return prev;
}
{
unsigned long prev, cmp;
- smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%5\n"
" cmpeq %0,%3,%1\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
- smp_mb();
return prev;
}
{
unsigned long prev, cmp;
- smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%5\n"
" cmpeq %0,%3,%1\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
- smp_mb();
return prev;
}
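
Illustration only: the usual caller-side pattern for a value-returning cmpxchg(), where, as the comment above says, success is detected by comparing the return value with the expected old value. The sketch is written against C11 atomics so it is self-contained; cmpxchg_helper() and add_to_counter() are invented names standing in for the kernel macro and a caller.

#include <stdatomic.h>

static atomic_long counter;

/* Value-returning compare-and-exchange, mirroring the kernel convention. */
static long cmpxchg_helper(atomic_long *p, long old, long newval)
{
        long expected = old;

        atomic_compare_exchange_strong(p, &expected, newval);
        return expected;        /* value found in *p; equals old on success */
}

static void add_to_counter(long delta)
{
        long old, seen;

        do {
                old = atomic_load(&counter);
                seen = cmpxchg_helper(&counter, old, old + delta);
        } while (seen != old);  /* another CPU updated it first: retry */
}
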