arch,powerpc: Convert smp_mb__*()
author	Peter Zijlstra <peterz@infradead.org>
	Thu, 13 Mar 2014 18:00:35 +0000 (19:00 +0100)
committer	Ingo Molnar <mingo@kernel.org>
	Fri, 18 Apr 2014 12:20:41 +0000 (14:20 +0200)
Powerpc allows reordering around its ll/sc based atomics; the
non-value-returning operations imply no memory barrier, so the two new
barriers, smp_mb__before_atomic() and smp_mb__after_atomic(), cannot be
no-ops.  Implement both as full barriers (smp_mb()).
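
For illustration only (not part of this patch), a minimal sketch of how
callers are expected to pair the new barriers with a non-value-returning
atomic op.  The nr_pending counter and the publish_work()/retire_work()
helpers below are made up for the example:

  /* Example only: hypothetical counter and helpers, not kernel code. */
  #include <linux/atomic.h>	/* atomics + smp_mb__{before,after}_atomic() */

  static atomic_t nr_pending = ATOMIC_INIT(0);

  static void publish_work(void)
  {
  	/*
  	 * atomic_inc() returns no value and so implies no barrier on
  	 * powerpc's ll/sc atomics; order the increment before later
  	 * loads and stores explicitly.
  	 */
  	atomic_inc(&nr_pending);
  	smp_mb__after_atomic();
  }

  static void retire_work(void)
  {
  	/* Order all earlier accesses before the decrement. */
  	smp_mb__before_atomic();
  	atomic_dec(&nr_pending);
  }

Because powerpc's non-value-returning atomics and bitops carry no
implied sync, both new barriers must remain full smp_mb()s here, unlike
on architectures whose atomic RMW instructions already order memory.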

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-gg2ffgq32sjgy9b8lj6m3hsc@git.kernel.org
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/powerpc/include/asm/atomic.h
arch/powerpc/include/asm/barrier.h
arch/powerpc/include/asm/bitops.h
arch/powerpc/kernel/crash.c

index e3b1d41c89be73425595b364370c096206a6a843..28992d01292633f2d473eeae47e202497fa691a6 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -8,6 +8,7 @@
 #ifdef __KERNEL__
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i)         { (i) }
 
@@ -270,11 +271,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 }
 #define atomic_dec_if_positive atomic_dec_if_positive
 
-#define smp_mb__before_atomic_dec()     smp_mb()
-#define smp_mb__after_atomic_dec()      smp_mb()
-#define smp_mb__before_atomic_inc()     smp_mb()
-#define smp_mb__after_atomic_inc()      smp_mb()
-
 #ifdef __powerpc64__
 
 #define ATOMIC64_INIT(i)       { (i) }
index f89da808ce310e1f373da55e96fc52d58bd4ccc9..bab79a110c7b05ceb8a4320efe4290257e186250 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -84,4 +84,7 @@ do {                                                                  \
        ___p1;                                                          \
 })
 
+#define smp_mb__before_atomic()     smp_mb()
+#define smp_mb__after_atomic()      smp_mb()
+
 #endif /* _ASM_POWERPC_BARRIER_H */
index a5e9a7d494d8bd685104397bff26d0265dc15e54..bd3bd573d0aeef4e402f3458757362b2a5c67dfc 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
 #define PPC_BIT(bit)           (1UL << PPC_BITLSHIFT(bit))
 #define PPC_BITMASK(bs, be)    ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
 
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit()     smp_mb()
-#define smp_mb__after_clear_bit()      smp_mb()
+#include <asm/barrier.h>
 
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix)           \
index 18d7c80ddeb98c0b9569ca039a630ac5bd5ba7c2..51dbace3269befc07e6955de30419e7840710cf9 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -81,7 +81,7 @@ void crash_ipi_callback(struct pt_regs *regs)
        }
 
        atomic_inc(&cpus_in_crash);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 
        /*
         * Starting the kdump boot.