select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select WEAK_ORDERING
config CPU_SB1
bool "SB1"
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select WEAK_ORDERING
endchoice
config SYS_HAS_CPU_SB1
bool
+config WEAK_ORDERING
+ bool
endmenu
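
Both weakly ordered cores above now select the new WEAK_ORDERING symbol, which the barrier.h file added below keys its SMP barriers off. Condensed, the mechanism is:

	#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
	#define __WEAK_ORDERING_MB	"	sync	\n"	/* real SYNC needed */
	#else
	#define __WEAK_ORDERING_MB	"		\n"	/* ordering is free */
	#endif

	#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : : "memory")
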
#
spin_lock(&smp_call_lock);
call_data = &data;
- mb();
+ smp_mb();
/* Send a message to all other CPUs and wait for them to respond */
for_each_online_cpu(i)
* Notify initiating CPU that I've grabbed the data and am
* about to execute the function.
*/
- mb();
+ smp_mb();
atomic_inc(&call_data->started);
/*
irq_exit();
if (wait) {
- mb();
+ smp_mb();
atomic_inc(&call_data->finished);
}
}
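
Taken together, the smp_call_function() handshake these three barriers order looks like this (a condensed sketch from the hunks above; func and info stand for call_data->func and call_data->info, and the IPI send is elided):

	/* initiator, under smp_call_lock */
	call_data = &data;
	smp_mb();		/* publish data before the cross-call */
	/* ... send IPI to each online CPU ... */

	/* each responder */
	smp_mb();		/* reads of call_data done before the ack */
	atomic_inc(&call_data->started);
	func(info);
	if (wait) {
		smp_mb();	/* func()'s stores visible before the ack */
		atomic_inc(&call_data->finished);
	}
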
#define _ASM_ATOMIC_H
#include <linux/irqflags.h>
+#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" sc %0, %2 \n"
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" sc %0, %2 \n"
" beqz %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" sc %0, %2 \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" sc %0, %2 \n"
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" scd %0, %2 \n"
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" scd %0, %2 \n"
" beqz %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" scd %0, %2 \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" scd %0, %2 \n"
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" beqzl %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
" beqz %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
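
All six atomic hunks above follow one pattern: SYNC leaves the LL/SC loops, and the *_return()/*_sub_if_positive() operations instead bracket themselves with smp_mb(), which barrier.h (added below) turns into SYNC only on weakly ordered SMP. Condensed shape of each function after the change (a sketch; the branch between LL/SC, the R10000 workaround, and the irq-disable fallback is elided):

	static __inline__ int atomic_add_return(int i, atomic_t * v)
	{
		unsigned long result;

		smp_mb();	/* order all earlier accesses before the op */

		/* ... LL/SC retry loop (or irq-disabled fallback) adds i
		   to v->counter and leaves the new value in result ... */

		smp_mb();	/* order the op before all later accesses */

		return result;
	}
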
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * b = 2;
+ * memory_barrier();
+ * p = &b; q = p;
+ * read_barrier_depends();
+ * d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * a = 2;
+ * memory_barrier();
+ * b = 3; y = b;
+ * read_barrier_depends();
+ * x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
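+
+/*
+ * As a usage sketch (gp, p, q and d are placeholders, not kernel
+ * symbols), the pointer-publication idiom the first example above
+ * builds towards looks like this with the SMP variants:
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	p->a = 1;
+ *	smp_wmb();
+ *	gp = p;				q = gp;
+ *					smp_read_barrier_depends();
+ *					d = q->a;
+ * </programlisting>
+ *
+ * The read of q->a is data-dependent on the read of gp, so d is
+ * guaranteed to observe the store to p->a whenever q == p.
+ */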
+
+#ifdef CONFIG_CPU_HAS_SYNC
+#define __sync() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ ".set mips2\n\t" \
+ "sync\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : /* no input */ \
+ : "memory")
+#else
+#define __sync() do { } while (0)
+#endif
+
+#define __fast_iob() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "lw $0,%0\n\t" \
+ "nop\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : "m" (*(int *)CKSEG1) \
+ : "memory")
+
+#define fast_wmb() __sync()
+#define fast_rmb() __sync()
+#define fast_mb() __sync()
+#define fast_iob() \
+ do { \
+ __sync(); \
+ __fast_iob(); \
+ } while (0)
+
+#ifdef CONFIG_CPU_HAS_WB
+
+#include <asm/wbflush.h>
+
+#define wmb() fast_wmb()
+#define rmb() fast_rmb()
+#define mb() wbflush()
+#define iob() wbflush()
+
+#else /* !CONFIG_CPU_HAS_WB */
+
+#define wmb() fast_wmb()
+#define rmb() fast_rmb()
+#define mb() fast_mb()
+#define iob() fast_iob()
+
+#endif /* !CONFIG_CPU_HAS_WB */
+
+#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#define __WEAK_ORDERING_MB " sync \n"
+#else
+#define __WEAK_ORDERING_MB " \n"
+#endif
+
+#define smp_mb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_rmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_wmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+
+#define set_mb(var, value) \
+ do { var = value; smp_mb(); } while (0)
+
+#endif /* __ASM_BARRIER_H */
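
The file keeps the mandatory barriers (mb(), rmb(), wmb(), iob()) as full SYNCs or wbflush() exactly as before; only the smp_*() variants become conditional, degrading to a plain compiler barrier unless CONFIG_WEAK_ORDERING and CONFIG_SMP are both set. A usage sketch of the conditional pair (flag and data are hypothetical shared variables):

	/* producer */
	data = 42;
	smp_wmb();		/* order the payload before the flag */
	flag = 1;

	/* consumer */
	while (!flag)
		cpu_relax();
	smp_rmb();		/* order the flag before the payload */
	BUG_ON(data != 42);
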
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (c) 1994 - 1997, 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
* Copyright (c) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_BITOPS_H
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
+#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/cpu-features.h>
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
" " __SC "%2, %1 \n"
" beqz %2, 1b \n"
" and %2, %0, %3 \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
return retval;
}
+
+ smp_mb();
}
/*
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
" " __SC "%2, %1 \n"
" beqz %2, 1b \n"
" and %2, %0, %3 \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
return retval;
}
+
+ smp_mb();
}
/*
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
" " __SC "\t%2, %1 \n"
" beqz %2, 1b \n"
" and %2, %0, %3 \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
return retval;
}
+
+ smp_mb();
}
#include <asm-generic/bitops/non-atomic.h>
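
As with the atomics, the test_and_*_bit() hunks pull SYNC out of the LL/SC asm and express the returning bitops' documented full-barrier semantics with smp_mb() at the C level. A typical caller that relies on those semantics (bit 0 of a hypothetical word used as a lock flag):

	while (test_and_set_bit(0, &word))	/* acts as a barrier */
		cpu_relax();
	/* ... critical section ... */
	smp_mb__before_clear_bit();
	clear_bit(0, &word);			/* release the flag */
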
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
+#include <asm/barrier.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#include <asm/war.h>
-#ifdef CONFIG_SMP
-#define __FUTEX_SMP_SYNC " sync \n"
-#else
-#define __FUTEX_SMP_SYNC
-#endif
-
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
if (cpu_has_llsc && R10000_LLSC_WAR) { \
" .set mips3 \n" \
"2: sc $1, %2 \n" \
" beqzl $1, 1b \n" \
- __FUTEX_SMP_SYNC \
+ __WEAK_ORDERING_MB \
"3: \n" \
" .set pop \n" \
" .set mips0 \n" \
" .set mips3 \n" \
"2: sc $1, %2 \n" \
" beqz $1, 1b \n" \
- __FUTEX_SMP_SYNC \
+ __WEAK_ORDERING_MB \
"3: \n" \
" .set pop \n" \
" .set mips0 \n" \
" .set mips3 \n"
"2: sc $1, %1 \n"
" beqzl $1, 1b \n"
- __FUTEX_SMP_SYNC
+ __WEAK_ORDERING_MB
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
" .set mips3 \n"
"2: sc $1, %1 \n"
" beqz $1, 1b \n"
- __FUTEX_SMP_SYNC
+ __WEAK_ORDERING_MB
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 06 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
+#include <asm/barrier.h>
#include <asm/war.h>
/*
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
- " sync \n"
" .set reorder \n"
: "=m" (lock->lock), "=&r" (tmp)
: "m" (lock->lock)
" li %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
- " sync \n"
+ " nop \n"
" .set reorder \n"
: "=m" (lock->lock), "=&r" (tmp)
: "m" (lock->lock)
: "memory");
}
+
+ smp_mb();
}
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
+ smp_mb();
+
__asm__ __volatile__(
" .set noreorder # __raw_spin_unlock \n"
- " sync \n"
" sw $0, %0 \n"
" .set\treorder \n"
: "=m" (lock->lock)
" beqzl %2, 1b \n"
" nop \n"
" andi %2, %0, 1 \n"
- " sync \n"
" .set reorder"
: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
: "m" (lock->lock)
" sc %2, %1 \n"
" beqz %2, 1b \n"
" andi %2, %0, 1 \n"
- " sync \n"
" .set reorder"
: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
: "m" (lock->lock)
: "memory");
}
+ smp_mb();
+
return res == 0;
}
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
- " sync \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
- " sync \n"
+ " nop \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
}
+
+ smp_mb();
}
/* Note the use of sub, not subu which will make the kernel die with an
{
unsigned int tmp;
+ smp_mb();
+
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
"1: ll %1, %2 # __raw_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
- " sync \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
- " sync \n"
+ " nop \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
- " sync \n"
+ " nop \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
- " sync \n"
+ " nop \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
}
+
+ smp_mb();
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
+ smp_mb();
+
__asm__ __volatile__(
- " sync # __raw_write_unlock \n"
+ " # __raw_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
" bnez %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
- " beqzl %1, 1b \n"
" .set reorder \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
+ " beqzl %1, 1b \n"
+ " nop \n"
+ __WEAK_ORDERING_MB
" li %2, 1 \n"
"2: \n"
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
+ " nop \n"
" .set reorder \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
+ __WEAK_ORDERING_MB
" li %2, 1 \n"
"2: \n"
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
- " sync \n"
+ " nop \n"
+ __WEAK_ORDERING_MB
" li %2, 1 \n"
" .set reorder \n"
"2: \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
- " sync \n"
+ " nop \n"
+ __WEAK_ORDERING_MB
" li %2, 1 \n"
" .set reorder \n"
"2: \n"
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics
* Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
#include <linux/irqflags.h>
#include <asm/addrspace.h>
+#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- ".set mips2\n\t" \
- "sync\n\t" \
- ".set pop" \
- : /* no output */ \
- : /* no input */ \
- : "memory")
-#else
-#define __sync() do { } while(0)
-#endif
-
-#define __fast_iob() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "lw $0,%0\n\t" \
- "nop\n\t" \
- ".set pop" \
- : /* no output */ \
- : "m" (*(int *)CKSEG1) \
- : "memory")
-
-#define fast_wmb() __sync()
-#define fast_rmb() __sync()
-#define fast_mb() __sync()
-#define fast_iob() \
- do { \
- __sync(); \
- __fast_iob(); \
- } while (0)
-
-#ifdef CONFIG_CPU_HAS_WB
-
-#include <asm/wbflush.h>
-
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-#define mb() wbflush()
-#define iob() wbflush()
-
-#else /* !CONFIG_CPU_HAS_WB */
-
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-#define mb() fast_mb()
-#define iob() fast_iob()
-
-#endif /* !CONFIG_CPU_HAS_WB */
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
/*
* switch_to(n) should switch tasks to task nr n, first
" .set mips3 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
" .set mips3 \n"
" sc %2, %1 \n"
" beqz %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqzl %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqz %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
#else
" .set mips3 \n"
" sc $1, %1 \n"
" beqzl $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
" .set mips3 \n"
" sc $1, %1 \n"
" beqz $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
" move $1, %z4 \n"
" scd $1, %1 \n"
" beqzl $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
" move $1, %z4 \n"
" scd $1, %1 \n"
" beqz $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
#else
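
Finally, xchg()/cmpxchg() receive the same treatment: the conditional SYNCs disappear from the LL/SC loops and a trailing smp_mb() preserves the full-barrier contract these primitives are documented to provide. Condensed shape (a sketch; on the irq-disable fallback path local_irq_restore() already implies the barrier, as the comments above note):

	static inline unsigned long xchg_u32(volatile int * m, unsigned int val)
	{
		unsigned long retval;

		/* ... LL/SC retry loop (or irq-disabled fallback) swaps
		   *m and val, leaving the old value in retval ... */

		smp_mb();	/* xchg() acts as a full memory barrier */

		return retval;
	}
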