The mmiowb() macro is horribly difficult to use and drivers will continue
to work most of the time if they omit a call when it is required.
Rather than rely on driver authors getting this right, push mmiowb() into
arch_spin_unlock() for mips. If this is deemed to be a performance issue,
a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide
the barrier in cases where no I/O writes were performed inside the
critical section.
Acked-by: Paul Burton <paul.burton@mips.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
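---

For illustration, the driver-side pattern this change covers is sketched below. This is not code from the patch: struct toy_dev, toy_kick_hw() and the single-register layout are hypothetical, while spin_lock()/spin_unlock() and writel() are the usual kernel APIs. Before this change, a correct mips driver needed an explicit mmiowb() between the posted MMIO write and the unlock; afterwards the unlock path supplies the barrier.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical device, for illustration only. */
struct toy_dev {
	spinlock_t	hw_lock;
	void __iomem	*regs;
};

static void toy_kick_hw(struct toy_dev *dev, u32 cmd)
{
	spin_lock(&dev->hw_lock);
	writel(cmd, dev->regs);		/* posted MMIO write */
	/* An explicit mmiowb() used to be required here on mips... */
	spin_unlock(&dev->hw_lock);	/* ...and is now implied by the unlock */
}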
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
-/* Some callers use this older API instead. */
-#define mmiowb() iobarrier_w()
-
/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb() iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_MMIOWB_H */
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
#include <asm/processor.h>
#include <asm/qrwlock.h>
-#include <asm/qspinlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	/* This could be optimised with ARCH_HAS_MMIOWB */
+	mmiowb();
+	smp_store_release(&lock->locked, 0);
+}
+
+#include <asm/qspinlock.h>
#endif /* _ASM_SPINLOCK_H */
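
On the optimisation mentioned in the commit message and in the comment inside queued_spin_unlock(): the idea behind ARCH_HAS_MMIOWB is to track whether the critical section actually performed any MMIO writes and to pay for the barrier only when it did. Below is a minimal user-space model of that pending-flag scheme, compilable as plain C11; toy_io_write(), toy_lock() and toy_unlock() are invented names, the thread-local flag stands in for a per-CPU variable, and the sequentially consistent fence stands in for the real mmiowb() barrier.

#include <stdatomic.h>
#include <stdbool.h>

static _Thread_local bool mmio_pending;	/* per-CPU in a real kernel */

static void toy_io_write(volatile unsigned int *reg, unsigned int val)
{
	*reg = val;		/* the (posted) device write */
	mmio_pending = true;	/* remember this CPU has MMIO in flight */
}

static void toy_lock(atomic_flag *lock)
{
	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		;		/* spin */
}

static void toy_unlock(atomic_flag *lock)
{
	if (mmio_pending) {	/* barrier only if the section did MMIO */
		mmio_pending = false;
		atomic_thread_fence(memory_order_seq_cst);	/* mmiowb() stand-in */
	}
	atomic_flag_clear_explicit(lock, memory_order_release);
}

The asm-generic implementation that this patch hooks into additionally tracks lock nesting, so that an MMIO write made under an inner lock is still ordered at the matching unlock, but the single flag above captures the essence of the elision.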