locking/core: Remove break_lock field when CONFIG_GENERIC_LOCKBREAK=y
authorWill Deacon <will.deacon@arm.com>
Tue, 28 Nov 2017 18:42:19 +0000 (18:42 +0000)
committerIngo Molnar <mingo@kernel.org>
Tue, 12 Dec 2017 10:24:01 +0000 (11:24 +0100)
When CONFIG_GENERIC_LOCKBREAK=y, locking structures grow an extra int ->break_lock
field which is used to implement raw_spin_is_contended() by setting the field
to 1 when waiting on a lock and clearing it to zero when holding a lock.
However, there are a few problems with this approach:

  - There is a write-write race between a CPU successfully taking the lock
    (and subsequently writing break_lock = 0) and a waiter waiting on
    the lock (and subsequently writing break_lock = 1). This could result
    in a contended lock being reported as uncontended and vice-versa.

  - On machines with store buffers, nothing guarantees that the writes
    to break_lock are visible to other CPUs at any particular time.

  - READ_ONCE/WRITE_ONCE are not used, so the field is potentially
    susceptible to harmful compiler optimisations.

Consequently, the usefulness of this field is unclear and we'd be better off
removing it and allowing architectures to implement raw_spin_is_contended() by
providing a definition of arch_spin_is_contended(), as they can when
CONFIG_GENERIC_LOCKBREAK=n.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1511894539-7988-3-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/rwlock_types.h
include/linux/spinlock.h
include/linux/spinlock_types.h
kernel/locking/spinlock.c

index cc0072e93e360722f40a19928c001a1feab272bb..857a72ceb794252eb8cb22c55ef85d17453c34a0 100644 (file)
@@ -10,9 +10,6 @@
  */
 typedef struct {
        arch_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-       unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;
        void *owner;
index a39186194cd6782ac0b6705f02af6179a86f7813..3bf273538840103c7b6c634b148b8c74ce754843 100644 (file)
@@ -107,16 +107,11 @@ do {                                                              \
 
 #define raw_spin_is_locked(lock)       arch_spin_is_locked(&(lock)->raw_lock)
 
-#ifdef CONFIG_GENERIC_LOCKBREAK
-#define raw_spin_is_contended(lock) ((lock)->break_lock)
-#else
-
 #ifdef arch_spin_is_contended
 #define raw_spin_is_contended(lock)    arch_spin_is_contended(&(lock)->raw_lock)
 #else
 #define raw_spin_is_contended(lock)    (((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
-#endif
 
 /*
  * This barrier must provide two things:
index 73548eb13a5ddc82ea16c6a292a8d704471eb7ac..24b4e6f2c1a22fe2a6717f4d6076ea87d595994b 100644 (file)
@@ -19,9 +19,6 @@
 
 typedef struct raw_spinlock {
        arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-       unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;
        void *owner;
index 0ebb253e21999232b639b1248763d963bc790e8c..936f3d14dd6bfeda3ef7921266fef5dc5b36e2a8 100644 (file)
@@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock)                        \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
-               if (!(lock)->break_lock)                                \
-                       (lock)->break_lock = 1;                         \
-                                                                       \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
-       (lock)->break_lock = 0;                                         \
 }                                                                      \
                                                                        \
 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
@@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)       \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
-               if (!(lock)->break_lock)                                \
-                       (lock)->break_lock = 1;                         \
-                                                                       \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
-       (lock)->break_lock = 0;                                         \
+                                                                       \
        return flags;                                                   \
 }                                                                      \
                                                                        \