arm64: Use a raw spinlock in __install_bp_hardening_cb()
author    James Morse <james.morse@arm.com>
          Tue, 27 Nov 2018 15:35:21 +0000 (15:35 +0000)
committer Will Deacon <will.deacon@arm.com>
          Tue, 27 Nov 2018 18:01:34 +0000 (18:01 +0000)
__install_bp_hardening_cb() is called via stop_machine() as part
of the cpu_enable callback. To force each CPU to take its turn
when allocating hyp vector slots, the CPUs serialise on a spinlock.
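
For reference, a minimal sketch of the constraint described above: a
stop_machine() callback runs on the per-CPU stopper threads with
preemption disabled and IRQs masked, so nothing in it may sleep. The
function names here are hypothetical; only stop_machine() and
cpu_online_mask are real kernel APIs.

  #include <linux/stop_machine.h>
  #include <linux/cpumask.h>

  /*
   * Hypothetical callback: runs on every online CPU with preemption
   * disabled and IRQs masked, so it must not take a sleeping lock.
   */
  static int example_enable_cb(void *unused)
  {
          return 0;
  }

  static void example_enable_on_all_cpus(void)
  {
          /* Run the callback on every online CPU while the machine is stopped. */
          stop_machine(example_enable_cb, NULL, cpu_online_mask);
  }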

With the RT patches applied, the spinlock becomes a sleeping
(rt_mutex based) lock, and scheduling from the atomic stop_machine()
context triggers a warning:
| [    0.319176] CPU features: detected: RAS Extension Support
| [    0.319950] BUG: scheduling while atomic: migration/3/36/0x00000002
| [    0.319955] Modules linked in:
| [    0.319958] Preemption disabled at:
| [    0.319969] [<ffff000008181ae4>] cpu_stopper_thread+0x7c/0x108
| [    0.319973] CPU: 3 PID: 36 Comm: migration/3 Not tainted 4.19.1-rt3-00250-g330fc2c2a880 #2
| [    0.319975] Hardware name: linux,dummy-virt (DT)
| [    0.319976] Call trace:
| [    0.319981]  dump_backtrace+0x0/0x148
| [    0.319983]  show_stack+0x14/0x20
| [    0.319987]  dump_stack+0x80/0xa4
| [    0.319989]  __schedule_bug+0x94/0xb0
| [    0.319991]  __schedule+0x510/0x560
| [    0.319992]  schedule+0x38/0xe8
| [    0.319994]  rt_spin_lock_slowlock_locked+0xf0/0x278
| [    0.319996]  rt_spin_lock_slowlock+0x5c/0x90
| [    0.319998]  rt_spin_lock+0x54/0x58
| [    0.320000]  enable_smccc_arch_workaround_1+0xdc/0x260
| [    0.320001]  __enable_cpu_capability+0x10/0x20
| [    0.320003]  multi_cpu_stop+0x84/0x108
| [    0.320004]  cpu_stopper_thread+0x84/0x108
| [    0.320008]  smpboot_thread_fn+0x1e8/0x2b0
| [    0.320009]  kthread+0x124/0x128
| [    0.320010]  ret_from_fork+0x10/0x18

Switch this to a raw spinlock, which remains a spinning lock even on
RT. This is safe, as we know the code is only ever called with IRQs
masked.
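
As a condensed, hypothetical sketch of the pattern the hunks below
apply (only the lock APIs are real, the other names are illustrative):
raw_spinlock_t stays a true spinning lock even on PREEMPT_RT, so it can
be taken safely in this IRQs-masked context.

  #include <linux/spinlock.h>

  static DEFINE_RAW_SPINLOCK(example_lock);

  static void example_atomic_section(void)
  {
          /* Never sleeps, even with the RT patches applied. */
          raw_spin_lock(&example_lock);
          /* ... short, non-sleeping critical section ... */
          raw_spin_unlock(&example_lock);
  }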

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/kernel/cpu_errata.c

index a509e35132d225a4eef28af288969abab47ef9b3..00d93a91c4d52a3a0e4dff76b755a0702a85ea84 100644
@@ -135,7 +135,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
 {
-       static DEFINE_SPINLOCK(bp_lock);
+       static DEFINE_RAW_SPINLOCK(bp_lock);
        int cpu, slot = -1;
 
        /*
@@ -147,7 +147,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                return;
        }
 
-       spin_lock(&bp_lock);
+       raw_spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
@@ -163,7 +163,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 
        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
-       spin_unlock(&bp_lock);
+       raw_spin_unlock(&bp_lock);
 }
 #else
 #define __smccc_workaround_1_smc_start         NULL