x86/amd: Simplify AMD E400 aware idle routine
author Borislav Petkov <bp@alien8.de>
Fri, 9 Dec 2016 18:29:11 +0000 (19:29 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 9 Dec 2016 20:23:21 +0000 (21:23 +0100)
Reorganize the E400 detection now that we have everything in place:
switch the CPUs to broadcast mode after the LAPIC has been initialized
and remove the facilities that were used previously on the idle path.

Unfortunately static_cpu_has_bug() cannot be used in the E400 idle routine
because alternatives have already been applied by the time the actual
detection happens, so the static switching does not take effect and the
test stays false. Use boot_cpu_has_bug() instead, which is definitely an
improvement over the RDMSR and the cpumask handling.
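
For illustration, a minimal userspace sketch of that ordering problem (not
kernel code; apply_alternatives(), detect_e400(), bug_bit and patched_check
are made-up stand-ins for the kernel concepts): the "alternative" is patched
from the flag's value before the E400 detection sets it, so a statically
patched check keeps returning the stale result, while a plain runtime read of
the flag sees the later update.

  #include <stdbool.h>
  #include <stdio.h>

  static bool bug_bit;           /* models the X86_BUG_AMD_APIC_C1E flag     */
  static bool patched_check;     /* models a static_cpu_has_bug() call site  */

  static void apply_alternatives(void)
  {
          /* The check is "patched" from the flag's current (early) value. */
          patched_check = bug_bit;
  }

  static void detect_e400(void)
  {
          /* Detection runs later and only then sets the bug flag. */
          bug_bit = true;
  }

  int main(void)
  {
          apply_alternatives();  /* alternatives are applied first ...    */
          detect_e400();         /* ... the erratum is detected afterwards */

          printf("patched check: %d\n", patched_check);  /* stays 0 */
          printf("runtime check: %d\n", bug_bit);        /* now 1   */
          return 0;
  }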

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/20161209182912.2726-5-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/include/asm/acpi.h
arch/x86/include/asm/processor.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
drivers/acpi/processor_idle.c

index 5391b0ae7cc353d119d963894485e2391e547a5c..395b69551fce8172ab1938ab730eee034154c638 100644
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
            boot_cpu_data.x86_model <= 0x05 &&
            boot_cpu_data.x86_mask < 0x0A)
                return 1;
-       else if (amd_e400_c1e_detected)
+       else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
                return 1;
        else
                return max_cstate;
index 984a7bf17f6a7295c3a7c8531e68cb807a722c35..fa609c6f6ba9d529c22ba344a7196dede7939b58 100644
@@ -621,10 +621,9 @@ static inline void sync_core(void)
 }
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void init_amd_e400_c1e_mask(void);
+extern void amd_e400_c1e_apic_setup(void);
 
 extern unsigned long           boot_option_idle_override;
-extern bool                    amd_e400_c1e_detected;
 
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
                         IDLE_POLL};
index 88c657b057e223eb0eaccdac699aa91505638129..cc89ce20018390c28b1d555f235e1786981b3cc1 100644
@@ -894,11 +894,13 @@ void __init setup_boot_APIC_clock(void)
 
        /* Setup the lapic or request the broadcast */
        setup_APIC_timer();
+       amd_e400_c1e_apic_setup();
 }
 
 void setup_secondary_APIC_clock(void)
 {
        setup_APIC_timer();
+       amd_e400_c1e_apic_setup();
 }
 
 /*
index 9bd910a7dd0abc0d994c6e50250c6d104550dc1e..e1f98ff9a3f05161ad970d5d1c2e79a8ef096ec1 100644
@@ -1144,7 +1144,6 @@ void enable_sep_cpu(void)
 void __init identify_boot_cpu(void)
 {
        identify_cpu(&boot_cpu_data);
-       init_amd_e400_c1e_mask();
 #ifdef CONFIG_X86_32
        sysenter_setup();
        enable_sep_cpu();
index ced76f13d20da4c78635f8c6afef4d51c41253f3..2c2b55ab41e71551ebc8b5daf7ed4d9fc9af6521 100644
@@ -289,59 +289,33 @@ void stop_this_cpu(void *dummy)
                halt();
 }
 
-bool amd_e400_c1e_detected;
-EXPORT_SYMBOL(amd_e400_c1e_detected);
-
-static cpumask_var_t amd_e400_c1e_mask;
-
-void amd_e400_remove_cpu(int cpu)
-{
-       if (amd_e400_c1e_mask != NULL)
-               cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
-}
-
 /*
- * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
- * pending message MSR. If we detect C1E, then we handle it the same
- * way as C3 power states (local apic timer and TSC stop)
+ * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
+ * states (local apic timer and TSC stop).
  */
 static void amd_e400_idle(void)
 {
-       if (!amd_e400_c1e_detected) {
-               u32 lo, hi;
-
-               rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
-
-               if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-                       amd_e400_c1e_detected = true;
-                       if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-                               mark_tsc_unstable("TSC halt in AMD C1E");
-                       pr_info("System has AMD C1E enabled\n");
-               }
+       /*
+        * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
+        * gets set after static_cpu_has() places have been converted via
+        * alternatives.
+        */
+       if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
+               default_idle();
+               return;
        }
 
-       if (amd_e400_c1e_detected) {
-               int cpu = smp_processor_id();
+       tick_broadcast_enter();
 
-               if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
-                       cpumask_set_cpu(cpu, amd_e400_c1e_mask);
-                       /* Force broadcast so ACPI can not interfere. */
-                       tick_broadcast_force();
-                       pr_info("Switch to broadcast mode on CPU%d\n", cpu);
-               }
-               tick_broadcast_enter();
+       default_idle();
 
-               default_idle();
-
-               /*
-                * The switch back from broadcast mode needs to be
-                * called with interrupts disabled.
-                */
-               local_irq_disable();
-               tick_broadcast_exit();
-               local_irq_enable();
-       } else
-               default_idle();
+       /*
+        * The switch back from broadcast mode needs to be called with
+        * interrupts disabled.
+        */
+       local_irq_disable();
+       tick_broadcast_exit();
+       local_irq_enable();
 }
 
 /*
@@ -411,11 +385,14 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
                x86_idle = default_idle;
 }
 
-void __init init_amd_e400_c1e_mask(void)
+void amd_e400_c1e_apic_setup(void)
 {
-       /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
-       if (x86_idle == amd_e400_idle)
-               zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
+       if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
+               pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
+               local_irq_disable();
+               tick_broadcast_force();
+               local_irq_enable();
+       }
 }
 
 void __init arch_post_acpi_subsys_init(void)
index 42f5eb7b4f6c85251f4ab65d6de44268b6de06ea..0229ccbfcd66b412719e923ae729238850ed2cb4 100644
@@ -1575,7 +1575,6 @@ void play_dead_common(void)
 {
        idle_task_exit();
        reset_lazy_tlbstate();
-       amd_e400_remove_cpu(raw_smp_processor_id());
 
        /* Ack it */
        (void)cpu_report_death();
index 2237d3f24f0e735a1e04294ea273595cce98e049..5c8aa9cf62d70de12b240e62887aaf59311e4a99 100644
@@ -141,7 +141,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
        if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
                return;
 
-       if (amd_e400_c1e_detected)
+       if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
                type = ACPI_STATE_C1;
 
        /*