x86/speculation: Consolidate CPU whitelists
author Thomas Gleixner <tglx@linutronix.de>
Wed, 27 Feb 2019 09:10:23 +0000 (10:10 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Wed, 6 Mar 2019 20:52:10 +0000 (21:52 +0100)
The CPU vulnerability whitelists have some overlap, and there are more
whitelists coming along.

Use the driver_data field in the x86_cpu_id struct to denote the
whitelisted vulnerabilities and combine all whitelists into one.
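
For illustration, the pattern can be modeled in a few lines of plain C
(a minimal standalone sketch, not kernel code; the vendor enum, wildcard
encoding and table contents below are simplified stand-ins for the real
x86_cpu_id machinery):

    /*
     * Userspace model of the consolidated whitelist: each entry carries
     * a bitmask naming the vulnerabilities the matched CPU does NOT
     * have, so one table lookup answers every per-bug query.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NO_SPECULATION (1UL << 0)
    #define NO_MELTDOWN    (1UL << 1)
    #define NO_SSB         (1UL << 2)
    #define NO_L1TF        (1UL << 3)

    enum vendor { INTEL, AMD };

    struct cpu_entry {
            enum vendor vendor;
            int family, model;      /* -1 acts as a wildcard, like X86_MODEL_ANY */
            unsigned long flags;    /* the NO_* bits for this CPU */
    };

    static const struct cpu_entry whitelist[] = {
            { INTEL, 6,    0x5c, NO_L1TF },                          /* Goldmont */
            { AMD,   0x0f, -1,   NO_MELTDOWN | NO_SSB | NO_L1TF },
            /* wildcard families last, mirroring the kernel table's ordering rule */
            { AMD,   -1,   -1,   NO_MELTDOWN | NO_L1TF },
    };

    /* Analogue of cpu_matches(): does the first matching entry carry this bit? */
    static bool cpu_matches(enum vendor v, int family, int model, unsigned long which)
    {
            size_t i;

            for (i = 0; i < sizeof(whitelist) / sizeof(whitelist[0]); i++) {
                    const struct cpu_entry *e = &whitelist[i];

                    if (e->vendor == v &&
                        (e->family == -1 || e->family == family) &&
                        (e->model == -1 || e->model == model))
                            return !!(e->flags & which);
            }
            return false;
    }

    int main(void)
    {
            /*
             * AMD family 0x17 falls through to the wildcard entry: Meltdown
             * is whitelisted, SSB is not, so that bug would still be set.
             */
            printf("NO_MELTDOWN: %d\n", cpu_matches(AMD, 0x17, 0x01, NO_MELTDOWN)); /* 1 */
            printf("NO_SSB:      %d\n", cpu_matches(AMD, 0x17, 0x01, NO_SSB));      /* 0 */
            return 0;
    }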

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Jon Masters <jcm@redhat.com>
Tested-by: Jon Masters <jcm@redhat.com>
arch/x86/kernel/cpu/common.c

index cb28e98a0659abc5051b1e7fcb936b692a1a1264..26ec15034f86de0ba0559d4c4f7ed406d84b3c02 100644
@@ -948,61 +948,72 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL,    X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL_TABLET,     X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL_MID,        X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_BONNELL,     X86_FEATURE_ANY },
-       { X86_VENDOR_CENTAUR,   5 },
-       { X86_VENDOR_INTEL,     5 },
-       { X86_VENDOR_NSC,       5 },
-       { X86_VENDOR_ANY,       4 },
+#define NO_SPECULATION BIT(0)
+#define NO_MELTDOWN    BIT(1)
+#define NO_SSB         BIT(2)
+#define NO_L1TF        BIT(3)
+
+#define VULNWL(_vendor, _family, _model, _whitelist)   \
+       { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist)         \
+       VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist)          \
+       VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+#define VULNWL_HYGON(family, whitelist)        \
+       VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+       VULNWL(ANY,     4, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(CENTAUR, 5, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(INTEL,   5, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
+
+       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION),
+       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION),
+       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION),
+       VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION),
+       VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION),
+
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF),
+       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF),
+
+       VULNWL_INTEL(CORE_YONAH,                NO_SSB),
+
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_L1TF),
+
+       VULNWL_AMD(0x0f,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+       VULNWL_AMD(0x10,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+       VULNWL_AMD(0x11,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+       VULNWL_AMD(0x12,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+
+       /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF),
+       VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF),
        {}
 };
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-       { X86_VENDOR_AMD },
-       { X86_VENDOR_HYGON },
-       {}
-};
-
-/* Only list CPUs which speculate but are non susceptible to SSB */
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_X    },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_MID  },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_CORE_YONAH           },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
-       { X86_VENDOR_AMD,       0x12,                                   },
-       { X86_VENDOR_AMD,       0x11,                                   },
-       { X86_VENDOR_AMD,       0x10,                                   },
-       { X86_VENDOR_AMD,       0xf,                                    },
-       {}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+       const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
 
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-       /* in addition to cpu_no_speculation */
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_X    },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_MID  },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT_MID     },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT        },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT_X      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT_PLUS   },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
-       {}
-};
+       return m && !!(m->driver_data & which);
+}
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
        u64 ia32_cap = 0;
 
-       if (x86_match_cpu(cpu_no_speculation))
+       if (cpu_matches(NO_SPECULATION))
                return;
 
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
@@ -1011,15 +1022,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
-       if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-          !(ia32_cap & ARCH_CAP_SSB_NO) &&
+       if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
        if (ia32_cap & ARCH_CAP_IBRS_ALL)
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-       if (x86_match_cpu(cpu_no_meltdown))
+       if (cpu_matches(NO_MELTDOWN))
                return;
 
        /* Rogue Data Cache Load? No! */
@@ -1028,7 +1038,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-       if (x86_match_cpu(cpu_no_l1tf))
+       if (cpu_matches(NO_L1TF))
                return;
 
        setup_force_cpu_bug(X86_BUG_L1TF);
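
With the whitelists consolidated, supporting the next vulnerability class
becomes a matter of adding one flag bit and updating the affected entries,
rather than introducing yet another table. A hypothetical follow-up hunk
(NO_NEWBUG and X86_BUG_NEWBUG are invented placeholder names, not part of
this patch):

+#define NO_NEWBUG      BIT(4)

        /* in cpu_vuln_whitelist[] */
-       VULNWL_INTEL(ATOM_GOLDMONT,             NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_L1TF | NO_NEWBUG),

        /* in cpu_set_bug_bits() */
+       if (!cpu_matches(NO_NEWBUG))
+               setup_force_cpu_bug(X86_BUG_NEWBUG);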