-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
#include <linux/bootmem.h>
+#include <linux/linkage.h>
#include <linux/bitops.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
#include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
#include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
#include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
#include <asm/smp.h>
-#include <asm/cpu.h>
-#include <asm/cpumask.h>
-#include <asm/apic.h>
+
#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+#include <asm/uv/uv.h>
#endif
-#include <asm/pda.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-#include <asm/stackprotector.h>
-
#include "cpu.h"
#ifdef CONFIG_X86_64
/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+ alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+ alloc_bootmem_cpumask_var(&cpu_callin_mask);
+ alloc_bootmem_cpumask_var(&cpu_callout_mask);
+ alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
#else /* CONFIG_X86_32 */
-cpumask_t cpu_callin_map;
+cpumask_t cpu_sibling_setup_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_callin_map;
#endif /* CONFIG_X86_32 */
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
- Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
- [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
- [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
+ /*
+ * We need valid kernel segments for data and code in long mode too
+ * IRET will check the segment types kkeil 2000/10/28
+ * Also sysret mandates a special GDT layout
+ *
- * The TLS descriptors are currently at a different place compared to i386.
+ * TLS descriptors are currently at a different place compared to i386.
+ * Hopefully nobody expects them at a fixed place (Wine?)
+ */
+	[GDT_ENTRY_KERNEL32_CS]		= { { { 0x0000ffff, 0x00cf9b00 } } },
+	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00af9b00 } } },
+	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9300 } } },
+	[GDT_ENTRY_DEFAULT_USER32_CS]	= { { { 0x0000ffff, 0x00cffb00 } } },
+	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff300 } } },
+	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00affb00 } } },
#else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
-	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
-	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
-	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
-	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00cf9a00 } } },
+	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9200 } } },
+	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00cffa00 } } },
+	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
/* 32-bit code */
-	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+	[GDT_ENTRY_APMBIOS_BASE]	= { { { 0x0000ffff, 0x00409a00 } } },
/* 16-bit code */
-	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+	[GDT_ENTRY_APMBIOS_BASE+1]	= { { { 0x0000ffff, 0x00009a00 } } },
/* data */
-	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
-	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+	[GDT_ENTRY_APMBIOS_BASE+2]	= { { { 0x0000ffff, 0x00409200 } } },
+	[GDT_ENTRY_ESPFIX_SS]		= { { { 0x00000000, 0x00c09200 } } },
+	[GDT_ENTRY_PERCPU]		= { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
#endif
+} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
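
Each initializer above packs one segment descriptor into the two 32-bit words of the kernel's desc_struct (low word, high word). As a decoding aid, here is a standalone sketch, not part of the patch, that unpacks GDT_ENTRY_KERNEL_CS (0x0000ffff/0x00af9b00) along the standard x86 descriptor layout:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t a = 0x0000ffff;	/* low word of the descriptor  */
		uint32_t b = 0x00af9b00;	/* high word of the descriptor */

		uint32_t base  = (a >> 16) | ((b & 0xff) << 16) | (b & 0xff000000);
		uint32_t limit = (a & 0xffff) | (b & 0x000f0000);
		unsigned int access = (b >> 8) & 0xff;	/* P, DPL, S, type */
		unsigned int flags  = (b >> 20) & 0xf;	/* G, D/B, L, AVL  */

		printf("base=%#x limit=%#x access=%#x flags=%#x\n",
		       base, limit, access, flags);
		/* -> base=0 limit=0xfffff access=0x9b (present, ring 0, code,
		 *    execute/read, accessed), flags=0xa (G=1, L=1: 64-bit) */
		return 0;
	}
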
#ifdef CONFIG_X86_32
}
#endif
+/*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software. Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+ u32 feature;
+ u32 level;
+};
+
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+ { X86_FEATURE_MWAIT, 0x00000005 },
+ { X86_FEATURE_DCA, 0x00000009 },
+ { X86_FEATURE_XSAVE, 0x0000000d },
+ { 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+ const struct cpuid_dependent_feature *df;
+
+ for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
+ /*
+ * Note: cpuid_level is set to -1 if unavailable, but
+	 * extended_cpuid_level is set to 0 if unavailable
+ * and the legitimate extended levels are all negative
+ * when signed; hence the weird messing around with
+ * signs here...
+ */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
-		     (u32)df->level > (u32)c->extended_cpuid_level :
-		     (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		if (!((s32)df->level < 0 ?
+		      (u32)df->level > (u32)c->extended_cpuid_level :
+		      (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
+ }
+}
+
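
The signed/unsigned comparison above is subtle enough to deserve a worked check: standard CPUID levels are small positive values, while extended levels live at 0x80000000 and up and so come out negative as s32, which is what selects the unsigned compare against extended_cpuid_level. A standalone sketch mirroring the test (not part of the patch; the 0x80000008 entry is purely illustrative):

	#include <stdio.h>
	#include <stdint.h>

	/* Mirror of the level test in filter_cpuid_features() above. */
	static int level_missing(uint32_t level, int32_t cpuid_level,
				 uint32_t extended_cpuid_level)
	{
		return (int32_t)level < 0 ?
			level > extended_cpuid_level :	/* extended: unsigned */
			(int32_t)level > cpuid_level;	/* standard: signed   */
	}

	int main(void)
	{
		/* XSAVE needs standard leaf 0xd; a capped cpuid_level hides it */
		printf("%d\n", level_missing(0x0000000d, 0x0a, 0));	/* 1 */
		/* cpuid_level == -1 means CPUID is not available at all */
		printf("%d\n", level_missing(0x0000000d, -1, 0));	/* 1 */
		/* illustrative extended leaf: negative as s32, so the unsigned
		 * branch runs; extended_cpuid_level == 0 when absent */
		printf("%d\n", level_missing(0x80000008, 0x0a, 0));	/* 1 */
		printf("%d\n", level_missing(0x80000008, 0x0a, 0x80000008)); /* 0 */
		return 0;
	}
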
/*
* Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+ loadsegment(fs, __KERNEL_PERCPU);
+#else
+ loadsegment(gs, 0);
+ wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+ load_stack_canary_segment();
+}
+
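
Once load_percpu_segment() has run on 64-bit, per-cpu data is addressed relative to the %gs base programmed through MSR_GS_BASE. Roughly, the accessors generate %gs-relative loads like the standalone sketch below (illustrative only, not the kernel's actual percpu.h macros; in user space the %gs base is normally 0, so the load degenerates to a plain read):

	#include <stdio.h>

	static unsigned long some_var = 42;	/* stand-in for a per-cpu variable */

	/* %gs-relative load, the addressing style the kernel's per-cpu
	 * accessors use once MSR_GS_BASE points at this CPU's area. */
	static unsigned long gs_relative_read(unsigned long *addr)
	{
		unsigned long val;

		asm("movq %%gs:%1, %0" : "=r" (val) : "m" (*addr));
		return val;
	}

	int main(void)
	{
		printf("%lu\n", gs_relative_read(&some_var));	/* 42 */
		return 0;
	}
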
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
+void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
- gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+ gdt_descr.address = (long)get_cpu_gdt_table(cpu);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
- asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+ /* Reload the per-cpu base */
+
+ load_percpu_segment(cpu);
}
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-			       smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
+
-		index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-		c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-		index_msb = get_count_order(smp_num_siblings);
-		core_bits = get_count_order(c->x86_max_cores);
-#ifdef CONFIG_X86_64
-		c->cpu_core_id = phys_pkg_id(index_msb) &
-					       ((1 << core_bits) - 1);
-#else
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-#endif
-	}
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
+
+	index_msb = get_count_order(smp_num_siblings);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+
+	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+	index_msb = get_count_order(smp_num_siblings);
+
+	core_bits = get_count_order(c->x86_max_cores);
+
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+				       ((1 << core_bits) - 1);
out:
if ((c->x86_max_cores * smp_num_siblings) > 1) {
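
The index_msb/core_bits arithmetic above slices the APIC ID into thread, core and package fields. A standalone illustration (not part of the patch) for a hypothetical topology of 2 cores with 2 threads each per package:

	#include <stdio.h>

	/* Minimal stand-in for the kernel's get_count_order():
	 * smallest k such that 2^k >= n. */
	static int get_count_order(unsigned int n)
	{
		int k = 0;

		while ((1u << k) < n)
			k++;
		return k;
	}

	int main(void)
	{
		unsigned int apicid = 13;	/* 0b1101: pkg 3, core 0, thread 1 */
		unsigned int siblings = 4;	/* logical CPUs per package */
		unsigned int max_cores = 2;	/* cores per package */

		int pkg_shift = get_count_order(siblings);		/* 2 */
		int smt_shift = get_count_order(siblings / max_cores);	/* 1 */
		int core_bits = get_count_order(max_cores);		/* 1 */

		printf("phys_proc_id=%u cpu_core_id=%u\n",
		       apicid >> pkg_shift,
		       (apicid >> smt_shift) & ((1u << core_bits) - 1));
		return 0;	/* prints phys_proc_id=3 cpu_core_id=0 */
	}
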
void __init early_cpu_init(void)
{
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
int count = 0;
--- ------ printk("KERNEL supported cpus:\n");
+++ ++++++ printk(KERN_INFO "KERNEL supported cpus:\n");
for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
unsigned int j;
if (count >= X86_VENDOR_NUM)
squash_the_stupid_serial_number(c);
/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
*/
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
-		char *p;
+		const char *p;
p = table_lookup_model(c);
if (p)
strcpy(c->x86_model_id, p);
}
struct msr_range {
-	unsigned min;
-	unsigned max;
+	unsigned	min;
+	unsigned	max;
};
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
{ 0x00000000, 0x00000418},
{ 0xc0000000, 0xc000040b},
{ 0xc0010000, 0xc0010142},
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
-	char *vendor = NULL;
+	const char *vendor = NULL;
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
if (vendor && !strstr(c->x86_model_id, vendor))
printk(KERN_CONT "%s ", vendor);
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ irq_stack_union) __aligned(PAGE_SIZE);
+
-void __cpuinit pda_init(int cpu)
-{
- struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+ init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
- /* Setup up data that may be needed in __get_free_pages early */
- loadsegment(fs, 0);
- loadsegment(gs, 0);
- /* Memory clobbers used to order PDA accessed */
- mb();
- wrmsrl(MSR_GS_BASE, pda);
- mb();
-
- pda->cpunumber = cpu;
- pda->irqcount = -1;
- pda->kernelstack = (unsigned long)stack_thread_info() -
- PDA_STACKOFFSET + THREAD_SIZE;
- pda->active_mm = &init_mm;
- pda->mmu_state = 0;
-
- if (cpu == 0) {
- /* others are initialized in smpboot.c */
- pda->pcurrent = &init_task;
- pda->irqstackptr = boot_cpu_stack;
- pda->irqstackptr += IRQSTACKSIZE - 64;
- } else {
- if (!pda->irqstackptr) {
- pda->irqstackptr = (char *)
- __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
- if (!pda->irqstackptr)
- panic("cannot allocate irqstack for cpu %d",
- cpu);
- pda->irqstackptr += IRQSTACKSIZE - 64;
- }
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+ (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
- if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
- pda->nodenumber = cpu_to_node(cpu);
- }
-}
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
- DEBUG_STKSZ] __page_aligned_bss;
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
+
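
A quick size check of the layout this table implies, as a standalone sketch (not part of the patch; the N_EXCEPTION_STACKS and DEBUG_STACK values are assumptions based on contemporary x86-64 headers):

	#include <stdio.h>

	/* Assumed values; the real ones come from the arch headers. */
	#define EXCEPTION_STKSZ		4096
	#define DEBUG_STKSZ		8192
	#define N_EXCEPTION_STACKS	5	/* assumption for this sketch */
	#define DEBUG_STACK		4	/* assumption for this sketch */

	int main(void)
	{
		/* Same GNU range-initializer idiom as the table above */
		unsigned int sizes[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
			[DEBUG_STACK - 1] = DEBUG_STKSZ
		};
		unsigned int total = 0, v;

		for (v = 0; v < N_EXCEPTION_STACKS; v++)
			total += sizes[v];	/* same walk as cpu_init() */

		/* matches exception_stacks' declared size below: 24576 */
		printf("%u == %u\n", total,
		       (N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ);
		return 0;
	}
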
-extern asmlinkage void ignore_sysret(void);
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+ [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+ __aligned(PAGE_SIZE);
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
*/
DEFINE_PER_CPU(struct orig_ist, orig_ist);
-#else
+#else /* CONFIG_X86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
regs->fs = __KERNEL_PERCPU;
+ regs->gs = __KERNEL_STACK_CANARY;
+
return regs;
}
-#endif
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5: DR4 and DR5 are reserved */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* A lot of state is already set up in PDA init for 64 bit
*/
#ifdef CONFIG_X86_64
+
void __cpuinit cpu_init(void)
{
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
-	char *estacks = NULL;
+	struct orig_ist *orig_ist;
struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
int i;
- /* CPU 0 is initialised in head64.c */
- if (cpu != 0)
- pda_init(cpu);
- else
- estacks = boot_exception_stacks;
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
+#ifdef CONFIG_NUMA
+ if (cpu != 0 && percpu_read(node_number) == 0 &&
+ cpu_to_node(cpu) != NUMA_NO_NODE)
+ percpu_write(node_number, cpu_to_node(cpu));
+#endif
me = current;
* set up and load the per-CPU TSS
*/
if (!orig_ist->ist[0]) {
-		static const unsigned int order[N_EXCEPTION_STACKS] = {
-			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
-		};
+		char *estacks = per_cpu(exception_stacks, cpu);
+
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			if (cpu) {
-				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-				if (!estacks)
-					panic("Cannot allocate exception "
-					      "stack %ld %d\n", v, cpu);
-			}
-			estacks += PAGE_SIZE << order[v];
+			estacks += exception_stack_sizes[v];
orig_ist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
*/
if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
arch_kgdb_ops.correct_hw_break();
- else {
-#endif
- /*
- * Clear all 6 debug registers:
- */
-
- set_debugreg(0UL, 0);
- set_debugreg(0UL, 1);
- set_debugreg(0UL, 2);
- set_debugreg(0UL, 3);
- set_debugreg(0UL, 6);
- set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
- /* If the kgdb is connected no debug regs should be altered. */
- }
+ else
#endif
+		clear_all_debug_regs();
fpu_init();
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif
- /* Clear %gs. */
- asm volatile ("mov %0, %%gs" : : "r" (0));
-
-	/* Clear all 6 debug registers: */
-	set_debugreg(0, 0);
-	set_debugreg(0, 1);
-	set_debugreg(0, 2);
-	set_debugreg(0, 3);
-	set_debugreg(0, 6);
-	set_debugreg(0, 7);
+	clear_all_debug_regs();
/*
* Force FPU initialization: