unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
+unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
- DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
- EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif
#endif
+void __init parse_setup_data(void)
+{
+ struct setup_data *data;
+ u64 pa_data;
+
+ if (boot_params.hdr.version < 0x0209)
+ return;
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = early_ioremap(pa_data, PAGE_SIZE);
+ switch (data->type) {
+ case SETUP_E820_EXT:
+ parse_e820_ext(data, pa_data);
+ break;
+ default:
+ break;
+ }
+#ifndef CONFIG_DEBUG_BOOT_PARAMS
+ free_early(pa_data, pa_data+sizeof(*data)+data->len);
+#endif
+ pa_data = data->next;
+ early_iounmap(data, PAGE_SIZE);
+ }
+}
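+
+/*
+ * Note on the list walked above: setup_data is a chain of boot-loader
+ * provided blobs, linked by physical address.  A rough sketch of one
+ * entry, as laid out in <asm/bootparam.h> (for reference only):
+ *
+ *	next - 64-bit physical address of the next entry, 0 ends the chain
+ *	type - payload type, e.g. SETUP_E820_EXT
+ *	len  - payload length in bytes
+ *	data - the payload itself
+ *
+ * Each iteration maps a PAGE_SIZE window at pa_data, dispatches on type,
+ * frees the early reservation unless CONFIG_DEBUG_BOOT_PARAMS is set,
+ * and then follows next.
+ */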
+ #ifdef X86_64_NUMA
+
+ /*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ */
+ static void __init setup_node_to_cpumask_map(void)
+ {
+ unsigned int node, num = 0;
+ cpumask_t *map;
+
+ /* setup nr_node_ids if not done yet */
+ if (nr_node_ids == MAX_NUMNODES) {
+ for_each_node_mask(node, node_possible_map)
+ num = node;
+ nr_node_ids = num + 1;
+ }
+
+ /* allocate the map */
+ map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+
+ Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
+ map, nr_node_ids);
+
+ /* node_to_cpumask() will now work */
+ node_to_cpumask_map = map;
+ }
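+
+ /*
+ * Worked example of the sizing above: if node_possible_map contains
+ * only nodes 0 and 2, the loop leaves num == 2, so nr_node_ids becomes
+ * 3 and the bootmem allocation holds three cpumask_t entries indexed
+ * by node number (sparse numbering simply leaves slot 1 unused).
+ */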
+
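+ /*
+ * Record cpu's node.  The pda's nodenumber is updated whenever a pda
+ * exists and the node is valid; the mapping itself goes into the early
+ * x86_cpu_to_node_map while that is still in use, or into the per_cpu
+ * variable once the cpu's per_cpu area has been set up.
+ */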
+ void __cpuinit numa_set_node(int cpu, int node)
+ {
+ int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+ if (cpu_pda(cpu) && node != NUMA_NO_NODE)
+ cpu_pda(cpu)->nodenumber = node;
+
+ if (cpu_to_node_map)
+ cpu_to_node_map[cpu] = node;
+
+ else if (per_cpu_offset(cpu))
+ per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+ else
+ Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
+ }
+
+ void __cpuinit numa_clear_node(int cpu)
+ {
+ numa_set_node(cpu, NUMA_NO_NODE);
+ }
+
+ #ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
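+ /*
+ * Fast versions used when CONFIG_DEBUG_PER_CPU_MAPS is off: no sanity
+ * checks, just set or clear the cpu's bit in its node's cpumask.
+ */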
+ void __cpuinit numa_add_cpu(int cpu)
+ {
+ cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+ }
+
+ void __cpuinit numa_remove_cpu(int cpu)
+ {
+ cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+ }
+
+ #else /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+ /*
+ * --------- debug versions of the numa functions ---------
+ */
+ static void __cpuinit numa_set_cpumask(int cpu, int enable)
+ {
+ int node = cpu_to_node(cpu);
+ cpumask_t *mask;
+ char buf[64];
+
+ if (node_to_cpumask_map == NULL) {
+ printk(KERN_ERR "node_to_cpumask_map NULL\n");
+ dump_stack();
+ return;
+ }
+
+ mask = &node_to_cpumask_map[node];
+ if (enable)
+ cpu_set(cpu, *mask);
+ else
+ cpu_clear(cpu, *mask);
+
+ cpulist_scnprintf(buf, sizeof(buf), *mask);
+ printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+ enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+ }
+
+ void __cpuinit numa_add_cpu(int cpu)
+ {
+ numa_set_cpumask(cpu, 1);
+ }
+
+ void __cpuinit numa_remove_cpu(int cpu)
+ {
+ numa_set_cpumask(cpu, 0);
+ }
+
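+ /*
+ * Debug version of cpu_to_node(): complains (with a backtrace) when it
+ * is called before the per_cpu areas are set up, i.e. while only the
+ * early x86_cpu_to_node_map is valid.
+ */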
+ int cpu_to_node(int cpu)
+ {
+ if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+ printk(KERN_WARNING
+ "cpu_to_node(%d): usage too early!\n", cpu);
+ dump_stack();
+ return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+ }
+ return per_cpu(x86_cpu_to_node_map, cpu);
+ }
+ EXPORT_SYMBOL(cpu_to_node);
+
+ /*
+ * Same as cpu_to_node() but also safe to call before the
+ * per_cpu areas are set up.
+ */
+ int early_cpu_to_node(int cpu)
+ {
+ if (early_per_cpu_ptr(x86_cpu_to_node_map))
+ return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+ if (!per_cpu_offset(cpu)) {
+ printk(KERN_WARNING
+ "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+ dump_stack();
+ return NUMA_NO_NODE;
+ }
+ return per_cpu(x86_cpu_to_node_map, cpu);
+ }
+
+ /*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+ cpumask_t *_node_to_cpumask_ptr(int node)
+ {
+ if (node_to_cpumask_map == NULL) {
+ printk(KERN_WARNING
+ "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+ node);
+ dump_stack();
+ return &cpu_online_map;
+ }
+ BUG_ON(node >= nr_node_ids);
+ return &node_to_cpumask_map[node];
+ }
+ EXPORT_SYMBOL(_node_to_cpumask_ptr);
+
+ /*
+ * Returns a bitmask of CPUs on Node 'node'.
+ */
+ cpumask_t node_to_cpumask(int node)
+ {
+ if (node_to_cpumask_map == NULL) {
+ printk(KERN_WARNING
+ "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
+ dump_stack();
+ return cpu_online_map;
+ }
+ BUG_ON(node >= nr_node_ids);
+ return node_to_cpumask_map[node];
+ }
+ EXPORT_SYMBOL(node_to_cpumask);
+
+ /*
+ * --------- end of debug versions of the numa functions ---------
+ */
+
+ #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+ #endif /* X86_64_NUMA */
static void set_mca_bus(int x) { }
#endif
- #ifdef CONFIG_NUMA
- /*
- * In the golden day, when everything among i386 and x86_64 will be
- * integrated, this will not live here
- */
- void *x86_cpu_to_node_map_early_ptr;
- int x86_cpu_to_node_map_init[NR_CPUS] = {
- [0 ... NR_CPUS-1] = NUMA_NO_NODE
- };
- DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
- #endif
-
-/* Overridden in paravirt.c if CONFIG_PARAVIRT */
-char * __init __attribute__((weak)) memory_setup(void)
-{
- return machine_specific_memory_setup();
-}
+static void probe_roms(void);
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
relocate_initrd();
#endif
- paravirt_post_allocator_init();
-
- dmi_scan_machine();
+ remapped_pgdat_init();
+ sparse_init();
+ zone_sizes_init();
- io_delay_init();
+ paravirt_post_allocator_init();
- #ifdef CONFIG_X86_SMP
- /*
- * setup to use the early static init tables during kernel startup
- * X86_SMP will exclude sub-arches that don't deal well with it.
- */
- x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
- x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
- #ifdef CONFIG_NUMA
- x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
- #endif
- #endif
-
#ifdef CONFIG_X86_GENERICARCH
generic_apic_probe();
#endif