about summary refs log tree commit diff stats
path: root/arch/x86/kernel/setup.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--  arch/x86/kernel/setup.c  28
1 files changed, 11 insertions, 17 deletions
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1179aa06cdbf..dc7940955b7a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -10,7 +10,7 @@
 #include <asm/setup.h>
 #include <asm/topology.h>
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
 /*
  * Copy data used in early init routines from the initial arrays to the
  * per cpu data areas.  These arrays then become expendable and the
@@ -21,22 +21,13 @@ static void __init setup_per_cpu_maps(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-		if (per_cpu_offset(cpu)) {
-#endif
-			per_cpu(x86_cpu_to_apicid, cpu) =
-						x86_cpu_to_apicid_init[cpu];
-			per_cpu(x86_bios_cpu_apicid, cpu) =
+		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
+		per_cpu(x86_bios_cpu_apicid, cpu) =
 						x86_bios_cpu_apicid_init[cpu];
 #ifdef CONFIG_NUMA
-			per_cpu(x86_cpu_to_node_map, cpu) =
+		per_cpu(x86_cpu_to_node_map, cpu) =
 						x86_cpu_to_node_map_init[cpu];
 #endif
-#ifdef CONFIG_SMP
-		} else
-			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-									cpu);
-#endif
 	}
 
 	/* indicate the early static arrays will soon be gone */
@@ -72,17 +63,20 @@ void __init setup_per_cpu_areas(void)
 
 	/* Copy section for each CPU (we discard the original) */
 	size = PERCPU_ENOUGH_ROOM;
-
 	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
 			  size);
-	for_each_cpu_mask(i, cpu_possible_map) {
+
+	for_each_possible_cpu(i) {
 		char *ptr;
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 		ptr = alloc_bootmem_pages(size);
 #else
 		int node = early_cpu_to_node(i);
-		if (!node_online(node) || !NODE_DATA(node))
+		if (!node_online(node) || !NODE_DATA(node)) {
 			ptr = alloc_bootmem_pages(size);
+			printk(KERN_INFO
+			       "cpu %d has no node or node-local memory\n", i);
+		}
 		else
 			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
 #endif
@@ -96,7 +90,7 @@ void __init setup_per_cpu_areas(void)
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 	}
 
-	/* setup percpu data maps early */
+	/* Setup percpu data maps */
 	setup_per_cpu_maps();
 }
 