Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--	arch/x86/kernel/setup_percpu.c	689
1 file changed, 369 insertions(+), 320 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 01161077a49c..c29f301d3885 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -7,402 +7,451 @@
 #include <linux/crash_dump.h>
 #include <linux/smp.h>
 #include <linux/topology.h>
+#include <linux/pfn.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/proto.h>
+#include <asm/cpumask.h>
+#include <asm/cpu.h>
+#include <asm/stackprotector.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-unsigned int num_processors;
-unsigned disabled_cpus __cpuinitdata;
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid = -1U;
-EXPORT_SYMBOL(boot_cpu_physical_apicid);
-unsigned int max_physical_apicid;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
 #endif
 
-/* map cpu index to physical APIC ID */
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA 1
-
-/* map cpu index to node index */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
-/* which logical CPUs are on which nodes */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-/* setup node_to_cpumask_map */
-static void __init setup_node_to_cpumask_map(void);
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
 
+#ifdef CONFIG_X86_64
+#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
 #else
-static inline void setup_node_to_cpumask_map(void) { }
+#define BOOT_PERCPU_OFFSET 0
 #endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-        int cpu;
-
-        for_each_possible_cpu(cpu) {
-                per_cpu(x86_cpu_to_apicid, cpu) =
-                                early_per_cpu_map(x86_cpu_to_apicid, cpu);
-                per_cpu(x86_bios_cpu_apicid, cpu) =
-                                early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-                per_cpu(x86_cpu_to_node_map, cpu) =
-                                early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
-        }
+DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-        /* indicate the early static arrays will soon be gone */
-        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
-#endif
-}
-
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+};
 EXPORT_SYMBOL(__per_cpu_offset);
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
 
-#else /* CONFIG_SMP && CONFIG_X86_64 */
-
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
+/**
+ * pcpu_need_numa - determine percpu allocation needs to consider NUMA
+ *
+ * If NUMA is not configured or there is only one NUMA node available,
+ * there is no reason to consider NUMA.  This function determines
+ * whether percpu allocation should consider NUMA or not.
+ *
+ * RETURNS:
+ * true if NUMA should be considered; otherwise, false.
  */
-static void __init setup_cpu_pda_map(void)
+static bool __init pcpu_need_numa(void)
 {
-        char *pda;
-        struct x8664_pda **new_cpu_pda;
-        unsigned long size;
-        int cpu;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+        pg_data_t *last = NULL;
+        unsigned int cpu;
 
-        size = roundup(sizeof(struct x8664_pda), cache_line_size());
+        for_each_possible_cpu(cpu) {
+                int node = early_cpu_to_node(cpu);
 
-        /* allocate cpu_pda array and pointer table */
-        {
-                unsigned long tsize = nr_cpu_ids * sizeof(void *);
-                unsigned long asize = size * (nr_cpu_ids - 1);
+                if (node_online(node) && NODE_DATA(node) &&
+                    last && last != NODE_DATA(node))
+                        return true;
 
-                tsize = roundup(tsize, cache_line_size());
-                new_cpu_pda = alloc_bootmem(tsize + asize);
-                pda = (char *)new_cpu_pda + tsize;
+                last = NODE_DATA(node);
         }
+#endif
+        return false;
+}
 
-        /* initialize pointer table to static pda's */
-        for_each_possible_cpu(cpu) {
-                if (cpu == 0) {
-                        /* leave boot cpu pda in place */
-                        new_cpu_pda[0] = cpu_pda(0);
-                        continue;
-                }
-                new_cpu_pda[cpu] = (struct x8664_pda *)pda;
-                new_cpu_pda[cpu]->in_bootmem = 1;
-                pda += size;
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+                                        unsigned long align)
+{
+        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+        int node = early_cpu_to_node(cpu);
+        void *ptr;
+
+        if (!node_online(node) || !NODE_DATA(node)) {
+                ptr = __alloc_bootmem_nopanic(size, align, goal);
+                pr_info("cpu %d has no node %d or node-local memory\n",
+                        cpu, node);
+                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+                         cpu, size, __pa(ptr));
+        } else {
+                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+                                                   size, align, goal);
+                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+                         "%016lx\n", cpu, size, node, __pa(ptr));
         }
-
-        /* point to new pointer table */
-        _cpu_pda = new_cpu_pda;
+        return ptr;
+#else
+        return __alloc_bootmem_nopanic(size, align, goal);
+#endif
 }
 
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_64
+/*
+ * Remap allocator
+ *
+ * This allocator uses PMD page as unit.  A PMD page is allocated for
+ * each cpu and each is remapped into vmalloc area using PMD mapping.
+ * As PMD page is quite large, only part of it is used for the first
+ * chunk.  Unused part is returned to the bootmem allocator.
+ *
+ * So, the PMD pages are mapped twice - once to the physical mapping
+ * and to the vmalloc area for the first percpu chunk.  The double
+ * mapping does add one more PMD TLB entry pressure but still is much
+ * better than only using 4k mappings while still being NUMA friendly.
+ */
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
 
-/* correctly size the local cpu masks */
-static void __init setup_cpu_local_masks(void)
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
 {
-        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-        alloc_bootmem_cpumask_var(&cpu_callin_mask);
-        alloc_bootmem_cpumask_var(&cpu_callout_mask);
-        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
+        size_t off = (size_t)pageno << PAGE_SHIFT;
 
-#else /* CONFIG_X86_32 */
+        if (off >= pcpur_size)
+                return NULL;
 
-static inline void setup_cpu_local_masks(void)
-{
+        return virt_to_page(pcpur_ptrs[cpu] + off);
 }
 
-#endif /* CONFIG_X86_32 */
-
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
-void __init setup_per_cpu_areas(void)
+static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
-        ssize_t size, old_size;
-        char *ptr;
-        int cpu;
-        unsigned long align = 1;
-
-        /* Setup cpu_pda map */
-        setup_cpu_pda_map();
+        static struct vm_struct vm;
+        pg_data_t *last;
+        size_t ptrs_size;
+        unsigned int cpu;
+        ssize_t ret;
+
+        /*
+         * If large page isn't supported, there's no benefit in doing
+         * this.  Also, on non-NUMA, embedding is better.
+         */
+        if (!cpu_has_pse || pcpu_need_numa())
+                return -EINVAL;
+
+        last = NULL;
+        for_each_possible_cpu(cpu) {
+                int node = early_cpu_to_node(cpu);
 
-        /* Copy section for each CPU (we discard the original) */
-        old_size = PERCPU_ENOUGH_ROOM;
-        align = max_t(unsigned long, PAGE_SIZE, align);
-        size = roundup(old_size, align);
-
-        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
-                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
+                if (node_online(node) && NODE_DATA(node) &&
+                    last && last != NODE_DATA(node))
+                        goto proceed;
+
+                last = NODE_DATA(node);
+        }
+        return -EINVAL;
+
+proceed:
+        /*
+         * Currently supports only single page.  Supporting multiple
+         * pages won't be too difficult if it ever becomes necessary.
+         */
+        pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+        if (pcpur_size > PMD_SIZE) {
+                pr_warning("PERCPU: static data is larger than large page, "
+                           "can't use large page\n");
+                return -EINVAL;
+        }
 
-        pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
+        /* allocate pointer array and alloc large pages */
+        ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+        pcpur_ptrs = alloc_bootmem(ptrs_size);
 
         for_each_possible_cpu(cpu) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-                ptr = __alloc_bootmem(size, align,
-                                 __pa(MAX_DMA_ADDRESS));
-#else
-                int node = early_cpu_to_node(cpu);
-                if (!node_online(node) || !NODE_DATA(node)) {
-                        ptr = __alloc_bootmem(size, align,
-                                         __pa(MAX_DMA_ADDRESS));
-                        pr_info("cpu %d has no node %d or node-local memory\n",
-                                cpu, node);
-                        pr_debug("per cpu data for cpu%d at %016lx\n",
-                                 cpu, __pa(ptr));
-                } else {
-                        ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
-                                                        __pa(MAX_DMA_ADDRESS));
-                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
-                                cpu, node, __pa(ptr));
-                }
-#endif
-                per_cpu_offset(cpu) = ptr - __per_cpu_start;
-                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+                pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
+                if (!pcpur_ptrs[cpu])
+                        goto enomem;
+
+                /*
+                 * Only use pcpur_size bytes and give back the rest.
+                 *
+                 * Ingo: The 2MB up-rounding bootmem is needed to make
+                 * sure the partial 2MB page is still fully RAM - it's
+                 * not well-specified to have a PAT-incompatible area
+                 * (unmapped RAM, device memory, etc.) in that hole.
+                 */
+                free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+                             PMD_SIZE - pcpur_size);
+
+                memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
         }
 
-        /* Setup percpu data maps */
-        setup_per_cpu_maps();
+        /* allocate address and map */
+        vm.flags = VM_ALLOC;
+        vm.size = num_possible_cpus() * PMD_SIZE;
+        vm_area_register_early(&vm, PMD_SIZE);
 
-        /* Setup node to cpumask map */
-        setup_node_to_cpumask_map();
+        for_each_possible_cpu(cpu) {
+                pmd_t *pmd;
 
-        /* Setup cpu initialized, callin, callout masks */
-        setup_cpu_local_masks();
-}
+                pmd = populate_extra_pmd((unsigned long)vm.addr
+                                         + cpu * PMD_SIZE);
+                set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
+                                     PAGE_KERNEL_LARGE));
+        }
 
+        /* we're ready, commit */
+        pr_info("PERCPU: Remapped at %p with large pages, static data "
+                "%zu bytes\n", vm.addr, static_size);
+
+        ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
+                                     pcpur_size - static_size, vm.addr, NULL);
+        goto out_free_ar;
+
+enomem:
+        for_each_possible_cpu(cpu)
+                if (pcpur_ptrs[cpu])
+                        free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
+        ret = -ENOMEM;
+out_free_ar:
+        free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+        return ret;
+}
+#else
+static ssize_t __init setup_pcpu_remap(size_t static_size)
+{
+        return -EINVAL;
+}
 #endif
 
-#ifdef X86_64_NUMA
-
 /*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
+ * Embedding allocator
  *
- * Note: node_to_cpumask() is not valid until after this is done.
+ * The first chunk is sized to just contain the static area plus
+ * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using
+ * bootmem allocator and used as-is without being mapped into vmalloc
+ * area.  This enables the first chunk to piggy back on the linear
+ * physical PMD mapping and doesn't add any additional pressure to
+ * TLB.
  */
-static void __init setup_node_to_cpumask_map(void)
-{
-        unsigned int node, num = 0;
-        cpumask_t *map;
-
-        /* setup nr_node_ids if not done yet */
-        if (nr_node_ids == MAX_NUMNODES) {
-                for_each_node_mask(node, node_possible_map)
-                        num = node;
-                nr_node_ids = num + 1;
-        }
-
-        /* allocate the map */
-        map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-
-        pr_debug("Node to cpumask map at %p for %d nodes\n",
-                 map, nr_node_ids);
-
-        /* node_to_cpumask() will now work */
-        node_to_cpumask_map = map;
-}
+static void *pcpue_ptr __initdata;
+static size_t pcpue_unit_size __initdata;
 
-void __cpuinit numa_set_node(int cpu, int node)
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
 {
-        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-        if (cpu_pda(cpu) && node != NUMA_NO_NODE)
-                cpu_pda(cpu)->nodenumber = node;
-
-        if (cpu_to_node_map)
-                cpu_to_node_map[cpu] = node;
-
-        else if (per_cpu_offset(cpu))
-                per_cpu(x86_cpu_to_node_map, cpu) = node;
-
-        else
-                pr_debug("Setting node for non-present cpu %d\n", cpu);
+        return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
+                            + ((size_t)pageno << PAGE_SHIFT));
 }
 
-void __cpuinit numa_clear_node(int cpu)
+static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
-        numa_set_node(cpu, NUMA_NO_NODE);
+        unsigned int cpu;
+
+        /*
+         * If large page isn't supported, there's no benefit in doing
+         * this.  Also, embedding allocation doesn't play well with
+         * NUMA.
+         */
+        if (!cpu_has_pse || pcpu_need_numa())
+                return -EINVAL;
+
+        /* allocate and copy */
+        pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+        pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
+        pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
+                                       PAGE_SIZE);
+        if (!pcpue_ptr)
+                return -ENOMEM;
+
+        for_each_possible_cpu(cpu)
+                memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
+                       static_size);
+
+        /* we're ready, commit */
+        pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+                pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+        return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+                                      pcpue_unit_size,
+                                      pcpue_unit_size - static_size, pcpue_ptr,
+                                      NULL);
 }
 
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+/*
+ * 4k page allocator
+ *
+ * This is the basic allocator.  Static percpu area is allocated
+ * page-by-page and most of initialization is done by the generic
+ * setup function.
+ */
+static struct page **pcpu4k_pages __initdata;
+static int pcpu4k_nr_static_pages __initdata;
 
-void __cpuinit numa_add_cpu(int cpu)
+static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
 {
-        cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+        if (pageno < pcpu4k_nr_static_pages)
+                return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
+        return NULL;
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+static void __init pcpu4k_populate_pte(unsigned long addr)
 {
-        cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+        populate_extra_pte(addr);
 }
 
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static ssize_t __init setup_pcpu_4k(size_t static_size)
 {
-        int node = cpu_to_node(cpu);
-        cpumask_t *mask;
-        char buf[64];
-
-        if (node_to_cpumask_map == NULL) {
-                printk(KERN_ERR "node_to_cpumask_map NULL\n");
-                dump_stack();
-                return;
-        }
-
-        mask = &node_to_cpumask_map[node];
-        if (enable)
-                cpu_set(cpu, *mask);
-        else
-                cpu_clear(cpu, *mask);
-
-        cpulist_scnprintf(buf, sizeof(buf), mask);
-        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
-}
+        size_t pages_size;
+        unsigned int cpu;
+        int i, j;
+        ssize_t ret;
+
+        pcpu4k_nr_static_pages = PFN_UP(static_size);
+
+        /* unaligned allocations can't be freed, round up to page size */
+        pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+                               * sizeof(pcpu4k_pages[0]));
+        pcpu4k_pages = alloc_bootmem(pages_size);
+
+        /* allocate and copy */
+        j = 0;
+        for_each_possible_cpu(cpu)
+                for (i = 0; i < pcpu4k_nr_static_pages; i++) {
+                        void *ptr;
+
+                        ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
+                        if (!ptr)
+                                goto enomem;
+
+                        memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+                        pcpu4k_pages[j++] = virt_to_page(ptr);
+                }
 
-void __cpuinit numa_add_cpu(int cpu)
-{
-        numa_set_cpumask(cpu, 1);
+        /* we're ready, commit */
+        pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
+                pcpu4k_nr_static_pages, static_size);
+
+        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
+                                     pcpu4k_populate_pte);
+        goto out_free_ar;
+
+enomem:
+        while (--j >= 0)
+                free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
+        ret = -ENOMEM;
+out_free_ar:
+        free_bootmem(__pa(pcpu4k_pages), pages_size);
+        return ret;
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+static inline void setup_percpu_segment(int cpu)
 {
-        numa_set_cpumask(cpu, 0);
-}
+#ifdef CONFIG_X86_32
+        struct desc_struct gdt;
 
-int cpu_to_node(int cpu)
-{
-        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-                printk(KERN_WARNING
-                        "cpu_to_node(%d): usage too early!\n", cpu);
-                dump_stack();
-                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-        }
-        return per_cpu(x86_cpu_to_node_map, cpu);
+        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+                        0x2 | DESCTYPE_S, 0x8);
+        gdt.s = 1;
+        write_gdt_entry(get_cpu_gdt_table(cpu),
+                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+#endif
 }
-EXPORT_SYMBOL(cpu_to_node);
 
 /*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
  */
-int early_cpu_to_node(int cpu)
+void __init setup_per_cpu_areas(void)
 {
-        if (early_per_cpu_ptr(x86_cpu_to_node_map))
-                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-        if (!per_cpu_offset(cpu)) {
-                printk(KERN_WARNING
-                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-                dump_stack();
-                return NUMA_NO_NODE;
-        }
-        return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
+        size_t static_size = __per_cpu_end - __per_cpu_start;
+        unsigned int cpu;
+        unsigned long delta;
+        size_t pcpu_unit_size;
+        ssize_t ret;
 
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
-        if (node_to_cpumask_map == NULL) {
-                printk(KERN_WARNING
-                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
-                        node);
-                dump_stack();
-                return (const cpumask_t *)&cpu_online_map;
-        }
-        if (node >= nr_node_ids) {
-                printk(KERN_WARNING
-                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-                        node, nr_node_ids);
-                dump_stack();
-                return &cpu_mask_none;
-        }
-        return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
+        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-        if (node_to_cpumask_map == NULL) {
-                printk(KERN_WARNING
-                        "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-                dump_stack();
-                return cpu_online_map;
-        }
-        if (node >= nr_node_ids) {
-                printk(KERN_WARNING
-                        "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-                        node, nr_node_ids);
-                dump_stack();
-                return cpu_mask_none;
+        /*
+         * Allocate percpu area.  If PSE is supported, try to make use
+         * of large page mappings.  Please read comments on top of
+         * each allocator for details.
+         */
+        ret = setup_pcpu_remap(static_size);
+        if (ret < 0)
+                ret = setup_pcpu_embed(static_size);
+        if (ret < 0)
+                ret = setup_pcpu_4k(static_size);
+        if (ret < 0)
+                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
+                      static_size, ret);
+
+        pcpu_unit_size = ret;
+
+        /* alrighty, percpu areas up and running */
+        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+        for_each_possible_cpu(cpu) {
+                per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+                per_cpu(cpu_number, cpu) = cpu;
+                setup_percpu_segment(cpu);
+                setup_stack_canary_segment(cpu);
+                /*
+                 * Copy data used in early init routines from the
+                 * initial arrays to the per cpu data areas.  These
+                 * arrays then become expendable and the *_early_ptr's
+                 * are zeroed indicating that the static arrays are
+                 * gone.
+                 */
+#ifdef CONFIG_X86_LOCAL_APIC
+                per_cpu(x86_cpu_to_apicid, cpu) =
+                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
+                per_cpu(x86_bios_cpu_apicid, cpu) =
+                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#endif
+#ifdef CONFIG_X86_64
+                per_cpu(irq_stack_ptr, cpu) =
+                        per_cpu(irq_stack_union.irq_stack, cpu) +
+                        IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+                per_cpu(x86_cpu_to_node_map, cpu) =
+                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#endif
+                /*
+                 * Up to this point, the boot CPU has been using .data.init
+                 * area.  Reload any changed state for the boot CPU.
+                 */
+                if (cpu == boot_cpu_id)
+                        switch_to_new_gdt(cpu);
         }
-        return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
-/*
- * --------- end of debug versions of the numa functions ---------
- */
 
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+        /* indicate the early static arrays will soon be gone */
+#ifdef CONFIG_X86_LOCAL_APIC
+        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#endif
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
-#endif /* X86_64_NUMA */
+        /* Setup node to cpumask map */
+        setup_node_to_cpumask_map();
 
+        /* Setup cpu initialized, callin, callout masks */
+        setup_cpu_local_masks();
+}
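
For orientation: all three first-chunk allocators above hand their layout to pcpu_setup_first_chunk(), and setup_per_cpu_areas() then commits the same arithmetic for each of them: per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size, where delta rebases the linker's __per_cpu_start onto pcpu_base_addr. Below is a minimal, self-contained user-space sketch of that arithmetic (illustrative only; fake_section, first_chunk, and fake_per_cpu_offset are hypothetical stand-ins for the linker section, the allocator's chunk, and __per_cpu_offset[], not kernel APIs):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdalign.h>

#define NR_CPUS   4
#define UNIT_SIZE 128                /* plays pcpu_unit_size (bytes) */

/* plays the linker section __per_cpu_start..__per_cpu_end */
static int fake_section[8];
#define STATIC_SIZE sizeof(fake_section)

/* plays the first chunk an allocator hands back (embed-style layout) */
static alignas(int) char first_chunk[NR_CPUS * UNIT_SIZE];

/* plays __per_cpu_offset[] / this_cpu_off */
static uintptr_t fake_per_cpu_offset[NR_CPUS];

int main(void)
{
	/* a "static percpu variable" at its link-time address */
	int *var = &fake_section[2];
	*var = 42;

	/* what setup_pcpu_embed() does: one copy of the static area
	 * per cpu, the copies spaced UNIT_SIZE apart */
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		memcpy(first_chunk + cpu * UNIT_SIZE, fake_section,
		       STATIC_SIZE);

	/* what setup_per_cpu_areas() does afterwards: delta rebases
	 * section addresses onto the chunk base, and each cpu adds
	 * one more unit on top */
	uintptr_t delta = (uintptr_t)first_chunk - (uintptr_t)fake_section;
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		fake_per_cpu_offset[cpu] = delta + cpu * UNIT_SIZE;

	/* per_cpu(var, cpu) then reduces to link-time address + offset */
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		int *p = (int *)((uintptr_t)var + fake_per_cpu_offset[cpu]);
		printf("cpu%u sees var == %d\n", cpu, *p);
	}
	return 0;
}

The same per-cpu offset is what the patch caches in this_cpu_off, which is how the segment-register-based accessors (%fs on 32-bit via setup_percpu_segment(), %gs on 64-bit) reach their copy without a table lookup.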