Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
 arch/x86/kernel/setup_percpu.c | 367
 1 file changed, 339 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index d992e6cff730..c29f301d3885 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -7,6 +7,7 @@
 #include <linux/crash_dump.h>
 #include <linux/smp.h>
 #include <linux/topology.h>
+#include <linux/pfn.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -41,6 +42,321 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
 };
 EXPORT_SYMBOL(__per_cpu_offset);
 
+/**
+ * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
+ *
+ * If NUMA is not configured or there is only one NUMA node available,
+ * there is no reason to consider NUMA.  This function determines
+ * whether percpu allocation should consider NUMA or not.
+ *
+ * RETURNS:
+ * true if NUMA should be considered; otherwise, false.
+ */
+static bool __init pcpu_need_numa(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	pg_data_t *last = NULL;
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		int node = early_cpu_to_node(cpu);
+
+		if (node_online(node) && NODE_DATA(node) &&
+		    last && last != NODE_DATA(node))
+			return true;
+
+		last = NODE_DATA(node);
+	}
+#endif
+	return false;
+}
+
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: allocation size in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
+{
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = early_cpu_to_node(cpu);
+	void *ptr;
+
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = __alloc_bootmem_nopanic(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+						   size, align, goal);
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+			 "%016lx\n", cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return __alloc_bootmem_nopanic(size, align, goal);
+#endif
+}
+
+/*
+ * Remap allocator
+ *
+ * This allocator uses a PMD page as its unit.  A PMD page is allocated
+ * for each cpu and each is remapped into the vmalloc area using a PMD
+ * mapping.  As a PMD page is quite large, only part of it is used for
+ * the first chunk; the unused part is returned to the bootmem allocator.
+ *
+ * So, the PMD pages are mapped twice - once in the physical mapping
+ * and once in the vmalloc area for the first percpu chunk.  The double
+ * mapping adds some PMD TLB entry pressure, but it is still much
+ * better than using only 4k mappings while remaining NUMA friendly.
+ */
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
+
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+{
+	size_t off = (size_t)pageno << PAGE_SHIFT;
+
+	if (off >= pcpur_size)
+		return NULL;
+
+	return virt_to_page(pcpur_ptrs[cpu] + off);
+}
+
+static ssize_t __init setup_pcpu_remap(size_t static_size)
+{
+	static struct vm_struct vm;
+	pg_data_t *last;
+	size_t ptrs_size;
+	unsigned int cpu;
+	ssize_t ret;
+
+	/*
+	 * If large page isn't supported, there's no benefit in doing
+	 * this.  Also, on non-NUMA, embedding is better.
+	 */
+	if (!cpu_has_pse || pcpu_need_numa())
+		return -EINVAL;
+
+	last = NULL;
+	for_each_possible_cpu(cpu) {
+		int node = early_cpu_to_node(cpu);
+
+		if (node_online(node) && NODE_DATA(node) &&
+		    last && last != NODE_DATA(node))
+			goto proceed;
+
+		last = NODE_DATA(node);
+	}
+	return -EINVAL;
+
+proceed:
+	/*
+	 * Currently supports only single page.  Supporting multiple
+	 * pages won't be too difficult if it ever becomes necessary.
+	 */
+	pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+	if (pcpur_size > PMD_SIZE) {
+		pr_warning("PERCPU: static data is larger than large page, "
+			   "can't use large page\n");
+		return -EINVAL;
+	}
+
+	/* allocate pointer array and alloc large pages */
+	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+	pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+	for_each_possible_cpu(cpu) {
+		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
+		if (!pcpur_ptrs[cpu])
+			goto enomem;
+
+		/*
+		 * Only use pcpur_size bytes and give back the rest.
+		 *
+		 * Ingo: The 2MB up-rounding bootmem is needed to make
+		 * sure the partial 2MB page is still fully RAM - it's
+		 * not well-specified to have a PAT-incompatible area
+		 * (unmapped RAM, device memory, etc.) in that hole.
+		 */
+		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+			     PMD_SIZE - pcpur_size);
+
+		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+	}
+
+	/* allocate address and map */
+	vm.flags = VM_ALLOC;
+	vm.size = num_possible_cpus() * PMD_SIZE;
+	vm_area_register_early(&vm, PMD_SIZE);
+
+	for_each_possible_cpu(cpu) {
+		pmd_t *pmd;
+
+		pmd = populate_extra_pmd((unsigned long)vm.addr
+					 + cpu * PMD_SIZE);
+		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
+				     PAGE_KERNEL_LARGE));
+	}
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Remapped at %p with large pages, static data "
+		"%zu bytes\n", vm.addr, static_size);
+
+	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
+				     pcpur_size - static_size, vm.addr, NULL);
+	goto out_free_ar;
+
+enomem:
+	for_each_possible_cpu(cpu)
+		if (pcpur_ptrs[cpu])
+			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
+	ret = -ENOMEM;
+out_free_ar:
+	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+	return ret;
+}
+#else
+static ssize_t __init setup_pcpu_remap(size_t static_size)
+{
+	return -EINVAL;
+}
+#endif
+
+/*
+ * Embedding allocator
+ *
+ * The first chunk is sized to just contain the static area plus
+ * PERCPU_DYNAMIC_RESERVE and is allocated as a contiguous area using
+ * the bootmem allocator and used as-is without being mapped into the
+ * vmalloc area.  This enables the first chunk to piggy back on the
+ * linear physical PMD mapping and doesn't add any additional TLB
+ * pressure.
+ */
+static void *pcpue_ptr __initdata;
+static size_t pcpue_unit_size __initdata;
+
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+{
+	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
+			    + ((size_t)pageno << PAGE_SHIFT));
+}
+
+static ssize_t __init setup_pcpu_embed(size_t static_size)
+{
+	unsigned int cpu;
+
+	/*
+	 * If large page isn't supported, there's no benefit in doing
+	 * this.  Also, embedding allocation doesn't play well with
+	 * NUMA.
+	 */
+	if (!cpu_has_pse || pcpu_need_numa())
+		return -EINVAL;
+
+	/* allocate and copy */
+	pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+	pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
+	pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
+				       PAGE_SIZE);
+	if (!pcpue_ptr)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
+		       static_size);
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+		pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+				      pcpue_unit_size,
+				      pcpue_unit_size - static_size, pcpue_ptr,
+				      NULL);
+}
+
+/*
+ * 4k page allocator
+ *
+ * This is the basic allocator.  The static percpu area is allocated
+ * page-by-page and most of the initialization is done by the generic
+ * setup function.
+ */
+static struct page **pcpu4k_pages __initdata;
+static int pcpu4k_nr_static_pages __initdata;
+
+static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
+{
+	if (pageno < pcpu4k_nr_static_pages)
+		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
+	return NULL;
+}
+
+static void __init pcpu4k_populate_pte(unsigned long addr)
+{
+	populate_extra_pte(addr);
+}
+
+static ssize_t __init setup_pcpu_4k(size_t static_size)
+{
+	size_t pages_size;
+	unsigned int cpu;
+	int i, j;
+	ssize_t ret;
+
+	pcpu4k_nr_static_pages = PFN_UP(static_size);
+
+	/* unaligned allocations can't be freed, round up to page size */
+	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+			       * sizeof(pcpu4k_pages[0]));
+	pcpu4k_pages = alloc_bootmem(pages_size);
+
+	/* allocate and copy */
+	j = 0;
+	for_each_possible_cpu(cpu)
+		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
+			void *ptr;
+
+			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
+			if (!ptr)
+				goto enomem;
+
+			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+			pcpu4k_pages[j++] = virt_to_page(ptr);
+		}
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
+		pcpu4k_nr_static_pages, static_size);
+
+	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
+				     pcpu4k_populate_pte);
+	goto out_free_ar;
+
+enomem:
+	while (--j >= 0)
+		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
+	ret = -ENOMEM;
+out_free_ar:
+	free_bootmem(__pa(pcpu4k_pages), pages_size);
+	return ret;
+}
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
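Editorial aside, not part of the patch: the three allocators added above back the first chunk differently, but all of them hand it to the generic percpu code through the same kind of pageno-to-page lookup. The standalone userspace sketch below models that addressing under assumed names and constants (base, unit_size, populated_size are illustrative): each cpu owns a unit of unit_size bytes inside one contiguous first chunk, page `pageno' of cpu `cpu' starts at base + cpu * unit_size + pageno * PAGE_SIZE, and lookups past the populated part return NULL, much like pcpur_get_page() above.

/*
 * Userspace model of the get_page() addressing used by the first-chunk
 * allocators.  Illustrative only; nothing here is kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL

static char *base;		/* stands in for pcpue_ptr / pcpur_ptrs[] */
static size_t unit_size;	/* bytes reserved per cpu */
static size_t populated_size;	/* bytes actually backed, <= unit_size */

/* return the start of the requested page, or NULL past the populated area */
static void *get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno * PAGE_SIZE;

	if (off >= populated_size)
		return NULL;
	return base + cpu * unit_size + off;
}

int main(void)
{
	unsigned int nr_cpus = 4, cpu;

	unit_size = 8 * PAGE_SIZE;
	populated_size = 5 * PAGE_SIZE;	/* e.g. static area + dynamic reserve */
	base = malloc(nr_cpus * unit_size);
	if (!base)
		return 1;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu%u page0 at offset %zu, page5 %s\n", cpu,
		       (size_t)((char *)get_page(cpu, 0) - base),
		       get_page(cpu, 5) ? "populated" : "NULL");
	free(base);
	return 0;
}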
@@ -61,38 +377,35 @@ static inline void setup_percpu_segment(int cpu)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size;
-	char *ptr;
-	int cpu;
-
-	/* Copy section for each CPU (we discard the original) */
-	size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+	size_t static_size = __per_cpu_end - __per_cpu_start;
+	unsigned int cpu;
+	unsigned long delta;
+	size_t pcpu_unit_size;
+	ssize_t ret;
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
+	/*
+	 * Allocate percpu area.  If PSE is supported, try to make use
+	 * of large page mappings.  Please read comments on top of
+	 * each allocator for details.
+	 */
+	ret = setup_pcpu_remap(static_size);
+	if (ret < 0)
+		ret = setup_pcpu_embed(static_size);
+	if (ret < 0)
+		ret = setup_pcpu_4k(static_size);
+	if (ret < 0)
+		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
+		      static_size, ret);
 
-	for_each_possible_cpu(cpu) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
-#else
-		int node = early_cpu_to_node(cpu);
-		if (!node_online(node) || !NODE_DATA(node)) {
-			ptr = alloc_bootmem_pages(size);
-			pr_info("cpu %d has no node %d or node-local memory\n",
-				cpu, node);
-			pr_debug("per cpu data for cpu%d at %016lx\n",
-				 cpu, __pa(ptr));
-		} else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
-			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
-				 cpu, node, __pa(ptr));
-		}
-#endif
+	pcpu_unit_size = ret;
 
-		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
-		per_cpu_offset(cpu) = ptr - __per_cpu_start;
+	/* alrighty, percpu areas up and running */
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu) {
+		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
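Editorial aside on the hunk above: per_cpu_offset(cpu) is no longer computed per cpu as ptr - __per_cpu_start; instead a single delta (from the static section to the first chunk) is added to cpu * unit_size. The userspace model below walks through that arithmetic; the names mimic the patch but nothing here is kernel API, and the sizes are made up.

/*
 * Userspace model of the per_cpu_offset() arithmetic in the new
 * setup_per_cpu_areas().  Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { nr_cpus = 4 };

int main(void)
{
	/* stand-in for the static .data.percpu section */
	char per_cpu_section[64];
	int *cpu_number = (int *)per_cpu_section;	/* "link-time" address */
	size_t static_size = sizeof(per_cpu_section);

	size_t unit_size = 128;		/* per-cpu unit, >= static_size */
	char *first_chunk = malloc(nr_cpus * unit_size);
	intptr_t offset[nr_cpus];
	unsigned int cpu;

	if (!first_chunk)
		return 1;
	memset(per_cpu_section, 0, static_size);

	/* every allocator copies the static section into each cpu's unit */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		memcpy(first_chunk + cpu * unit_size, per_cpu_section,
		       static_size);

	/* delta + cpu * unit_size, the same shape as in the patch */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		intptr_t delta = (intptr_t)first_chunk -
				 (intptr_t)per_cpu_section;

		offset[cpu] = delta + (intptr_t)(cpu * unit_size);
		/* roughly: per_cpu(cpu_number, cpu) = cpu */
		*(int *)((char *)cpu_number + offset[cpu]) = (int)cpu;
	}

	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu%u sees cpu_number = %d at offset %ld\n", cpu,
		       *(int *)((char *)cpu_number + offset[cpu]),
		       (long)offset[cpu]);

	free(first_chunk);
	return 0;
}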
@@ -125,8 +438,6 @@ void __init setup_per_cpu_areas(void)
 		 */
 		if (cpu == boot_cpu_id)
 			switch_to_new_gdt(cpu);
-
-		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 	}
 
 	/* indicate the early static arrays will soon be gone */
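Closing editorial illustration of the size arithmetic in setup_pcpu_remap() above: the per-cpu unit is the static section plus the dynamic reserve rounded up to a page boundary, each cpu still consumes a whole 2MB PMD page, and the tail beyond the unit is handed back to bootmem. The constants below are assumptions for demonstration only (PERCPU_DYNAMIC_RESERVE in particular is just a placeholder value).

/*
 * Sketch of the PMD rounding and give-back computation.  Standalone
 * userspace code with made-up constants; not kernel code.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PMD_SIZE		(2UL * 1024 * 1024)
#define PERCPU_DYNAMIC_RESERVE	(20UL * 1024)	/* assumed value */

#define PFN_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long static_size = 45000;	/* example .data.percpu size */
	unsigned long pcpur_size =
		PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);

	if (pcpur_size > PMD_SIZE) {
		printf("static data too large for a single PMD page\n");
		return 1;
	}

	printf("unit: %lu bytes, allocated per cpu: %lu bytes, "
	       "returned to bootmem: %lu bytes\n",
	       pcpur_size, PMD_SIZE, PMD_SIZE - pcpur_size);
	return 0;
}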