author		Tejun Heo <tj@kernel.org>	2009-02-23 21:57:21 -0500
committer	Tejun Heo <tj@kernel.org>	2009-02-23 21:57:21 -0500
commit		5f5d8405d1c50f5cf7e1dbfe9c9b44e2f015c8fd
tree		28cb5db560124643709e76c9ef33e1de25f79591
parent		8d408b4be37bc49c9086531f2ebe411cf5731746
x86: separate out setup_pcpu_4k() from setup_per_cpu_areas()
Impact: modularize percpu first chunk allocation

x86 is going to have a few different strategies for the first chunk
allocation. Modularize it by separating out the current allocation
mechanism into pcpu_alloc_bootmem() and setup_pcpu_4k().

Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--	arch/x86/kernel/setup_percpu.c	144
1 file changed, 102 insertions, 42 deletions
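In outline, the refactored boot path reads as follows. This is a condensed
sketch for orientation only (variable declarations, bookkeeping, error
handling, and the CONFIG_NEED_MULTIPLE_NODES details are elided); the
authoritative code is the diff below.

	void __init setup_per_cpu_areas(void)
	{
		size_t static_size = __per_cpu_end - __per_cpu_start;
		ssize_t ret;

		/* strategy-specific step: allocate and map the first chunk */
		ret = setup_pcpu_4k(static_size);
		if (ret < 0)
			panic("cannot allocate static percpu area");

		/* on success, ret is the per-cpu unit size */
		pcpu_unit_size = ret;
		/* ... generic offset fixups continue as before ... */
	}

	static ssize_t __init setup_pcpu_4k(size_t static_size)
	{
		/* allocate the static percpu area page-by-page; each page
		 * comes from the NUMA-aware pcpu_alloc_bootmem() */
		for_each_possible_cpu(cpu)
			for (i = 0; i < pcpu4k_nr_static_pages; i++)
				pcpu4k_pages[j++] = virt_to_page(
					pcpu_alloc_bootmem(cpu, PAGE_SIZE,
							   PAGE_SIZE));

		/* hand the page array to the generic first chunk setup */
		return pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
					      0, 0, NULL, pcpu4k_populate_pte);
	}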
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index d928e8887201..4a17c96f4f6c 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -7,6 +7,7 @@
 #include <linux/crash_dump.h>
 #include <linux/smp.h>
 #include <linux/topology.h>
+#include <linux/pfn.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -41,6 +42,52 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
 };
 EXPORT_SYMBOL(__per_cpu_offset);
 
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
+{
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = early_cpu_to_node(cpu);
+	void *ptr;
+
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = __alloc_bootmem_nopanic(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+						   size, align, goal);
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+			 "%016lx\n", cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return __alloc_bootmem_nopanic(size, align, goal);
+#endif
+}
+
+/*
+ * 4k page allocator
+ *
+ * This is the basic allocator.  Static percpu area is allocated
+ * page-by-page and most of initialization is done by the generic
+ * setup function.
+ */
 static struct page **pcpu4k_pages __initdata;
 static int pcpu4k_nr_static_pages __initdata;
 
@@ -56,6 +103,51 @@ static void __init pcpu4k_populate_pte(unsigned long addr)
 	populate_extra_pte(addr);
 }
 
+static ssize_t __init setup_pcpu_4k(size_t static_size)
+{
+	size_t pages_size;
+	unsigned int cpu;
+	int i, j;
+	ssize_t ret;
+
+	pcpu4k_nr_static_pages = PFN_UP(static_size);
+
+	/* unaligned allocations can't be freed, round up to page size */
+	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+			       * sizeof(pcpu4k_pages[0]));
+	pcpu4k_pages = alloc_bootmem(pages_size);
+
+	/* allocate and copy */
+	j = 0;
+	for_each_possible_cpu(cpu)
+		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
+			void *ptr;
+
+			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
+			if (!ptr)
+				goto enomem;
+
+			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+			pcpu4k_pages[j++] = virt_to_page(ptr);
+		}
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
+		pcpu4k_nr_static_pages, static_size);
+
+	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
+				     pcpu4k_populate_pte);
+	goto out_free_ar;
+
+enomem:
+	while (--j >= 0)
+		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
+	ret = -ENOMEM;
+out_free_ar:
+	free_bootmem(__pa(pcpu4k_pages), pages_size);
+	return ret;
+}
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -76,56 +168,24 @@ static inline void setup_percpu_segment(int cpu)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size = __per_cpu_end - __per_cpu_start;
-	unsigned int nr_cpu_pages = DIV_ROUND_UP(size, PAGE_SIZE);
-	static struct page **pages;
-	size_t pages_size;
-	unsigned int cpu, i, j;
+	size_t static_size = __per_cpu_end - __per_cpu_start;
+	unsigned int cpu;
 	unsigned long delta;
 	size_t pcpu_unit_size;
+	ssize_t ret;
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
-	pr_info("PERCPU: Allocating %zd bytes for static per cpu data\n", size);
-
-	pages_size = nr_cpu_pages * num_possible_cpus() * sizeof(pages[0]);
-	pages = alloc_bootmem(pages_size);
-
-	j = 0;
-	for_each_possible_cpu(cpu) {
-		void *ptr;
-
-		for (i = 0; i < nr_cpu_pages; i++) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-			ptr = alloc_bootmem_pages(PAGE_SIZE);
-#else
-			int node = early_cpu_to_node(cpu);
-
-			if (!node_online(node) || !NODE_DATA(node)) {
-				ptr = alloc_bootmem_pages(PAGE_SIZE);
-				pr_info("cpu %d has no node %d or node-local "
-					"memory\n", cpu, node);
-				pr_debug("per cpu data for cpu%d at %016lx\n",
-					 cpu, __pa(ptr));
-			} else {
-				ptr = alloc_bootmem_pages_node(NODE_DATA(node),
-							       PAGE_SIZE);
-				pr_debug("per cpu data for cpu%d on node%d "
-					 "at %016lx\n", cpu, node, __pa(ptr));
-			}
-#endif
-			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
-			pages[j++] = virt_to_page(ptr);
-		}
-	}
 
-	pcpu4k_pages = pages;
-	pcpu4k_nr_static_pages = nr_cpu_pages;
-	pcpu_unit_size = pcpu_setup_first_chunk(pcpu4k_get_page, size, 0, 0,
-						NULL, pcpu4k_populate_pte);
+	/* allocate percpu area */
+	ret = setup_pcpu_4k(static_size);
+	if (ret < 0)
+		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
+		      static_size, ret);
 
-	free_bootmem(__pa(pages), pages_size);
+	pcpu_unit_size = ret;
 
+	/* alrighty, percpu areas up and running */
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
 		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
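Note the return convention established here: setup_pcpu_4k() returns the
per-cpu unit size as an ssize_t on success and a negative errno on failure,
which is what lets setup_per_cpu_areas() stay strategy-agnostic. As a
hypothetical sketch of how a second first-chunk allocator could later slot
in ahead of the 4k one (setup_pcpu_embed() is an assumed name for
illustration; only the ssize_t contract comes from this patch):

	ssize_t ret;

	/* try a hypothetical alternative strategy first (assumed name) */
	ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		/* fall back to the basic page-by-page allocator */
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);
	pcpu_unit_size = ret;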