Diffstat (limited to 'arch/ia64/mm/contig.c')
 arch/ia64/mm/contig.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 53 insertions(+), 5 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 351da0a06cd0..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,11 +163,11 @@ per_cpu_init (void)
 	first_time = false;
 
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
 	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_cpu(cpu) {
 		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
 		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
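
The hunk above stops iterating over all NR_CPUS slots and instead walks only the CPUs in cpu_possible_mask, matching the updated comment that the BSP now sets aside one PERCPU_PAGE_SIZE unit per possible CPU rather than NR_CPUS pages. The following standalone userspace sketch (not part of the patch) illustrates the sizing difference; the values of NR_CPUS, PERCPU_PAGE_SIZE, and the possible-CPU count are illustrative assumptions, not taken from any real ia64 config:

	/* sketch: old vs. new bootmem sizing, with assumed constants */
	#include <stdio.h>

	#define NR_CPUS			4096		/* assumed CONFIG_NR_CPUS */
	#define PERCPU_PAGE_SIZE	(64 * 1024)	/* assumed 64K percpu page */

	int main(void)
	{
		unsigned long possible = 8;	/* assumed CPUs that can ever come online */

		/* before: one unit per configured CPU slot */
		printf("old: %lu bytes\n", (unsigned long)NR_CPUS * PERCPU_PAGE_SIZE);
		/* after: one unit per possible CPU only */
		printf("new: %lu bytes\n", possible * PERCPU_PAGE_SIZE);
		return 0;
	}

With these assumed numbers, a kernel built with a large CONFIG_NR_CPUS but booted on a small machine would reserve 256 MB under the old scheme but only 512 KB under the new one, since it pays only for CPUs that can ever come online.
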
@@ -196,9 +196,57 @@ skip:
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size = static_size;
+	ai->reserved_size = reserved_size;
+	ai->dyn_size = dyn_size;
+	ai->unit_size = PERCPU_PAGE_SIZE;
+	ai->atom_size = PAGE_SIZE;
+	ai->alloc_size = PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
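
For reference, the unit geometry that setup_per_cpu_areas() hands to pcpu_setup_first_chunk() is one PERCPU_PAGE_SIZE unit per CPU, carved into a static region (the .data..percpu image), a reserved region for percpu allocations by modules, and whatever dynamic space remains. The standalone userspace sketch below (not kernel code) walks through that arithmetic; PERCPU_PAGE_SIZE, PERCPU_MODULE_RESERVE, and the static size are assumed stand-ins, since the real values come from the ia64 config and the linker symbols __per_cpu_start/__per_cpu_end:

	/* sketch: how one percpu unit is split, with assumed constants */
	#include <stdio.h>

	#define PERCPU_PAGE_SIZE	(64 * 1024)	/* assumed unit size */
	#define PERCPU_MODULE_RESERVE	(8 * 1024)	/* assumed module reserve */

	int main(void)
	{
		long static_size = 20 * 1024;	/* stand-in for __per_cpu_end - __per_cpu_start */
		long reserved_size = PERCPU_MODULE_RESERVE;
		long dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;

		/* mirrors the overflow check in setup_per_cpu_areas() */
		if (dyn_size < 0) {
			fprintf(stderr, "percpu area overflow\n");
			return 1;
		}
		printf("unit:     %d bytes\n", PERCPU_PAGE_SIZE);
		printf("static:   %ld bytes\n", static_size);
		printf("reserved: %ld bytes\n", reserved_size);
		printf("dynamic:  %ld bytes\n", dyn_size);
		return 0;
	}

The overflow check matters because all three regions must fit in a single fixed-size unit; if the static image plus the module reserve ever exceeded PERCPU_PAGE_SIZE, there would be no room left for dynamic percpu allocations and the boot could not proceed.
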