| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:39:44 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:39:44 -0400 |
| commit | ada3fa15057205b7d3f727bba5cd26b5912e350f (patch) | |
| tree | 60962fc9e4021b92f484d1a58e72cd3906d4f3db /init/main.c | |
| parent | 2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff) | |
| parent | 5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
powerpc64: convert to dynamic percpu allocator
sparc64: use embedding percpu first chunk allocator
percpu: kill lpage first chunk allocator
x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
percpu: update embedding first chunk allocator to handle sparse units
percpu: use group information to allocate vmap areas sparsely
vmalloc: implement pcpu_get_vm_areas()
vmalloc: separate out insert_vmalloc_vm()
percpu: add chunk->base_addr
percpu: add pcpu_unit_offsets[]
percpu: introduce pcpu_alloc_info and pcpu_group_info
percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
percpu: add @align to pcpu_fc_alloc_fn_t
percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
percpu: drop @static_size from first chunk allocators
percpu: generalize first chunk allocator selection
percpu: build first chunk allocators selectively
percpu: rename 4k first chunk allocator to page
percpu: improve boot messages
percpu: fix pcpu_reclaim() locking
...
Fix trivial conflict as noted by Tejun Heo in kernel/sched.c
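For orientation, the "embedding" first chunk allocator named in the shortlog above lays out one contiguous allocation as per-CPU units, each unit starting with a copy of the static per-CPU section followed by reserved and dynamic space; a unit's distance from the original section becomes that CPU's offset. Below is a minimal userspace sketch of that layout idea, with illustrative sizes and hypothetical names (embed_first_chunk, cpu_offset and friends are not the kernel API):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS        4
#define STATIC_SIZE    4096    /* stand-in for __per_cpu_end - __per_cpu_start */
#define RESERVED_SIZE  8192    /* stand-in for the module reserve */
#define DYNAMIC_SIZE   20480   /* stand-in for the dynamic reserve */

static char static_section[STATIC_SIZE];   /* stand-in for the static per-CPU section image */
static intptr_t cpu_offset[NR_CPUS];       /* stand-in for __per_cpu_offset[] */

/*
 * Hypothetical model of the "embed" layout: one contiguous block, one unit
 * per CPU, with a copy of the static section at the start of each unit and
 * reserved + dynamic space behind it.
 */
static void *embed_first_chunk(void)
{
    size_t unit_size = STATIC_SIZE + RESERVED_SIZE + DYNAMIC_SIZE;
    char *base = malloc(unit_size * NR_CPUS);

    if (!base)
        return NULL;

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        char *unit = base + (size_t)cpu * unit_size;

        memcpy(unit, static_section, STATIC_SIZE);
        /* the offset maps a static per-CPU address to this CPU's copy */
        cpu_offset[cpu] = (intptr_t)unit - (intptr_t)static_section;
    }
    return base;
}

int main(void)
{
    void *chunk = embed_first_chunk();

    if (!chunk)
        return 1;
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d offset = %ld\n", cpu, (long)cpu_offset[cpu]);
    free(chunk);
    return 0;
}
```

In the kernel the unit sizes come from the linker-provided static section plus reserve constants, the allocation is NUMA-group aware, and the "page" allocator referenced in the shortlog maps individual pages instead; none of that is modeled here.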
Diffstat (limited to 'init/main.c')
| -rw-r--r-- | init/main.c | 24 |

1 file changed, 0 insertions, 24 deletions
diff --git a/init/main.c b/init/main.c
index b34fd8e5edef..63904bb6ae37 100644
--- a/init/main.c
+++ b/init/main.c
@@ -353,7 +353,6 @@ static void __init smp_init(void)
 #define smp_init() do { } while (0)
 #endif
 
-static inline void setup_per_cpu_areas(void) { }
 static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
@@ -374,29 +373,6 @@ static void __init setup_nr_cpu_ids(void)
         nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
 }
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init setup_per_cpu_areas(void)
-{
-        unsigned long size, i;
-        char *ptr;
-        unsigned long nr_possible_cpus = num_possible_cpus();
-
-        /* Copy section for each CPU (we discard the original) */
-        size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-        ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-        for_each_possible_cpu(i) {
-                __per_cpu_offset[i] = ptr - __per_cpu_start;
-                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-                ptr += size;
-        }
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
-
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
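The block removed above is the generic legacy path: copy the static per-CPU section once per possible CPU out of bootmem and record each copy's distance from __per_cpu_start in __per_cpu_offset[]; every per-CPU access then adds that offset to the static symbol's address. A minimal userspace sketch of that offset translation, using hypothetical names (cpu_ptr, per_cpu_copies) in place of the kernel's per_cpu()/__per_cpu_offset machinery:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the static per-CPU section: a single "per-CPU" counter. */
struct pcpu_section {
    long counter;
};

static struct pcpu_section section_image;            /* the original copy, unused afterwards */
static struct pcpu_section per_cpu_copies[NR_CPUS];  /* the per-CPU copies made at "boot" */
static intptr_t per_cpu_offset[NR_CPUS];             /* stand-in for __per_cpu_offset[] */

/* Mimics the removed setup_per_cpu_areas(): copy the section, record offsets. */
static void setup_areas(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        memcpy(&per_cpu_copies[cpu], &section_image, sizeof(section_image));
        per_cpu_offset[cpu] =
            (intptr_t)&per_cpu_copies[cpu] - (intptr_t)&section_image;
    }
}

/* Hypothetical per_cpu_ptr-style helper: static address plus that CPU's offset. */
static void *cpu_ptr(void *static_addr, int cpu)
{
    return (void *)((intptr_t)static_addr + per_cpu_offset[cpu]);
}

int main(void)
{
    setup_areas();

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        struct pcpu_section *s = cpu_ptr(&section_image, cpu);
        s->counter = cpu * 10;    /* each CPU writes only its own copy */
    }
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d counter = %ld\n", cpu,
               ((struct pcpu_section *)cpu_ptr(&section_image, cpu))->counter);
    return 0;
}
```

After this merge the offsets are populated by the dynamic percpu allocator's first chunk setup rather than by this open-coded bootmem copy, but the address-plus-offset access model itself is unchanged.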