commit 384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c
Merge: a76761b621bcd8336065c4fe3a74f046858bc34c 142d44b0dd6741a64a7bdbe029110e7c1dcf1d23
Author:    Tejun Heo <tj@kernel.org>	2009-08-14 01:41:02 -0400
Committer: Tejun Heo <tj@kernel.org>	2009-08-14 01:45:31 -0400
Tree:      04c93f391a1b65c8bf8d7ba8643c07d26c26590a /mm/percpu.c
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c
Conflicts in core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which replaced many uses of
num_possible_cpus() with nr_cpu_ids.  As the for-next branch has moved
all the first chunk allocators into mm/percpu.c, those changes are
applied to mm/percpu.c instead of the arch code.
Signed-off-by: Tejun Heo <tj@kernel.org>
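The distinction driving these conflicts: num_possible_cpus() is the
population count of cpu_possible_mask, while nr_cpu_ids is one past the
highest possible CPU id, so the two differ whenever the possible map is
sparse. Below is a minimal userspace sketch of that difference; the
bitmap, NR_CPUS value, and helper functions are hypothetical stand-ins
for the kernel's cpu_possible_mask machinery (in the kernel, nr_cpu_ids
is a variable, not a function):

/*
 * Userspace sketch, not kernel code: why an array indexed by CPU id
 * must be sized with nr_cpu_ids rather than num_possible_cpus().
 * The sparse "possible" map below is a made-up example.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 16

/* Hypothetical sparse possible map: CPU ids 0, 1, 4 and 5 exist. */
static const bool possible[NR_CPUS] = {
	[0] = true, [1] = true, [4] = true, [5] = true,
};

static int num_possible_cpus(void)	/* popcount of the mask: 4 */
{
	int n = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		n += possible[cpu];
	return n;
}

static int nr_cpu_ids(void)		/* highest possible id + 1: 6 */
{
	int ids = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (possible[cpu])
			ids = cpu + 1;
	return ids;
}

int main(void)
{
	/*
	 * identity_map[cpu] for cpu == 5 needs 6 slots; sizing the
	 * array with num_possible_cpus() (4) would overflow it on a
	 * machine whose possible map has holes.
	 */
	printf("num_possible_cpus() = %d, nr_cpu_ids = %d\n",
	       num_possible_cpus(), nr_cpu_ids());
	return 0;
}

On a dense possible map the two values coincide, so the substitution
only changes behavior when the map has holes.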
Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index b3d0bcff8c7c..3f9f182f9b44 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1004,7 +1004,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
 	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 	chunk->map[chunk->map_used++] = pcpu_unit_size;
 
-	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
+	chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
 	if (!chunk->vm) {
 		free_pcpu_chunk(chunk);
 		return NULL;
@@ -1325,7 +1325,7 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
 		int *identity_map;
 
 		/* #units == #cpus, identity mapped */
-		identity_map = alloc_bootmem(num_possible_cpus() *
+		identity_map = alloc_bootmem(nr_cpu_ids *
 					     sizeof(identity_map[0]));
 
 		for_each_possible_cpu(cpu)
@@ -1333,7 +1333,7 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
 
 		pcpu_first_unit_cpu = 0;
 		pcpu_last_unit_cpu = pcpu_nr_units - 1;
-		pcpu_nr_units = num_possible_cpus();
+		pcpu_nr_units = nr_cpu_ids;
 		pcpu_unit_map = identity_map;
 	}
 
@@ -1464,7 +1464,7 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
 
 	unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
-	chunk_size = unit_size * num_possible_cpus();
+	chunk_size = unit_size * nr_cpu_ids;
 
 	base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
 				       __pa(MAX_DMA_ADDRESS));
@@ -1475,11 +1475,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	}
 
 	/* return the leftover and copy */
-	for_each_possible_cpu(cpu) {
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 		void *ptr = base + cpu * unit_size;
 
-		free_bootmem(__pa(ptr + size_sum), unit_size - size_sum);
-		memcpy(ptr, __per_cpu_load, static_size);
+		if (cpu_possible(cpu)) {
+			free_bootmem(__pa(ptr + size_sum),
+				     unit_size - size_sum);
+			memcpy(ptr, __per_cpu_load, static_size);
+		} else
+			free_bootmem(__pa(ptr), unit_size);
 	}
 
 	/* we're ready, commit */
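The hunk above is the behavioral core of the resolution: the embed
allocator now lays out one unit per CPU id up to nr_cpu_ids, so ids
that are not actually possible (holes in a sparse map) hand their whole
unit back to bootmem instead of receiving a copy of the static data.
A small userspace sketch of that loop's logic follows; unit_size,
size_sum, and the possible map are made-up illustration values:

/*
 * Userspace sketch of the loop in the hunk above.  Possible CPUs keep
 * [ptr, ptr + size_sum) and free only the tail of their unit;
 * impossible ids free the entire unit.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const bool possible[] = { true, true, false, false, true, true };
	const int nr_cpu_ids = 6;		/* highest possible id + 1 */
	const long unit_size = 64 << 10;	/* hypothetical 64K unit */
	const long size_sum = 40 << 10;		/* static + reserved + dyn */

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		long off = (long)cpu * unit_size;

		if (possible[cpu])
			printf("cpu %d: keep %ld, free tail %ld at offset %ld\n",
			       cpu, size_sum, unit_size - size_sum, off);
		else	/* hole in the possible map: free everything */
			printf("cpu %d: hole, free all %ld at offset %ld\n",
			       cpu, unit_size, off);
	}
	return 0;
}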
@@ -1525,8 +1529,7 @@ ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,
 			       PCPU_MIN_UNIT_SIZE));
 
 	/* unaligned allocations can't be freed, round up to page size */
-	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
-			       sizeof(pages[0]));
+	pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0]));
 	pages = alloc_bootmem(pages_size);
 
 	/* allocate pages */
@@ -1546,7 +1549,7 @@ ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,
 
 	/* allocate vm area, map the pages and copy static data */
 	vm.flags = VM_ALLOC;
-	vm.size = num_possible_cpus() * unit_pages << PAGE_SHIFT;
+	vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT;
 	vm_area_register_early(&vm, PAGE_SIZE);
 
 	for_each_possible_cpu(cpu) {