author      Tejun Heo <tj@kernel.org>   2009-07-03 19:11:00 -0400
committer   Tejun Heo <tj@kernel.org>   2009-07-03 19:11:00 -0400
commit      ce3141a277ff6cc37e51008b8888dc2cb7456ef1 (patch)
tree        8bd43d595d85fa37de5f3a7030580aa56b590028 /arch/sparc
parent      c8a51be4cabb7009db5f865169389242d49c4c60 (diff)
percpu: drop pcpu_chunk->page[]
percpu core doesn't need to track all the allocated pages. It needs to
know whether certain pages are populated and a way to reverse-map an
address to a page when freeing. This patch drops pcpu_chunk->page[] and
uses a populated bitmap plus vmalloc_to_page() lookup instead. Using
vmalloc_to_page() exclusively is also possible but complicates first
chunk handling, inflates cache footprint and prevents non-standard
memory allocation for percpu memory.
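As a rough illustration of the new bookkeeping (a minimal sketch only;
my_chunk and my_chunk_page() are made-up names, not the actual
mm/percpu.c symbols), the chunk keeps one bit per unit page and leans
on vmalloc_to_page() for the reverse mapping at free time:

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct my_chunk {
        void            *base_addr;     /* start of the chunk's vmalloc area */
        unsigned long   populated[];    /* one bit per unit page, replaces page[] */
};

/* reverse-map page_idx to its struct page when freeing; no page[] array needed */
static struct page *my_chunk_page(struct my_chunk *chunk, int page_idx)
{
        if (!test_bit(page_idx, chunk->populated))
                return NULL;            /* never populated, nothing to free */
        return vmalloc_to_page(chunk->base_addr +
                               ((size_t)page_idx << PAGE_SHIFT));
}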
pcpu_chunk->page[] was used to track each page's allocation and
allowed asymmetric population, which happens during the failure path;
however, with a single bitmap for all units, this is no longer possible.
Bite the bullet and rewrite the (de)populate functions so that things
are done in clearly separated steps and asymmetric population doesn't
happen. This makes the (de)population process much more modular and
will also ease implementing non-standard memory usage in the future
(e.g. large pages).
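A minimal sketch of what "clearly separated steps" means, building on
the my_chunk sketch above (the my_* helpers are hypothetical stand-ins,
not the rewritten mm/percpu.c functions, and bitmap_set() needs
<linux/bitmap.h>): allocate everything first, then map, and only then
flip the bitmap, so the populated state changes all-or-nothing and
failure unwinding stays symmetric:

/* hypothetical step helpers, each covering [page_start, page_end) for all units */
static int my_alloc_pages(struct my_chunk *chunk, int page_start, int page_end);
static void my_free_pages(struct my_chunk *chunk, int page_start, int page_end);
static int my_map_pages(struct my_chunk *chunk, int page_start, int page_end);

static int my_populate(struct my_chunk *chunk, int page_start, int page_end)
{
        int rc;

        /* step 1: allocate backing pages for the whole range, every unit */
        rc = my_alloc_pages(chunk, page_start, page_end);
        if (rc)
                return rc;

        /* step 2: map them into the chunk's vmalloc area */
        rc = my_map_pages(chunk, page_start, page_end);
        if (rc) {
                my_free_pages(chunk, page_start, page_end);     /* unwind step 1 */
                return rc;
        }

        /*
         * step 3: only now mark the range populated, so the bitmap never
         * reflects a half-populated (asymmetric) state
         */
        bitmap_set(chunk->populated, page_start, page_end - page_start);
        return 0;
}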
This makes the @get_page_fn parameter to pcpu_setup_first_chunk()
unnecessary. The parameter is dropped and all first chunk helpers are
updated accordingly. Please note that, despite the volume, most changes
to the first chunk helpers are symbol renames of variables that no
longer need to be referenced outside their helper.
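For the sparc64 caller below, the change is simply that the call loses
its first argument; schematically (taken from the hunk that follows):

/* before */
pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
                                        PERCPU_MODULE_RESERVE, dyn_size,
                                        PCPU_CHUNK_SIZE, vm.addr);

/* after: @get_page_fn is gone, the core reverse-maps pages itself */
pcpu_unit_size = pcpu_setup_first_chunk(static_size,
                                        PERCPU_MODULE_RESERVE, dyn_size,
                                        PCPU_CHUNK_SIZE, vm.addr);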
This change reduces memory usage and cache footprint of pcpu_chunk.
Now only #unit_pages bits are necessary per chunk.
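As a rough worked example with the sparc64 numbers below (assuming its
default 8K pages): a 4MB unit gives unit_pages = 4MB / 8KB = 512, so
the populated bitmap costs 512 bits = 64 bytes per chunk, whereas the
old page[] scheme kept a struct page pointer for every page of every
unit, roughly 512 * 8 bytes = 4KB per possible CPU.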
[ Impact: reduced memory usage and cache footprint for bookkeeping ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: David Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/kernel/smp_64.c | 42
1 file changed, 15 insertions(+), 27 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index ccad7b20ae75..f2f22ee97a7a 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1415,19 +1415,6 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 #endif
 }
 
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
-
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
-{
-	size_t off = (size_t)pageno << PAGE_SHIFT;
-
-	if (off >= pcpur_size)
-		return NULL;
-
-	return virt_to_page(pcpur_ptrs[cpu] + off);
-}
-
 #define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
 
 static void __init pcpu_map_range(unsigned long start, unsigned long end,
@@ -1491,25 +1478,26 @@ void __init setup_per_cpu_areas(void)
 	size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
 	static struct vm_struct vm;
 	unsigned long delta, cpu;
-	size_t pcpu_unit_size;
+	size_t size_sum, pcpu_unit_size;
 	size_t ptrs_size;
+	void **ptrs;
 
-	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-			       PERCPU_DYNAMIC_RESERVE);
-	dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+	size_sum = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+			     PERCPU_DYNAMIC_RESERVE);
+	dyn_size = size_sum - static_size - PERCPU_MODULE_RESERVE;
 
 
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
-	pcpur_ptrs = alloc_bootmem(ptrs_size);
+	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(ptrs[0]));
+	ptrs = alloc_bootmem(ptrs_size);
 
 	for_each_possible_cpu(cpu) {
-		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
-						     PCPU_CHUNK_SIZE);
+		ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
					       PCPU_CHUNK_SIZE);
 
-		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-			     PCPU_CHUNK_SIZE - pcpur_size);
+		free_bootmem(__pa(ptrs[cpu] + size_sum),
+			     PCPU_CHUNK_SIZE - size_sum);
 
-		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+		memcpy(ptrs[cpu], __per_cpu_load, static_size);
 	}
 
 	/* allocate address and map */
@@ -1523,14 +1511,14 @@ void __init setup_per_cpu_areas(void)
 
 		start += cpu * PCPU_CHUNK_SIZE;
 		end = start + PCPU_CHUNK_SIZE;
-		pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+		pcpu_map_range(start, end, virt_to_page(ptrs[cpu]));
 	}
 
-	pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+	pcpu_unit_size = pcpu_setup_first_chunk(static_size,
 						PERCPU_MODULE_RESERVE, dyn_size,
 						PCPU_CHUNK_SIZE, vm.addr);
 
-	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+	free_bootmem(__pa(ptrs), ptrs_size);
 
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {