| author | Tejun Heo <tj@kernel.org> | 2009-08-14 02:00:50 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2009-08-14 02:00:50 -0400 |
| commit | 9a7737691e90d3cce0e5248f91826c50e5aa3fcf (patch) | |
| tree | dbe43cb2d2a19539ca3fb58c52e4cbd68b51d24b /mm/percpu.c | |
| parent | f58dc01ba2ca9fe3ab2ba4ca43d9c8a735cf62d8 (diff) | |
percpu: drop @static_size from first chunk allocators
First chunk allocators assume percpu areas have been linked using one
of the PERCPU_*() macros and depend on the __per_cpu_load symbol defined
by those macros, so there isn't much point in passing in the static area
size explicitly when it can easily be calculated from __per_cpu_start
and __per_cpu_end. Drop @static_size from all percpu first chunk
allocators and helpers.
Signed-off-by: Tejun Heo <tj@kernel.org>
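
The calculation the patch relies on is simple: the PERCPU_*() linker macros bracket the static percpu area with the __per_cpu_start and __per_cpu_end symbols, so each allocator can derive the static size locally instead of taking it as a parameter. A minimal sketch of the idea, where the pcpu_static_size() helper is hypothetical and for illustration only; the patch itself open-codes the subtraction as a local `const size_t static_size` in each allocator:

```c
/* Boundary symbols emitted by the PERCPU_*() linker script macros. */
extern char __per_cpu_start[], __per_cpu_end[];

/*
 * Hypothetical helper, not part of the patch: the allocators changed
 * below simply open-code this subtraction wherever they need the size
 * of the static percpu area.
 */
static inline size_t pcpu_static_size(void)
{
	return __per_cpu_end - __per_cpu_start;
}
```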
Diffstat (limited to 'mm/percpu.c')
| -rw-r--r-- | mm/percpu.c | 29 |
1 file changed, 13 insertions(+), 16 deletions(-)
```diff
diff --git a/mm/percpu.c b/mm/percpu.c
index 7fb40bb1555a..e2ac58a39bb2 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1464,7 +1464,6 @@ static inline size_t pcpu_calc_fc_sizes(size_t static_size,
 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 /**
  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
  *
@@ -1489,9 +1488,9 @@ static inline size_t pcpu_calc_fc_sizes(size_t static_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
-				      ssize_t dyn_size)
+ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
 {
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	size_t size_sum, unit_size, chunk_size;
 	void *base;
 	unsigned int cpu;
@@ -1536,7 +1535,6 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 /**
  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
  * @free_fn: funtion to free percpu page, always called with PAGE_SIZE
@@ -1552,12 +1550,13 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
+ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
 				     pcpu_fc_alloc_fn_t alloc_fn,
 				     pcpu_fc_free_fn_t free_fn,
 				     pcpu_fc_populate_pte_fn_t populate_pte_fn)
 {
 	static struct vm_struct vm;
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	char psize_str[16];
 	int unit_pages;
 	size_t pages_size;
@@ -1641,7 +1640,6 @@ out_free_ar:
 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
 /**
  * pcpu_lpage_build_unit_map - build unit_map for large page remapping
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_sizep: in/out parameter for dynamic size, -1 for auto
  * @unit_sizep: out parameter for unit size
@@ -1661,13 +1659,14 @@ out_free_ar:
  * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and
  * returns the number of units to be allocated.  -errno on failure.
  */
-int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size,
-				     ssize_t *dyn_sizep, size_t *unit_sizep,
-				     size_t lpage_size, int *unit_map,
+int __init pcpu_lpage_build_unit_map(size_t reserved_size, ssize_t *dyn_sizep,
+				     size_t *unit_sizep, size_t lpage_size,
+				     int *unit_map,
 				     pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
 {
 	static int group_map[NR_CPUS] __initdata;
 	static int group_cnt[NR_CPUS] __initdata;
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	int group_cnt_max = 0;
 	size_t size_sum, min_unit_size, alloc_size;
 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
@@ -1819,7 +1818,6 @@ static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
 
 /**
  * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes
  * @unit_size: unit size in bytes
@@ -1850,15 +1848,15 @@ static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
-				      size_t dyn_size, size_t unit_size,
-				      size_t lpage_size, const int *unit_map,
-				      int nr_units,
+ssize_t __init pcpu_lpage_first_chunk(size_t reserved_size, size_t dyn_size,
+				      size_t unit_size, size_t lpage_size,
+				      const int *unit_map, int nr_units,
 				      pcpu_fc_alloc_fn_t alloc_fn,
 				      pcpu_fc_free_fn_t free_fn,
 				      pcpu_fc_map_fn_t map_fn)
 {
 	static struct vm_struct vm;
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	size_t chunk_size = unit_size * nr_units;
 	size_t map_size;
 	unsigned int cpu;
@@ -2037,7 +2035,6 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 void __init setup_per_cpu_areas(void)
 {
-	size_t static_size = __per_cpu_end - __per_cpu_start;
 	ssize_t unit_size;
 	unsigned long delta;
 	unsigned int cpu;
@@ -2046,7 +2043,7 @@ void __init setup_per_cpu_areas(void)
 	 * Always reserve area for module percpu variables.  That's
 	 * what the legacy allocator did.
 	 */
-	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
+	unit_size = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 					   PERCPU_DYNAMIC_RESERVE);
 	if (unit_size < 0)
 		panic("Failed to initialized percpu areas.");
```