aboutsummaryrefslogtreecommitdiffstats
path: root/mm/percpu.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2010-06-27 12:49:59 -0400
committerTejun Heo <tj@kernel.org>2010-06-27 12:49:59 -0400
commit4ba6ce250e406b20bcd6f0f3aed6b3d80965e6c2 (patch)
tree3ae4cdd7c53626dca141bd32037a524a2bc3643c /mm/percpu.c
parent9983b6f0cf8263e51bcf4c8a9dc0c1ef175b3c60 (diff)
percpu: make @dyn_size always mean min dyn_size in first chunk init functions
In pcpu_build_alloc_info() and pcpu_embed_first_chunk(), @dyn_size was ssize_t; -1 meant auto-size, 0 forced 0, and positive meant minimum size. There's no use case for forcing 0, and the upcoming early alloc support always requires a non-zero dynamic size. Make @dyn_size always mean minimum dyn_size. While at it, make pcpu_build_alloc_info() static, as it doesn't have any external caller, as suggested by David Rientjes. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: David Rientjes <rientjes@google.com>
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--mm/percpu.c35
1 file changed, 10 insertions(+), 25 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 6470e7710231..c3e7010c6d71 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1013,20 +1013,6 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
1013 return page_to_phys(pcpu_addr_to_page(addr)); 1013 return page_to_phys(pcpu_addr_to_page(addr));
1014} 1014}
1015 1015
1016static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1017 size_t reserved_size,
1018 ssize_t *dyn_sizep)
1019{
1020 size_t size_sum;
1021
1022 size_sum = PFN_ALIGN(static_size + reserved_size +
1023 (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1024 if (*dyn_sizep != 0)
1025 *dyn_sizep = size_sum - static_size - reserved_size;
1026
1027 return size_sum;
1028}
1029
1030/** 1016/**
1031 * pcpu_alloc_alloc_info - allocate percpu allocation info 1017 * pcpu_alloc_alloc_info - allocate percpu allocation info
1032 * @nr_groups: the number of groups 1018 * @nr_groups: the number of groups
@@ -1085,7 +1071,7 @@ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1085/** 1071/**
1086 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 1072 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1087 * @reserved_size: the size of reserved percpu area in bytes 1073 * @reserved_size: the size of reserved percpu area in bytes
1088 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 1074 * @dyn_size: minimum free size for dynamic allocation in bytes
1089 * @atom_size: allocation atom size 1075 * @atom_size: allocation atom size
1090 * @cpu_distance_fn: callback to determine distance between cpus, optional 1076 * @cpu_distance_fn: callback to determine distance between cpus, optional
1091 * 1077 *
@@ -1103,8 +1089,8 @@ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1103 * On success, pointer to the new allocation_info is returned. On 1089 * On success, pointer to the new allocation_info is returned. On
1104 * failure, ERR_PTR value is returned. 1090 * failure, ERR_PTR value is returned.
1105 */ 1091 */
1106struct pcpu_alloc_info * __init pcpu_build_alloc_info( 1092static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1107 size_t reserved_size, ssize_t dyn_size, 1093 size_t reserved_size, size_t dyn_size,
1108 size_t atom_size, 1094 size_t atom_size,
1109 pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1095 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1110{ 1096{
@@ -1123,13 +1109,15 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1123 memset(group_map, 0, sizeof(group_map)); 1109 memset(group_map, 0, sizeof(group_map));
1124 memset(group_cnt, 0, sizeof(group_cnt)); 1110 memset(group_cnt, 0, sizeof(group_cnt));
1125 1111
1112 size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);
1113 dyn_size = size_sum - static_size - reserved_size;
1114
1126 /* 1115 /*
1127 * Determine min_unit_size, alloc_size and max_upa such that 1116 * Determine min_unit_size, alloc_size and max_upa such that
1128 * alloc_size is multiple of atom_size and is the smallest 1117 * alloc_size is multiple of atom_size and is the smallest
1129 * which can accomodate 4k aligned segments which are equal to 1118 * which can accomodate 4k aligned segments which are equal to
1130 * or larger than min_unit_size. 1119 * or larger than min_unit_size.
1131 */ 1120 */
1132 size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1133 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1121 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1134 1122
1135 alloc_size = roundup(min_unit_size, atom_size); 1123 alloc_size = roundup(min_unit_size, atom_size);
@@ -1532,7 +1520,7 @@ early_param("percpu_alloc", percpu_alloc_setup);
1532/** 1520/**
1533 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 1521 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1534 * @reserved_size: the size of reserved percpu area in bytes 1522 * @reserved_size: the size of reserved percpu area in bytes
1535 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 1523 * @dyn_size: minimum free size for dynamic allocation in bytes
1536 * @atom_size: allocation atom size 1524 * @atom_size: allocation atom size
1537 * @cpu_distance_fn: callback to determine distance between cpus, optional 1525 * @cpu_distance_fn: callback to determine distance between cpus, optional
1538 * @alloc_fn: function to allocate percpu page 1526 * @alloc_fn: function to allocate percpu page
@@ -1553,10 +1541,7 @@ early_param("percpu_alloc", percpu_alloc_setup);
1553 * vmalloc space is not orders of magnitude larger than distances 1541 * vmalloc space is not orders of magnitude larger than distances
1554 * between node memory addresses (ie. 32bit NUMA machines). 1542 * between node memory addresses (ie. 32bit NUMA machines).
1555 * 1543 *
1556 * When @dyn_size is positive, dynamic area might be larger than 1544 * @dyn_size specifies the minimum dynamic area size.
1557 * specified to fill page alignment. When @dyn_size is auto,
1558 * @dyn_size is just big enough to fill page alignment after static
1559 * and reserved areas.
1560 * 1545 *
1561 * If the needed size is smaller than the minimum or specified unit 1546 * If the needed size is smaller than the minimum or specified unit
1562 * size, the leftover is returned using @free_fn. 1547 * size, the leftover is returned using @free_fn.
@@ -1564,7 +1549,7 @@ early_param("percpu_alloc", percpu_alloc_setup);
1564 * RETURNS: 1549 * RETURNS:
1565 * 0 on success, -errno on failure. 1550 * 0 on success, -errno on failure.
1566 */ 1551 */
1567int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, 1552int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1568 size_t atom_size, 1553 size_t atom_size,
1569 pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 1554 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1570 pcpu_fc_alloc_fn_t alloc_fn, 1555 pcpu_fc_alloc_fn_t alloc_fn,
@@ -1695,7 +1680,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
1695 1680
1696 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 1681 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1697 1682
1698 ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL); 1683 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1699 if (IS_ERR(ai)) 1684 if (IS_ERR(ai))
1700 return PTR_ERR(ai); 1685 return PTR_ERR(ai);
1701 BUG_ON(ai->nr_groups != 1); 1686 BUG_ON(ai->nr_groups != 1);