author		Tejun Heo <tj@kernel.org>	2009-07-03 19:10:58 -0400
committer	Tejun Heo <tj@kernel.org>	2009-07-03 19:10:58 -0400
commit		788e5abc5441e9046dd91c995c6f1f75bbd144bf (patch)
tree		5ad49583625c81000759307f4928179f9aa9ca41
parent		79ba6ac825fac187894e236c9df1ba5fcbf53fd3 (diff)
percpu: drop @unit_size from embed first chunk allocator
The only extra feature @unit_size provides is creating dead space at
the end of the first chunk, which has no valid use case.  Drop the
parameter.  This also increases consistency with the generalized 4k
allocator.

James Bottomley spotted a missing conversion in the default
setup_per_cpu_areas(), which caused build breakage on all archs that
use it.

[ Impact: drop unused code path ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Ingo Molnar <mingo@elte.hu>
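In short, the prototype change (restated here for reference from the include/linux/percpu.h hunk below):

/* before: a caller could pass @unit_size >= the computed unit size,
 * leaving dead space at the end of the first chunk */
extern ssize_t __init pcpu_embed_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, ssize_t unit_size);

/* after: the unit size is always computed from the area sizes */
extern ssize_t __init pcpu_embed_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size);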
-rw-r--r--	arch/x86/kernel/setup_percpu.c	2
-rw-r--r--	include/linux/percpu.h		2
-rw-r--r--	mm/percpu.c			18
3 files changed, 8 insertions, 14 deletions
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 29a3eef7cf4a..14728206fb52 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -342,7 +342,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
 		return -EINVAL;
 
 	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
-				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
+				      reserve - PERCPU_FIRST_CHUNK_RESERVE);
 }
 
 /*
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index e5000343dd61..83bff053bd1c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -69,7 +69,7 @@ extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 
 extern ssize_t __init pcpu_embed_first_chunk(
 				size_t static_size, size_t reserved_size,
-				ssize_t dyn_size, ssize_t unit_size);
+				ssize_t dyn_size);
 
 /*
  * Use this to get to a cpu's version of the per-cpu object
diff --git a/mm/percpu.c b/mm/percpu.c
index 19dd83b5cbdc..fc6babe6e554 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1207,7 +1207,6 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
1207 * @static_size: the size of static percpu area in bytes 1207 * @static_size: the size of static percpu area in bytes
1208 * @reserved_size: the size of reserved percpu area in bytes 1208 * @reserved_size: the size of reserved percpu area in bytes
1209 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 1209 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1210 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
1211 * 1210 *
1212 * This is a helper to ease setting up embedded first percpu chunk and 1211 * This is a helper to ease setting up embedded first percpu chunk and
1213 * can be called where pcpu_setup_first_chunk() is expected. 1212 * can be called where pcpu_setup_first_chunk() is expected.
@@ -1219,9 +1218,9 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
  * page size.
  *
  * When @dyn_size is positive, dynamic area might be larger than
- * specified to fill page alignment.  Also, when @dyn_size is auto,
- * @dyn_size does not fill the whole first chunk but only what's
- * necessary for page alignment after static and reserved areas.
+ * specified to fill page alignment.  When @dyn_size is auto,
+ * @dyn_size is just big enough to fill page alignment after static
+ * and reserved areas.
  *
  * If the needed size is smaller than the minimum or specified unit
  * size, the leftover is returned to the bootmem allocator.
@@ -1231,7 +1230,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
  * percpu access on success, -errno on failure.
  */
 ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
-				      ssize_t dyn_size, ssize_t unit_size)
+				      ssize_t dyn_size)
 {
 	size_t chunk_size;
 	unsigned int cpu;
@@ -1242,12 +1241,7 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	if (dyn_size != 0)
 		dyn_size = pcpue_size - static_size - reserved_size;
 
-	if (unit_size >= 0) {
-		BUG_ON(unit_size < pcpue_size);
-		pcpue_unit_size = unit_size;
-	} else
-		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
-
+	pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
 	chunk_size = pcpue_unit_size * num_possible_cpus();
 
 	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
@@ -1304,7 +1298,7 @@ void __init setup_per_cpu_areas(void)
 	 * what the legacy allocator did.
 	 */
 	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
-					   PERCPU_DYNAMIC_RESERVE, -1);
+					   PERCPU_DYNAMIC_RESERVE);
 	if (unit_size < 0)
 		panic("Failed to initialized percpu areas.");
 
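For illustration, a minimal standalone sketch of the sizing arithmetic the embed allocator is left with after this patch: the first chunk is sized to the page-aligned sum of the static, reserved, and dynamic areas, and the unit size is that value clamped up to PCPU_MIN_UNIT_SIZE, with no caller override. The embed_unit_size() helper and the concrete PAGE_SIZE, PCPU_MIN_UNIT_SIZE, and area sizes below are assumptions for the example, not taken from this commit.

/* Userspace sketch mirroring the post-patch unit size computation in
 * pcpu_embed_first_chunk().  Constants are assumed, not authoritative. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PCPU_MIN_UNIT_SIZE	(32UL << 10)	/* assumed 32k minimum */

static size_t embed_unit_size(size_t static_size, size_t reserved_size,
			      long dyn_size)
{
	/* -1 (auto) contributes nothing beyond page-alignment padding */
	size_t pcpue_size = PAGE_ALIGN(static_size + reserved_size +
				       (dyn_size >= 0 ? (size_t)dyn_size : 0));

	/* post-patch: always computed, never overridden by a caller */
	return pcpue_size > PCPU_MIN_UNIT_SIZE ? pcpue_size
					       : PCPU_MIN_UNIT_SIZE;
}

int main(void)
{
	/* example area sizes, chosen arbitrarily */
	printf("auto dyn:  unit = %zu\n", embed_unit_size(45056, 8192, -1));
	printf("fixed dyn: unit = %zu\n", embed_unit_size(45056, 8192, 20480));
	return 0;
}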