about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAndrew Morton <akpm@linux-foundation.org>2012-07-31 19:42:40 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-07-31 21:42:41 -0400
commitfd07383b6bbc1418b1bdd5f295d13e600222fffa (patch)
treed69a0c77708256a4d8bad40fe7b40b8e87240ec4
parent4f774b912dd1d5752cd33b696509531b0321c3e0 (diff)
mm/memblock.c:memblock_double_array(): cosmetic cleanups
This function is an 80-column eyesore, quite unnecessarily. Clean that up, and use standard comment layout style.

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Greg Pearson <greg.pearson@hp.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- mm/memblock.c | 35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 5cc6731b00c..4d9393c7edc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -222,13 +222,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
222 /* Try to find some space for it. 222 /* Try to find some space for it.
223 * 223 *
224 * WARNING: We assume that either slab_is_available() and we use it or 224 * WARNING: We assume that either slab_is_available() and we use it or
225 * we use MEMBLOCK for allocations. That means that this is unsafe to use 225 * we use MEMBLOCK for allocations. That means that this is unsafe to
226 * when bootmem is currently active (unless bootmem itself is implemented 226 * use when bootmem is currently active (unless bootmem itself is
227 * on top of MEMBLOCK which isn't the case yet) 227 * implemented on top of MEMBLOCK which isn't the case yet)
228 * 228 *
229 * This should however not be an issue for now, as we currently only 229 * This should however not be an issue for now, as we currently only
230 * call into MEMBLOCK while it's still active, or much later when slab is 230 * call into MEMBLOCK while it's still active, or much later when slab
231 * active for memory hotplug operations 231 * is active for memory hotplug operations
232 */ 232 */
233 if (use_slab) { 233 if (use_slab) {
234 new_array = kmalloc(new_size, GFP_KERNEL); 234 new_array = kmalloc(new_size, GFP_KERNEL);
@@ -243,8 +243,8 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
243 new_alloc_size, PAGE_SIZE); 243 new_alloc_size, PAGE_SIZE);
244 if (!addr && new_area_size) 244 if (!addr && new_area_size)
245 addr = memblock_find_in_range(0, 245 addr = memblock_find_in_range(0,
246 min(new_area_start, memblock.current_limit), 246 min(new_area_start, memblock.current_limit),
247 new_alloc_size, PAGE_SIZE); 247 new_alloc_size, PAGE_SIZE);
248 248
249 new_array = addr ? __va(addr) : 0; 249 new_array = addr ? __va(addr) : 0;
250 } 250 }
@@ -254,12 +254,14 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
254 return -1; 254 return -1;
255 } 255 }
256 256
257 memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]", 257 memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
258 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1); 258 memblock_type_name(type), type->max * 2, (u64)addr,
259 (u64)addr + new_size - 1);
259 260
260 /* Found space, we now need to move the array over before 261 /*
261 * we add the reserved region since it may be our reserved 262 * Found space, we now need to move the array over before we add the
262 * array itself that is full. 263 * reserved region since it may be our reserved array itself that is
264 * full.
263 */ 265 */
264 memcpy(new_array, type->regions, old_size); 266 memcpy(new_array, type->regions, old_size);
265 memset(new_array + type->max, 0, old_size); 267 memset(new_array + type->max, 0, old_size);
@@ -267,17 +269,16 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
267 type->regions = new_array; 269 type->regions = new_array;
268 type->max <<= 1; 270 type->max <<= 1;
269 271
270 /* Free old array. We needn't free it if the array is the 272 /* Free old array. We needn't free it if the array is the static one */
271 * static one
272 */
273 if (*in_slab) 273 if (*in_slab)
274 kfree(old_array); 274 kfree(old_array);
275 else if (old_array != memblock_memory_init_regions && 275 else if (old_array != memblock_memory_init_regions &&
276 old_array != memblock_reserved_init_regions) 276 old_array != memblock_reserved_init_regions)
277 memblock_free(__pa(old_array), old_alloc_size); 277 memblock_free(__pa(old_array), old_alloc_size);
278 278
279 /* Reserve the new array if that comes from the memblock. 279 /*
280 * Otherwise, we needn't do it 280 * Reserve the new array if that comes from the memblock. Otherwise, we
281 * needn't do it
281 */ 282 */
282 if (!use_slab) 283 if (!use_slab)
283 BUG_ON(memblock_reserve(addr, new_alloc_size)); 284 BUG_ON(memblock_reserve(addr, new_alloc_size));