| author | Tejun Heo <tj@kernel.org> | 2011-07-12 05:15:55 -0400 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2011-07-14 14:47:41 -0400 |
| commit | 784656f9c680d334e7b4cdb6951c5c913e5a26bf | |
| tree | e70487038744f31ebab8ff16e14991f4a6b74222 /mm/memblock.c | |
| parent | ed7b56a799cade11f458cd83e1150af54a66b7e8 | |
memblock: Reimplement memblock_add_region()
memblock_add_region() carefully checked for merge and overlap
conditions while adding a new region, which was complicated and made
it difficult to allow arbitrary overlaps or to add more merge
conditions (e.g. node ID).
This reimplements memblock_add_region() so that insertion is done in
two steps: first, all non-overlapping portions of the new area are
inserted as separate regions, and then memblock_merge_regions() scans
and merges all neighbouring compatible regions.
This makes the addition logic simpler and more versatile, and enables
adding node information to memblock.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-3-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
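For intuition, the two-step scheme described above can be modelled in a few lines of ordinary C. The following is a minimal, self-contained userspace sketch, not the kernel code: the names (`regs`, `insert_at`, `add`), the fixed 16-entry array, and the absence of capacity or error handling are all illustrative.

```c
#include <stdio.h>
#include <string.h>

struct region { unsigned long base, size; };

static struct region regs[16];	/* kept sorted and non-overlapping */
static int cnt;

/* Shift the tail up and place [base, base + size) at index idx. */
static void insert_at(int idx, unsigned long base, unsigned long size)
{
	memmove(&regs[idx + 1], &regs[idx], (cnt - idx) * sizeof(regs[0]));
	regs[idx].base = base;
	regs[idx].size = size;
	cnt++;
}

static void add(unsigned long base, unsigned long size)
{
	unsigned long end = base + size;
	int i;

	/* Step 1: insert only the parts of [base, end) not already covered. */
	for (i = 0; i < cnt && base < end; i++) {
		unsigned long rbase = regs[i].base;
		unsigned long rend = rbase + regs[i].size;

		if (rbase >= end)
			break;		/* everything further is above the new area */
		if (rend <= base)
			continue;	/* this region is entirely below it */
		if (rbase > base)	/* uncovered gap below this region */
			insert_at(i++, base, rbase - base);
		base = rend < end ? rend : end;	/* skip the covered part */
	}
	if (base < end)			/* uncovered tail above all regions */
		insert_at(i, base, end - base);

	/* Step 2: merge exactly adjacent neighbours. */
	i = 0;
	while (i < cnt - 1) {
		if (regs[i].base + regs[i].size == regs[i + 1].base) {
			regs[i].size += regs[i + 1].size;
			memmove(&regs[i + 1], &regs[i + 2],
				(cnt - i - 2) * sizeof(regs[0]));
			cnt--;
		} else {
			i++;
		}
	}
}

int main(void)
{
	add(0x1000, 0x2000);	/* [0x1000, 0x3000) */
	add(0x2000, 0x4000);	/* overlaps the first region */
	for (int i = 0; i < cnt; i++)
		printf("[%#lx, %#lx)\n",
		       regs[i].base, regs[i].base + regs[i].size);
	return 0;		/* prints a single merged region: [0x1000, 0x6000) */
}
```

The overlapping second `add` leaves the existing coverage untouched; only the uncovered tail is inserted as a region of its own, and the merge pass then collapses the two exactly-adjacent entries into `[0x1000, 0x6000)`.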
Diffstat (limited to 'mm/memblock.c')
-rw-r--r-- | mm/memblock.c | 195

1 file changed, 110 insertions(+), 85 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index bd3a3a9591d4..992aa1807473 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -251,117 +251,142 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 	return 0;
 }
 
-static long __init_memblock memblock_add_region(struct memblock_type *type,
-						phys_addr_t base, phys_addr_t size)
+/**
+ * memblock_merge_regions - merge neighboring compatible regions
+ * @type: memblock type to scan
+ *
+ * Scan @type and merge neighboring compatible regions.
+ */
+static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 {
-	phys_addr_t end = base + size;
-	int i, slot = -1;
+	int i = 0;
 
-	/* First try and coalesce this MEMBLOCK with others */
-	for (i = 0; i < type->cnt; i++) {
-		struct memblock_region *rgn = &type->regions[i];
-		phys_addr_t rend = rgn->base + rgn->size;
+	/* cnt never goes below 1 */
+	while (i < type->cnt - 1) {
+		struct memblock_region *this = &type->regions[i];
+		struct memblock_region *next = &type->regions[i + 1];
 
-		/* Exit if there's no possible hits */
-		if (rgn->base > end || rgn->size == 0)
-			break;
-
-		/* Check if we are fully enclosed within an existing
-		 * block
-		 */
-		if (rgn->base <= base && rend >= end)
-			return 0;
+		if (this->base + this->size != next->base) {
+			BUG_ON(this->base + this->size > next->base);
+			i++;
+			continue;
+		}
 
-		/* Check if we overlap or are adjacent with the bottom
-		 * of a block.
-		 */
-		if (base < rgn->base && end >= rgn->base) {
-			/* We extend the bottom of the block down to our
-			 * base
-			 */
-			rgn->base = base;
-			rgn->size = rend - base;
+		this->size += next->size;
+		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+		type->cnt--;
+	}
+}
 
-			/* Return if we have nothing else to allocate
-			 * (fully coalesced)
-			 */
-			if (rend >= end)
-				return 0;
+/**
+ * memblock_insert_region - insert new memblock region
+ * @type: memblock type to insert into
+ * @idx: index for the insertion point
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Insert new memblock region [@base,@base+@size) into @type at @idx.
+ * @type must already have extra room to accomodate the new region.
+ */
+static void __init_memblock memblock_insert_region(struct memblock_type *type,
+						   int idx, phys_addr_t base,
+						   phys_addr_t size)
+{
+	struct memblock_region *rgn = &type->regions[idx];
 
-			/* We continue processing from the end of the
-			 * coalesced block.
-			 */
-			base = rend;
-			size = end - base;
-		}
+	BUG_ON(type->cnt >= type->max);
+	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
+	rgn->base = base;
+	rgn->size = size;
+	type->cnt++;
+}
 
-		/* Now check if we overlap or are adjacent with the
-		 * top of a block
-		 */
-		if (base <= rend && end >= rend) {
-			/* We adjust our base down to enclose the
-			 * original block and destroy it. It will be
-			 * part of our new allocation. Since we've
-			 * freed an entry, we know we won't fail
-			 * to allocate one later, so we won't risk
-			 * losing the original block allocation.
-			 */
-			size += (base - rgn->base);
-			base = rgn->base;
-			memblock_remove_region(type, i--);
-		}
-	}
+/**
+ * memblock_add_region - add new memblock region
+ * @type: memblock type to add new region into
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Add new memblock region [@base,@base+@size) into @type.  The new region
+ * is allowed to overlap with existing ones - overlaps don't affect already
+ * existing regions.  @type is guaranteed to be minimal (all neighbouring
+ * compatible regions are merged) after the addition.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static long __init_memblock memblock_add_region(struct memblock_type *type,
+						phys_addr_t base, phys_addr_t size)
+{
+	bool insert = false;
+	phys_addr_t obase = base, end = base + size;
+	int i, nr_new;
 
-	/* If the array is empty, special case, replace the fake
-	 * filler region and return
-	 */
-	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
+	/* special case for empty array */
+	if (type->regions[0].size == 0) {
+		WARN_ON(type->cnt != 1);
 		type->regions[0].base = base;
 		type->regions[0].size = size;
 		return 0;
 	}
-
-	/* If we are out of space, we fail. It's too late to resize the array
-	 * but then this shouldn't have happened in the first place.
+repeat:
+	/*
+	 * The following is executed twice.  Once with %false @insert and
+	 * then with %true.  The first counts the number of regions needed
+	 * to accomodate the new area.  The second actually inserts them.
 	 */
-	if (WARN_ON(type->cnt >= type->max))
-		return -1;
+	base = obase;
+	nr_new = 0;
+
+	for (i = 0; i < type->cnt; i++) {
+		struct memblock_region *rgn = &type->regions[i];
+		phys_addr_t rbase = rgn->base;
+		phys_addr_t rend = rbase + rgn->size;
 
-	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
-	for (i = type->cnt - 1; i >= 0; i--) {
-		if (base < type->regions[i].base) {
-			type->regions[i+1].base = type->regions[i].base;
-			type->regions[i+1].size = type->regions[i].size;
-		} else {
-			type->regions[i+1].base = base;
-			type->regions[i+1].size = size;
-			slot = i + 1;
+		if (rbase >= end)
 			break;
+		if (rend <= base)
+			continue;
+		/*
+		 * @rgn overlaps.  If it separates the lower part of new
+		 * area, insert that portion.
+		 */
+		if (rbase > base) {
+			nr_new++;
+			if (insert)
+				memblock_insert_region(type, i++, base,
						       rbase - base);
 		}
+		/* area below @rend is dealt with, forget about it */
+		base = min(rend, end);
 	}
-	if (base < type->regions[0].base) {
-		type->regions[0].base = base;
-		type->regions[0].size = size;
-		slot = 0;
+
+	/* insert the remaining portion */
+	if (base < end) {
+		nr_new++;
+		if (insert)
+			memblock_insert_region(type, i, base, end - base);
 	}
-	type->cnt++;
 
-	/* The array is full ? Try to resize it. If that fails, we undo
-	 * our allocation and return an error
+	/*
+	 * If this was the first round, resize array and repeat for actual
+	 * insertions; otherwise, merge and return.
 	 */
-	if (type->cnt == type->max && memblock_double_array(type)) {
-		BUG_ON(slot < 0);
-		memblock_remove_region(type, slot);
-		return -1;
+	if (!insert) {
+		while (type->cnt + nr_new > type->max)
+			if (memblock_double_array(type) < 0)
+				return -ENOMEM;
+		insert = true;
+		goto repeat;
+	} else {
+		memblock_merge_regions(type);
+		return 0;
 	}
-
-	return 0;
 }
 
 long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_add_region(&memblock.memory, base, size);
-
 }
 
 static long __init_memblock __memblock_remove(struct memblock_type *type,
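One design consequence worth noting: the first pass (with `insert == false`) only counts how many regions the new area will need, so memblock_double_array() can grow the array up front and the second pass can insert with no failure path. This replaces the old code's awkward recovery logic, which had to undo a partially inserted region when resizing failed mid-operation. The observable behaviour for callers, sketched with invented addresses (not taken from the patch):

```c
/* Illustrative calls only; the addresses are made up for the example. */
memblock_add(0x100000, 0x100000);	/* memory: [0x100000, 0x200000) */
memblock_add(0x180000, 0x100000);	/* overlapping add is now fine */
/* memblock.memory ends up as the single region [0x100000, 0x280000) */
```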