diff options
author | Tejun Heo <tj@kernel.org> | 2011-07-12 05:15:59 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2011-07-14 14:47:47 -0400 |
commit | 35fd0808d7d8d001cd72f112e3bca84664b596a3 (patch) | |
tree | d4e21e003e182bc4824a74fd850ac3869380b53b /mm/memblock.c | |
parent | ab5d140b9eafae402aa3e673a63c5ef6164a9dd2 (diff) |
memblock: Implement for_each_free_mem_range()
Implement for_each_free_mem_range() which iterates over free memory
areas according to memblock (memory && !reserved). This will be used
to simplify memblock users.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-7-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r-- | mm/memblock.c | 76 |
1 file changed, 76 insertions, 0 deletions
diff --git a/mm/memblock.c b/mm/memblock.c index e815f4b75809..c4a8750406fc 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -461,6 +461,82 @@ long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) | |||
461 | return memblock_add_region(_rgn, base, size); | 461 | return memblock_add_region(_rgn, base, size); |
462 | } | 462 | } |
463 | 463 | ||
464 | /** | ||
465 | * __next_free_mem_range - next function for for_each_free_mem_range() | ||
466 | * @idx: pointer to u64 loop variable | ||
467 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes | ||
468 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
469 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
470 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
471 | * | ||
472 | * Find the first free area from *@idx which matches @nid, fill the out | ||
473 | * parameters, and update *@idx for the next iteration. The lower 32bit of | ||
474 | * *@idx contains index into memory region and the upper 32bit indexes the | ||
475 | * areas before each reserved region. For example, if reserved regions | ||
476 | * look like the following, | ||
477 | * | ||
478 | * 0:[0-16), 1:[32-48), 2:[128-130) | ||
479 | * | ||
480 | * The upper 32bit indexes the following regions. | ||
481 | * | ||
482 | * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) | ||
483 | * | ||
484 | * As both region arrays are sorted, the function advances the two indices | ||
485 | * in lockstep and returns each intersection. | ||
486 | */ | ||
487 | void __init_memblock __next_free_mem_range(u64 *idx, int nid, | ||
488 | phys_addr_t *out_start, | ||
489 | phys_addr_t *out_end, int *out_nid) | ||
490 | { | ||
491 | struct memblock_type *mem = &memblock.memory; | ||
492 | struct memblock_type *rsv = &memblock.reserved; | ||
493 | int mi = *idx & 0xffffffff; | ||
494 | int ri = *idx >> 32; | ||
495 | |||
496 | for ( ; mi < mem->cnt; mi++) { | ||
497 | struct memblock_region *m = &mem->regions[mi]; | ||
498 | phys_addr_t m_start = m->base; | ||
499 | phys_addr_t m_end = m->base + m->size; | ||
500 | |||
501 | /* only memory regions are associated with nodes, check it */ | ||
502 | if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) | ||
503 | continue; | ||
504 | |||
505 | /* scan areas before each reservation for intersection */ | ||
506 | for ( ; ri < rsv->cnt + 1; ri++) { | ||
507 | struct memblock_region *r = &rsv->regions[ri]; | ||
508 | phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; | ||
509 | phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; | ||
510 | |||
511 | /* if ri advanced past mi, break out to advance mi */ | ||
512 | if (r_start >= m_end) | ||
513 | break; | ||
514 | /* if the two regions intersect, we're done */ | ||
515 | if (m_start < r_end) { | ||
516 | if (out_start) | ||
517 | *out_start = max(m_start, r_start); | ||
518 | if (out_end) | ||
519 | *out_end = min(m_end, r_end); | ||
520 | if (out_nid) | ||
521 | *out_nid = memblock_get_region_node(m); | ||
522 | /* | ||
523 | * The region which ends first is advanced | ||
524 | * for the next iteration. | ||
525 | */ | ||
526 | if (m_end <= r_end) | ||
527 | mi++; | ||
528 | else | ||
529 | ri++; | ||
530 | *idx = (u32)mi | (u64)ri << 32; | ||
531 | return; | ||
532 | } | ||
533 | } | ||
534 | } | ||
535 | |||
536 | /* signal end of iteration */ | ||
537 | *idx = ULLONG_MAX; | ||
538 | } | ||
539 | |||
464 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 540 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
465 | /* | 541 | /* |
466 | * Common iterator interface used to define for_each_mem_range(). | 542 | * Common iterator interface used to define for_each_mem_range(). |