author		Tejun Heo <tj@kernel.org>	2011-07-14 03:46:10 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2011-07-14 14:45:29 -0400
commit		5dfe8660a3d7f1ee1265c3536433ee53da3f98a3 (patch)
tree		c58232b88741ba1d8cce417b62f3f658369ad9c2 /mm/page_alloc.c
parent		fc769a8e70a3348d5de49e5f69f6aff810157360 (diff)
bootmem: Replace work_with_active_regions() with for_each_mem_pfn_range()
Callback-based iteration is cumbersome and much less useful than a
for_each_*() iterator. This patch implements for_each_mem_pfn_range(),
which replaces work_with_active_regions(). All current users of
work_with_active_regions() are converted.
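
For illustration, a call-site conversion looks roughly like this; the
mark_range() callback and its body are hypothetical, not part of this
patch:

	/* before: per-range work hidden behind a callback */
	static int __init mark_range(unsigned long start_pfn,
				     unsigned long end_pfn, void *data)
	{
		/* ... work on [start_pfn, end_pfn) ... */
		return 0;	/* a non-zero return stops the walk */
	}

	work_with_active_regions(nid, mark_range, NULL);

	/* after: an open-coded loop over the same ranges */
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		/* ... work on [start_pfn, end_pfn) directly;
		 * break to stop the walk early ... */
	}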
This simplifies walking over early_node_map and will allow converting
the internal logic in page_alloc to use the iterator instead of walking
early_node_map directly, which in turn will enable moving node
information to memblock.
The powerpc change is only compile-tested.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20110714074610.GD3455@htj.dyndns.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c7f0e5be4a31..69fffabf61b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3903,18 +3903,6 @@ int __init add_from_early_node_map(struct range *range, int az,
 	return nr_range;
 }
 
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
-	int i;
-	int ret;
-
-	for_each_active_range_index_in_nid(i, nid) {
-		ret = work_fn(early_node_map[i].start_pfn,
-			      early_node_map[i].end_pfn, data);
-		if (ret)
-			break;
-	}
-}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -4421,6 +4409,34 @@ static inline void setup_nr_node_ids(void)
 }
 #endif
 
+/*
+ * Common iterator interface used to define for_each_mem_pfn_range().
+ */
+void __meminit __next_mem_pfn_range(int *idx, int nid,
+				    unsigned long *out_start_pfn,
+				    unsigned long *out_end_pfn, int *out_nid)
+{
+	struct node_active_region *r = NULL;
+
+	while (++*idx < nr_nodemap_entries) {
+		if (nid == MAX_NUMNODES || nid == early_node_map[*idx].nid) {
+			r = &early_node_map[*idx];
+			break;
+		}
+	}
+	if (!r) {
+		*idx = -1;
+		return;
+	}
+
+	if (out_start_pfn)
+		*out_start_pfn = r->start_pfn;
+	if (out_end_pfn)
+		*out_end_pfn = r->end_pfn;
+	if (out_nid)
+		*out_nid = r->nid;
+}
+
 /**
  * add_active_range - Register a range of PFNs backed by physical memory
  * @nid: The node ID the range resides on
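
Not visible in this diffstat-limited view: for_each_mem_pfn_range()
itself is a macro wrapping __next_mem_pfn_range(). Sketching it from
the iterator's contract above (not a verbatim hunk from this commit),
it would look roughly like:

	#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)	\
		for (i = -1,						\
		     __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
		     i >= 0;						\
		     __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

Priming i to -1 and having __next_mem_pfn_range() reset it to -1 once
early_node_map is exhausted is what lets the i >= 0 test terminate the
loop.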