commit 8a9ca34c11e1695dab7aff3cfa7780fbfe76b2f8
tree 5bd341215c7e829b89d1fcb481cfff71ef81f0f5
parent 64a02daacbc880bac1d6b3aeefbcd226a9341fa7
author Tejun Heo <tj@kernel.org> 2011-07-12 05:16:02 -0400
committer H. Peter Anvin <hpa@linux.intel.com> 2011-07-14 14:47:49 -0400
memblock, x86: Replace __get_free_all_memory_range() with for_each_free_mem_range()
__get_free_all_memory_range() walks memblock, calculates the free memory
areas, and fills them into the specified range array. It can easily be
replaced with for_each_free_mem_range().
Convert free_low_memory_core_early() and
add_highpages_with_active_regions() to for_each_free_mem_range().
This leaves __get_free_all_memory_range() without any users. Kill it
and its helper functions.
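
In sketch form, the replacement pattern looks like this (illustrative
only, not the exact code from the patch below; s_pfn/e_pfn are
hypothetical names, and it assumes the memblock API of this series,
where for_each_free_mem_range() yields the physical bounds of each
free area):

	phys_addr_t start, end;
	u64 i;

	/*
	 * Iterate over regions that are in memblock.memory but not in
	 * memblock.reserved, restricted to node 'nid'.
	 */
	for_each_free_mem_range(i, nid, &start, &end, NULL) {
		unsigned long s_pfn = PFN_UP(start);	/* first fully-free pfn */
		unsigned long e_pfn = PFN_DOWN(end);	/* one past the last */

		/* operate on the fully-free page frames [s_pfn, e_pfn) */
	}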
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-10-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
 arch/x86/include/asm/memblock.h |  3
 arch/x86/mm/init_32.c           | 28
 arch/x86/mm/memblock.c          | 59
 mm/nobootmem.c                  | 28
 4 files changed, 27 insertions(+), 91 deletions(-)
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 6c72ecaee577..bc9e44b0812d 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -5,9 +5,6 @@
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
-struct range;
-int __get_free_all_memory_range(struct range **range, int nodeid,
-			 unsigned long start_pfn, unsigned long end_pfn);
 
 u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5d173db93c4e..0c1da394a634 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,23 +427,17 @@ static void __init add_one_highpage_init(struct page *page)
 void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct range *range;
-	int nr_range;
-	int i;
-
-	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
-
-	for (i = 0; i < nr_range; i++) {
-		struct page *page;
-		int node_pfn;
-
-		for (node_pfn = range[i].start; node_pfn < range[i].end;
-		     node_pfn++) {
-			if (!pfn_valid(node_pfn))
-				continue;
-			page = pfn_to_page(node_pfn);
-			add_one_highpage_init(page);
-		}
+	phys_addr_t start, end;
+	u64 i;
+
+	for_each_free_mem_range(i, nid, &start, &end, NULL) {
+		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
+					    start_pfn, end_pfn);
+		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
+					      start_pfn, end_pfn);
+		for ( ; pfn < e_pfn; pfn++)
+			if (pfn_valid(pfn))
+				add_one_highpage_init(pfn_to_page(pfn));
 	}
 }
 #else
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index 0e8442a9baff..4107c1a32b78 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -30,65 +30,6 @@ static __init struct range *find_range_array(int count)
 	return range;
 }
 
-static void __init memblock_x86_subtract_reserved(struct range *range, int az)
-{
-	u64 final_start, final_end;
-	struct memblock_region *r;
-
-	/* Take out region array itself at first*/
-	memblock_free_reserved_regions();
-
-	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);
-
-	for_each_memblock(reserved, r) {
-		memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
-		final_start = PFN_DOWN(r->base);
-		final_end = PFN_UP(r->base + r->size);
-		if (final_start >= final_end)
-			continue;
-		subtract_range(range, az, final_start, final_end);
-	}
-
-	/* Put region array back ? */
-	memblock_reserve_reserved_regions();
-}
-
-static int __init count_early_node_map(int nodeid)
-{
-	int i, cnt = 0;
-
-	for_each_mem_pfn_range(i, nodeid, NULL, NULL, NULL)
-		cnt++;
-	return cnt;
-}
-
-int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
-			 unsigned long start_pfn, unsigned long end_pfn)
-{
-	int count;
-	struct range *range;
-	int nr_range;
-
-	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;
-
-	range = find_range_array(count);
-	nr_range = 0;
-
-	/*
-	 * Use early_node_map[] and memblock.reserved.region to get range array
-	 * at first
-	 */
-	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
-	subtract_range(range, count, 0, start_pfn);
-	subtract_range(range, count, end_pfn, -1ULL);
-
-	memblock_x86_subtract_reserved(range, count);
-	nr_range = clean_sort_range(range, count);
-
-	*rangep = range;
-	return nr_range;
-}
-
 static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
 {
 	int i, count;
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 2037a8a04761..7075bc00fa84 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -108,21 +108,25 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
 
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
-	int i;
-	u64 start, end;
 	unsigned long count = 0;
-	struct range *range = NULL;
-	int nr_range;
+	phys_addr_t start, end;
+	u64 i;
 
-	nr_range = __get_free_all_memory_range(&range, nodeid, 0, max_low_pfn);
-
-	for (i = 0; i < nr_range; i++) {
-		start = range[i].start;
-		end = range[i].end;
-		count += end - start;
-		__free_pages_memory(start, end);
+	/* free reserved array temporarily so that it's treated as free area */
+	memblock_free_reserved_regions();
+
+	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+		unsigned long start_pfn = PFN_UP(start);
+		unsigned long end_pfn = min_t(unsigned long,
+					      PFN_DOWN(end), max_low_pfn);
+		if (start_pfn < end_pfn) {
+			__free_pages_memory(start_pfn, end_pfn);
+			count += end_pfn - start_pfn;
+		}
 	}
 
+	/* put region array back? */
+	memblock_reserve_reserved_regions();
 	return count;
 }
 