diff options
author | Tang Chen <tangchen@cn.fujitsu.com> | 2013-02-22 19:33:37 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 20:50:14 -0500 |
commit | 34b71f1e04fcba578e719e675b4882eeeb2a1f6f (patch) | |
tree | beafbed8dce82b108e95baa2c2424ee0f3e94701 /mm/page_alloc.c | |
parent | 4d59a75125d5a4717e57e9fc62c64b3d346e603e (diff) |
page_alloc: add movablemem_map kernel parameter
Add functions to parse the movablemem_map boot option. Since the option
may be specified more than once, all the ranges are stored in the
global movablemem_map.map array.
The array is kept sorted in monotonically increasing order of start_pfn,
and all overlapping ranges are merged.
[akpm@linux-foundation.org: improve comment]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: remove unneeded parens]
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Reviewed-by: Wen Congyang <wency@cn.fujitsu.com>
Tested-by: Lin Feng <linfeng@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 131 |
1 files changed, 131 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 703944809666..aa1cc5fe9904 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -202,6 +202,9 @@ static unsigned long __meminitdata nr_all_pages; | |||
202 | static unsigned long __meminitdata dma_reserve; | 202 | static unsigned long __meminitdata dma_reserve; |
203 | 203 | ||
204 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 204 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
/*
 * Movable memory ranges specified via the movablemem_map= boot option.
 * NOTE(review): per the commit message this is also intended to be read
 * by the memblock subsystem — the consumer is not visible in this hunk.
 */
struct movablemem_map movablemem_map;
205 | static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; | 208 | static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; |
206 | static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; | 209 | static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; |
207 | static unsigned long __initdata required_kernelcore; | 210 | static unsigned long __initdata required_kernelcore; |
@@ -5078,6 +5081,134 @@ static int __init cmdline_parse_movablecore(char *p) | |||
5078 | early_param("kernelcore", cmdline_parse_kernelcore); | 5081 | early_param("kernelcore", cmdline_parse_kernelcore); |
5079 | early_param("movablecore", cmdline_parse_movablecore); | 5082 | early_param("movablecore", cmdline_parse_movablecore); |
5080 | 5083 | ||
/**
 * insert_movablemem_map - Insert a memory range into movablemem_map.map.
 * @start_pfn: start pfn of the range
 * @end_pfn: end pfn of the range
 *
 * This function will also merge the overlapped ranges, and sort the array
 * by start_pfn in monotonic increasing order.
 *
 * NOTE(review): the comparisons below treat @end_pfn as inclusive, so a
 * range whose end exactly touches another range's start is merged with it
 * rather than inserted as a separate adjacent entry.
 */
static void __init insert_movablemem_map(unsigned long start_pfn,
					 unsigned long end_pfn)
{
	int pos, overlap;

	/*
	 * pos will be at the 1st overlapped range, or the position
	 * where the element should be inserted.
	 */
	for (pos = 0; pos < movablemem_map.nr_map; pos++)
		if (start_pfn <= movablemem_map.map[pos].end_pfn)
			break;

	/* If there is no overlapped range, just insert the element. */
	if (pos == movablemem_map.nr_map ||
	    end_pfn < movablemem_map.map[pos].start_pfn) {
		/*
		 * If pos is not the end of array, we need to move all
		 * the rest elements backward.
		 */
		if (pos < movablemem_map.nr_map)
			memmove(&movablemem_map.map[pos+1],
				&movablemem_map.map[pos],
				sizeof(struct movablemem_entry) *
				(movablemem_map.nr_map - pos));
		movablemem_map.map[pos].start_pfn = start_pfn;
		movablemem_map.map[pos].end_pfn = end_pfn;
		movablemem_map.nr_map++;
		return;
	}

	/* overlap will be at the last overlapped range */
	for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
		if (end_pfn < movablemem_map.map[overlap].start_pfn)
			break;

	/*
	 * If there are more ranges overlapped, we need to merge them,
	 * and move the rest elements forward.
	 *
	 * map[pos..overlap] all intersect the new range: collapse them
	 * into map[pos], which spans from the smallest start to the
	 * largest end of the group.
	 */
	overlap--;
	movablemem_map.map[pos].start_pfn = min(start_pfn,
					movablemem_map.map[pos].start_pfn);
	movablemem_map.map[pos].end_pfn = max(end_pfn,
					movablemem_map.map[overlap].end_pfn);

	/*
	 * Close the gap left by the merged entries: slide the tail
	 * (everything after map[overlap]) down to just after map[pos].
	 * Nothing to move when nothing was merged (pos == overlap) or
	 * when overlap was the last entry.
	 */
	if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
		memmove(&movablemem_map.map[pos+1],
			&movablemem_map.map[overlap+1],
			sizeof(struct movablemem_entry) *
			(movablemem_map.nr_map - overlap - 1));

	/* overlap - pos entries were merged away. */
	movablemem_map.nr_map -= overlap - pos;
}
5146 | |||
5147 | /** | ||
5148 | * movablemem_map_add_region - Add a memory range into movablemem_map. | ||
5149 | * @start: physical start address of range | ||
5150 | * @end: physical end address of range | ||
5151 | * | ||
5152 | * This function transform the physical address into pfn, and then add the | ||
5153 | * range into movablemem_map by calling insert_movablemem_map(). | ||
5154 | */ | ||
5155 | static void __init movablemem_map_add_region(u64 start, u64 size) | ||
5156 | { | ||
5157 | unsigned long start_pfn, end_pfn; | ||
5158 | |||
5159 | /* In case size == 0 or start + size overflows */ | ||
5160 | if (start + size <= start) | ||
5161 | return; | ||
5162 | |||
5163 | if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) { | ||
5164 | pr_err("movablemem_map: too many entries;" | ||
5165 | " ignoring [mem %#010llx-%#010llx]\n", | ||
5166 | (unsigned long long) start, | ||
5167 | (unsigned long long) (start + size - 1)); | ||
5168 | return; | ||
5169 | } | ||
5170 | |||
5171 | start_pfn = PFN_DOWN(start); | ||
5172 | end_pfn = PFN_UP(start + size); | ||
5173 | insert_movablemem_map(start_pfn, end_pfn); | ||
5174 | } | ||
5175 | |||
5176 | /* | ||
5177 | * cmdline_parse_movablemem_map - Parse boot option movablemem_map. | ||
5178 | * @p: The boot option of the following format: | ||
5179 | * movablemem_map=nn[KMG]@ss[KMG] | ||
5180 | * | ||
5181 | * This option sets the memory range [ss, ss+nn) to be used as movable memory. | ||
5182 | * | ||
5183 | * Return: 0 on success or -EINVAL on failure. | ||
5184 | */ | ||
5185 | static int __init cmdline_parse_movablemem_map(char *p) | ||
5186 | { | ||
5187 | char *oldp; | ||
5188 | u64 start_at, mem_size; | ||
5189 | |||
5190 | if (!p) | ||
5191 | goto err; | ||
5192 | |||
5193 | oldp = p; | ||
5194 | mem_size = memparse(p, &p); | ||
5195 | if (p == oldp) | ||
5196 | goto err; | ||
5197 | |||
5198 | if (*p == '@') { | ||
5199 | oldp = ++p; | ||
5200 | start_at = memparse(p, &p); | ||
5201 | if (p == oldp || *p != '\0') | ||
5202 | goto err; | ||
5203 | |||
5204 | movablemem_map_add_region(start_at, mem_size); | ||
5205 | return 0; | ||
5206 | } | ||
5207 | err: | ||
5208 | return -EINVAL; | ||
5209 | } | ||
5210 | early_param("movablemem_map", cmdline_parse_movablemem_map); | ||
5211 | |||
5081 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 5212 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
5082 | 5213 | ||
5083 | /** | 5214 | /** |