aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTang Chen <tangchen@cn.fujitsu.com>2013-02-22 19:33:37 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:14 -0500
commit34b71f1e04fcba578e719e675b4882eeeb2a1f6f (patch)
treebeafbed8dce82b108e95baa2c2424ee0f3e94701
parent4d59a75125d5a4717e57e9fc62c64b3d346e603e (diff)
page_alloc: add movable_memmap kernel parameter
Add functions to parse movablemem_map boot option. Since the option could be specified more than once, all the maps will be stored in the global variable movablemem_map.map array. And also, we keep the array in monotonic increasing order by start_pfn. And merge all overlapped ranges. [akpm@linux-foundation.org: improve comment] [akpm@linux-foundation.org: checkpatch fixes] [akpm@linux-foundation.org: remove unneeded parens] Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com> Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Reviewed-by: Wen Congyang <wency@cn.fujitsu.com> Tested-by: Lin Feng <linfeng@cn.fujitsu.com> Cc: Wu Jianguo <wujianguo@huawei.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Ingo Molnar <mingo@elte.hu> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--Documentation/kernel-parameters.txt17
-rw-r--r--include/linux/mm.h11
-rw-r--r--mm/page_alloc.c131
3 files changed, 159 insertions, 0 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9aa8ff3e54dc..722a74161246 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1640,6 +1640,23 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1640 that the amount of memory usable for all allocations 1640 that the amount of memory usable for all allocations
1641 is not too small. 1641 is not too small.
1642 1642
1643 movablemem_map=nn[KMG]@ss[KMG]
1644 [KNL,X86,IA-64,PPC] This parameter is similar to
1645 memmap except it specifies the memory map of
1646 ZONE_MOVABLE.
1647 If more areas are all within one node, then from
1648 lowest ss to the end of the node will be ZONE_MOVABLE.
1649 If an area covers two or more nodes, the area from
1650 ss to the end of the 1st node will be ZONE_MOVABLE,
1651 and all the remaining nodes will only have ZONE_MOVABLE.
1652 If memmap is specified at the same time, the
1653 movablemem_map will be limited within the memmap
1654 areas. If kernelcore or movablecore is also specified,
1655 movablemem_map will have higher priority to be
1656 satisfied. So the administrator should be careful that
1657 the amount of movablemem_map areas is not too large.
1658 Otherwise kernel won't have enough memory to start.
1659
1643 MTD_Partition= [MTD] 1660 MTD_Partition= [MTD]
1644 Format: <name>,<region-number>,<size>,<offset> 1661 Format: <name>,<region-number>,<size>,<offset>
1645 1662
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9d659491c0ae..ce9bd3049836 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1359,6 +1359,17 @@ extern void free_bootmem_with_active_regions(int nid,
1359 unsigned long max_low_pfn); 1359 unsigned long max_low_pfn);
1360extern void sparse_memory_present_with_active_regions(int nid); 1360extern void sparse_memory_present_with_active_regions(int nid);
1361 1361
1362#define MOVABLEMEM_MAP_MAX MAX_NUMNODES
1363struct movablemem_entry {
1364 unsigned long start_pfn; /* start pfn of memory segment */
1365 unsigned long end_pfn; /* end pfn of memory segment (exclusive) */
1366};
1367
1368struct movablemem_map {
1369 int nr_map;
1370 struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
1371};
1372
1362#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 1373#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1363 1374
1364#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ 1375#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 703944809666..aa1cc5fe9904 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -202,6 +202,9 @@ static unsigned long __meminitdata nr_all_pages;
202static unsigned long __meminitdata dma_reserve; 202static unsigned long __meminitdata dma_reserve;
203 203
204#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 204#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
205/* Movable memory ranges, will also be used by memblock subsystem. */
206struct movablemem_map movablemem_map;
207
205static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 208static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
206static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 209static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
207static unsigned long __initdata required_kernelcore; 210static unsigned long __initdata required_kernelcore;
@@ -5078,6 +5081,134 @@ static int __init cmdline_parse_movablecore(char *p)
5078early_param("kernelcore", cmdline_parse_kernelcore); 5081early_param("kernelcore", cmdline_parse_kernelcore);
5079early_param("movablecore", cmdline_parse_movablecore); 5082early_param("movablecore", cmdline_parse_movablecore);
5080 5083
5084/**
5085 * insert_movablemem_map - Insert a memory range into movablemem_map.map.
5086 * @start_pfn: start pfn of the range
5087 * @end_pfn: end pfn of the range
5088 *
5089 * This function will also merge the overlapped ranges, and sort the array
5090 * by start_pfn in monotonic increasing order.
5091 */
5092static void __init insert_movablemem_map(unsigned long start_pfn,
5093 unsigned long end_pfn)
5094{
5095 int pos, overlap;
5096
5097 /*
5098 * pos will be at the 1st overlapped range, or the position
5099 * where the element should be inserted.
5100 */
5101 for (pos = 0; pos < movablemem_map.nr_map; pos++)
5102 if (start_pfn <= movablemem_map.map[pos].end_pfn)
5103 break;
5104
5105 /* If there is no overlapped range, just insert the element. */
5106 if (pos == movablemem_map.nr_map ||
5107 end_pfn < movablemem_map.map[pos].start_pfn) {
5108 /*
5109 * If pos is not the end of array, we need to move all
5110 * the rest elements backward.
5111 */
5112 if (pos < movablemem_map.nr_map)
5113 memmove(&movablemem_map.map[pos+1],
5114 &movablemem_map.map[pos],
5115 sizeof(struct movablemem_entry) *
5116 (movablemem_map.nr_map - pos));
5117 movablemem_map.map[pos].start_pfn = start_pfn;
5118 movablemem_map.map[pos].end_pfn = end_pfn;
5119 movablemem_map.nr_map++;
5120 return;
5121 }
5122
5123 /* overlap will be at the last overlapped range */
5124 for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
5125 if (end_pfn < movablemem_map.map[overlap].start_pfn)
5126 break;
5127
5128 /*
5129 * If there are more ranges overlapped, we need to merge them,
5130 * and move the rest elements forward.
5131 */
5132 overlap--;
5133 movablemem_map.map[pos].start_pfn = min(start_pfn,
5134 movablemem_map.map[pos].start_pfn);
5135 movablemem_map.map[pos].end_pfn = max(end_pfn,
5136 movablemem_map.map[overlap].end_pfn);
5137
5138 if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
5139 memmove(&movablemem_map.map[pos+1],
5140 &movablemem_map.map[overlap+1],
5141 sizeof(struct movablemem_entry) *
5142 (movablemem_map.nr_map - overlap - 1));
5143
5144 movablemem_map.nr_map -= overlap - pos;
5145}
5146
5147/**
5148 * movablemem_map_add_region - Add a memory range into movablemem_map.
5149 * @start: physical start address of range
5150 * @size: size of the memory range in bytes
5151 *
5152 * This function transforms the physical address into a pfn, and then adds the
5153 * range into movablemem_map by calling insert_movablemem_map().
5154 */
5155static void __init movablemem_map_add_region(u64 start, u64 size)
5156{
5157 unsigned long start_pfn, end_pfn;
5158
5159 /* In case size == 0 or start + size overflows */
5160 if (start + size <= start)
5161 return;
5162
5163 if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
5164 pr_err("movablemem_map: too many entries;"
5165 " ignoring [mem %#010llx-%#010llx]\n",
5166 (unsigned long long) start,
5167 (unsigned long long) (start + size - 1));
5168 return;
5169 }
5170
5171 start_pfn = PFN_DOWN(start);
5172 end_pfn = PFN_UP(start + size);
5173 insert_movablemem_map(start_pfn, end_pfn);
5174}
5175
5176/*
5177 * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
5178 * @p: The boot option of the following format:
5179 * movablemem_map=nn[KMG]@ss[KMG]
5180 *
5181 * This option sets the memory range [ss, ss+nn) to be used as movable memory.
5182 *
5183 * Return: 0 on success or -EINVAL on failure.
5184 */
5185static int __init cmdline_parse_movablemem_map(char *p)
5186{
5187 char *oldp;
5188 u64 start_at, mem_size;
5189
5190 if (!p)
5191 goto err;
5192
5193 oldp = p;
5194 mem_size = memparse(p, &p);
5195 if (p == oldp)
5196 goto err;
5197
5198 if (*p == '@') {
5199 oldp = ++p;
5200 start_at = memparse(p, &p);
5201 if (p == oldp || *p != '\0')
5202 goto err;
5203
5204 movablemem_map_add_region(start_at, mem_size);
5205 return 0;
5206 }
5207err:
5208 return -EINVAL;
5209}
5210early_param("movablemem_map", cmdline_parse_movablemem_map);
5211
5081#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5212#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5082 5213
5083/** 5214/**