Diffstat (limited to 'arch/x86/mm/srat.c')
-rw-r--r--	arch/x86/mm/srat.c	71
1 file changed, 66 insertions, 5 deletions
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 3e90039e52e0..79836d01f789 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,16 +142,72 @@ static inline int save_add_info(void) {return 0;}
 #endif
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-static void __init handle_movablemem(int node, u64 start, u64 end)
+static void __init
+handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
 {
-	int overlap;
+	int overlap, i;
 	unsigned long start_pfn, end_pfn;
 
 	start_pfn = PFN_DOWN(start);
 	end_pfn = PFN_UP(end);
 
 	/*
-	 * For movablecore_map=nn[KMG]@ss[KMG]:
+	 * For movablemem_map=acpi:
+	 *
+	 * SRAT:		|_____| |_____| |_________| |_________| ......
+	 * node id:		   0       1         1           2
+	 * hotpluggable:	   n       y         y           n
+	 * movablemem_map:	        |_____| |_________|
+	 *
+	 * Using movablemem_map, we can prevent memblock from allocating memory
+	 * on ZONE_MOVABLE at boot time.
+	 *
+	 * Before parsing SRAT, memblock has already reserved some memory ranges
+	 * for other purposes, such as for the kernel image. We cannot prevent
+	 * the kernel from using this memory, so we need to exclude it
+	 * even if it is hotpluggable.
+	 * Furthermore, to ensure the kernel has enough memory to boot, we make
+	 * all the memory on the node that the kernel resides in
+	 * un-hotpluggable.
+	 */
+	if (hotpluggable && movablemem_map.acpi) {
+		/* Exclude ranges reserved by memblock. */
+		struct memblock_type *rgn = &memblock.reserved;
+
+		for (i = 0; i < rgn->cnt; i++) {
+			if (end <= rgn->regions[i].base ||
+			    start >= rgn->regions[i].base +
+			    rgn->regions[i].size)
+				continue;
+
+			/*
+			 * If the memory range overlaps the memory reserved by
+			 * memblock, then the kernel resides in this node.
+			 */
+			node_set(node, movablemem_map.numa_nodes_kernel);
+
+			goto out;
+		}
+
+		/*
+		 * If the kernel resides in this node, then the whole node
+		 * should not be hotpluggable.
+		 */
+		if (node_isset(node, movablemem_map.numa_nodes_kernel))
+			goto out;
+
+		insert_movablemem_map(start_pfn, end_pfn);
+
+		/*
+		 * numa_nodes_hotplug nodemask represents which nodes are put
+		 * into movablemem_map.map[].
+		 */
+		node_set(node, movablemem_map.numa_nodes_hotplug);
+		goto out;
+	}
+
+	/*
+	 * For movablemem_map=nn[KMG]@ss[KMG]:
 	 *
 	 * SRAT:		|_____| |_____| |_________| |_________| ......
 	 * node id:		   0       1         1           2
@@ -160,6 +216,8 @@ static void __init handle_movablemem(int node, u64 start, u64 end)
 	 *
 	 * Using movablemem_map, we can prevent memblock from allocating memory
 	 * on ZONE_MOVABLE at boot time.
+	 *
+	 * NOTE: In this case, SRAT info will be ignored.
 	 */
 	overlap = movablemem_map_overlap(start_pfn, end_pfn);
 	if (overlap >= 0) {
@@ -187,9 +245,12 @@ static void __init handle_movablemem(int node, u64 start, u64 end)
 		 */
 		insert_movablemem_map(start_pfn, end_pfn);
 	}
+out:
+	return;
 }
 #else	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-static inline void handle_movablemem(int node, u64 start, u64 end)
+static inline void
+handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
 {
 }
 #endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
@@ -234,7 +295,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		(unsigned long long) start, (unsigned long long) end - 1,
 		hotpluggable ? "Hot Pluggable": "");
 
-	handle_movablemem(node, start, end);
+	handle_movablemem(node, start, end, hotpluggable);
 
 	return 0;
 out_err_bad_srat:
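
For reference, the hotpluggable branch added above boils down to a range-overlap test against memblock.reserved plus per-node bookkeeping. The following is a minimal, self-contained userspace sketch of that decision flow; it is an illustration only, not kernel code, and range_t, reserved[], kernel_nodes[] and hotplug_nodes[] are hypothetical stand-ins for memblock.reserved, movablemem_map.numa_nodes_kernel and movablemem_map.numa_nodes_hotplug.

/*
 * Minimal userspace sketch (NOT kernel code) of the decision flow the
 * patch adds for movablemem_map=acpi.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t start, end; } range_t;	/* [start, end) */

/* Ranges memblock would already have reserved, e.g. the kernel image. */
static const range_t reserved[] = { { 0x1000000, 0x2000000 } };

static bool kernel_nodes[8];	/* nodes the kernel resides in */
static bool hotplug_nodes[8];	/* nodes inserted into the movable map */

static bool overlaps(const range_t *r, uint64_t start, uint64_t end)
{
	/* Half-open intervals overlap unless one ends before the other begins. */
	return !(end <= r->start || start >= r->end);
}

/* Mirrors the hotpluggable branch of handle_movablemem() above. */
static void classify(int node, uint64_t start, uint64_t end, bool hotpluggable)
{
	size_t i;

	if (!hotpluggable)
		return;

	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		if (!overlaps(&reserved[i], start, end))
			continue;
		/* Range holds reserved (kernel) data: pin the whole node. */
		kernel_nodes[node] = true;
		return;
	}

	if (kernel_nodes[node])
		return;		/* node already hosts the kernel */

	/* The real code would call insert_movablemem_map(start_pfn, end_pfn). */
	hotplug_nodes[node] = true;
	printf("node %d: %#llx-%#llx marked movable\n", node,
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	classify(0, 0x0000000, 0x4000000, true);	/* overlaps reserved: pinned */
	classify(1, 0x8000000, 0xc000000, true);	/* no overlap: movable */
	return 0;
}

In the kernel patch the same overlap test is written against rgn->regions[i].base and rgn->regions[i].size, and the two boolean arrays correspond to node_set()/node_isset() on the movablemem_map nodemasks.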