author     Yinghai Lu <yhlu.kernel@gmail.com>        2008-09-10 02:34:17 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-10-13 04:19:59 -0400
commit     927604c7592473742891dc13e1da09febc06e01b (patch)
tree       74355d389d386d26952d5eb664aef1f589328770 /arch/x86/mm/numa_32.c
parent     0cefa5b9b0a61b62442c5d0ca00a304c5896b6e9 (diff)
x86: rename discontig_32.c to numa_32.c
name it in line with its purpose.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/numa_32.c')
-rw-r--r--   arch/x86/mm/numa_32.c   444
1 files changed, 444 insertions, 0 deletions
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
new file mode 100644
index 000000000000..847c164725f4
--- /dev/null
+++ b/arch/x86/mm/numa_32.c
@@ -0,0 +1,444 @@
+/*
+ * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
+ * August 2002: added remote node KVA remap - Martin J. Bligh
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/highmem.h>
+#include <linux/initrd.h>
+#include <linux/nodemask.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/pfn.h>
+#include <linux/swap.h>
+#include <linux/acpi.h>
+
+#include <asm/e820.h>
+#include <asm/setup.h>
+#include <asm/mmzone.h>
+#include <asm/bios_ebda.h>
+#include <asm/proto.h>
+
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+
+/*
+ * numa interface - we expect the numa architecture specific code to have
+ * populated the following initialisation.
+ *
+ * 1) node_online_map - the map of all nodes configured (online) in the system
+ * 2) node_start_pfn - the starting page frame number for a node
+ * 3) node_end_pfn - the ending page frame number for a node
+ */
+unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
+unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
+
+
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * 4) physnode_map - the mapping between a pfn and owning node
+ * physnode_map keeps track of the physical memory layout of a generic
+ * numa node on a 64Mb break (each element of the array will
+ * represent 64Mb of memory and will be marked by the node id. so,
+ * if the first gig is on node 0, and the second gig is on node 1
+ * physnode_map will contain:
+ *
+ *     physnode_map[0-15] = 0;
+ *     physnode_map[16-31] = 1;
+ *     physnode_map[32- ] = -1;
+ */
+s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+EXPORT_SYMBOL(physnode_map);
+
+void memory_present(int nid, unsigned long start, unsigned long end)
+{
+        unsigned long pfn;
+
+        printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
+               nid, start, end);
+        printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
+        printk(KERN_DEBUG " ");
+        for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
+                physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
+                printk(KERN_CONT "%lx ", pfn);
+        }
+        printk(KERN_CONT "\n");
+}
+
+unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
+                                     unsigned long end_pfn)
+{
+        unsigned long nr_pages = end_pfn - start_pfn;
+
+        if (!nr_pages)
+                return 0;
+
+        return (nr_pages + 1) * sizeof(struct page);
+}
+#endif
+
+extern unsigned long find_max_low_pfn(void);
+extern unsigned long highend_pfn, highstart_pfn;
+
+#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
+
+unsigned long node_remap_size[MAX_NUMNODES];
+static void *node_remap_start_vaddr[MAX_NUMNODES];
+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+
+static unsigned long kva_start_pfn;
+static unsigned long kva_pages;
+/*
+ * FLAT - support for basic PC memory model with discontig enabled, essentially
+ *        a single node with all available processors in it with a flat
+ *        memory map.
+ */
+int __init get_memcfg_numa_flat(void)
+{
+        printk(KERN_DEBUG "NUMA - single node, flat memory mode\n");
+
+        node_start_pfn[0] = 0;
+        node_end_pfn[0] = max_pfn;
+        e820_register_active_regions(0, 0, max_pfn);
+        memory_present(0, 0, max_pfn);
+        node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);
+
+        /* Indicate there is one node available. */
+        nodes_clear(node_online_map);
+        node_set_online(0);
+        return 1;
+}
+
+/*
+ * Find the highest page frame number we have available for the node
+ */
+static void __init propagate_e820_map_node(int nid)
+{
+        if (node_end_pfn[nid] > max_pfn)
+                node_end_pfn[nid] = max_pfn;
+        /*
+         * if a user has given mem=XXXX, then we need to make sure
+         * that the node _starts_ before that, too, not just ends
+         */
+        if (node_start_pfn[nid] > max_pfn)
+                node_start_pfn[nid] = max_pfn;
+        BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
+}
+
+/*
+ * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
+ * method. For node zero take this from the bottom of memory, for
+ * subsequent nodes place them at node_remap_start_vaddr which contains
+ * node local data in physically node local memory. See setup_memory()
+ * for details.
+ */
+static void __init allocate_pgdat(int nid)
+{
+        char buf[16];
+
+        if (node_has_online_mem(nid) && node_remap_start_vaddr[nid])
+                NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
+        else {
+                unsigned long pgdat_phys;
+                pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
+                                            max_pfn_mapped<<PAGE_SHIFT,
+                                            sizeof(pg_data_t),
+                                            PAGE_SIZE);
+                NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
+                memset(buf, 0, sizeof(buf));
+                sprintf(buf, "NODE_DATA %d", nid);
+                reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
+        }
+        printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
+               nid, (unsigned long)NODE_DATA(nid));
+}
+
+/*
+ * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel
+ * virtual address space (KVA) is reserved and portions of nodes are mapped
+ * using it. This is to allow node-local memory to be allocated for
+ * structures that would normally require ZONE_NORMAL. The memory is
+ * allocated with alloc_remap() and callers should be prepared to allocate
+ * from the bootmem allocator instead.
+ */
+static unsigned long node_remap_start_pfn[MAX_NUMNODES];
+static void *node_remap_end_vaddr[MAX_NUMNODES];
+static void *node_remap_alloc_vaddr[MAX_NUMNODES];
+static unsigned long node_remap_offset[MAX_NUMNODES];
+
+void *alloc_remap(int nid, unsigned long size)
+{
+        void *allocation = node_remap_alloc_vaddr[nid];
+
+        size = ALIGN(size, L1_CACHE_BYTES);
+
+        if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
+                return 0;
+
+        node_remap_alloc_vaddr[nid] += size;
+        memset(allocation, 0, size);
+
+        return allocation;
+}
+
+static void __init remap_numa_kva(void)
+{
+        void *vaddr;
+        unsigned long pfn;
+        int node;
+
+        for_each_online_node(node) {
+                printk(KERN_DEBUG "remap_numa_kva: node %d\n", node);
+                for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
+                        vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
+                        printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n",
+                               (unsigned long)vaddr,
+                               node_remap_start_pfn[node] + pfn);
+                        set_pmd_pfn((ulong) vaddr,
+                                    node_remap_start_pfn[node] + pfn,
+                                    PAGE_KERNEL_LARGE);
+                }
+        }
+}
+
+static unsigned long calculate_numa_remap_pages(void)
+{
+        int nid;
+        unsigned long size, reserve_pages = 0;
+
+        for_each_online_node(nid) {
+                u64 node_kva_target;
+                u64 node_kva_final;
+
+                /*
+                 * The acpi/srat node info can show hot-add memory zones
+                 * where memory could be added but not currently present.
+                 */
+                printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
+                       nid, node_start_pfn[nid], node_end_pfn[nid]);
+                if (node_start_pfn[nid] > max_pfn)
+                        continue;
+                if (!node_end_pfn[nid])
+                        continue;
+                if (node_end_pfn[nid] > max_pfn)
+                        node_end_pfn[nid] = max_pfn;
+
+                /* ensure the remap includes space for the pgdat. */
+                size = node_remap_size[nid] + sizeof(pg_data_t);
+
+                /* convert size to large (pmd size) pages, rounding up */
+                size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
+                /* now the roundup is correct, convert to PAGE_SIZE pages */
+                size = size * PTRS_PER_PTE;
+
+                node_kva_target = round_down(node_end_pfn[nid] - size,
+                                             PTRS_PER_PTE);
+                node_kva_target <<= PAGE_SHIFT;
+                do {
+                        node_kva_final = find_e820_area(node_kva_target,
+                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
+                                        ((u64)size)<<PAGE_SHIFT,
+                                        LARGE_PAGE_BYTES);
+                        node_kva_target -= LARGE_PAGE_BYTES;
+                } while (node_kva_final == -1ULL &&
+                         (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
+
+                if (node_kva_final == -1ULL)
+                        panic("Can not get kva ram\n");
+
+                node_remap_size[nid] = size;
+                node_remap_offset[nid] = reserve_pages;
+                reserve_pages += size;
+                printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
+                       " node %d at %llx\n",
+                       size, nid, node_kva_final>>PAGE_SHIFT);
+
+                /*
+                 * prevent kva address below max_low_pfn want it on system
+                 * with less memory later.
+                 * layout will be: KVA address , KVA RAM
+                 *
+                 * we are supposed to only record the one less than max_low_pfn
+                 * but we could have some hole in high memory, and it will only
+                 * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
+                 * to use it as free.
+                 * So reserve_early here, hope we don't run out of that array
+                 */
+                reserve_early(node_kva_final,
+                              node_kva_final+(((u64)size)<<PAGE_SHIFT),
+                              "KVA RAM");
+
+                node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+                remove_active_range(nid, node_remap_start_pfn[nid],
+                                    node_remap_start_pfn[nid] + size);
+        }
+        printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
+               reserve_pages);
+        return reserve_pages;
+}
+
+static void init_remap_allocator(int nid)
+{
+        node_remap_start_vaddr[nid] = pfn_to_kaddr(
+                        kva_start_pfn + node_remap_offset[nid]);
+        node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
+                (node_remap_size[nid] * PAGE_SIZE);
+        node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
+                ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+
+        printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid,
+               (ulong) node_remap_start_vaddr[nid],
+               (ulong) node_remap_end_vaddr[nid]);
+}
+
+void __init initmem_init(unsigned long start_pfn,
+                         unsigned long end_pfn)
+{
+        int nid;
+        long kva_target_pfn;
+
+        /*
+         * When mapping a NUMA machine we allocate the node_mem_map arrays
+         * from node local memory. They are then mapped directly into KVA
+         * between zone normal and vmalloc space. Calculate the size of
+         * this space and use it to adjust the boundary between ZONE_NORMAL
+         * and ZONE_HIGHMEM.
+         */
+
+        get_memcfg_numa();
+
+        kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
+
+        kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
+        do {
+                kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+                                        max_low_pfn<<PAGE_SHIFT,
+                                        kva_pages<<PAGE_SHIFT,
+                                        PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
+                kva_target_pfn -= PTRS_PER_PTE;
+        } while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);
+
+        if (kva_start_pfn == -1UL)
+                panic("Can not get kva space\n");
+
+        printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
+               kva_start_pfn, max_low_pfn);
+        printk(KERN_INFO "max_pfn = %lx\n", max_pfn);
+
+        /* avoid clash with initrd */
+        reserve_early(kva_start_pfn<<PAGE_SHIFT,
+                      (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
+                      "KVA PG");
+#ifdef CONFIG_HIGHMEM
+        highstart_pfn = highend_pfn = max_pfn;
+        if (max_pfn > max_low_pfn)
+                highstart_pfn = max_low_pfn;
+        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+               pages_to_mb(highend_pfn - highstart_pfn));
+        num_physpages = highend_pfn;
+        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
+#else
+        num_physpages = max_low_pfn;
+        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
+#endif
+        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+               pages_to_mb(max_low_pfn));
+        printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
+               max_low_pfn, highstart_pfn);
+
+        printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
+               (ulong) pfn_to_kaddr(max_low_pfn));
+        for_each_online_node(nid) {
+                init_remap_allocator(nid);
+
+                allocate_pgdat(nid);
+        }
+        remap_numa_kva();
+
+        printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
+               (ulong) pfn_to_kaddr(highstart_pfn));
+        for_each_online_node(nid)
+                propagate_e820_map_node(nid);
+
+        for_each_online_node(nid)
+                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+        NODE_DATA(0)->bdata = &bootmem_node_data[0];
+        setup_bootmem_allocator();
+}
+
+void __init set_highmem_pages_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+        struct zone *zone;
+        int nid;
+
+        for_each_zone(zone) {
+                unsigned long zone_start_pfn, zone_end_pfn;
+
+                if (!is_highmem(zone))
+                        continue;
+
+                zone_start_pfn = zone->zone_start_pfn;
+                zone_end_pfn = zone_start_pfn + zone->spanned_pages;
+
+                nid = zone_to_nid(zone);
+                printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
+                       zone->name, nid, zone_start_pfn, zone_end_pfn);
+
+                add_highpages_with_active_regions(nid, zone_start_pfn,
+                                                  zone_end_pfn);
+        }
+        totalram_pages += totalhigh_pages;
+#endif
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int paddr_to_nid(u64 addr)
+{
+        int nid;
+        unsigned long pfn = PFN_DOWN(addr);
+
+        for_each_node(nid)
+                if (node_start_pfn[nid] <= pfn &&
+                    pfn < node_end_pfn[nid])
+                        return nid;
+
+        return -1;
+}
+
+/*
+ * This function is used to ask node id BEFORE memmap and mem_section's
+ * initialization (pfn_to_nid() can't be used yet).
+ * If _PXM is not defined on ACPI's DSDT, node id must be found by this.
+ */
+int memory_add_physaddr_to_nid(u64 addr)
+{
+        int nid = paddr_to_nid(addr);
+        return (nid >= 0) ? nid : 0;
+}
+
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+