author		Ingo Molnar <mingo@elte.hu>	2008-07-15 07:45:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-15 07:45:59 -0400
commit		91d0322bef047e2916b3e52741411bffc63929cb (patch)
tree		44c4fd13cc17755a6db8a3d70cffb86e57a838dc /arch/x86/mm/discontig_32.c
parent		065cb3dfe24978651caedfa54da585388ad15dde (diff)
parent		50515af207d410c9f228380e529c56f43c3de0bd (diff)
Merge branch 'linus' into x86/urgent
Diffstat (limited to 'arch/x86/mm/discontig_32.c')
-rw-r--r--	arch/x86/mm/discontig_32.c | 285
1 file changed, 126 insertions(+), 159 deletions(-)
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 914ccf983687..5dfef9fa061a 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -38,6 +38,7 @@
 #include <asm/setup.h>
 #include <asm/mmzone.h>
 #include <asm/bios_ebda.h>
+#include <asm/proto.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
@@ -59,14 +60,14 @@ unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
 /*
  * 4) physnode_map - the mapping between a pfn and owning node
  * physnode_map keeps track of the physical memory layout of a generic
- * numa node on a 256Mb break (each element of the array will
- * represent 256Mb of memory and will be marked by the node id.  so,
+ * numa node on a 64Mb break (each element of the array will
+ * represent 64Mb of memory and will be marked by the node id.  so,
  * if the first gig is on node 0, and the second gig is on node 1
  * physnode_map will contain:
  *
- *     physnode_map[0-3] = 0;
- *     physnode_map[4-7] = 1;
- *     physnode_map[8- ] = -1;
+ *     physnode_map[0-15] = 0;
+ *     physnode_map[16-31] = 1;
+ *     physnode_map[32- ] = -1;
  */
 s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
 EXPORT_SYMBOL(physnode_map);
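Worth spelling out the arithmetic behind the updated comment: with 4 KiB pages, one 64 MiB element spans 64 MiB / 4 KiB = 16384 pfns, so the first gigabyte occupies 1 GiB / 64 MiB = 16 elements, i.e. physnode_map[0-15], matching the new example values. A minimal sketch of the lookup this table supports (the shift constant and helper name are illustrative, not part of the patch):

	/* Illustrative pfn -> node lookup at 64 MiB granularity. */
	#define SKETCH_ELEMENT_SHIFT	14	/* 2^14 pages * 4 KiB = 64 MiB */

	static inline int sketch_pfn_to_nid(unsigned long pfn)
	{
		return physnode_map[pfn >> SKETCH_ELEMENT_SHIFT];	/* -1: no node */
	}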
@@ -75,15 +76,15 @@ void memory_present(int nid, unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
 
-	printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
+	printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
 			nid, start, end);
 	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
 	printk(KERN_DEBUG "  ");
 	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
 		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
-		printk("%ld ", pfn);
+		printk(KERN_CONT "%lx ", pfn);
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
 unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
@@ -99,7 +100,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
 #endif
 
 extern unsigned long find_max_low_pfn(void);
-extern void add_one_highpage_init(struct page *, int, int);
 extern unsigned long highend_pfn, highstart_pfn;
 
 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
@@ -117,13 +117,13 @@ static unsigned long kva_pages;
  */
 int __init get_memcfg_numa_flat(void)
 {
-	printk("NUMA - single node, flat memory mode\n");
+	printk(KERN_DEBUG "NUMA - single node, flat memory mode\n");
 
-	/* Run the memory configuration and find the top of memory. */
-	propagate_e820_map();
 	node_start_pfn[0] = 0;
 	node_end_pfn[0] = max_pfn;
+	e820_register_active_regions(0, 0, max_pfn);
 	memory_present(0, 0, max_pfn);
+	node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);
 
 	/* Indicate there is one node available. */
 	nodes_clear(node_online_map);
@@ -156,24 +156,32 @@ static void __init propagate_e820_map_node(int nid)
  */
 static void __init allocate_pgdat(int nid)
 {
-	if (nid && node_has_online_mem(nid))
+	char buf[16];
+
+	if (node_has_online_mem(nid) && node_remap_start_vaddr[nid])
 		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
 	else {
-		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(min_low_pfn));
-		min_low_pfn += PFN_UP(sizeof(pg_data_t));
+		unsigned long pgdat_phys;
+		pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
+				 max_pfn_mapped<<PAGE_SHIFT,
+				 sizeof(pg_data_t),
+				 PAGE_SIZE);
+		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
+		memset(buf, 0, sizeof(buf));
+		sprintf(buf, "NODE_DATA %d", nid);
+		reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
 	}
+	printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
+		nid, (unsigned long)NODE_DATA(nid));
 }
 
-#ifdef CONFIG_DISCONTIGMEM
 /*
- * In the discontig memory model, a portion of the kernel virtual area (KVA)
- * is reserved and portions of nodes are mapped using it. This is to allow
- * node-local memory to be allocated for structures that would normally require
- * ZONE_NORMAL. The memory is allocated with alloc_remap() and callers
- * should be prepared to allocate from the bootmem allocator instead. This KVA
- * mechanism is incompatible with SPARSEMEM as it makes assumptions about the
- * layout of memory that are broken if alloc_remap() succeeds for some of the
- * map and fails for others
+ * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel
+ * virtual address space (KVA) is reserved and portions of nodes are mapped
+ * using it. This is to allow node-local memory to be allocated for
+ * structures that would normally require ZONE_NORMAL. The memory is
+ * allocated with alloc_remap() and callers should be prepared to allocate
+ * from the bootmem allocator instead.
  */
 static unsigned long node_remap_start_pfn[MAX_NUMNODES];
 static void *node_remap_end_vaddr[MAX_NUMNODES];
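The allocate_pgdat() change above swaps bump-allocation below min_low_pfn for the early-boot find-then-reserve idiom: probe the e820 map for a free, aligned range, then immediately mark it reserved so later early allocations cannot claim it. A minimal sketch of the idiom as used in this era (-1ULL is find_e820_area()'s "no space" return; the wrapper name is hypothetical):

	/* Sketch: find-then-reserve against the e820 map (early boot only). */
	static u64 __init sketch_early_alloc(u64 start, u64 end, u64 size,
					     u64 align, const char *tag)
	{
		u64 phys = find_e820_area(start, end, size, align);

		if (phys == -1ULL)
			panic("early alloc: no room for %s\n", tag);
		reserve_early(phys, phys + size, tag);	/* fence it off */
		return phys;
	}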
@@ -195,15 +203,19 @@ void *alloc_remap(int nid, unsigned long size)
 	return allocation;
 }
 
-void __init remap_numa_kva(void)
+static void __init remap_numa_kva(void)
 {
 	void *vaddr;
 	unsigned long pfn;
 	int node;
 
 	for_each_online_node(node) {
+		printk(KERN_DEBUG "remap_numa_kva: node %d\n", node);
 		for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
 			vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
+			printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n",
+				(unsigned long)vaddr,
+				node_remap_start_pfn[node] + pfn);
 			set_pmd_pfn((ulong) vaddr,
 				node_remap_start_pfn[node] + pfn,
 				PAGE_KERNEL_LARGE);
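For context on the stride above: each loop iteration hands set_pmd_pfn() one large-page mapping covering PTRS_PER_PTE small pages, i.e. 1024 * 4 KiB = 4 MiB on non-PAE x86-32 (LARGE_PAGE_BYTES), which is why node_remap_size is walked in PTRS_PER_PTE steps and the remap area must stay pmd-aligned. A worked-numbers sketch (non-PAE assumed; the helper is illustrative):

	/* How many PMD entries cover a node's remap area (sketch). */
	static unsigned long sketch_pmds_for(unsigned long remap_pages)
	{
		/* e.g. 8192 pages (32 MiB) -> 8 large-page PMD entries */
		return DIV_ROUND_UP(remap_pages, PTRS_PER_PTE);
	}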
@@ -215,17 +227,21 @@ static unsigned long calculate_numa_remap_pages(void)
 {
 	int nid;
 	unsigned long size, reserve_pages = 0;
-	unsigned long pfn;
 
 	for_each_online_node(nid) {
-		unsigned old_end_pfn = node_end_pfn[nid];
+		u64 node_kva_target;
+		u64 node_kva_final;
 
 		/*
 		 * The acpi/srat node info can show hot-add memroy zones
 		 * where memory could be added but not currently present.
 		 */
+		printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
+			nid, node_start_pfn[nid], node_end_pfn[nid]);
 		if (node_start_pfn[nid] > max_pfn)
 			continue;
+		if (!node_end_pfn[nid])
+			continue;
 		if (node_end_pfn[nid] > max_pfn)
 			node_end_pfn[nid] = max_pfn;
 
@@ -237,41 +253,48 @@ static unsigned long calculate_numa_remap_pages(void)
 		/* now the roundup is correct, convert to PAGE_SIZE pages */
 		size = size * PTRS_PER_PTE;
 
-		/*
-		 * Validate the region we are allocating only contains valid
-		 * pages.
-		 */
-		for (pfn = node_end_pfn[nid] - size;
-		     pfn < node_end_pfn[nid]; pfn++)
-			if (!page_is_ram(pfn))
-				break;
-
-		if (pfn != node_end_pfn[nid])
-			size = 0;
+		node_kva_target = round_down(node_end_pfn[nid] - size,
+						 PTRS_PER_PTE);
+		node_kva_target <<= PAGE_SHIFT;
+		do {
+			node_kva_final = find_e820_area(node_kva_target,
+					((u64)node_end_pfn[nid])<<PAGE_SHIFT,
+						((u64)size)<<PAGE_SHIFT,
+						LARGE_PAGE_BYTES);
+			node_kva_target -= LARGE_PAGE_BYTES;
+		} while (node_kva_final == -1ULL &&
+			 (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
+
+		if (node_kva_final == -1ULL)
+			panic("Can not get kva ram\n");
 
-		printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
-				size, nid);
 		node_remap_size[nid] = size;
 		node_remap_offset[nid] = reserve_pages;
 		reserve_pages += size;
-		printk("Shrinking node %d from %ld pages to %ld pages\n",
-			nid, node_end_pfn[nid], node_end_pfn[nid] - size);
-
-		if (node_end_pfn[nid] & (PTRS_PER_PTE-1)) {
-			/*
-			 * Align node_end_pfn[] and node_remap_start_pfn[] to
-			 * pmd boundary. remap_numa_kva will barf otherwise.
-			 */
-			printk("Shrinking node %d further by %ld pages for proper alignment\n",
-				nid, node_end_pfn[nid] & (PTRS_PER_PTE-1));
-			size += node_end_pfn[nid] & (PTRS_PER_PTE-1);
-		}
+		printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
+				  " node %d at %llx\n",
+				size, nid, node_kva_final>>PAGE_SHIFT);
+
+		/*
+		 *  prevent kva address below max_low_pfn want it on system
+		 *  with less memory later.
+		 *  layout will be: KVA address , KVA RAM
+		 *
+		 *  we are supposed to only record the one less then max_low_pfn
+		 *  but we could have some hole in high memory, and it will only
+		 *  check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
+		 *  to use it as free.
+		 *  So reserve_early here, hope we don't run out of that array
+		 */
+		reserve_early(node_kva_final,
+			      node_kva_final+(((u64)size)<<PAGE_SHIFT),
+			      "KVA RAM");
 
-		node_end_pfn[nid] -= size;
-		node_remap_start_pfn[nid] = node_end_pfn[nid];
-		shrink_active_range(nid, old_end_pfn, node_end_pfn[nid]);
+		node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+		remove_active_range(nid, node_remap_start_pfn[nid],
+					 node_remap_start_pfn[nid] + size);
 	}
-	printk("Reserving total of %ld pages for numa KVA remap\n",
+	printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
 			reserve_pages);
 	return reserve_pages;
 }
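Both this loop and the kva search added to initmem_init() below follow the same downward-search pattern: start from the highest aligned candidate that could still fit, and widen the window one large page at a time until find_e820_area() succeeds or the node/low-memory floor is reached. A generic sketch of the pattern (helper name hypothetical; -1ULL means "not found"):

	/* Sketch: place 'size' bytes as high as possible below 'limit'. */
	static u64 __init sketch_find_below(u64 limit, u64 size, u64 align,
					    u64 floor)
	{
		u64 target = round_down(limit - size, align);

		while (target > floor) {
			u64 found = find_e820_area(target, limit, size, align);

			if (found != -1ULL)
				return found;
			target -= align;	/* widen the window downward */
		}
		return -1ULL;			/* caller panics or falls back */
	}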
@@ -285,37 +308,16 @@ static void init_remap_allocator(int nid)
 	node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
 		ALIGN(sizeof(pg_data_t), PAGE_SIZE);
 
-	printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
+	printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid,
 		(ulong) node_remap_start_vaddr[nid],
-		(ulong) pfn_to_kaddr(highstart_pfn
-		   + node_remap_offset[nid] + node_remap_size[nid]));
-}
-#else
-void *alloc_remap(int nid, unsigned long size)
-{
-	return NULL;
-}
-
-static unsigned long calculate_numa_remap_pages(void)
-{
-	return 0;
-}
-
-static void init_remap_allocator(int nid)
-{
-}
-
-void __init remap_numa_kva(void)
-{
+		(ulong) node_remap_end_vaddr[nid]);
 }
-#endif /* CONFIG_DISCONTIGMEM */
 
-extern void setup_bootmem_allocator(void);
-unsigned long __init setup_memory(void)
+void __init initmem_init(unsigned long start_pfn,
+				  unsigned long end_pfn)
 {
 	int nid;
-	unsigned long system_start_pfn, system_max_low_pfn;
-	unsigned long wasted_pages;
+	long kva_target_pfn;
 
 	/*
 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -324,109 +326,77 @@ unsigned long __init setup_memory(void)
 	 * this space and use it to adjust the boundary between ZONE_NORMAL
 	 * and ZONE_HIGHMEM.
 	 */
-	get_memcfg_numa();
 
-	kva_pages = calculate_numa_remap_pages();
+	get_memcfg_numa();
 
-	/* partially used pages are not usable - thus round upwards */
-	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
+	kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-	kva_start_pfn = find_max_low_pfn() - kva_pages;
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	/* Numa kva area is below the initrd */
-	if (initrd_start)
-		kva_start_pfn = PFN_DOWN(initrd_start - PAGE_OFFSET)
-			- kva_pages;
-#endif
-
-	/*
-	 * We waste pages past at the end of the KVA for no good reason other
-	 * than how it is located. This is bad.
-	 */
-	wasted_pages = kva_start_pfn & (PTRS_PER_PTE-1);
-	kva_start_pfn -= wasted_pages;
-	kva_pages += wasted_pages;
+	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
+	do {
+		kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+					max_low_pfn<<PAGE_SHIFT,
+					kva_pages<<PAGE_SHIFT,
+					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
+		kva_target_pfn -= PTRS_PER_PTE;
+	} while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);
+
+	if (kva_start_pfn == -1UL)
+		panic("Can not get kva space\n");
 
-	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
-	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
+	printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
 		kva_start_pfn, max_low_pfn);
-	printk("max_pfn = %ld\n", max_pfn);
+	printk(KERN_INFO "max_pfn = %lx\n", max_pfn);
+
+	/* avoid clash with initrd */
+	reserve_early(kva_start_pfn<<PAGE_SHIFT,
+		      (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
+		     "KVA PG");
 #ifdef CONFIG_HIGHMEM
 	highstart_pfn = highend_pfn = max_pfn;
-	if (max_pfn > system_max_low_pfn)
-		highstart_pfn = system_max_low_pfn;
+	if (max_pfn > max_low_pfn)
+		highstart_pfn = max_low_pfn;
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 	       pages_to_mb(highend_pfn - highstart_pfn));
 	num_physpages = highend_pfn;
 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
-	num_physpages = system_max_low_pfn;
-	high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
+	num_physpages = max_low_pfn;
+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-			pages_to_mb(system_max_low_pfn));
-	printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
-			min_low_pfn, max_low_pfn, highstart_pfn);
+			pages_to_mb(max_low_pfn));
+	printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
+			max_low_pfn, highstart_pfn);
 
-	printk("Low memory ends at vaddr %08lx\n",
+	printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(max_low_pfn));
 	for_each_online_node(nid) {
 		init_remap_allocator(nid);
 
 		allocate_pgdat(nid);
 	}
-	printk("High memory starts at vaddr %08lx\n",
+	remap_numa_kva();
+
+	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
 	for_each_online_node(nid)
 		propagate_e820_map_node(nid);
 
-	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
+	for_each_online_node(nid)
+		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
 	NODE_DATA(0)->bdata = &node0_bdata;
 	setup_bootmem_allocator();
-	return max_low_pfn;
-}
-
-void __init numa_kva_reserve(void)
-{
-	if (kva_pages)
-		reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages),
-				BOOTMEM_DEFAULT);
 }
 
-void __init zone_sizes_init(void)
-{
-	int nid;
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] =
-		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-#ifdef CONFIG_HIGHMEM
-	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
-#endif
-
-	/* If SRAT has not registered memory, register it now */
-	if (find_max_pfn_with_active_regions() == 0) {
-		for_each_online_node(nid) {
-			if (node_has_online_mem(nid))
-				add_active_range(nid, node_start_pfn[nid],
-						 node_end_pfn[nid]);
-		}
-	}
-
-	free_area_init_nodes(max_zone_pfns);
-	return;
-}
-
-void __init set_highmem_pages_init(int bad_ppro)
+void __init set_highmem_pages_init(void)
 {
 #ifdef CONFIG_HIGHMEM
 	struct zone *zone;
-	struct page *page;
+	int nid;
 
 	for_each_zone(zone) {
-		unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
+		unsigned long zone_start_pfn, zone_end_pfn;
 
 		if (!is_highmem(zone))
 			continue;
@@ -434,16 +404,12 @@ void __init set_highmem_pages_init(int bad_ppro)
 		zone_start_pfn = zone->zone_start_pfn;
 		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 
-		printk("Initializing %s for node %d (%08lx:%08lx)\n",
-				zone->name, zone_to_nid(zone),
-				zone_start_pfn, zone_end_pfn);
+		nid = zone_to_nid(zone);
+		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
+				zone->name, nid, zone_start_pfn, zone_end_pfn);
 
-		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
-			if (!pfn_valid(node_pfn))
-				continue;
-			page = pfn_to_page(node_pfn);
-			add_one_highpage_init(page, node_pfn, bad_ppro);
-		}
+		add_highpages_with_active_regions(nid, zone_start_pfn,
+							 zone_end_pfn);
 	}
 	totalram_pages += totalhigh_pages;
 #endif
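The replaced loop promoted highmem pages one pfn at a time, testing pfn_valid() on every iteration; add_highpages_with_active_regions() instead walks the registered active regions for the node, so memory holes are skipped per-region rather than per-page. A rough before/after sketch (free_one_highpage() is hypothetical shorthand for what add_one_highpage_init() did):

	/* Before: per-pfn walk with a validity test on every page. */
	for (pfn = zone_start_pfn; pfn < zone_end_pfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		free_one_highpage(pfn_to_page(pfn));
	}

	/* After: one call; the active-region walk visits only real RAM. */
	add_highpages_with_active_regions(nid, zone_start_pfn, zone_end_pfn);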
@@ -476,3 +442,4 @@ int memory_add_physaddr_to_nid(u64 addr)
 
 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
+