path: root/arch/ia64/mm/discontig.c
author    Dmitry Torokhov <dtor_core@ameritech.net>  2005-07-11 01:58:04 -0400
committer Dmitry Torokhov <dtor_core@ameritech.net>  2005-07-11 01:58:04 -0400
commit    e0d21d9cca25f424f3129649be48a63c128ed42d (patch)
tree      0a7d407639876e02deef1721817615eaa8c673a3 /arch/ia64/mm/discontig.c
parent    beffbdc2211826b174c68307b1b48c93c05d7ded (diff)
parent    5c23804a0941a111752fdacefe0bea2db1b4d93f (diff)
Merge rsync://www.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/ia64/mm/discontig.c')
-rw-r--r--  arch/ia64/mm/discontig.c  432
1 files changed, 199 insertions, 233 deletions
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index f3fd528ead3b..b5c90e548195 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -44,150 +44,7 @@ struct early_node_data {
 };
 
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
-
-/**
- * reassign_cpu_only_nodes - called from find_memory to move CPU-only nodes to a memory node
- *
- * This function will move nodes with only CPUs (no memory)
- * to a node with memory which is at the minimum numa_slit distance.
- * Any reassigments will result in the compression of the nodes
- * and renumbering the nid values where appropriate.
- * The static declarations below are to avoid large stack size which
- * makes the code not re-entrant.
- */
-static void __init reassign_cpu_only_nodes(void)
-{
-	struct node_memblk_s *p;
-	int i, j, k, nnode, nid, cpu, cpunid, pxm;
-	u8 cslit, slit;
-	static DECLARE_BITMAP(nodes_with_mem, MAX_NUMNODES) __initdata;
-	static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata;
-	static int node_flip[MAX_NUMNODES] __initdata;
-	static int old_nid_map[NR_CPUS] __initdata;
-
-	for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
-		if (!test_bit(p->nid, (void *) nodes_with_mem)) {
-			set_bit(p->nid, (void *) nodes_with_mem);
-			nnode++;
-		}
-
-	/*
-	 * All nids with memory.
-	 */
-	if (nnode == num_online_nodes())
-		return;
-
-	/*
-	 * Change nids and attempt to migrate CPU-only nodes
-	 * to the best numa_slit (closest neighbor) possible.
-	 * For reassigned CPU nodes a nid can't be arrived at
-	 * until after this loop because the target nid's new
-	 * identity might not have been established yet. So
-	 * new nid values are fabricated above num_online_nodes() and
-	 * mapped back later to their true value.
-	 */
-	/* MCD - This code is a bit complicated, but may be unnecessary now.
-	 * We can now handle much more interesting node-numbering.
-	 * The old requirement that 0 <= nid <= numnodes <= MAX_NUMNODES
-	 * and that there be no holes in the numbering 0..numnodes
-	 * has become simply 0 <= nid <= MAX_NUMNODES.
-	 */
-	nid = 0;
-	for_each_online_node(i) {
-		if (test_bit(i, (void *) nodes_with_mem)) {
-			/*
-			 * Save original nid value for numa_slit
-			 * fixup and node_cpuid reassignments.
-			 */
-			node_flip[nid] = i;
-
-			if (i == nid) {
-				nid++;
-				continue;
-			}
-
-			for (p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
-				if (p->nid == i)
-					p->nid = nid;
-
-			cpunid = nid;
-			nid++;
-		} else
-			cpunid = MAX_NUMNODES;
-
-		for (cpu = 0; cpu < NR_CPUS; cpu++)
-			if (node_cpuid[cpu].nid == i) {
-				/*
-				 * For nodes not being reassigned just
-				 * fix the cpu's nid and reverse pxm map
-				 */
-				if (cpunid < MAX_NUMNODES) {
-					pxm = nid_to_pxm_map[i];
-					pxm_to_nid_map[pxm] =
-						node_cpuid[cpu].nid = cpunid;
-					continue;
-				}
-
-				/*
-				 * For nodes being reassigned, find best node by
-				 * numa_slit information and then make a temporary
-				 * nid value based on current nid and num_online_nodes().
-				 */
-				slit = 0xff;
-				k = 2*num_online_nodes();
-				for_each_online_node(j) {
-					if (i == j)
-						continue;
-					else if (test_bit(j, (void *) nodes_with_mem)) {
-						cslit = numa_slit[i * num_online_nodes() + j];
-						if (cslit < slit) {
-							k = num_online_nodes() + j;
-							slit = cslit;
-						}
-					}
-				}
-
-				/* save old nid map so we can update the pxm */
-				old_nid_map[cpu] = node_cpuid[cpu].nid;
-				node_cpuid[cpu].nid = k;
-			}
-	}
-
-	/*
-	 * Fixup temporary nid values for CPU-only nodes.
-	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (node_cpuid[cpu].nid == (2*num_online_nodes())) {
-			pxm = nid_to_pxm_map[old_nid_map[cpu]];
-			pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = nnode - 1;
-		} else {
-			for (i = 0; i < nnode; i++) {
-				if (node_flip[i] != (node_cpuid[cpu].nid - num_online_nodes()))
-					continue;
-
-				pxm = nid_to_pxm_map[old_nid_map[cpu]];
-				pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = i;
-				break;
-			}
-		}
-
-	/*
-	 * Fix numa_slit by compressing from larger
-	 * nid array to reduced nid array.
-	 */
-	for (i = 0; i < nnode; i++)
-		for (j = 0; j < nnode; j++)
-			numa_slit_fix[i * nnode + j] =
-				numa_slit[node_flip[i] * num_online_nodes() + node_flip[j]];
-
-	memcpy(numa_slit, numa_slit_fix, sizeof (numa_slit));
-
-	nodes_clear(node_online_map);
-	for (i = 0; i < nnode; i++)
-		node_set_online(i);
-
-	return;
-}
+static nodemask_t memory_less_mask __initdata;
 
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
@@ -233,44 +90,101 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
 }
 
 /**
- * early_nr_phys_cpus_node - return number of physical cpus on a given node
+ * early_nr_cpus_node - return number of cpus on a given node
  * @node: node to check
  *
- * Count the number of physical cpus on @node. These are cpus that actually
- * exist. We can't use nr_cpus_node() yet because
+ * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
- * called yet.
+ * called yet. Note that node 0 will also count all non-existent cpus.
  */
-static int early_nr_phys_cpus_node(int node)
+static int __init early_nr_cpus_node(int node)
 {
 	int cpu, n = 0;
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
 		if (node == node_cpuid[cpu].nid)
-			if ((cpu == 0) || node_cpuid[cpu].phys_id)
-				n++;
+			n++;
 
 	return n;
 }
 
+/**
+ * compute_pernodesize - compute size of pernode data
+ * @node: the node id.
+ */
+static unsigned long __init compute_pernodesize(int node)
+{
+	unsigned long pernodesize = 0, cpus;
+
+	cpus = early_nr_cpus_node(node);
+	pernodesize += PERCPU_PAGE_SIZE * cpus;
+	pernodesize += node * L1_CACHE_BYTES;
+	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
+	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+	pernodesize = PAGE_ALIGN(pernodesize);
+	return pernodesize;
+}
 
 /**
- * early_nr_cpus_node - return number of cpus on a given node
- * @node: node to check
+ * per_cpu_node_setup - setup per-cpu areas on each node
+ * @cpu_data: per-cpu area on this node
+ * @node: node to setup
  *
- * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
- * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
- * called yet. Note that node 0 will also count all non-existent cpus.
+ * Copy the static per-cpu data into the region we just set aside and then
+ * setup __per_cpu_offset for each CPU on this node. Return a pointer to
+ * the end of the area.
  */
-static int early_nr_cpus_node(int node)
+static void *per_cpu_node_setup(void *cpu_data, int node)
 {
-	int cpu, n = 0;
+#ifdef CONFIG_SMP
+	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (node == node_cpuid[cpu].nid)
-			n++;
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (node == node_cpuid[cpu].nid) {
+			memcpy(__va(cpu_data), __phys_per_cpu_start,
+			       __per_cpu_end - __per_cpu_start);
+			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
+				__per_cpu_start;
+			cpu_data += PERCPU_PAGE_SIZE;
+		}
+	}
+#endif
+	return cpu_data;
+}
 
-	return n;
+/**
+ * fill_pernode - initialize pernode data.
+ * @node: the node id.
+ * @pernode: physical address of pernode data
+ * @pernodesize: size of the pernode data
+ */
+static void __init fill_pernode(int node, unsigned long pernode,
+	unsigned long pernodesize)
+{
+	void *cpu_data;
+	int cpus = early_nr_cpus_node(node);
+	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+
+	mem_data[node].pernode_addr = pernode;
+	mem_data[node].pernode_size = pernodesize;
+	memset(__va(pernode), 0, pernodesize);
+
+	cpu_data = (void *)pernode;
+	pernode += PERCPU_PAGE_SIZE * cpus;
+	pernode += node * L1_CACHE_BYTES;
+
+	mem_data[node].pgdat = __va(pernode);
+	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+
+	mem_data[node].node_data = __va(pernode);
+	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+
+	mem_data[node].pgdat->bdata = bdp;
+	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+
+	cpu_data = per_cpu_node_setup(cpu_data, node);
+
+	return;
 }
 
 /**
@@ -304,9 +218,8 @@ static int early_nr_cpus_node(int node)
 static int __init find_pernode_space(unsigned long start, unsigned long len,
 				     int node)
 {
-	unsigned long epfn, cpu, cpus, phys_cpus;
+	unsigned long epfn;
 	unsigned long pernodesize = 0, pernode, pages, mapsize;
-	void *cpu_data;
 	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
 
 	epfn = (start + len) >> PAGE_SHIFT;
@@ -329,49 +242,12 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 	 * Calculate total size needed, incl. what's necessary
 	 * for good alignment and alias prevention.
 	 */
-	cpus = early_nr_cpus_node(node);
-	phys_cpus = early_nr_phys_cpus_node(node);
-	pernodesize += PERCPU_PAGE_SIZE * cpus;
-	pernodesize += node * L1_CACHE_BYTES;
-	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
-	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-	pernodesize = PAGE_ALIGN(pernodesize);
+	pernodesize = compute_pernodesize(node);
 	pernode = NODEDATA_ALIGN(start, node);
 
 	/* Is this range big enough for what we want to store here? */
-	if (start + len > (pernode + pernodesize + mapsize)) {
-		mem_data[node].pernode_addr = pernode;
-		mem_data[node].pernode_size = pernodesize;
-		memset(__va(pernode), 0, pernodesize);
-
-		cpu_data = (void *)pernode;
-		pernode += PERCPU_PAGE_SIZE * cpus;
-		pernode += node * L1_CACHE_BYTES;
-
-		mem_data[node].pgdat = __va(pernode);
-		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
-
-		mem_data[node].node_data = __va(pernode);
-		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-
-		mem_data[node].pgdat->bdata = bdp;
-		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
-
-		/*
-		 * Copy the static per-cpu data into the region we
-		 * just set aside and then setup __per_cpu_offset
-		 * for each CPU on this node.
-		 */
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			if (node == node_cpuid[cpu].nid) {
-				memcpy(__va(cpu_data), __phys_per_cpu_start,
-				       __per_cpu_end - __per_cpu_start);
-				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-					__per_cpu_start;
-				cpu_data += PERCPU_PAGE_SIZE;
-			}
-		}
-	}
+	if (start + len > (pernode + pernodesize + mapsize))
+		fill_pernode(node, pernode, pernodesize);
 
 	return 0;
 }
@@ -411,6 +287,9 @@ static void __init reserve_pernode_space(void)
 	for_each_online_node(node) {
 		pg_data_t *pdp = mem_data[node].pgdat;
 
+		if (node_isset(node, memory_less_mask))
+			continue;
+
 		bdp = pdp->bdata;
 
 		/* First the bootmem_map itself */
@@ -436,8 +315,8 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-	int cpu, node;
 	pg_data_t *pgdat_list[MAX_NUMNODES];
+	int cpu, node;
 
 	for_each_online_node(node)
 		pgdat_list[node] = mem_data[node].pgdat;
@@ -447,12 +326,99 @@ static void __init initialize_pernode_data(void)
 		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
 		       sizeof(pgdat_list));
 	}
-
+#ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
+#else
+	{
+		struct cpuinfo_ia64 *cpu0_cpu_info;
+		cpu = 0;
+		node = node_cpuid[cpu].nid;
+		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
+			((char *)&per_cpu__cpu_info - __per_cpu_start));
+		cpu0_cpu_info->node_data = mem_data[node].node_data;
+	}
+#endif /* CONFIG_SMP */
+}
+
+/**
+ * memory_less_node_alloc - * attempt to allocate memory on the best NUMA slit
+ * node but fall back to any other node when __alloc_bootmem_node fails
+ * for best.
+ * @nid: node id
+ * @pernodesize: size of this node's pernode data
+ * @align: alignment to use for this node's pernode data
+ */
+static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
+	unsigned long align)
+{
+	void *ptr = NULL;
+	u8 best = 0xff;
+	int bestnode = -1, node;
+
+	for_each_online_node(node) {
+		if (node_isset(node, memory_less_mask))
+			continue;
+		else if (node_distance(nid, node) < best) {
+			best = node_distance(nid, node);
+			bestnode = node;
+		}
+	}
+
+	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat,
+		pernodesize, align, __pa(MAX_DMA_ADDRESS));
+
+	if (!ptr)
+		panic("NO memory for memory less node\n");
+	return ptr;
+}
+
+/**
+ * pgdat_insert - insert the pgdat into global pgdat_list
+ * @pgdat: the pgdat for a node.
+ */
+static void __init pgdat_insert(pg_data_t *pgdat)
+{
+	pg_data_t *prev = NULL, *next;
+
+	for_each_pgdat(next)
+		if (pgdat->node_id < next->node_id)
+			break;
+		else
+			prev = next;
+
+	if (prev) {
+		prev->pgdat_next = pgdat;
+		pgdat->pgdat_next = next;
+	} else {
+		pgdat->pgdat_next = pgdat_list;
+		pgdat_list = pgdat;
+	}
+
+	return;
+}
+
+/**
+ * memory_less_nodes - allocate and initialize CPU only nodes pernode
+ * information.
+ */
+static void __init memory_less_nodes(void)
+{
+	unsigned long pernodesize;
+	void *pernode;
+	int node;
+
+	for_each_node_mask(node, memory_less_mask) {
+		pernodesize = compute_pernodesize(node);
+		pernode = memory_less_node_alloc(node, pernodesize,
+			(node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024));
+		fill_pernode(node, __pa(pernode), pernodesize);
+	}
+
+	return;
 }
 
 /**
@@ -472,16 +438,19 @@ void __init find_memory(void)
 		node_set_online(0);
 	}
 
+	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
 	min_low_pfn = -1;
 	max_low_pfn = 0;
 
-	if (num_online_nodes() > 1)
-		reassign_cpu_only_nodes();
-
 	/* These actually end up getting called by call_pernode_memory() */
 	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
 	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
 
+	for_each_online_node(node)
+		if (mem_data[node].bootmem_data.node_low_pfn) {
+			node_clear(node, memory_less_mask);
+			mem_data[node].min_pfn = ~0UL;
+		}
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
 	 * what the bootmem allocator expects
@@ -492,17 +461,14 @@ void __init find_memory(void)
 
 		if (!node_online(node))
 			continue;
+		else if (node_isset(node, memory_less_mask))
+			continue;
 
 		bdp = &mem_data[node].bootmem_data;
 		pernode = mem_data[node].pernode_addr;
 		pernodesize = mem_data[node].pernode_size;
 		map = pernode + pernodesize;
 
-		/* Sanity check... */
-		if (!pernode)
-			panic("pernode space for node %d "
-			      "could not be allocated!", node);
-
 		init_bootmem_node(mem_data[node].pgdat,
 				  map>>PAGE_SHIFT,
 				  bdp->node_boot_start>>PAGE_SHIFT,
@@ -512,6 +478,7 @@ void __init find_memory(void)
 	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
 
 	reserve_pernode_space();
+	memory_less_nodes();
 	initialize_pernode_data();
 
 	max_pfn = max_low_pfn;
@@ -519,6 +486,7 @@ void __init find_memory(void)
 	find_initrd();
 }
 
+#ifdef CONFIG_SMP
 /**
  * per_cpu_init - setup per-cpu variables
  *
@@ -529,15 +497,15 @@ void *per_cpu_init(void)
 {
 	int cpu;
 
-	if (smp_processor_id() == 0) {
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			per_cpu(local_per_cpu_offset, cpu) =
-				__per_cpu_offset[cpu];
-		}
-	}
+	if (smp_processor_id() != 0)
+		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
+#endif /* CONFIG_SMP */
 
 /**
  * show_mem - give short summary of memory stats
@@ -680,12 +648,13 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	/* so min() will work in count_node_pages */
-	for_each_online_node(node)
-		mem_data[node].min_pfn = ~0UL;
-
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+	vmem_map = (struct page *) vmalloc_end;
+	efi_memmap_walk(create_mem_map_page_table, NULL);
+	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
+
 	for_each_online_node(node) {
 		memset(zones_size, 0, sizeof(zones_size));
 		memset(zholes_size, 0, sizeof(zholes_size));
@@ -719,15 +688,6 @@ void __init paging_init(void)
 			       mem_data[node].num_dma_physpages);
 		}
 
-		if (node == 0) {
-			vmalloc_end -=
-				PAGE_ALIGN(max_low_pfn * sizeof(struct page));
-			vmem_map = (struct page *) vmalloc_end;
-
-			efi_memmap_walk(create_mem_map_page_table, NULL);
-			printk("Virtual mem_map starts at 0x%p\n", vmem_map);
-		}
-
 		pfn_offset = mem_data[node].min_pfn;
 
 		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
@@ -735,5 +695,11 @@ void __init paging_init(void)
 				    pfn_offset, zholes_size);
 	}
 
+	/*
+	 * Make memory less nodes become a member of the known nodes.
+	 */
+	for_each_node_mask(node, memory_less_mask)
+		pgdat_insert(mem_data[node].pgdat);
+
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
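
A note for readers following the new helpers in this diff: compute_pernodesize() and fill_pernode() agree on a single per-node layout (per-cpu pages for the node's CPUs, a node-dependent cache-line skew to avoid aliasing, then the node's pg_data_t and ia64_node_data, rounded up to a page). The standalone C sketch below, which is not part of the commit, simply replays that arithmetic; all the constants and struct sizes in it are illustrative stand-ins, not the real ia64 values.

/*
 * Standalone sketch (illustrative, not kernel code): mirrors the pernode
 * layout arithmetic used by compute_pernodesize()/fill_pernode().
 * Constants are made-up stand-ins for the real ia64 definitions.
 */
#include <stdio.h>

#define PERCPU_PAGE_SIZE  (64 * 1024)   /* stand-in for the ia64 per-cpu page */
#define L1_CACHE_BYTES    128           /* stand-in cache line size */
#define PAGE_SIZE         (16 * 1024)   /* stand-in page size */

#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(unsigned long)(L1_CACHE_BYTES - 1))
#define PAGE_ALIGN(x)     (((x) + PAGE_SIZE - 1) & ~(unsigned long)(PAGE_SIZE - 1))

/* stand-ins for sizeof(pg_data_t) and sizeof(struct ia64_node_data) */
#define SIZEOF_PGDAT      2048
#define SIZEOF_NODE_DATA  512

static unsigned long sketch_pernodesize(int node, int cpus_on_node)
{
	unsigned long size = 0;

	size += PERCPU_PAGE_SIZE * (unsigned long)cpus_on_node; /* per-cpu pages   */
	size += (unsigned long)node * L1_CACHE_BYTES;           /* per-node skew   */
	size += L1_CACHE_ALIGN(SIZEOF_PGDAT);                   /* node pg_data_t  */
	size += L1_CACHE_ALIGN(SIZEOF_NODE_DATA);               /* ia64_node_data  */
	return PAGE_ALIGN(size);
}

int main(void)
{
	/* fill_pernode() carves the region in the same order the size was summed */
	int node = 1, cpus = 4;
	unsigned long base = 0;   /* pretend base address of the pernode region */
	unsigned long p = base;

	printf("pernodesize(node=%d, cpus=%d) = %lu bytes\n",
	       node, cpus, sketch_pernodesize(node, cpus));

	printf("cpu_data  at offset %lu\n", p - base);
	p += PERCPU_PAGE_SIZE * (unsigned long)cpus + (unsigned long)node * L1_CACHE_BYTES;
	printf("pgdat     at offset %lu\n", p - base);
	p += L1_CACHE_ALIGN(SIZEOF_PGDAT);
	printf("node_data at offset %lu\n", p - base);
	return 0;
}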