Diffstat (limited to 'arch/x86/mm/numa_emulation.c'):
 arch/x86/mm/numa_emulation.c | 36 +++++++++++++++++-------------------
 1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index ad091e4cff17..d0ed086b6247 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 #include <linux/topology.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 #include <asm/dma.h>
 
 #include "numa_internal.h"
@@ -84,7 +85,13 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		nr_nodes = MAX_NUMNODES;
 	}
 
-	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
+	/*
+	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
+	 * the division in ulong number of pages and convert back.
+	 */
+	size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
+
 	/*
 	 * Calculate the number of big nodes that can be allocated as a result
 	 * of consolidating the remainder.
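
A minimal userspace sketch of the division pattern added in the hunk above, for illustration only: PAGE_SHIFT and PFN_PHYS() are re-created locally as stand-ins for the kernel definitions, and node_size() is a hypothetical helper rather than a function from this file. The point is that the 64-bit byte count is first reduced to an unsigned long page count, so a 32-bit build never needs the libgcc 64-bit division helper __udivdi3().

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12				/* illustrative value, not the kernel's */
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)	/* pages -> bytes */

/* Divide a byte span across nr_nodes without a 64-bit division. */
static uint64_t node_size(uint64_t span_bytes, unsigned int nr_nodes)
{
	/* do the division in pages so the operands fit in unsigned long */
	unsigned long pages = (unsigned long)(span_bytes >> PAGE_SHIFT);

	return PFN_PHYS(pages / nr_nodes);
}

int main(void)
{
	/* e.g. split a 6 GiB span across 4 emulated nodes: 1.5 GiB each */
	printf("%llu\n", (unsigned long long)node_size(6ULL << 30, 4));
	return 0;
}
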
@@ -226,7 +233,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 	 */
 	while (nodes_weight(physnode_mask)) {
 		for_each_node_mask(i, physnode_mask) {
-			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
+			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
 			u64 start, limit, end;
 			int phys_blk;
 
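
The PFN_PHYS() conversions in this and the following hunks are more than cosmetic: an open-coded `pfn << PAGE_SHIFT` shifts in the pfn's own (possibly 32-bit) type and can overflow before the result reaches the u64 destination, whereas PFN_PHYS() is expected to widen the pfn to a physical-address type before shifting. A standalone illustration, with the macros re-created locally rather than taken from <linux/pfn.h>:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12					/* illustrative only */
#define MAX_DMA32_PFN	(1UL << (32 - PAGE_SHIFT))		/* 4 GiB boundary, in pages */
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)		/* widen first, then shift */

int main(void)
{
	/* On an ILP32 build the open-coded shift wraps to 0; PFN_PHYS() yields 4 GiB. */
	uint64_t open_coded = MAX_DMA32_PFN << PAGE_SHIFT;
	uint64_t via_macro  = PFN_PHYS(MAX_DMA32_PFN);

	printf("open-coded: %#llx  PFN_PHYS: %#llx\n",
	       (unsigned long long)open_coded, (unsigned long long)via_macro);
	return 0;
}
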
@@ -298,7 +305,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 {
 	static struct numa_meminfo ei __initdata;
 	static struct numa_meminfo pi __initdata;
-	const u64 max_addr = max_pfn << PAGE_SHIFT;
+	const u64 max_addr = PFN_PHYS(max_pfn);
 	u8 *phys_dist = NULL;
 	size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
 	int max_emu_nid, dfl_phys_nid;
@@ -342,8 +349,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	if (numa_dist_cnt) {
 		u64 phys;
 
-		phys = memblock_find_in_range(0,
-					      (u64)max_pfn_mapped << PAGE_SHIFT,
+		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 					      phys_size, PAGE_SIZE);
 		if (phys == MEMBLOCK_ERROR) {
 			pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
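
For context, the buffer sized by phys_size above holds a flat numa_dist_cnt x numa_dist_cnt byte matrix of node distances. The schematic, non-kernel sketch below only illustrates that dist[from * cnt + to] layout; copy_distance_table() and the stubbed distance values are hypothetical, not taken from this file.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's node_distance(); values are placeholders. */
static unsigned char node_distance_stub(int from, int to)
{
	return from == to ? 10 : 20;
}

/* Build a flat cnt x cnt distance matrix, indexed as dist[from * cnt + to]. */
static unsigned char *copy_distance_table(int cnt)
{
	unsigned char *dist = malloc((size_t)cnt * cnt);
	int i, j;

	if (!dist)
		return NULL;
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			dist[i * cnt + j] = node_distance_stub(i, j);
	return dist;
}

int main(void)
{
	int cnt = 2;
	unsigned char *dist = copy_distance_table(cnt);

	if (dist)
		printf("dist[0][1] = %u\n", dist[0 * cnt + 1]);
	free(dist);
	return 0;
}
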
@@ -454,10 +460,9 @@ void __cpuinit numa_remove_cpu(int cpu)
 		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
 }
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
 {
-	struct cpumask *mask;
-	int nid, physnid, i;
+	int nid, physnid;
 
 	nid = early_cpu_to_node(cpu);
 	if (nid == NUMA_NO_NODE) {
@@ -467,28 +472,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 
 	physnid = emu_nid_to_phys[nid];
 
-	for_each_online_node(i) {
+	for_each_online_node(nid) {
 		if (emu_nid_to_phys[nid] != physnid)
 			continue;
 
-		mask = debug_cpumask_set_cpu(cpu, enable);
-		if (!mask)
-			return;
-
-		if (enable)
-			cpumask_set_cpu(cpu, mask);
-		else
-			cpumask_clear_cpu(cpu, mask);
+		debug_cpumask_set_cpu(cpu, nid, enable);
 	}
 }
 
 void __cpuinit numa_add_cpu(int cpu)
 {
-	numa_set_cpumask(cpu, 1);
+	numa_set_cpumask(cpu, true);
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-	numa_set_cpumask(cpu, 0);
+	numa_set_cpumask(cpu, false);
 }
 #endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
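
The rewritten loop above delegates the actual cpumask update to debug_cpumask_set_cpu(), which is defined outside this file and is not part of this diff. Based on the open-coded logic that was removed, the behaviour the new three-argument call presumably relies on is roughly the sketch below; it is a reading of what moved out of numa_set_cpumask(), not the real definition.

/* Sketch only: assumed behaviour of the out-of-file helper, reconstructed
 * from the removed lines. The real implementation may differ. */
static void debug_cpumask_set_cpu_sketch(int cpu, int node, bool enable)
{
	struct cpumask *mask = node_to_cpumask_map[node];

	if (!mask)		/* node map not set up yet: nothing to update */
		return;

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);
}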