Diffstat (limited to 'arch/x86/mm/numa_64.c')
-rw-r--r--	arch/x86/mm/numa_64.c | 122
1 file changed, 122 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index f3516da035d..d73aaa89237 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -33,6 +33,15 @@ int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
+
+/*
+ * Map cpu index to node index
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -640,3 +649,116 @@ void __init init_cpu_to_node(void)
 #endif
 
 
+void __cpuinit numa_set_node(int cpu, int node)
+{
+	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+	/* early setting, no percpu area yet */
+	if (cpu_to_node_map) {
+		cpu_to_node_map[cpu] = node;
+		return;
+	}
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+		dump_stack();
+		return;
+	}
+#endif
+	per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+	if (node != NUMA_NO_NODE)
+		per_cpu(node_number, cpu) = node;
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+	numa_set_node(cpu, NUMA_NO_NODE);
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	int node = early_cpu_to_node(cpu);
+	struct cpumask *mask;
+	char buf[64];
+
+	mask = node_to_cpumask_map[node];
+	if (mask == NULL) {
+		printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
+		dump_stack();
+		return;
+	}
+
+	if (enable)
+		cpumask_set_cpu(cpu, mask);
+	else
+		cpumask_clear_cpu(cpu, mask);
+
+	cpulist_scnprintf(buf, sizeof(buf), mask);
+	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+	numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	numa_set_cpumask(cpu, 0);
+}
+
+int cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+		printk(KERN_WARNING
+			"cpu_to_node(%d): usage too early!\n", cpu);
+		dump_stack();
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(cpu_to_node);
+
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
+int early_cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map))
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+	if (!cpu_possible(cpu)) {
+		printk(KERN_WARNING
+			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+		dump_stack();
+		return NUMA_NO_NODE;
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+/*
+ * --------- end of debug versions of the numa functions ---------
+ */
+
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
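
For orientation, a minimal caller-side sketch (not part of the patch) of how the
interfaces added above fit together: numa_set_node() is safe both before and after
the per-cpu areas are set up, numa_clear_node() invalidates a mapping, and
early_cpu_to_node() is the lookup that works at early boot. The helpers
note_cpu_node() and cpu_node_or_zero() below are hypothetical names, and reading
the node from apicid_to_node[] is an assumption about the caller; everything else
uses only the functions defined in this diff.

	/*
	 * Hypothetical usage sketch -- not part of the patch above.
	 * note_cpu_node() records a CPU's node during bringup;
	 * cpu_node_or_zero() is an early-boot-safe lookup.
	 */
	static void __cpuinit note_cpu_node(int cpu, int apicid)
	{
		int node = apicid_to_node[apicid];	/* e.g. parsed from SRAT */

		if (node == NUMA_NO_NODE || !node_online(node)) {
			numa_clear_node(cpu);	/* stores NUMA_NO_NODE for cpu */
			return;
		}

		/* Works before per-cpu setup (early map) and after (per_cpu vars) */
		numa_set_node(cpu, node);
		numa_add_cpu(cpu);		/* updates node_to_cpumask_map[node] */
	}

	static int __cpuinit cpu_node_or_zero(int cpu)
	{
		/* early_cpu_to_node() falls back to the early map when needed */
		int node = early_cpu_to_node(cpu);

		return (node == NUMA_NO_NODE) ? 0 : node;
	}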