Diffstat (limited to 'arch')
 46 files changed, 260 insertions(+), 192 deletions(-)
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 149532e162c4..b4f284c72ff3 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -39,7 +39,24 @@ static inline cpumask_t node_to_cpumask(int node)
 	return node_cpu_mask;
 }
 
+extern struct cpumask node_to_cpumask_map[];
+/* FIXME: This is dumb, recalculating every time.  But simple. */
+static const struct cpumask *cpumask_of_node(int node)
+{
+	int cpu;
+
+	cpumask_clear(&node_to_cpumask_map[node]);
+
+	for_each_online_cpu(cpu) {
+		if (cpu_to_node(cpu) == node)
+			cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+	}
+
+	return &node_to_cpumask_map[node];
+}
+
 #define pcibus_to_cpumask(bus)	(cpu_online_map)
+#define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 #endif /* !CONFIG_NUMA */
 # include <asm-generic/topology.h>
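Note: this alpha hunk is the template for the whole series. Callers get a const pointer into a shared per-node map instead of a cpumask_t copied by value through the stack. A minimal userspace sketch of the difference; the sizes and names are illustrative, not kernel API:

#include <stdio.h>

/* Userspace sketch: with NR_CPUS=4096 a cpumask is 512 bytes, so the
 * old by-value style copies 512 bytes through the stack on every call,
 * while the by-pointer style copies a single word. */
#define NR_CPUS 4096
struct cpumask { unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))]; };

static struct cpumask node_map[4];	/* stand-in for node_to_cpumask_map[] */

static struct cpumask node_to_cpumask(int node)	/* old: returns a copy */
{
	return node_map[node];
}

static const struct cpumask *cpumask_of_node(int node)	/* new: a pointer */
{
	return &node_map[node];
}

int main(void)
{
	struct cpumask copy = node_to_cpumask(0);	/* 512 bytes on the stack */
	const struct cpumask *p = cpumask_of_node(0);	/* no copy at all */

	printf("by value: %zu bytes, by pointer: %zu bytes\n",
	       sizeof(copy), sizeof(p));
	return 0;
}

With NR_CPUS at 4096 the by-value form moves 512 bytes per call; the pointer form moves one machine word, which is the point of cpumask_of_node().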
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index d0f1620007f7..703731accda6 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -50,7 +50,8 @@ int irq_select_affinity(unsigned int irq)
 	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
 		return 1;
 
-	while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
+	while (!cpu_possible(cpu) ||
+	       !cpumask_test_cpu(cpu, irq_default_affinity))
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index a449e999027c..02bee6983ce2 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -79,6 +79,11 @@ int alpha_l3_cacheshape;
 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
 #endif
 
+#ifdef CONFIG_NUMA
+struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_to_cpumask_map);
+#endif
+
 /* Which processor we booted from.  */
 int boot_cpuid;
 
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
index 1a50b69b1a19..f7dd5f71edf7 100644
--- a/arch/avr32/include/asm/bitops.h
+++ b/arch/avr32/include/asm/bitops.h
@@ -263,6 +263,11 @@ static inline int fls(unsigned long word)
 	return 32 - result;
 }
 
+static inline int __fls(unsigned long word)
+{
+	return fls(word) - 1;
+}
+
 unsigned long find_first_zero_bit(const unsigned long *addr,
 				  unsigned long size);
 unsigned long find_next_zero_bit(const unsigned long *addr,
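Note: __fls(word) is the bit index of the most significant set bit, i.e. fls(word) - 1, and as in the asm-generic version the result is undefined for word == 0. A hedged userspace sketch of the semantics (gcc builtin, not the kernel's assembly implementation; assumes 32-bit words as on avr32):

#include <stdio.h>

/* Sketch of fls()/__fls() semantics only. */
static int fls_sketch(unsigned int word)
{
	return word ? 32 - __builtin_clz(word) : 0;
}

static int __fls_sketch(unsigned int word)
{
	return fls_sketch(word) - 1;	/* undefined for word == 0 */
}

int main(void)
{
	/* 0x90 = bits 7 and 4 set: fls() = 8, __fls() = 7 */
	printf("fls(0x90)=%d __fls(0x90)=%d\n",
	       fls_sketch(0x90), __fls_sketch(0x90));
	return 0;
}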
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index b39a175c79c1..c428e4106f89 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -213,6 +213,7 @@ static __inline__ int __test_bit(int nr, const void *addr)
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* _BLACKFIN_BITOPS_H */
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index c0e62f811e09..9e69cfb7f134 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -148,6 +148,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 #define ffs kernel_ffs
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/find.h>
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
index cb18e3b0aa94..cb9ddf5fc54f 100644
--- a/arch/h8300/include/asm/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
@@ -207,6 +207,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* _H8300_BITOPS_H */
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 3627116fb0e2..36429a532630 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -27,7 +27,7 @@ irq_canonicalize (int irq)
 }
 
 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
-bool is_affinity_mask_valid(cpumask_t cpumask);
+bool is_affinity_mask_valid(cpumask_var_t cpumask);
 
 #define is_affinity_mask_valid is_affinity_mask_valid
 
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index a3cc9f65f954..76a33a91ca69 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -34,6 +34,7 @@
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define node_to_cpumask(node) (node_to_cpu_mask[node])
+#define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
@@ -45,7 +46,7 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
  */
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
 
 /*
  * Determines the node for a given pci bus
@@ -109,6 +110,8 @@ void build_cpu_to_node_map(void);
 #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
 #define topology_core_siblings(cpu)		(cpu_core_map[cpu])
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define smt_capable()				(smp_num_siblings > 1)
 #endif
 
@@ -119,6 +122,10 @@ extern void arch_fix_phys_package_id(int num, u32 slot);
 				node_to_cpumask(pcibus_to_node(bus)) \
 				)
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_IA64_TOPOLOGY_H */
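Note the fallback in cpumask_of_pcibus(): a bus whose home node is unknown (-1) is treated as reachable from every CPU, otherwise only from the CPUs of its node. A userspace sketch of that semantics, with illustrative types rather than kernel API:

#include <stdio.h>

/* Sketch: "-1 means no NUMA information, so return the all-CPUs mask". */
static unsigned long cpu_all_mask_bits = 0xFFUL;	/* "every cpu" */
static unsigned long node_mask_bits[2] = { 0x0FUL, 0xF0UL };

static const unsigned long *cpumask_of_pcibus_sketch(int bus_node)
{
	return bus_node == -1 ? &cpu_all_mask_bits
			      : &node_mask_bits[bus_node];
}

int main(void)
{
	printf("unknown node: %#lx\n", *cpumask_of_pcibus_sketch(-1));	/* 0xff */
	printf("node 1:       %#lx\n", *cpumask_of_pcibus_sketch(1));	/* 0xf0 */
	return 0;
}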
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bd7acc71e8a9..0553648b7595 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -202,7 +202,6 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
 			      Boot-time Table Parsing
    -------------------------------------------------------------------------- */
 
-static int total_cpus __initdata;
 static int available_cpus __initdata;
 struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
@@ -1001,7 +1000,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c8adecd5b416..5cfd3d91001a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= nr_cpu_ids)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 
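The rewrite above replaces a destructive pattern (copy the node mask, clear offline CPUs out of the copy) with two read-only passes over the const mask: one to count eligible CPUs, one to stop at the (irq % num_cpus)-th one. A runnable userspace sketch of that selection logic; online[] and in_domain[] are illustrative stand-ins for cpu_online() and the vector-domain cpumask:

#include <stdio.h>

#define SKETCH_NR_CPUS 8

static int online[SKETCH_NR_CPUS]    = { 1, 1, 0, 1, 1, 1, 0, 1 };
static int in_domain[SKETCH_NR_CPUS] = { 1, 0, 1, 1, 1, 0, 1, 1 };

static int pick_target_cpu(int irq)
{
	int cpu, num_cpus = 0, i = 0;

	for (cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)	/* first pass: count */
		if (in_domain[cpu] && online[cpu])
			num_cpus++;
	if (!num_cpus)
		return -1;

	for (cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)	/* second pass: pick */
		if (in_domain[cpu] && online[cpu] && i++ >= irq % num_cpus)
			break;
	return cpu;
}

int main(void)
{
	int irq;

	for (irq = 0; irq < 4; irq++)	/* irqs spread across cpus 0, 3, 4, 7 */
		printf("irq %d -> cpu %d\n", irq, pick_target_cpu(irq));
	return 0;
}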
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 0b6db53fedcf..95ff16cb05d8 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -112,11 +112,11 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 	}
 }
 
-bool is_affinity_mask_valid(cpumask_t cpumask)
+bool is_affinity_mask_valid(cpumask_var_t cpumask)
 {
 	if (ia64_platform_is("sn2")) {
 		/* Only allow one CPU to be specified in the smp_affinity mask */
-		if (cpus_weight(cpumask) != 1)
+		if (cpumask_weight(cpumask) != 1)
 			return false;
 	}
 	return true;
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 636588e7e068..be339477f906 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -385,7 +385,6 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	int j;
 	const char *slabname;
 	int ordinal;
-	cpumask_t cpumask;
 	char slice;
 	struct cpuinfo_ia64 *c;
 	struct sn_hwperf_port_info *ptdata;
@@ -473,23 +472,21 @@ static int sn_topology_show(struct seq_file *s, void *d)
 		 * CPUs on this node, if any
 		 */
 		if (!SN_HWPERF_IS_IONODE(obj)) {
-			cpumask = node_to_cpumask(ordinal);
-			for_each_online_cpu(i) {
-				if (cpu_isset(i, cpumask)) {
-					slice = 'a' + cpuid_to_slice(i);
-					c = cpu_data(i);
-					seq_printf(s, "cpu %d %s%c local"
-						" freq %luMHz, arch ia64",
-						i, obj->location, slice,
-						c->proc_freq / 1000000);
-					for_each_online_cpu(j) {
-						seq_printf(s, j ? ":%d" : ", dist %d",
-							node_distance(
+			for_each_cpu_and(i, cpu_online_mask,
+					 cpumask_of_node(ordinal)) {
+				slice = 'a' + cpuid_to_slice(i);
+				c = cpu_data(i);
+				seq_printf(s, "cpu %d %s%c local"
+					   " freq %luMHz, arch ia64",
+					   i, obj->location, slice,
+					   c->proc_freq / 1000000);
+				for_each_online_cpu(j) {
+					seq_printf(s, j ? ":%d" : ", dist %d",
+						   node_distance(
 							cpu_to_node(i),
 							cpu_to_node(j)));
-					}
-					seq_putc(s, '\n');
 				}
+				seq_putc(s, '\n');
 			}
 		}
 	}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 0f06b3722e96..2547d6c4a827 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -592,7 +592,7 @@ int setup_profiling_timer(unsigned int multiplier)
 	 * accounting. At that time they also adjust their APIC timers
 	 * accordingly.
 	 */
-	for (i = 0; i < NR_CPUS; ++i)
+	for_each_possible_cpu(i)
 		per_cpu(prof_multiplier, i) = multiplier;
 
 	return 0;
diff --git a/arch/m68knommu/include/asm/bitops.h b/arch/m68knommu/include/asm/bitops.h
index 6f3685eab44c..9d3cbe5fad1e 100644
--- a/arch/m68knommu/include/asm/bitops.h
+++ b/arch/m68knommu/include/asm/bitops.h
@@ -331,6 +331,7 @@ found_middle:
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* _M68KNOMMU_BITOPS_H */
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 1fb959f98982..55d481569a1f 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -25,11 +25,13 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
 #define cpu_to_node(cpu)	(sn_cpu_info[(cpu)].p_nodeid)
 #define parent_node(node)	(node)
 #define node_to_cpumask(node)	(hub_data(node)->h_cpus)
-#define node_to_first_cpu(node)	(first_cpu(node_to_cpumask(node)))
+#define cpumask_of_node(node)	(&hub_data(node)->h_cpus)
+#define node_to_first_cpu(node)	(cpumask_first(cpumask_of_node(node)))
 struct pci_bus;
 extern int pcibus_to_node(struct pci_bus *);
 
 #define pcibus_to_cpumask(bus)	(cpu_online_map)
+#define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 409e698f4361..6ef4b7867b1b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -16,8 +16,6 @@
 #include <linux/cpumask.h>
 typedef unsigned long address_t;
 
-extern cpumask_t cpu_online_map;
-
 
 /*
  *	Private routines/data
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 373fca394a54..375258559ae6 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
 	return numa_cpumask_lookup_table[node];
 }
 
+#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
+
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(cpumask_of_node(node));
 }
 
 int of_node_to_nid(struct device_node *device);
@@ -46,6 +46,10 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 			 node_to_cpumask(pcibus_to_node(bus)) \
 			)
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
 /* sched_domains SD_NODE_INIT for PPC64 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.parent			= NULL,			\
@@ -108,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 
 #define topology_thread_siblings(cpu)	(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_siblings(cpu)	(per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
 #endif
 #endif
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 906a0a2a9fe1..1410443731eb 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -80,10 +80,10 @@ static void cpu_affinity_set(struct spu *spu, int cpu)
 	u64 route;
 
 	if (nr_cpus_node(spu->node)) {
-		cpumask_t spumask = node_to_cpumask(spu->node);
-		cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+		const struct cpumask *spumask = cpumask_of_node(spu->node),
+			*cpumask = cpumask_of_node(cpu_to_node(cpu));
 
-		if (!cpus_intersects(spumask, cpumask))
+		if (!cpumask_intersects(spumask, cpumask))
 			return;
 	}
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2ad914c47493..6a0ad196aeb3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -166,9 +166,9 @@ void spu_update_sched_info(struct spu_context *ctx)
 static int __node_allowed(struct spu_context *ctx, int node)
 {
 	if (nr_cpus_node(node)) {
-		cpumask_t mask = node_to_cpumask(node);
+		const struct cpumask *mask = cpumask_of_node(node);
 
-		if (cpus_intersects(mask, ctx->cpus_allowed))
+		if (cpumask_intersects(mask, &ctx->cpus_allowed))
 			return 1;
 	}
 
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index d96c91643458..c93eb50e1d09 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -6,10 +6,12 @@
 #define mc_capable()	(1)
 
 cpumask_t cpu_coregroup_map(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
 #define topology_core_siblings(cpu)	(cpu_core_map[cpu])
+#define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 90e9ba11eba1..cc362c9ea8f1 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -97,6 +97,11 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
 	return mask;
 }
 
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
 static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 {
 	unsigned int cpu;
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 279d9cc4a007..066f0fba590e 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -32,6 +32,7 @@
 #define parent_node(node)	((void)(node),0)
 
 #define node_to_cpumask(node)	((void)node, cpu_online_map)
+#define cpumask_of_node(node)	((void)node, cpu_online_mask)
 #define node_to_first_cpu(node)	((void)(node),0)
 
 #define pcibus_to_node(bus)	((void)(bus), -1)
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 001c04027c82..b8a65b64e1df 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -16,8 +16,12 @@ static inline cpumask_t node_to_cpumask(int node)
 {
 	return numa_cpumask_lookup_table[node];
 }
+#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+/*
+ * Returns a pointer to the cpumask of CPUs on Node 'node'.
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
 		cpumask_t *v = &(numa_cpumask_lookup_table[node])
 
@@ -26,9 +30,7 @@ static inline cpumask_t node_to_cpumask(int node)
 
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(cpumask_of_node(node));
 }
 
 struct pci_bus;
@@ -77,10 +79,13 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
 #define topology_core_siblings(cpu)		(cpu_core_map[cpu])
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define mc_capable()				(sparc64_multi_core)
 #define smt_capable()				(sparc64_multi_core)
 #endif /* CONFIG_SMP */
 
 #define cpu_coregroup_map(cpu)			(cpu_core_map[cpu])
+#define cpu_coregroup_mask(cpu)			(&cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 322046cdf85f..4873f28905b0 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -778,7 +778,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
 out:
 	nid = of_node_to_nid(dp);
 	if (nid != -1) {
-		cpumask_t numa_mask = node_to_cpumask(nid);
+		cpumask_t numa_mask = *cpumask_of_node(nid);
 
 		irq_set_affinity(irq, &numa_mask);
 	}
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 0d0cd815e83e..4ef282e81912 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -286,7 +286,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
 
 	nid = pbm->numa_node;
 	if (nid != -1) {
-		cpumask_t numa_mask = node_to_cpumask(nid);
+		cpumask_t numa_mask = *cpumask_of_node(nid);
 
 		irq_set_affinity(irq, &numa_mask);
 	}
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index 51ac1230294e..bc53d5ef1386 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -157,7 +157,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 
 	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster.  If are not
@@ -190,7 +190,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return cpu_to_logical_apicid(0);
 	/*
 	 * The cpus in the mask must all be on the apic cluster.  If are not
@@ -218,9 +218,6 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
 	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
@@ -229,31 +226,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster.  If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return cpu_to_logical_apicid(0);
-			}
-			apicid = new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
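The simplification above deduplicates the cluster walk: the _and variant now just computes (inmask & andmask & online) into a temporary mask and delegates to the single-mask helper. A userspace sketch of that allocate/and/delegate/free shape, with toy types and a placeholder apicid mapping rather than real APIC logic:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long mask_t;	/* stand-in for a small cpumask */

static int mask_to_apicid(mask_t mask)
{
	return mask ? (int)(mask & 0xFF) : 0xFF;	/* placeholder mapping */
}

static int mask_to_apicid_and(mask_t inmask, mask_t andmask, mask_t online)
{
	mask_t *tmp = malloc(sizeof(*tmp));	/* mirrors alloc_cpumask_var() */
	int apicid;

	if (!tmp)
		return 0xFF;
	*tmp = inmask & andmask & online;
	apicid = mask_to_apicid(*tmp);		/* delegate, don't duplicate */
	free(tmp);				/* mirrors free_cpumask_var() */
	return apicid;
}

int main(void)
{
	printf("0x%x\n", mask_to_apicid_and(0x0F, 0x3C, 0xFF));	/* 0xc */
	return 0;
}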
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index d28a507cef39..1caf57628b9c 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -15,7 +15,7 @@
 #define SHARED_SWITCHER_PAGES \
 	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
 /* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
 
 /* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index c80f00d29965..bf37bc49bd8e 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
-		return BAD_APICID;
+	if (cpu >= nr_cpu_ids)
+		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 }
 
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 66834c41c049..a977de23cb4d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -102,9 +102,9 @@ extern void pci_iommu_alloc(void);
 
 #ifdef CONFIG_NUMA
 /* Returns the node based on pci bus */
-static inline int __pcibus_to_node(struct pci_bus *bus)
+static inline int __pcibus_to_node(const struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
+	const struct pci_sysdata *sd = bus->sysdata;
 
 	return sd->node;
 }
@@ -113,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
 {
 	return node_to_cpumask(__pcibus_to_node(bus));
 }
+
+static inline const struct cpumask *
+cpumask_of_pcibus(const struct pci_bus *bus)
+{
+	return cpumask_of_node(__pcibus_to_node(bus));
+}
 #endif
 
 #endif /* _ASM_X86_PCI_H */
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 99327d1be49f..4bb5fb34f030 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -52,7 +52,7 @@ static inline void init_apic_ldr(void)
 	int i;
 
 	/* Create logical APIC IDs by counting CPUs already in cluster. */
-	for (count = 0, i = NR_CPUS; --i >= 0; ) {
+	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
 		lid = cpu_2_logical_apicid[i];
 		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
 			++count;
@@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid)
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
-		return BAD_APICID;
+	if (cpu >= nr_cpu_ids)
+		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
@@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -146,7 +146,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set >= nr_cpu_ids)
 		return (int) 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster.  If are not
@@ -173,42 +173,16 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
-	int apicid = 0xFF;
+	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return (int) 0xFF;
+		return apicid;
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == nr_cpu_ids)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster.  If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return 0xFF;
-			}
-			apicid = apicid | new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 79e31e9dcdda..4e2f2e0aab27 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu)
  *
  * Side note: this function creates the returned cpumask on the stack
  * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
+ * cpumask_of_node function should be used whenever possible.
  */
 static inline cpumask_t node_to_cpumask(int node)
 {
 	return node_to_cpumask_map[node];
 }
 
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return &node_to_cpumask_map[node];
+}
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *_node_to_cpumask_ptr(int node);
+extern const cpumask_t *cpumask_of_node(int node);
 extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu)
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &node_to_cpumask_map[node];
 }
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node)	\
-		v = _node_to_cpumask_ptr(node)
+		v = cpumask_of_node(node)
 
 #endif /* CONFIG_X86_64 */
 
@@ -187,7 +196,7 @@ extern int __node_distance(int, int);
 #define	cpu_to_node(cpu)	0
 #define	early_cpu_to_node(cpu)	0
 
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &cpu_online_map;
 }
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node)	\
-		v = _node_to_cpumask_ptr(node)
+		v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
@@ -214,12 +226,12 @@ static inline int node_to_first_cpu(int node)
 /* Returns the number of the first CPU on Node 'node'. */
 static inline int node_to_first_cpu(int node)
 {
-	node_to_cpumask_ptr(mask, node);
-	return first_cpu(*mask);
+	return cpumask_first(cpumask_of_node(node));
}
 #endif
 
 extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
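The deprecation comments above describe a caller-side migration: node_to_cpumask_ptr() hides a variable declaration behind a macro, while cpumask_of_node() is an ordinary expression. A userspace sketch of the two styles side by side (toy types, not kernel API):

#include <stdio.h>

typedef struct { unsigned long bits[2]; } cpumask_t;

static cpumask_t fake_map[1] = { { { 0x5, 0 } } };

static const cpumask_t *cpumask_of_node(int node)
{
	return &fake_map[node];
}

#define node_to_cpumask_ptr(v, node) \
	const cpumask_t *v = cpumask_of_node(node)	/* deprecated form */

int main(void)
{
	node_to_cpumask_ptr(old_style, 0);		/* declares old_style */
	const cpumask_t *new_style = cpumask_of_node(0);

	printf("%lx %lx\n", old_style->bits[0], new_style->bits[0]);	/* 5 5 */
	return 0;
}

Both compile to the same thing; the plain expression is simply easier to read and to grep for, which is why the macro is marked deprecated rather than removed outright.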
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 65d0b72777ea..29dc0c89d4af 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -538,9 +538,10 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_madt_local_apic *lapic;
-	cpumask_t tmp_map, new_map;
+	cpumask_var_t tmp_map, new_map;
 	u8 physid;
 	int cpu;
+	int retval = -ENOMEM;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
@@ -569,23 +570,37 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
 
-	tmp_map = cpu_present_map;
+	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
+		goto free_tmp_map;
+
+	cpumask_copy(tmp_map, cpu_present_mask);
 	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
 	 * number, then the following will get us exactly what was mapped
 	 */
-	cpus_andnot(new_map, cpu_present_map, tmp_map);
-	if (cpus_empty(new_map)) {
+	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
+	if (cpumask_empty(new_map)) {
 		printk ("Unable to map lapic to logical cpu number\n");
-		return -EINVAL;
+		retval = -EINVAL;
+		goto free_new_map;
 	}
 
-	cpu = first_cpu(new_map);
+	cpu = cpumask_first(new_map);
 
 	*pcpu = cpu;
-	return 0;
+	retval = 0;
+
+free_new_map:
+	free_cpumask_var(new_map);
+free_tmp_map:
+	free_cpumask_var(tmp_map);
+out:
+	return retval;
 }
 
 /* wrapper to silence section mismatch warning */
@@ -598,7 +613,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 	num_processors--;
 
 	return (0);
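Because cpumask_var_t is heap-allocated when CONFIG_CPUMASK_OFFSTACK=y, the function above grows the classic kernel goto ladder: one label per allocation, so every exit path frees exactly what was allocated, including the success path. A runnable userspace sketch of that control flow (malloc stands in for alloc_cpumask_var; the failure trigger is an invented parameter for illustration):

#include <stdlib.h>

static int alloc_mask(unsigned long **m) { return (*m = calloc(1, 64)) != NULL; }
static void free_mask(unsigned long *m) { free(m); }

static int map_lsapic_sketch(int fail_step)
{
	unsigned long *tmp_map, *new_map;
	int retval = -12;			/* -ENOMEM */

	if (!alloc_mask(&tmp_map))
		goto out;
	if (!alloc_mask(&new_map))
		goto free_tmp_map;

	if (fail_step) {			/* stand-in for "no new cpu appeared" */
		retval = -22;			/* -EINVAL */
		goto free_new_map;
	}
	retval = 0;	/* success falls through the same cleanup labels */

free_new_map:
	free_mask(new_map);
free_tmp_map:
	free_mask(tmp_map);
out:
	return retval;
}

int main(void)
{
	return map_lsapic_sketch(0);	/* returns 0 on the success path */
}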
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index d652515e2855..b13d3c4dbd42 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const cpumask_t *mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const cpumask_t *mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 42e0853030cb..3f95a40f718a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
 	} else if (smp_num_siblings > 1) {
 
-		if (smp_num_siblings > NR_CPUS) {
+		if (smp_num_siblings > nr_cpu_ids) {
 			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
 			       smp_num_siblings);
 			smp_num_siblings = 1;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 88ea02dcb622..28102ad1a363 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 	}
 }
 
+static void free_acpi_perf_data(void)
+{
+	unsigned int i;
+
+	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+	for_each_possible_cpu(i)
+		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+				 ->shared_cpu_map);
+	free_percpu(acpi_perf_data);
+}
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
  */
 static int __init acpi_cpufreq_early_init(void)
 {
+	unsigned int i;
 	dprintk("acpi_cpufreq_early_init\n");
 
 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
@@ -534,6 +546,16 @@ static int __init acpi_cpufreq_early_init(void)
 		dprintk("Memory allocation error for acpi_perf_data.\n");
 		return -ENOMEM;
 	}
+	for_each_possible_cpu(i) {
+		if (!alloc_cpumask_var_node(
+			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
+			GFP_KERNEL, cpu_to_node(i))) {
+
+			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+			free_acpi_perf_data();
+			return -ENOMEM;
+		}
+	}
 
 	/* Do initialization in ACPI core */
 	acpi_processor_preregister_performance(acpi_perf_data);
@@ -604,9 +626,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		policy->cpus = perf->shared_cpu_map;
+		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
 	}
-	policy->related_cpus = perf->shared_cpu_map;
+	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
@@ -795,7 +817,7 @@ static int __init acpi_cpufreq_init(void)
 
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
-		free_percpu(acpi_perf_data);
+		free_acpi_perf_data();
 
 	return ret;
 }
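The allocation pattern above relies on two properties: alloc_percpu() zeroes its memory, and free_cpumask_var(NULL) is a no-op, so on a mid-loop failure the unwind can simply free every slot without tracking how far the loop got. A runnable userspace sketch of that partial-failure unwind (calloc/free stand in for the cpumask allocators):

#include <stdlib.h>

#define SKETCH_NR_CPUS 4

static unsigned long *shared_cpu_map[SKETCH_NR_CPUS];	/* zeroed, like alloc_percpu */

static void free_all(void)
{
	int i;

	for (i = 0; i < SKETCH_NR_CPUS; i++)
		free(shared_cpu_map[i]);	/* free(NULL) is a no-op */
}

static int alloc_all(void)
{
	int i;

	for (i = 0; i < SKETCH_NR_CPUS; i++) {
		shared_cpu_map[i] = calloc(1, 64);
		if (!shared_cpu_map[i]) {
			free_all();		/* unwind the partial allocation */
			return -12;		/* -ENOMEM */
		}
	}
	return 0;
}

int main(void)
{
	int ret = alloc_all();

	if (!ret)
		free_all();
	return ret ? 1 : 0;
}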
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 7c7d56b43136..1b446d79a8fd 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
 		goto err0;
 	}
 
+	if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+			       GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto err05;
+	}
+
 	if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
 		retval = -EIO;
 		goto err1;
@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
 err2:
 	acpi_processor_unregister_performance(acpi_processor_perf, 0);
 err1:
+	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+err05:
 	kfree(acpi_processor_perf);
 err0:
 	printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
 		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
 #endif
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 7f05f44b97e9..c3c9adbaa26f 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned | |||
766 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | 766 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) |
767 | { | 767 | { |
768 | struct cpufreq_frequency_table *powernow_table; | 768 | struct cpufreq_frequency_table *powernow_table; |
769 | int ret_val; | 769 | int ret_val = -ENODEV; |
770 | 770 | ||
771 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { | 771 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { |
772 | dprintk("register performance failed: bad ACPI data\n"); | 772 | dprintk("register performance failed: bad ACPI data\n"); |
@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
815 | /* notify BIOS that we exist */ | 815 | /* notify BIOS that we exist */ |
816 | acpi_processor_notify_smm(THIS_MODULE); | 816 | acpi_processor_notify_smm(THIS_MODULE); |
817 | 817 | ||
818 | if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { | ||
819 | printk(KERN_ERR PFX | ||
820 | "unable to alloc powernow_k8_data cpumask\n"); | ||
821 | ret_val = -ENOMEM; | ||
822 | goto err_out_mem; | ||
823 | } | ||
824 | |||
818 | return 0; | 825 | return 0; |
819 | 826 | ||
820 | err_out_mem: | 827 | err_out_mem: |
@@ -826,7 +833,7 @@ err_out: | |||
826 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ | 833 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ |
827 | data->acpi_data.state_count = 0; | 834 | data->acpi_data.state_count = 0; |
828 | 835 | ||
829 | return -ENODEV; | 836 | return ret_val; |
830 | } | 837 | } |
831 | 838 | ||
832 | static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) | 839 | static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) |
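Taken together, the three powernow-k8 hunks above thread a real error code through powernow_k8_cpu_init_acpi(): ret_val starts at -ENODEV so every pre-existing failure path reports what it always did, and only the new allocation path overrides it with -ENOMEM. Note that the allocation is deliberately the last step before success, so err_out_mem never sees a half-initialized mask. In outline (surrounding code elided):

    static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
    {
    	int ret_val = -ENODEV;	/* legacy failure paths keep this */

    	/* ... register performance, build tables, notify SMM ... */

    	if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
    		ret_val = -ENOMEM;
    		goto err_out_mem;	/* nothing mask-related to undo */
    	}
    	return 0;

    err_out_mem:
    	/* ... existing unwinding ... */
    	data->acpi_data.state_count = 0;
    	return ret_val;
    }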
@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) | |||
929 | { | 936 | { |
930 | if (data->acpi_data.state_count) | 937 | if (data->acpi_data.state_count) |
931 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | 938 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); |
939 | free_cpumask_var(data->acpi_data.shared_cpu_map); | ||
932 | } | 940 | } |
933 | 941 | ||
934 | #else | 942 | #else |
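The matching free in powernow_k8_cpu_exit_acpi() sits outside the state_count check on purpose: free_cpumask_var() is a no-op with CONFIG_CPUMASK_OFFSTACK=n and reduces to kfree() otherwise, so calling it on a never-allocated (NULL) mask is harmless, assuming the containing structure was zeroed when allocated:

    if (data->acpi_data.state_count)
    	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
    free_cpumask_var(data->acpi_data.shared_cpu_map);	/* NULL-safe */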
@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1134 | data->cpu = pol->cpu; | 1142 | data->cpu = pol->cpu; |
1135 | data->currpstate = HW_PSTATE_INVALID; | 1143 | data->currpstate = HW_PSTATE_INVALID; |
1136 | 1144 | ||
1137 | if (powernow_k8_cpu_init_acpi(data)) { | 1145 | rc = powernow_k8_cpu_init_acpi(data); |
1146 | if (rc) { | ||
1138 | /* | 1147 | /* |
1139 | * Use the PSB BIOS structure. This is only available on | 1148 | * Use the PSB BIOS structure. This is only available on |
1140 | * an UP version, and is deprecated by AMD. | 1149 | * an UP version, and is deprecated by AMD. |
@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1152 | "ACPI maintainers and complain to your BIOS " | 1161 | "ACPI maintainers and complain to your BIOS " |
1153 | "vendor.\n"); | 1162 | "vendor.\n"); |
1154 | #endif | 1163 | #endif |
1155 | kfree(data); | 1164 | goto err_out; |
1156 | return -ENODEV; | ||
1157 | } | 1165 | } |
1158 | if (pol->cpu != 0) { | 1166 | if (pol->cpu != 0) { |
1159 | printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " | 1167 | printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " |
1160 | "CPU other than CPU0. Complain to your BIOS " | 1168 | "CPU other than CPU0. Complain to your BIOS " |
1161 | "vendor.\n"); | 1169 | "vendor.\n"); |
1162 | kfree(data); | 1170 | goto err_out; |
1163 | return -ENODEV; | ||
1164 | } | 1171 | } |
1165 | rc = find_psb_table(data); | 1172 | rc = find_psb_table(data); |
1166 | if (rc) { | 1173 | if (rc) { |
1167 | kfree(data); | 1174 | goto err_out; |
1168 | return -ENODEV; | ||
1169 | } | 1175 | } |
1170 | } | 1176 | } |
1171 | 1177 | ||
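The powernowk8_cpu_init() hunk replaces three copies of "kfree(data); return -ENODEV;" with jumps to a single err_out label. The label itself lies outside this excerpt; presumably it performs exactly that shared cleanup:

    err_out:		/* assumed: defined later in the function */
    	kfree(data);
    	return -ENODEV;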
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index c6ecda64f5f1..48533d77be78 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -534,7 +534,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
534 | per_cpu(cpuid4_info, cpu) = NULL; | 534 | per_cpu(cpuid4_info, cpu) = NULL; |
535 | } | 535 | } |
536 | 536 | ||
537 | static void get_cpu_leaves(void *_retval) | 537 | static void __cpuinit get_cpu_leaves(void *_retval) |
538 | { | 538 | { |
539 | int j, *retval = _retval, cpu = smp_processor_id(); | 539 | int j, *retval = _retval, cpu = smp_processor_id(); |
540 | 540 | ||
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 85d28d53f5d3..2ac1f0c2beb3 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -121,7 +121,7 @@ static int cpuid_open(struct inode *inode, struct file *file) | |||
121 | lock_kernel(); | 121 | lock_kernel(); |
122 | 122 | ||
123 | cpu = iminor(file->f_path.dentry->d_inode); | 123 | cpu = iminor(file->f_path.dentry->d_inode); |
124 | if (cpu >= NR_CPUS || !cpu_online(cpu)) { | 124 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
125 | ret = -ENXIO; /* No such CPU */ | 125 | ret = -ENXIO; /* No such CPU */ |
126 | goto out; | 126 | goto out; |
127 | } | 127 | } |
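nr_cpu_ids is the runtime bound on CPU numbers (one past the highest possible CPU), as opposed to the compile-time NR_CPUS ceiling, so a device minor at or above it can never name a real CPU; checking against it rejects such opens just as correctly and a little earlier. The identical substitution lands in msr_open() further down. The distinction in brief:

    /* NR_CPUS:    config-time ceiling, e.g. 4096 on a MAXSMP build.
     * nr_cpu_ids: detected at boot, e.g. 8 on an 8-way machine.     */
    if (cpu >= nr_cpu_ids || !cpu_online(cpu))
    	return -ENXIO;		/* no such CPU */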
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 69911722b9d3..3639442aa7a4 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c | |||
@@ -214,11 +214,11 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) | |||
214 | 214 | ||
215 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 215 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); |
216 | if (cfg) { | 216 | if (cfg) { |
217 | /* FIXME: needs alloc_cpumask_var_node() */ | 217 | if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { |
218 | if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { | ||
219 | kfree(cfg); | 218 | kfree(cfg); |
220 | cfg = NULL; | 219 | cfg = NULL; |
221 | } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { | 220 | } else if (!alloc_cpumask_var_node(&cfg->old_domain, |
221 | GFP_ATOMIC, node)) { | ||
222 | free_cpumask_var(cfg->domain); | 222 | free_cpumask_var(cfg->domain); |
223 | kfree(cfg); | 223 | kfree(cfg); |
224 | cfg = NULL; | 224 | cfg = NULL; |
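This hunk resolves the FIXME it deletes: both of irq_cfg's masks are now allocated with alloc_cpumask_var_node() on the same node that kzalloc_node() placed the cfg structure itself, keeping an IRQ's descriptor and its bitmaps NUMA-local. The partial-failure paths still unwind in reverse order (free domain, then free the cfg), as the surviving lines show.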
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 82a7c7ed6d45..726266695b2c 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file) | |||
136 | lock_kernel(); | 136 | lock_kernel(); |
137 | cpu = iminor(file->f_path.dentry->d_inode); | 137 | cpu = iminor(file->f_path.dentry->d_inode); |
138 | 138 | ||
139 | if (cpu >= NR_CPUS || !cpu_online(cpu)) { | 139 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
140 | ret = -ENXIO; /* No such CPU */ | 140 | ret = -ENXIO; /* No such CPU */ |
141 | goto out; | 141 | goto out; |
142 | } | 142 | } |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index bf088c61fa40..2b46eb41643b 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -501,7 +501,7 @@ void native_machine_shutdown(void) | |||
501 | 501 | ||
502 | #ifdef CONFIG_X86_32 | 502 | #ifdef CONFIG_X86_32 |
503 | /* See if there has been given a command line override */ | 503 | /* See if there has been given a command line override */ |
504 | if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && | 504 | if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && |
505 | cpu_online(reboot_cpu)) | 505 | cpu_online(reboot_cpu)) |
506 | reboot_cpu_id = reboot_cpu; | 506 | reboot_cpu_id = reboot_cpu; |
507 | #endif | 507 | #endif |
@@ -511,7 +511,7 @@ void native_machine_shutdown(void) | |||
511 | reboot_cpu_id = smp_processor_id(); | 511 | reboot_cpu_id = smp_processor_id(); |
512 | 512 | ||
513 | /* Make certain I only run on the appropriate processor */ | 513 | /* Make certain I only run on the appropriate processor */ |
514 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); | 514 | set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); |
515 | 515 | ||
516 | /* O.K Now that I'm on the appropriate processor, | 516 | /* O.K Now that I'm on the appropriate processor, |
517 | * stop all of the others. | 517 | * stop all of the others. |
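cpumask_of(cpu) returns a const struct cpumask * into a shared read-only table, where the old cpumask_of_cpu() built a full NR_CPUS-bit cpumask_t value. Since set_cpus_allowed_ptr() only reads its argument, the pointer form avoids the copy entirely:

    /* old: materializes an NR_CPUS-bit struct just to take its address
     *   set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
     * new: borrows a pointer to a preexisting constant mask            */
    set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));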
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 0b63b08e7530..a4b619c33106 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -153,12 +153,10 @@ void __init setup_per_cpu_areas(void) | |||
153 | align = max_t(unsigned long, PAGE_SIZE, align); | 153 | align = max_t(unsigned long, PAGE_SIZE, align); |
154 | size = roundup(old_size, align); | 154 | size = roundup(old_size, align); |
155 | 155 | ||
156 | printk(KERN_INFO | 156 | pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", |
157 | "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", | ||
158 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); | 157 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); |
159 | 158 | ||
160 | printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", | 159 | pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); |
161 | size); | ||
162 | 160 | ||
163 | for_each_possible_cpu(cpu) { | 161 | for_each_possible_cpu(cpu) { |
164 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 162 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
@@ -169,22 +167,15 @@ void __init setup_per_cpu_areas(void) | |||
169 | if (!node_online(node) || !NODE_DATA(node)) { | 167 | if (!node_online(node) || !NODE_DATA(node)) { |
170 | ptr = __alloc_bootmem(size, align, | 168 | ptr = __alloc_bootmem(size, align, |
171 | __pa(MAX_DMA_ADDRESS)); | 169 | __pa(MAX_DMA_ADDRESS)); |
172 | printk(KERN_INFO | 170 | pr_info("cpu %d has no node %d or node-local memory\n", |
173 | "cpu %d has no node %d or node-local memory\n", | ||
174 | cpu, node); | 171 | cpu, node); |
175 | if (ptr) | 172 | pr_debug("per cpu data for cpu%d at %016lx\n", |
176 | printk(KERN_DEBUG | 173 | cpu, __pa(ptr)); |
177 | "per cpu data for cpu%d at %016lx\n", | 174 | } else { |
178 | cpu, __pa(ptr)); | ||
179 | } | ||
180 | else { | ||
181 | ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, | 175 | ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, |
182 | __pa(MAX_DMA_ADDRESS)); | 176 | __pa(MAX_DMA_ADDRESS)); |
183 | if (ptr) | 177 | pr_debug("per cpu data for cpu%d on node%d at %016lx\n", |
184 | printk(KERN_DEBUG | 178 | cpu, node, __pa(ptr)); |
185 | "per cpu data for cpu%d on node%d " | ||
186 | "at %016lx\n", | ||
187 | cpu, node, __pa(ptr)); | ||
188 | } | 179 | } |
189 | #endif | 180 | #endif |
190 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 181 | per_cpu_offset(cpu) = ptr - __per_cpu_start; |
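The setup_per_cpu_areas() rewrite is mostly the mechanical pr_info()/pr_debug() conversion (pr_<level>(fmt, ...) is shorthand for printk(KERN_<LEVEL> fmt, ...), with far less line wrapping), but two semantic details ride along: the "if (ptr)" guards disappear, presumably because the bootmem allocators of this era panic on failure rather than returning NULL, and the KERN_DEBUG printks become pr_debug(), which compiles to nothing unless DEBUG or dynamic debug is enabled:

    /* equivalent: */
    printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", size);
    pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

    /* not quite equivalent: emitted only with DEBUG / dynamic debug */
    pr_debug("per cpu data for cpu%d at %016lx\n", cpu, __pa(ptr));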
@@ -339,25 +330,25 @@ static const cpumask_t cpu_mask_none; | |||
339 | /* | 330 | /* |
340 | * Returns a pointer to the bitmask of CPUs on Node 'node'. | 331 | * Returns a pointer to the bitmask of CPUs on Node 'node'. |
341 | */ | 332 | */ |
342 | const cpumask_t *_node_to_cpumask_ptr(int node) | 333 | const cpumask_t *cpumask_of_node(int node) |
343 | { | 334 | { |
344 | if (node_to_cpumask_map == NULL) { | 335 | if (node_to_cpumask_map == NULL) { |
345 | printk(KERN_WARNING | 336 | printk(KERN_WARNING |
346 | "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", | 337 | "cpumask_of_node(%d): no node_to_cpumask_map!\n", |
347 | node); | 338 | node); |
348 | dump_stack(); | 339 | dump_stack(); |
349 | return (const cpumask_t *)&cpu_online_map; | 340 | return (const cpumask_t *)&cpu_online_map; |
350 | } | 341 | } |
351 | if (node >= nr_node_ids) { | 342 | if (node >= nr_node_ids) { |
352 | printk(KERN_WARNING | 343 | printk(KERN_WARNING |
353 | "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", | 344 | "cpumask_of_node(%d): node > nr_node_ids(%d)\n", |
354 | node, nr_node_ids); | 345 | node, nr_node_ids); |
355 | dump_stack(); | 346 | dump_stack(); |
356 | return &cpu_mask_none; | 347 | return &cpu_mask_none; |
357 | } | 348 | } |
358 | return &node_to_cpumask_map[node]; | 349 | return &node_to_cpumask_map[node]; |
359 | } | 350 | } |
360 | EXPORT_SYMBOL(_node_to_cpumask_ptr); | 351 | EXPORT_SYMBOL(cpumask_of_node); |
361 | 352 | ||
362 | /* | 353 | /* |
363 | * Returns a bitmask of CPUs on Node 'node'. | 354 | * Returns a bitmask of CPUs on Node 'node'. |
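Renaming _node_to_cpumask_ptr() to cpumask_of_node() makes x86 provide the generic topology API under its canonical name; the body, the export, and both defensive fallbacks (the online map when the table is missing, an empty mask for out-of-range nodes) carry over unchanged. A typical caller now reads:

    int cpu;
    const struct cpumask *mask = cpumask_of_node(node);	/* node: valid id */

    /* iterate a node's CPUs without ever copying the mask */
    for_each_cpu(cpu, mask)
    	setup_cpu(cpu);		/* hypothetical per-CPU hook */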
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 31869bf5fabd..6bd4d9b73870 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -496,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
496 | } | 496 | } |
497 | 497 | ||
498 | /* maps the cpu to the sched domain representing multi-core */ | 498 | /* maps the cpu to the sched domain representing multi-core */ |
499 | cpumask_t cpu_coregroup_map(int cpu) | 499 | const struct cpumask *cpu_coregroup_mask(int cpu) |
500 | { | 500 | { |
501 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 501 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
502 | /* | 502 | /* |
@@ -504,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu) | |||
504 | * And for power savings, we return cpu_core_map | 504 | * And for power savings, we return cpu_core_map |
505 | */ | 505 | */ |
506 | if (sched_mc_power_savings || sched_smt_power_savings) | 506 | if (sched_mc_power_savings || sched_smt_power_savings) |
507 | return per_cpu(cpu_core_map, cpu); | 507 | return &per_cpu(cpu_core_map, cpu); |
508 | else | 508 | else |
509 | return c->llc_shared_map; | 509 | return &c->llc_shared_map; |
510 | } | ||
511 | |||
512 | cpumask_t cpu_coregroup_map(int cpu) | ||
513 | { | ||
514 | return *cpu_coregroup_mask(cpu); | ||
510 | } | 515 | } |
511 | 516 | ||
512 | static void impress_friends(void) | 517 | static void impress_friends(void) |
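cpu_coregroup_mask() hands back a pointer instead of returning an NR_CPUS-bit struct by value, which matters once masks get big (at NR_CPUS=4096 a cpumask_t is 512 bytes of stack per call). The old cpu_coregroup_map() survives as a dereferencing wrapper so callers not yet converted keep working:

    const struct cpumask *m = cpu_coregroup_mask(cpu);	/* cheap: pointer   */
    cpumask_t copy = cpu_coregroup_map(cpu);		/* legacy: full copy */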
@@ -1149,7 +1154,7 @@ static void __init smp_cpu_index_default(void) | |||
1149 | for_each_possible_cpu(i) { | 1154 | for_each_possible_cpu(i) { |
1150 | c = &cpu_data(i); | 1155 | c = &cpu_data(i); |
1151 | /* mark all to hotplug */ | 1156 | /* mark all to hotplug */ |
1152 | c->cpu_index = NR_CPUS; | 1157 | c->cpu_index = nr_cpu_ids; |
1153 | } | 1158 | } |
1154 | } | 1159 | } |
1155 | 1160 | ||
@@ -1293,6 +1298,8 @@ __init void prefill_possible_map(void) | |||
1293 | else | 1298 | else |
1294 | possible = setup_possible_cpus; | 1299 | possible = setup_possible_cpus; |
1295 | 1300 | ||
1301 | total_cpus = max_t(int, possible, num_processors + disabled_cpus); | ||
1302 | |||
1296 | if (possible > CONFIG_NR_CPUS) { | 1303 | if (possible > CONFIG_NR_CPUS) { |
1297 | printk(KERN_WARNING | 1304 | printk(KERN_WARNING |
1298 | "%d Processors exceeds NR_CPUS limit of %d\n", | 1305 | "%d Processors exceeds NR_CPUS limit of %d\n", |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index a5bc05492b1e..9840b7ec749a 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -357,9 +357,8 @@ void __init find_smp_config(void) | |||
357 | printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); | 357 | printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); |
358 | 358 | ||
359 | /* initialize the CPU structures (moved from smp_boot_cpus) */ | 359 | /* initialize the CPU structures (moved from smp_boot_cpus) */ |
360 | for (i = 0; i < NR_CPUS; i++) { | 360 | for (i = 0; i < nr_cpu_ids; i++) |
361 | cpu_irq_affinity[i] = ~0; | 361 | cpu_irq_affinity[i] = ~0; |
362 | } | ||
363 | cpu_online_map = cpumask_of_cpu(boot_cpu_id); | 362 | cpu_online_map = cpumask_of_cpu(boot_cpu_id); |
364 | 363 | ||
365 | /* The boot CPU must be extended */ | 364 | /* The boot CPU must be extended */ |
@@ -1227,7 +1226,7 @@ int setup_profiling_timer(unsigned int multiplier) | |||
1227 | * new values until the next timer interrupt in which they do process | 1226 | * new values until the next timer interrupt in which they do process |
1228 | * accounting. | 1227 | * accounting. |
1229 | */ | 1228 | */ |
1230 | for (i = 0; i < NR_CPUS; ++i) | 1229 | for (i = 0; i < nr_cpu_ids; ++i) |
1231 | per_cpu(prof_multiplier, i) = multiplier; | 1230 | per_cpu(prof_multiplier, i) = multiplier; |
1232 | 1231 | ||
1233 | return 0; | 1232 | return 0; |
@@ -1257,7 +1256,7 @@ void __init voyager_smp_intr_init(void) | |||
1257 | int i; | 1256 | int i; |
1258 | 1257 | ||
1259 | /* initialize the per cpu irq mask to all disabled */ | 1258 | /* initialize the per cpu irq mask to all disabled */ |
1260 | for (i = 0; i < NR_CPUS; i++) | 1259 | for (i = 0; i < nr_cpu_ids; i++) |
1261 | vic_irq_mask[i] = 0xFFFF; | 1260 | vic_irq_mask[i] = 0xFFFF; |
1262 | 1261 | ||
1263 | VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); | 1262 | VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); |
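The voyager hunks apply the same NR_CPUS-to-nr_cpu_ids substitution seen in cpuid.c and msr.c above, this time as loop bounds: iterating up to nr_cpu_ids touches only the CPU slots that can exist on the booted machine instead of walking the whole compile-time array, which on a large-NR_CPUS build with a handful of CPUs skips thousands of dead iterations per loop.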