author    Linus Torvalds <torvalds@linux-foundation.org>    2009-01-03 15:04:39 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-01-03 15:04:39 -0500
commit    7d3b56ba37a95f1f370f50258ed3954c304c524b (patch)
tree      86102527b92f02450aa245f084ffb491c18d2e0a /arch/x86
parent    269b012321f2f1f8e4648c43a93bf432b42c6668 (diff)
parent    ab14398abd195af91a744c320a52a1bce814dd1e (diff)
Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
  x86: setup_per_cpu_areas() cleanup
  cpumask: fix compile error when CONFIG_NR_CPUS is not defined
  cpumask: use alloc_cpumask_var_node where appropriate
  cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
  x86: use cpumask_var_t in acpi/boot.c
  x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
  sched: put back some stack hog changes that were undone in kernel/sched.c
  x86: enable cpus display of kernel_max and offlined cpus
  ia64: cpumask fix for is_affinity_mask_valid()
  cpumask: convert RCU implementations, fix
  xtensa: define __fls
  mn10300: define __fls
  m32r: define __fls
  h8300: define __fls
  frv: define __fls
  cris: define __fls
  cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
  cpumask: zero extra bits in alloc_cpumask_var_node
  cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
  cpumask: convert mm/
  ...
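The recurring conversion in this series: a fixed-size on-stack cpumask_t (NR_CPUS bits, 512 bytes when NR_CPUS=4096) becomes a cpumask_var_t, which is heap-allocated when CONFIG_CPUMASK_OFFSTACK=y and falls back to an on-stack mask otherwise, and loop bounds move from the compile-time NR_CPUS to the runtime nr_cpu_ids. A minimal sketch of the converted form, assuming a sleepable caller (the es7000/summit/io_apic hunks below use GFP_ATOMIC instead); count_online_in() is a hypothetical illustration, not code from this merge:

#include <linux/cpumask.h>
#include <linux/gfp.h>

static int count_online_in(const struct cpumask *in)
{
	cpumask_var_t tmp;
	int cpu, n = 0;

	/* May fail when the mask lives off-stack; callers must check. */
	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, in, cpu_online_mask);

	/* Iterates only up to nr_cpu_ids, never to NR_CPUS. */
	for_each_cpu(cpu, tmp)
		n++;

	free_cpumask_var(tmp);	/* no-op for the on-stack fallback */
	return n;
}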
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/es7000/apic.h          | 32
-rw-r--r--  arch/x86/include/asm/lguest.h               |  2
-rw-r--r--  arch/x86/include/asm/numaq/apic.h           |  4
-rw-r--r--  arch/x86/include/asm/pci.h                  | 10
-rw-r--r--  arch/x86/include/asm/summit/apic.h          | 42
-rw-r--r--  arch/x86/include/asm/topology.h             | 36
-rw-r--r--  arch/x86/kernel/acpi/boot.c                 | 31
-rw-r--r--  arch/x86/kernel/apic.c                      |  4
-rw-r--r--  arch/x86/kernel/cpu/common.c                |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  | 28
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c   |  9
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c   | 24
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       |  2
-rw-r--r--  arch/x86/kernel/cpuid.c                     |  2
-rw-r--r--  arch/x86/kernel/io_apic.c                   |  6
-rw-r--r--  arch/x86/kernel/msr.c                       |  2
-rw-r--r--  arch/x86/kernel/reboot.c                    |  4
-rw-r--r--  arch/x86/kernel/setup_percpu.c              | 33
-rw-r--r--  arch/x86/kernel/smpboot.c                   | 15
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c         |  7
20 files changed, 155 insertions(+), 140 deletions(-)
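Several hunks below (acpi-cpufreq, io_apic) further use alloc_cpumask_var_node() so that each per-CPU mask is allocated on the home NUMA node of the CPU that owns it. A hedged sketch of that allocate/unwind pattern, assuming a zeroed per-cpu array; struct perf_state and alloc_state_masks() are hypothetical stand-ins for acpi_processor_performance and acpi_cpufreq_early_init():

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/topology.h>

struct perf_state {			/* stand-in structure */
	cpumask_var_t shared_cpu_map;
};

static struct perf_state *states;	/* from alloc_percpu(), which zeroes */

static int alloc_state_masks(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		/* Place each mask on the memory node of its owning CPU. */
		if (!alloc_cpumask_var_node(
				&per_cpu_ptr(states, i)->shared_cpu_map,
				GFP_KERNEL, cpu_to_node(i)))
			goto unwind;
	}
	return 0;

unwind:
	/* free_cpumask_var(NULL) is safe, so unwind needs no bookkeeping. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(states, i)->shared_cpu_map);
	return -ENOMEM;
}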
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index 51ac1230294e..bc53d5ef1386 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -157,7 +157,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 
 	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -190,7 +190,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return cpu_to_logical_apicid(0);
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -218,9 +218,6 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
 	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
@@ -229,31 +226,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return cpu_to_logical_apicid(0);
-			}
-			apicid = new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index d28a507cef39..1caf57628b9c 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -15,7 +15,7 @@
 #define SHARED_SWITCHER_PAGES \
 	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
 /* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
 
 /* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index c80f00d29965..bf37bc49bd8e 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 }
 
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 66834c41c049..a977de23cb4d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -102,9 +102,9 @@ extern void pci_iommu_alloc(void);
 
 #ifdef CONFIG_NUMA
 /* Returns the node based on pci bus */
-static inline int __pcibus_to_node(struct pci_bus *bus)
+static inline int __pcibus_to_node(const struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
+	const struct pci_sysdata *sd = bus->sysdata;
 
 	return sd->node;
 }
@@ -113,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
 {
 	return node_to_cpumask(__pcibus_to_node(bus));
 }
+
+static inline const struct cpumask *
+cpumask_of_pcibus(const struct pci_bus *bus)
+{
+	return cpumask_of_node(__pcibus_to_node(bus));
+}
 #endif
 
 #endif /* _ASM_X86_PCI_H */
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 99327d1be49f..4bb5fb34f030 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -52,7 +52,7 @@ static inline void init_apic_ldr(void)
 	int i;
 
 	/* Create logical APIC IDs by counting CPUs already in cluster. */
-	for (count = 0, i = NR_CPUS; --i >= 0; ) {
+	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
 		lid = cpu_2_logical_apicid[i];
 		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
 			++count;
@@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid)
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
@@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -146,7 +146,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set >= nr_cpu_ids)
 		return (int) 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -173,42 +173,16 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
-	int apicid = 0xFF;
+	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return (int) 0xFF;
+		return apicid;
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == nr_cpu_ids)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return 0xFF;
-			}
-			apicid = apicid | new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 79e31e9dcdda..4e2f2e0aab27 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu)
  *
  * Side note: this function creates the returned cpumask on the stack
  * so with a high NR_CPUS count, excessive stack space is used. The
- * node_to_cpumask_ptr function should be used whenever possible.
+ * cpumask_of_node function should be used whenever possible.
  */
 static inline cpumask_t node_to_cpumask(int node)
 {
 	return node_to_cpumask_map[node];
 }
 
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return &node_to_cpumask_map[node];
+}
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *_node_to_cpumask_ptr(int node);
+extern const cpumask_t *cpumask_of_node(int node);
 extern cpumask_t node_to_cpumask(int node);
 
 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */
@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu)
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &node_to_cpumask_map[node];
 }
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node) \
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node) \
-		v = _node_to_cpumask_ptr(node)
+		v = cpumask_of_node(node)
 
 #endif /* CONFIG_X86_64 */
 
@@ -187,7 +196,7 @@ extern int __node_distance(int, int);
 #define cpu_to_node(cpu)	0
 #define early_cpu_to_node(cpu)	0
 
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &cpu_online_map;
 }
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node) \
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node) \
-		v = _node_to_cpumask_ptr(node)
+		v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
@@ -214,12 +226,12 @@ static inline int node_to_first_cpu(int node)
 /* Returns the number of the first CPU on Node 'node'. */
 static inline int node_to_first_cpu(int node)
 {
-	node_to_cpumask_ptr(mask, node);
-	return first_cpu(*mask);
+	return cpumask_first(cpumask_of_node(node));
 }
 #endif
 
 extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 65d0b72777ea..29dc0c89d4af 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -538,9 +538,10 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_madt_local_apic *lapic;
-	cpumask_t tmp_map, new_map;
+	cpumask_var_t tmp_map, new_map;
 	u8 physid;
 	int cpu;
+	int retval = -ENOMEM;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
@@ -569,23 +570,37 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
 
-	tmp_map = cpu_present_map;
+	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
+		goto free_tmp_map;
+
+	cpumask_copy(tmp_map, cpu_present_mask);
 	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
 	 * number, then the following will get us exactly what was mapped
 	 */
-	cpus_andnot(new_map, cpu_present_map, tmp_map);
-	if (cpus_empty(new_map)) {
+	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
+	if (cpumask_empty(new_map)) {
 		printk ("Unable to map lapic to logical cpu number\n");
-		return -EINVAL;
+		retval = -EINVAL;
+		goto free_new_map;
 	}
 
-	cpu = first_cpu(new_map);
+	cpu = cpumask_first(new_map);
 
 	*pcpu = cpu;
-	return 0;
+	retval = 0;
+
+free_new_map:
+	free_cpumask_var(new_map);
+free_tmp_map:
+	free_cpumask_var(tmp_map);
+out:
+	return retval;
 }
 
 /* wrapper to silence section mismatch warning */
@@ -598,7 +613,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 	num_processors--;
 
 	return (0);
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index d652515e2855..b13d3c4dbd42 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 				    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const cpumask_t *mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const cpumask_t *mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 42e0853030cb..3f95a40f718a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
 	} else if (smp_num_siblings > 1) {
 
-		if (smp_num_siblings > NR_CPUS) {
+		if (smp_num_siblings > nr_cpu_ids) {
 			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
 			       smp_num_siblings);
 			smp_num_siblings = 1;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 88ea02dcb622..28102ad1a363 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 	}
 }
 
+static void free_acpi_perf_data(void)
+{
+	unsigned int i;
+
+	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+	for_each_possible_cpu(i)
+		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+				 ->shared_cpu_map);
+	free_percpu(acpi_perf_data);
+}
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
  */
 static int __init acpi_cpufreq_early_init(void)
 {
+	unsigned int i;
 	dprintk("acpi_cpufreq_early_init\n");
 
 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
@@ -534,6 +546,16 @@ static int __init acpi_cpufreq_early_init(void)
 		dprintk("Memory allocation error for acpi_perf_data.\n");
 		return -ENOMEM;
 	}
+	for_each_possible_cpu(i) {
+		if (!alloc_cpumask_var_node(
+			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
+			GFP_KERNEL, cpu_to_node(i))) {
+
+			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+			free_acpi_perf_data();
+			return -ENOMEM;
+		}
+	}
 
 	/* Do initialization in ACPI core */
 	acpi_processor_preregister_performance(acpi_perf_data);
@@ -604,9 +626,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		policy->cpus = perf->shared_cpu_map;
+		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
 	}
-	policy->related_cpus = perf->shared_cpu_map;
+	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
@@ -795,7 +817,7 @@ static int __init acpi_cpufreq_init(void)
 
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
-		free_percpu(acpi_perf_data);
+		free_acpi_perf_data();
 
 	return ret;
 }
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 7c7d56b43136..1b446d79a8fd 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
 		goto err0;
 	}
 
+	if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+								GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto err05;
+	}
+
 	if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
 		retval = -EIO;
 		goto err1;
@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
 err2:
 	acpi_processor_unregister_performance(acpi_processor_perf, 0);
 err1:
+	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+err05:
 	kfree(acpi_processor_perf);
 err0:
 	printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
 		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
 #endif
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 7f05f44b97e9..c3c9adbaa26f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
-	int ret_val;
+	int ret_val = -ENODEV;
 
 	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
+	if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+		printk(KERN_ERR PFX
+				"unable to alloc powernow_k8_data cpumask\n");
+		ret_val = -ENOMEM;
+		goto err_out_mem;
+	}
+
 	return 0;
 
 err_out_mem:
@@ -826,7 +833,7 @@ err_out:
 	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
 	data->acpi_data.state_count = 0;
 
-	return -ENODEV;
+	return ret_val;
 }
 
 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
 	if (data->acpi_data.state_count)
 		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
 #else
@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	data->cpu = pol->cpu;
 	data->currpstate = HW_PSTATE_INVALID;
 
-	if (powernow_k8_cpu_init_acpi(data)) {
+	rc = powernow_k8_cpu_init_acpi(data);
+	if (rc) {
 		/*
 		 * Use the PSB BIOS structure. This is only availabe on
 		 * an UP version, and is deprecated by AMD.
@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1152 "ACPI maintainers and complain to your BIOS " 1161 "ACPI maintainers and complain to your BIOS "
1153 "vendor.\n"); 1162 "vendor.\n");
1154#endif 1163#endif
1155 kfree(data); 1164 goto err_out;
1156 return -ENODEV;
1157 } 1165 }
1158 if (pol->cpu != 0) { 1166 if (pol->cpu != 0) {
1159 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1167 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
1160 "CPU other than CPU0. Complain to your BIOS " 1168 "CPU other than CPU0. Complain to your BIOS "
1161 "vendor.\n"); 1169 "vendor.\n");
1162 kfree(data); 1170 goto err_out;
1163 return -ENODEV;
1164 } 1171 }
1165 rc = find_psb_table(data); 1172 rc = find_psb_table(data);
1166 if (rc) { 1173 if (rc) {
1167 kfree(data); 1174 goto err_out;
1168 return -ENODEV;
1169 } 1175 }
1170 } 1176 }
1171 1177
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c6ecda64f5f1..48533d77be78 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -534,7 +534,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
-static void get_cpu_leaves(void *_retval)
+static void __cpuinit get_cpu_leaves(void *_retval)
 {
 	int j, *retval = _retval, cpu = smp_processor_id();
 
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 85d28d53f5d3..2ac1f0c2beb3 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -121,7 +121,7 @@ static int cpuid_open(struct inode *inode, struct file *file)
 	lock_kernel();
 
 	cpu = iminor(file->f_path.dentry->d_inode);
-	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+	if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 		ret = -ENXIO;	/* No such CPU */
 		goto out;
 	}
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 69911722b9d3..3639442aa7a4 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -214,11 +214,11 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		/* FIXME: needs alloc_cpumask_var_node() */
-		if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
+		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
+		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+						   GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 82a7c7ed6d45..726266695b2c 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file)
 	lock_kernel();
 	cpu = iminor(file->f_path.dentry->d_inode);
 
-	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+	if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 		ret = -ENXIO;	/* No such CPU */
 		goto out;
 	}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index bf088c61fa40..2b46eb41643b 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -501,7 +501,7 @@ void native_machine_shutdown(void)
 
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
-	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
+	if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
 	    cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
 #endif
@@ -511,7 +511,7 @@ void native_machine_shutdown(void)
 		reboot_cpu_id = smp_processor_id();
 
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0b63b08e7530..a4b619c33106 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -153,12 +153,10 @@ void __init setup_per_cpu_areas(void)
 	align = max_t(unsigned long, PAGE_SIZE, align);
 	size = roundup(old_size, align);
 
-	printk(KERN_INFO
-		"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
-		size);
+	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
 
 	for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -169,22 +167,15 @@ void __init setup_per_cpu_areas(void)
 		if (!node_online(node) || !NODE_DATA(node)) {
 			ptr = __alloc_bootmem(size, align,
 					 __pa(MAX_DMA_ADDRESS));
-			printk(KERN_INFO
-				"cpu %d has no node %d or node-local memory\n",
+			pr_info("cpu %d has no node %d or node-local memory\n",
 				cpu, node);
-			if (ptr)
-				printk(KERN_DEBUG
-					"per cpu data for cpu%d at %016lx\n",
-					cpu, __pa(ptr));
-		}
-		else {
+			pr_debug("per cpu data for cpu%d at %016lx\n",
+				 cpu, __pa(ptr));
+		} else {
 			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
 						__pa(MAX_DMA_ADDRESS));
-			if (ptr)
-				printk(KERN_DEBUG
-					"per cpu data for cpu%d on node%d "
-					"at %016lx\n",
-					cpu, node, __pa(ptr));
+			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
+				 cpu, node, __pa(ptr));
 		}
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
@@ -339,25 +330,25 @@ static const cpumask_t cpu_mask_none;
 /*
  * Returns a pointer to the bitmask of CPUs on Node 'node'.
  */
-const cpumask_t *_node_to_cpumask_ptr(int node)
+const cpumask_t *cpumask_of_node(int node)
 {
 	if (node_to_cpumask_map == NULL) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
 			node);
 		dump_stack();
 		return (const cpumask_t *)&cpu_online_map;
 	}
 	if (node >= nr_node_ids) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
+			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
 			node, nr_node_ids);
 		dump_stack();
 		return &cpu_mask_none;
 	}
 	return &node_to_cpumask_map[node];
 }
-EXPORT_SYMBOL(_node_to_cpumask_ptr);
+EXPORT_SYMBOL(cpumask_of_node);
 
 /*
  * Returns a bitmask of CPUs on Node 'node'.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 31869bf5fabd..6bd4d9b73870 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -496,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 }
 
 /* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
@@ -504,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
+		return &per_cpu(cpu_core_map, cpu);
 	else
-		return c->llc_shared_map;
+		return &c->llc_shared_map;
+}
+
+cpumask_t cpu_coregroup_map(int cpu)
+{
+	return *cpu_coregroup_mask(cpu);
 }
 
 static void impress_friends(void)
@@ -1149,7 +1154,7 @@ static void __init smp_cpu_index_default(void)
 	for_each_possible_cpu(i) {
 		c = &cpu_data(i);
 		/* mark all to hotplug */
-		c->cpu_index = NR_CPUS;
+		c->cpu_index = nr_cpu_ids;
 	}
 }
 
@@ -1293,6 +1298,8 @@ __init void prefill_possible_map(void)
 	else
 		possible = setup_possible_cpus;
 
+	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
+
 	if (possible > CONFIG_NR_CPUS) {
 		printk(KERN_WARNING
 		"%d Processors exceeds NR_CPUS limit of %d\n",
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index a5bc05492b1e..9840b7ec749a 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -357,9 +357,8 @@ void __init find_smp_config(void)
357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); 357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
358 358
359 /* initialize the CPU structures (moved from smp_boot_cpus) */ 359 /* initialize the CPU structures (moved from smp_boot_cpus) */
360 for (i = 0; i < NR_CPUS; i++) { 360 for (i = 0; i < nr_cpu_ids; i++)
361 cpu_irq_affinity[i] = ~0; 361 cpu_irq_affinity[i] = ~0;
362 }
363 cpu_online_map = cpumask_of_cpu(boot_cpu_id); 362 cpu_online_map = cpumask_of_cpu(boot_cpu_id);
364 363
365 /* The boot CPU must be extended */ 364 /* The boot CPU must be extended */
@@ -1227,7 +1226,7 @@ int setup_profiling_timer(unsigned int multiplier)
  * new values until the next timer interrupt in which they do process
  * accounting.
  */
-	for (i = 0; i < NR_CPUS; ++i)
+	for (i = 0; i < nr_cpu_ids; ++i)
 		per_cpu(prof_multiplier, i) = multiplier;
 
 	return 0;
@@ -1257,7 +1256,7 @@ void __init voyager_smp_intr_init(void)
 	int i;
 
 	/* initialize the per cpu irq mask to all disabled */
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < nr_cpu_ids; i++)
 		vic_irq_mask[i] = 0xFFFF;
 
 	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);