Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/cpumask.h                 |  18
-rw-r--r--  arch/x86/include/asm/pci.h                     |   5
-rw-r--r--  arch/x86/include/asm/processor.h               |   3
-rw-r--r--  arch/x86/include/asm/smp.h                     |  13
-rw-r--r--  arch/x86/include/asm/topology.h                |  87
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c               |  16
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c               |  17
-rw-r--r--  arch/x86/kernel/apic/nmi.c                     |  11
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c                |  11
-rw-r--r--  arch/x86/kernel/apic/probe_32.c                |   3
-rw-r--r--  arch/x86/kernel/apic/summit_32.c               |  21
-rw-r--r--  arch/x86/kernel/apm_32.c                       | 248
-rw-r--r--  arch/x86/kernel/cpu/common.c                   |  13
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c      |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c      |  13
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c    |   2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c          |  24
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c            |  10
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c        |   6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c      |   2
-rw-r--r--  arch/x86/kernel/cpu/proc.c                     |   6
-rw-r--r--  arch/x86/kernel/microcode_amd.c                |  43
-rw-r--r--  arch/x86/kernel/microcode_core.c               | 160
-rw-r--r--  arch/x86/kernel/microcode_intel.c              |  83
-rw-r--r--  arch/x86/kernel/process.c                      |  20
-rw-r--r--  arch/x86/kernel/smpboot.c                      |  70
-rw-r--r--  arch/x86/kernel/tlb_uv.c                       |   9
-rw-r--r--  arch/x86/mm/Makefile                           |   2
-rw-r--r--  arch/x86/mm/mmio-mod.c                         |  19
-rw-r--r--  arch/x86/mm/numa.c                             |  67
-rw-r--r--  arch/x86/mm/numa_64.c                          | 111
-rw-r--r--  arch/x86/oprofile/op_model_p4.c                |   2
-rw-r--r--  arch/x86/xen/smp.c                             |   6
33 files changed, 561 insertions(+), 562 deletions(-)
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index a7f3c75f8ad7..61c852fa346b 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -3,8 +3,6 @@
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 
-#ifdef CONFIG_X86_64
-
 extern cpumask_var_t cpu_callin_mask;
 extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
@@ -12,21 +10,5 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
-
-static inline void setup_cpu_local_masks(void) { }
-
-#endif /* CONFIG_X86_32 */
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index e545ea01abcf..b51a1e8b0baf 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -140,11 +140,6 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
 	return sd->node;
 }
 
-static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
-{
-	return node_to_cpumask(__pcibus_to_node(bus));
-}
-
 static inline const struct cpumask *
 cpumask_of_pcibus(const struct pci_bus *bus)
 {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ae85a8d66a30..34c52370f2fe 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
 	unsigned long		loops_per_jiffy;
 #ifdef CONFIG_SMP
 	/* cpus sharing the last level cache: */
-	cpumask_t		llc_shared_map;
+	cpumask_var_t		llc_shared_map;
 #endif
 	/* cpuid returned max cores value: */
 	u16			x86_max_cores;
@@ -736,6 +736,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
+extern void init_c1e_mask(void);
 
 extern unsigned long		boot_option_idle_override;
 extern unsigned long		idle_halt;
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 47d0e21f2b9e..19e0d88b966d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,19 +21,19 @@
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(int, cpu_number);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
-	return &per_cpu(cpu_sibling_map, cpu);
+	return per_cpu(cpu_sibling_map, cpu);
 }
 
 static inline struct cpumask *cpu_core_mask(int cpu)
 {
-	return &per_cpu(cpu_core_map, cpu);
+	return per_cpu(cpu_core_map, cpu);
 }
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
@@ -121,9 +121,10 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-static inline void arch_send_call_function_ipi(cpumask_t mask)
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_ops.send_call_func_ipi(&mask);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void cpu_disable_common(void);
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 744299c0b774..892b119dba6f 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -44,9 +44,6 @@
 
 #ifdef CONFIG_X86_32
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t node_to_cpumask_map[];
-
 /* Mappings between logical cpu number and node number */
 extern int cpu_to_node_map[];
 
@@ -57,30 +54,8 @@ static inline int cpu_to_node(int cpu)
 }
 #define early_cpu_to_node(cpu)	cpu_to_node(cpu)
 
-/* Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * cpumask_of_node function should be used whenever possible.
- */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
-
-static inline void setup_node_to_cpumask_map(void) { }
-
 #else /* CONFIG_X86_64 */
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t *node_to_cpumask_map;
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
@@ -91,8 +66,6 @@ DECLARE_PER_CPU(int, node_number);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *cpumask_of_node(int node);
-extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 
@@ -108,42 +81,32 @@ static inline int early_cpu_to_node(int cpu)
 	return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
+#endif /* CONFIG_X86_64 */
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+extern const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
 {
 	return node_to_cpumask_map[node];
 }
-
-#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+#endif
 
 extern void setup_node_to_cpumask_map(void);
 
 /*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-	const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-	v = cpumask_of_node(node)
-
-#endif /* CONFIG_X86_64 */
-
-/*
  * Returns the number of the node containing Node 'node'. This
  * architecture is flat, so it is a pretty simple function!
  */
 #define parent_node(node) (node)
 
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
-#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
 
 #ifdef CONFIG_X86_32
 extern unsigned long node_start_pfn[];
@@ -209,40 +172,24 @@ static inline int early_cpu_to_node(int cpu)
 	return 0;
 }
 
-static inline const cpumask_t *cpumask_of_node(int node)
-{
-	return &cpu_online_map;
-}
-static inline cpumask_t node_to_cpumask(int node)
+static inline const struct cpumask *cpumask_of_node(int node)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
 
-/*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-	const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-	v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
 
-extern cpumask_t cpu_coregroup_map(int cpu);
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
-#define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers		yes
@@ -256,7 +203,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
+#define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
 #define smt_capable()			(smp_num_siblings > 1)
 #endif
 
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index d806ecaa948f..676cdac385c0 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }
 
-static const cpumask_t *bigsmp_target_cpus(void)
+static const struct cpumask *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static int probe_bigsmp(void)
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 19588f2770ee..1c11b819f245 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -410,7 +410,7 @@ static void es7000_enable_apic_mode(void)
 		WARN(1, "Command failed, status = %x\n", mip_status);
 }
 
-static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -420,7 +420,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 
@@ -455,14 +456,14 @@ static int es7000_apic_id_registered(void)
 	return 1;
 }
 
-static const cpumask_t *target_cpus_cluster(void)
+static const struct cpumask *target_cpus_cluster(void)
 {
-	return &CPU_MASK_ALL;
+	return cpu_all_mask;
 }
 
-static const cpumask_t *es7000_target_cpus(void)
+static const struct cpumask *es7000_target_cpus(void)
 {
-	return &cpumask_of_cpu(smp_processor_id());
+	return cpumask_of(smp_processor_id());
 }
 
 static unsigned long
@@ -517,7 +518,7 @@ static void es7000_setup_apic_routing(void)
 		"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
 		"Physical Cluster" : "Logical Cluster",
-		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
 static int es7000_apicid_to_node(int logical_apicid)
@@ -572,7 +573,7 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
 	return 1;
 }
 
-static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	unsigned int round = 0;
 	int cpu, uninitialized_var(apicid);
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index bdfad80c3cf1..d6bd62407152 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
+static cpumask_var_t backtrace_mask;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	if (cpu_isset(cpu, backtrace_mask)) {
+	if (cpumask_test_cpu(cpu, backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 		dump_stack();
 		spin_unlock(&lock);
-		cpu_clear(cpu, backtrace_mask);
+		cpumask_clear_cpu(cpu, backtrace_mask);
 	}
 
 	/* Could check oops_in_progress here too, but it's safer not to */
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void)
 {
 	int i;
 
-	backtrace_mask = cpu_online_map;
+	cpumask_copy(backtrace_mask, cpu_online_mask);
 	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
 	for (i = 0; i < 10 * 1000; i++) {
-		if (cpus_empty(backtrace_mask))
+		if (cpumask_empty(backtrace_mask))
 			break;
 		mdelay(1);
 	}
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index ba2fc6465534..533e59c6fc82 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -334,9 +334,9 @@ static inline void numaq_smp_callin_clear_local_apic(void)
 	clear_local_APIC();
 }
 
-static inline const cpumask_t *numaq_target_cpus(void)
+static inline const struct cpumask *numaq_target_cpus(void)
 {
-	return &CPU_MASK_ALL;
+	return cpu_all_mask;
 }
 
 static inline unsigned long
@@ -427,7 +427,7 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	return 0x0F;
 }
@@ -462,7 +462,7 @@ static int probe_numaq(void)
 	return found_numaq;
 }
 
-static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -472,7 +472,8 @@ static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 static void numaq_setup_portio_remap(void)
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 141c99a1c264..01eda2ac65e4 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -83,7 +83,8 @@ static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /* should be called last. */
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index aac52fa873ff..9cfe1f415d81 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -53,23 +53,19 @@ static unsigned summit_get_apic_id(unsigned long x)
 	return (x >> 24) & 0xFF;
 }
 
-static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void summit_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	default_send_IPI_mask_sequence_logical(mask, vector);
 }
 
 static void summit_send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		summit_send_IPI_mask(&mask, vector);
+	default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
 }
 
 static void summit_send_IPI_all(int vector)
 {
-	summit_send_IPI_mask(&cpu_online_map, vector);
+	summit_send_IPI_mask(cpu_online_mask, vector);
 }
 
 #include <asm/tsc.h>
@@ -186,13 +182,13 @@ static inline int is_WPEG(struct rio_detail *rio){
 
 #define SUMMIT_APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static const cpumask_t *summit_target_cpus(void)
+static const struct cpumask *summit_target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0.  IRQ balancing will spread load
 	 */
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
@@ -289,7 +285,7 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
 	return 1;
 }
 
-static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	unsigned int round = 0;
 	int cpu, apicid = 0;
@@ -346,7 +342,7 @@ static int probe_summit(void)
 	return 0;
 }
 
-static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -356,7 +352,8 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 #ifdef CONFIG_X86_SUMMIT_NUMA
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index ac7783a67432..49e0939bac42 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -466,7 +466,7 @@ static const lookup_t error_table[] = {
  *	@err: APM BIOS return code
  *
  *	Write a meaningful log entry to the kernel log in the event of
- *	an APM error.
+ *	an APM error.  Note that this also handles (negative) kernel errors.
  */
 
 static void apm_error(char *str, int err)
@@ -478,43 +478,14 @@ static void apm_error(char *str, int err)
 			break;
 	if (i < ERROR_COUNT)
 		printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
+	else if (err < 0)
+		printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err);
 	else
 		printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
 		       str, err);
 }
 
 /*
- * Lock APM functionality to physical CPU 0
- */
-
-#ifdef CONFIG_SMP
-
-static cpumask_t apm_save_cpus(void)
-{
-	cpumask_t x = current->cpus_allowed;
-	/* Some bioses don't like being called from CPU != 0 */
-	set_cpus_allowed(current, cpumask_of_cpu(0));
-	BUG_ON(smp_processor_id() != 0);
-	return x;
-}
-
-static inline void apm_restore_cpus(cpumask_t mask)
-{
-	set_cpus_allowed(current, mask);
-}
-
-#else
-
-/*
- * No CPU lockdown needed on a uniprocessor
- */
-
-#define apm_save_cpus()		(current->cpus_allowed)
-#define apm_restore_cpus(x)	(void)(x)
-
-#endif
-
-/*
  * These are the actual BIOS calls.  Depending on APM_ZERO_SEGS and
  * apm_info.allow_ints, we are being really paranoid here!  Not only
  * are interrupts disabled, but all the segment registers (except SS)
@@ -568,16 +539,23 @@ static inline void apm_irq_restore(unsigned long flags)
 # define APM_DO_RESTORE_SEGS
 #endif
 
+struct apm_bios_call {
+	u32 func;
+	/* In and out */
+	u32 ebx;
+	u32 ecx;
+	/* Out only */
+	u32 eax;
+	u32 edx;
+	u32 esi;
+
+	/* Error: -ENOMEM, or bits 8-15 of eax */
+	int err;
+};
+
 /**
- *	apm_bios_call - Make an APM BIOS 32bit call
- *	@func: APM function to execute
- *	@ebx_in: EBX register for call entry
- *	@ecx_in: ECX register for call entry
- *	@eax: EAX register return
- *	@ebx: EBX register return
- *	@ecx: ECX register return
- *	@edx: EDX register return
- *	@esi: ESI register return
+ *	__apm_bios_call - Make an APM BIOS 32bit call
+ *	@_call: pointer to struct apm_bios_call.
  *
  *	Make an APM call using the 32bit protected mode interface. The
  *	caller is responsible for knowing if APM BIOS is configured and
@@ -586,80 +564,142 @@ static inline void apm_irq_restore(unsigned long flags)
 *	flag is loaded into AL.  If there is an error, then the error
 *	code is returned in AH (bits 8-15 of eax) and this function
 *	returns non-zero.
+ *
+ *	Note: this makes the call on the current CPU.
  */
-
-static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
-	u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi)
+static long __apm_bios_call(void *_call)
 {
 	APM_DECL_SEGS
 	unsigned long		flags;
-	cpumask_t		cpus;
 	int			cpu;
 	struct desc_struct	save_desc_40;
 	struct desc_struct	*gdt;
-
-	cpus = apm_save_cpus();
+	struct apm_bios_call	*call = _call;
 
 	cpu = get_cpu();
+	BUG_ON(cpu != 0);
 	gdt = get_cpu_gdt_table(cpu);
 	save_desc_40 = gdt[0x40 / 8];
 	gdt[0x40 / 8] = bad_bios_desc;
 
 	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
-	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
+	apm_bios_call_asm(call->func, call->ebx, call->ecx,
+			  &call->eax, &call->ebx, &call->ecx, &call->edx,
+			  &call->esi);
 	APM_DO_RESTORE_SEGS;
 	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
-	apm_restore_cpus(cpus);
 
-	return *eax & 0xff;
+	return call->eax & 0xff;
+}
+
+/* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */
+static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call)
+{
+	int ret;
+
+	/* Don't bother with work_on_cpu in the common case, so we don't
+	 * have to worry about OOM or overhead. */
+	if (get_cpu() == 0) {
+		ret = fn(call);
+		put_cpu();
+	} else {
+		put_cpu();
+		ret = work_on_cpu(0, fn, call);
+	}
+
+	/* work_on_cpu can fail with -ENOMEM */
+	if (ret < 0)
+		call->err = ret;
+	else
+		call->err = (call->eax >> 8) & 0xff;
+
+	return ret;
 }
 
 /**
- *	apm_bios_call_simple - make a simple APM BIOS 32bit call
- *	@func: APM function to invoke
- *	@ebx_in: EBX register value for BIOS call
- *	@ecx_in: ECX register value for BIOS call
- *	@eax: EAX register on return from the BIOS call
+ *	apm_bios_call - Make an APM BIOS 32bit call (on CPU 0)
+ *	@call: the apm_bios_call registers.
+ *
+ *	If there is an error, it is returned in @call.err.
+ */
+static int apm_bios_call(struct apm_bios_call *call)
+{
+	return on_cpu0(__apm_bios_call, call);
+}
+
+/**
+ *	__apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0)
+ *	@_call: pointer to struct apm_bios_call.
 *
 *	Make a BIOS call that returns one value only, or just status.
 *	If there is an error, then the error code is returned in AH
- *	(bits 8-15 of eax) and this function returns non-zero. This is
- *	used for simpler BIOS operations.  This call may hold interrupts
- *	off for a long time on some laptops.
+ *	(bits 8-15 of eax) and this function returns non-zero (it can
+ *	also return -ENOMEM). This is used for simpler BIOS operations.
+ *	This call may hold interrupts off for a long time on some laptops.
+ *
+ *	Note: this makes the call on the current CPU.
  */
-
-static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
+static long __apm_bios_call_simple(void *_call)
 {
 	u8			error;
 	APM_DECL_SEGS
 	unsigned long		flags;
-	cpumask_t		cpus;
 	int			cpu;
 	struct desc_struct	save_desc_40;
 	struct desc_struct	*gdt;
-
-	cpus = apm_save_cpus();
+	struct apm_bios_call	*call = _call;
 
 	cpu = get_cpu();
+	BUG_ON(cpu != 0);
 	gdt = get_cpu_gdt_table(cpu);
 	save_desc_40 = gdt[0x40 / 8];
 	gdt[0x40 / 8] = bad_bios_desc;
 
 	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
-	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
+	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
+					 &call->eax);
 	APM_DO_RESTORE_SEGS;
 	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
-	apm_restore_cpus(cpus);
 	return error;
 }
 
 /**
+ *	apm_bios_call_simple - make a simple APM BIOS 32bit call
+ *	@func: APM function to invoke
+ *	@ebx_in: EBX register value for BIOS call
+ *	@ecx_in: ECX register value for BIOS call
+ *	@eax: EAX register on return from the BIOS call
+ *	@err: bits
+ *
+ *	Make a BIOS call that returns one value only, or just status.
+ *	If there is an error, then the error code is returned in @err
+ *	and this function returns non-zero. This is used for simpler
+ *	BIOS operations.  This call may hold interrupts off for a long
+ *	time on some laptops.
+ */
+static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax,
+				int *err)
+{
+	struct apm_bios_call call;
+	int ret;
+
+	call.func = func;
+	call.ebx = ebx_in;
+	call.ecx = ecx_in;
+
+	ret = on_cpu0(__apm_bios_call_simple, &call);
+	*eax = call.eax;
+	*err = call.err;
+	return ret;
+}
+
+/**
 *	apm_driver_version	-	APM driver version
 *	@val: loaded with the APM version on return
 *
@@ -678,9 +718,10 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 static int apm_driver_version(u_short *val)
 {
 	u32 eax;
+	int err;
 
-	if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax))
-		return (eax >> 8) & 0xff;
+	if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err))
+		return err;
 	*val = eax;
 	return APM_SUCCESS;
 }
@@ -701,22 +742,21 @@ static int apm_driver_version(u_short *val)
 * that APM 1.2 is in use. If no messges are pending the value 0x80
 * is returned (No power management events pending).
 */
-
 static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
 {
-	u32 eax;
-	u32 ebx;
-	u32 ecx;
-	u32 dummy;
+	struct apm_bios_call call;
 
-	if (apm_bios_call(APM_FUNC_GET_EVENT, 0, 0, &eax, &ebx, &ecx,
-			  &dummy, &dummy))
-		return (eax >> 8) & 0xff;
-	*event = ebx;
+	call.func = APM_FUNC_GET_EVENT;
+	call.ebx = call.ecx = 0;
+
+	if (apm_bios_call(&call))
+		return call.err;
+
+	*event = call.ebx;
 	if (apm_info.connection_version < 0x0102)
 		*info = ~0; /* indicate info not valid */
 	else
-		*info = ecx;
+		*info = call.ecx;
 	return APM_SUCCESS;
 }
 
@@ -737,9 +777,10 @@ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
 static int set_power_state(u_short what, u_short state)
 {
 	u32 eax;
+	int err;
 
-	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax))
-		return (eax >> 8) & 0xff;
+	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err))
+		return err;
 	return APM_SUCCESS;
 }
 
@@ -770,6 +811,7 @@ static int apm_do_idle(void)
 	u8 ret = 0;
 	int idled = 0;
 	int polling;
+	int err;
 
 	polling = !!(current_thread_info()->status & TS_POLLING);
 	if (polling) {
@@ -782,7 +824,7 @@ static int apm_do_idle(void)
 	}
 	if (!need_resched()) {
 		idled = 1;
-		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
+		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err);
 	}
 	if (polling)
 		current_thread_info()->status |= TS_POLLING;
@@ -797,8 +839,7 @@ static int apm_do_idle(void)
 		 * Only report the failure the first 5 times.
 		 */
 		if (++t < 5) {
-			printk(KERN_DEBUG "apm_do_idle failed (%d)\n",
-			       (eax >> 8) & 0xff);
+			printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
 			t = jiffies;
 		}
 		return -1;
@@ -816,9 +857,10 @@ static int apm_do_idle(void)
 static void apm_do_busy(void)
 {
 	u32 dummy;
+	int err;
 
 	if (clock_slowed || ALWAYS_CALL_BUSY) {
-		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
+		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err);
 		clock_slowed = 0;
 	}
 }
@@ -937,7 +979,7 @@ static void apm_power_off(void)
 
 	/* Some bioses don't like being called from CPU != 0 */
 	if (apm_info.realmode_power_off) {
-		(void)apm_save_cpus();
+		set_cpus_allowed_ptr(current, cpumask_of(0));
 		machine_real_restart(po_bios_call, sizeof(po_bios_call));
 	} else {
 		(void)set_system_power_state(APM_STATE_OFF);
@@ -956,12 +998,13 @@ static void apm_power_off(void)
 static int apm_enable_power_management(int enable)
 {
 	u32 eax;
+	int err;
 
 	if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
 		return APM_NOT_ENGAGED;
 	if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
-				 enable, &eax))
-		return (eax >> 8) & 0xff;
+				 enable, &eax, &err))
+		return err;
 	if (enable)
 		apm_info.bios.flags &= ~APM_BIOS_DISABLED;
 	else
@@ -986,24 +1029,23 @@ static int apm_enable_power_management(int enable)
986 1029
 static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
 {
-	u32 eax;
-	u32 ebx;
-	u32 ecx;
-	u32 edx;
-	u32 dummy;
+	struct apm_bios_call call;
+
+	call.func = APM_FUNC_GET_STATUS;
+	call.ebx = APM_DEVICE_ALL;
+	call.ecx = 0;
 
 	if (apm_info.get_power_status_broken)
 		return APM_32_UNSUPPORTED;
-	if (apm_bios_call(APM_FUNC_GET_STATUS, APM_DEVICE_ALL, 0,
-			  &eax, &ebx, &ecx, &edx, &dummy))
-		return (eax >> 8) & 0xff;
-	*status = ebx;
-	*bat = ecx;
+	if (apm_bios_call(&call))
+		return call.err;
+	*status = call.ebx;
+	*bat = call.ecx;
 	if (apm_info.get_power_status_swabinminutes) {
-		*life = swab16((u16)edx);
+		*life = swab16((u16)call.edx);
 		*life |= 0x8000;
 	} else
-		*life = edx;
+		*life = call.edx;
 	return APM_SUCCESS;
 }
 
1009 1051
@@ -1048,12 +1090,14 @@ static int apm_get_battery_status(u_short which, u_short *status,
 static int apm_engage_power_management(u_short device, int enable)
 {
 	u32 eax;
+	int err;
 
 	if ((enable == 0) && (device == APM_DEVICE_ALL)
 	    && (apm_info.bios.flags & APM_BIOS_DISABLED))
 		return APM_DISABLED;
-	if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable, &eax))
-		return (eax >> 8) & 0xff;
+	if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable,
+				 &eax, &err))
+		return err;
 	if (device == APM_DEVICE_ALL) {
 		if (enable)
 			apm_info.bios.flags &= ~APM_BIOS_DISENGAGED;
@@ -1689,16 +1733,14 @@ static int apm(void *unused)
 	char *power_stat;
 	char *bat_stat;
 
-#ifdef CONFIG_SMP
 	/* 2002/08/01 - WT
 	 * This is to avoid random crashes at boot time during initialization
 	 * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D.
 	 * Some bioses don't like being called from CPU != 0.
 	 * Method suggested by Ingo Molnar.
 	 */
-	set_cpus_allowed(current, cpumask_of_cpu(0));
+	set_cpus_allowed_ptr(current, cpumask_of(0));
 	BUG_ON(smp_processor_id() != 0);
-#endif
 
 	if (apm_info.connection_version == 0) {
 		apm_info.connection_version = apm_info.bios.version;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e2962cc1e27b..c4f667896c28 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -41,8 +41,6 @@
 
 #include "cpu.h"
 
-#ifdef CONFIG_X86_64
-
 /* all of these masks are initialized in setup_cpu_local_masks() */
 cpumask_var_t cpu_initialized_mask;
 cpumask_var_t cpu_callout_mask;
@@ -60,16 +58,6 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-#else /* CONFIG_X86_32 */
-
-cpumask_t cpu_sibling_setup_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_initialized;
-cpumask_t cpu_callin_map;
-
-#endif /* CONFIG_X86_32 */
-
-
 static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
@@ -859,6 +847,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
+	init_c1e_mask();
 #ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 41ed94915f97..6ac55bd341ae 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -211,7 +211,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	/* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index a15ac94e0b9b..4709ead2db52 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -54,7 +54,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+static inline const struct cpumask *cpu_core_mask(int cpu)
+{
+	return cpumask_of(0);
+}
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -699,7 +702,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
 
 	dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
 	data->powernow_table = powernow_table;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
 
 	for (j = 0; j < data->numps; j++)
@@ -862,7 +865,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
 	/* fill in data */
 	data->numps = data->acpi_data.state_count;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
 
@@ -1300,7 +1303,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (cpu_family == CPU_HW_PSTATE)
 		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
 	else
-		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
 	if (cpu_family == CPU_HW_PSTATE)
@@ -1365,7 +1368,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	unsigned int khz = 0;
 	unsigned int first;
 
-	first = first_cpu(per_cpu(cpu_core_map, cpu));
+	first = cpumask_first(cpu_core_mask(cpu));
 	data = per_cpu(powernow_data, first);
 
 	if (!data)
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 8bbb11adb315..016c1a4fa3fc 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -321,7 +321,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	cpus_allowed = current->cpus_allowed;
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c471eb1a389c..483eda96e102 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -159,7 +159,7 @@ struct _cpuid4_info_regs {
 	unsigned long can_disable;
 };
 
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
 static struct pci_device_id k8_nb_id[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
@@ -324,15 +324,6 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 	return 0;
 }
 
-static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
-{
-	struct _cpuid4_info_regs *leaf_regs =
-		(struct _cpuid4_info_regs *)this_leaf;
-
-	return cpuid4_cache_lookup_regs(index, leaf_regs);
-}
-
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int		eax, ebx, ecx, edx;
@@ -508,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	return l2;
 }
 
+#ifdef CONFIG_SYSFS
+
 /* pointer to _cpuid4_info array (for each cache leaf) */
 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
@@ -571,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static void __cpuinit get_cpu_leaves(void *_retval)
 {
 	int j, *retval = _retval, cpu = smp_processor_id();
@@ -612,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	return retval;
 }
 
-#ifdef CONFIG_SYSFS
-
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index ca14604611ec..863f89568b1a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -990,7 +990,7 @@ static struct sysdev_attribute *mce_attributes[] = {
 	NULL
 };
 
-static cpumask_t mce_device_initialized = CPU_MASK_NONE;
+static cpumask_var_t mce_device_initialized;
 
 /* Per cpu sysdev init.  All of the cpus still share the same ctl bank */
 static __cpuinit int mce_create_device(unsigned int cpu)
@@ -1021,7 +1021,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 		if (err)
 			goto error2;
 	}
-	cpu_set(cpu, mce_device_initialized);
+	cpumask_set_cpu(cpu, mce_device_initialized);
 
 	return 0;
 error2:
@@ -1043,7 +1043,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 {
 	int i;
 
-	if (!cpu_isset(cpu, mce_device_initialized))
+	if (!cpumask_test_cpu(cpu, mce_device_initialized))
 		return;
 
 	for (i = 0; mce_attributes[i]; i++)
@@ -1053,7 +1053,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 		sysdev_remove_file(&per_cpu(device_mce, cpu),
 				   &bank_attrs[i]);
 	sysdev_unregister(&per_cpu(device_mce,cpu));
-	cpu_clear(cpu, mce_device_initialized);
+	cpumask_clear_cpu(cpu, mce_device_initialized);
 }
 
 /* Make sure there are no machine checks on offlined CPUs. */
@@ -1162,6 +1162,8 @@ static __init int mce_init_device(void)
 	if (!mce_available(&boot_cpu_data))
 		return -EIO;
 
+	alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
+
 	err = mce_init_banks();
 	if (err)
 		return err;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7d01be868870..56dde9c4bc96 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -485,7 +485,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
485 485
486#ifdef CONFIG_SMP 486#ifdef CONFIG_SMP
487 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ 487 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
488 i = cpumask_first(&per_cpu(cpu_core_map, cpu)); 488 i = cpumask_first(cpu_core_mask(cpu));
489 489
490 /* first core not up yet */ 490 /* first core not up yet */
491 if (cpu_data(i).cpu_core_id) 491 if (cpu_data(i).cpu_core_id)
@@ -505,7 +505,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
505 if (err) 505 if (err)
506 goto out; 506 goto out;
507 507
508 cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); 508 cpumask_copy(b->cpus, cpu_core_mask(cpu));
509 per_cpu(threshold_banks, cpu)[bank] = b; 509 per_cpu(threshold_banks, cpu)[bank] = b;
510 goto out; 510 goto out;
511 } 511 }
@@ -529,7 +529,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
529#ifndef CONFIG_SMP 529#ifndef CONFIG_SMP
530 cpumask_setall(b->cpus); 530 cpumask_setall(b->cpus);
531#else 531#else
532 cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); 532 cpumask_copy(b->cpus, cpu_core_mask(cpu));
533#endif 533#endif
534 534
535 per_cpu(threshold_banks, cpu)[bank] = b; 535 per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 57df3d383470..d6b72df89d69 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -249,7 +249,7 @@ void cmci_rediscover(int dying)
249 for_each_online_cpu (cpu) { 249 for_each_online_cpu (cpu) {
250 if (cpu == dying) 250 if (cpu == dying)
251 continue; 251 continue;
252 if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu))) 252 if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
253 continue; 253 continue;
254 /* Recheck banks in case CPUs don't all have the same */ 254 /* Recheck banks in case CPUs don't all have the same */
255 if (cmci_supported(&banks)) 255 if (cmci_supported(&banks))
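
cpumask_of(cpu) above replaces &cpumask_of_cpu(cpu): it yields a const struct cpumask * into a static table instead of constructing a full cpumask_t by value, so nothing is copied to the stack. A sketch of the pinning idiom, including the restore step that cmci_rediscover() handles elsewhere (helper names hypothetical):

#include <linux/cpumask.h>
#include <linux/sched.h>

static void run_briefly_on(unsigned int cpu)	/* hypothetical helper */
{
	cpumask_var_t old;

	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	if (!set_cpus_allowed_ptr(current, cpumask_of(cpu)))
		do_local_work();		/* hypothetical; now runs on 'cpu' */

	set_cpus_allowed_ptr(current, old);	/* restore the old binding */
	free_cpumask_var(old);
}
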
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index d67e0e48bc2d..f93047fed791 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
14 if (c->x86_max_cores * smp_num_siblings > 1) { 14 if (c->x86_max_cores * smp_num_siblings > 1) {
15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
16 seq_printf(m, "siblings\t: %d\n", 16 seq_printf(m, "siblings\t: %d\n",
17 cpus_weight(per_cpu(cpu_core_map, cpu))); 17 cpumask_weight(cpu_sibling_mask(cpu)));
18 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); 18 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
19 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 19 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
20 seq_printf(m, "apicid\t\t: %d\n", c->apicid); 20 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
143static void *c_start(struct seq_file *m, loff_t *pos) 143static void *c_start(struct seq_file *m, loff_t *pos)
144{ 144{
145 if (*pos == 0) /* just in case, cpu 0 is not the first */ 145 if (*pos == 0) /* just in case, cpu 0 is not the first */
146 *pos = first_cpu(cpu_online_map); 146 *pos = cpumask_first(cpu_online_mask);
147 else 147 else
148 *pos = next_cpu_nr(*pos - 1, cpu_online_map); 148 *pos = cpumask_next(*pos - 1, cpu_online_mask);
149 if ((*pos) < nr_cpu_ids) 149 if ((*pos) < nr_cpu_ids)
150 return &cpu_data(*pos); 150 return &cpu_data(*pos);
151 return NULL; 151 return NULL;
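
The c_start() change above is the generic pattern for walking a cpumask with the pointer-based API: cpumask_first() returns the lowest set CPU, cpumask_next(n, mask) the lowest set CPU greater than n, and both return a value >= nr_cpu_ids when the mask is exhausted. A standalone sketch:

#include <linux/kernel.h>
#include <linux/cpumask.h>

static void walk_online_cpus(void)	/* hypothetical */
{
	unsigned int cpu;

	for (cpu = cpumask_first(cpu_online_mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, cpu_online_mask))
		pr_info("cpu %u is online\n", cpu);
}
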
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index c25fdb382292..453b5795a5c6 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -12,31 +12,30 @@
12 * 12 *
13 * Licensed under the terms of the GNU General Public 13 * Licensed under the terms of the GNU General Public
14 * License version 2. See file COPYING for details. 14 * License version 2. See file COPYING for details.
15*/ 15 */
16 16#include <linux/platform_device.h>
17#include <linux/capability.h> 17#include <linux/capability.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/sched.h>
21#include <linux/cpumask.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/vmalloc.h>
25#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
19#include <linux/firmware.h>
26#include <linux/spinlock.h> 20#include <linux/spinlock.h>
27#include <linux/mm.h> 21#include <linux/cpumask.h>
28#include <linux/fs.h> 22#include <linux/pci_ids.h>
23#include <linux/uaccess.h>
24#include <linux/vmalloc.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
29#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/sched.h>
29#include <linux/init.h>
30#include <linux/slab.h>
30#include <linux/cpu.h> 31#include <linux/cpu.h>
31#include <linux/firmware.h>
32#include <linux/platform_device.h>
33#include <linux/pci.h> 32#include <linux/pci.h>
34#include <linux/pci_ids.h> 33#include <linux/fs.h>
35#include <linux/uaccess.h> 34#include <linux/mm.h>
36 35
37#include <asm/msr.h>
38#include <asm/processor.h>
39#include <asm/microcode.h> 36#include <asm/microcode.h>
37#include <asm/processor.h>
38#include <asm/msr.h>
40 39
41MODULE_DESCRIPTION("AMD Microcode Update Driver"); 40MODULE_DESCRIPTION("AMD Microcode Update Driver");
42MODULE_AUTHOR("Peter Oruba"); 41MODULE_AUTHOR("Peter Oruba");
@@ -72,8 +71,8 @@ struct microcode_header_amd {
72} __attribute__((packed)); 71} __attribute__((packed));
73 72
74struct microcode_amd { 73struct microcode_amd {
75 struct microcode_header_amd hdr; 74 struct microcode_header_amd hdr;
76 unsigned int mpb[0]; 75 unsigned int mpb[0];
77}; 76};
78 77
79#define UCODE_MAX_SIZE 2048 78#define UCODE_MAX_SIZE 2048
@@ -184,8 +183,8 @@ static int get_ucode_data(void *to, const u8 *from, size_t n)
184 return 0; 183 return 0;
185} 184}
186 185
187static void *get_next_ucode(const u8 *buf, unsigned int size, 186static void *
188 unsigned int *mc_size) 187get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
189{ 188{
190 unsigned int total_size; 189 unsigned int total_size;
191 u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; 190 u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
@@ -223,7 +222,6 @@ static void *get_next_ucode(const u8 *buf, unsigned int size,
223 return mc; 222 return mc;
224} 223}
225 224
226
227static int install_equiv_cpu_table(const u8 *buf) 225static int install_equiv_cpu_table(const u8 *buf)
228{ 226{
229 u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; 227 u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE];
@@ -372,4 +370,3 @@ struct microcode_ops * __init init_amd_microcode(void)
372{ 370{
373 return &microcode_amd_ops; 371 return &microcode_amd_ops;
374} 372}
375
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index c9b721ba968c..a0f3851ef310 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -70,67 +70,78 @@
70 * Fix sigmatch() macro to handle old CPUs with pf == 0. 70 * Fix sigmatch() macro to handle old CPUs with pf == 0.
71 * Thanks to Stuart Swales for pointing out this bug. 71 * Thanks to Stuart Swales for pointing out this bug.
72 */ 72 */
73#include <linux/platform_device.h>
73#include <linux/capability.h> 74#include <linux/capability.h>
74#include <linux/kernel.h> 75#include <linux/miscdevice.h>
75#include <linux/init.h> 76#include <linux/firmware.h>
76#include <linux/sched.h>
77#include <linux/smp_lock.h> 77#include <linux/smp_lock.h>
78#include <linux/spinlock.h>
78#include <linux/cpumask.h> 79#include <linux/cpumask.h>
79#include <linux/module.h> 80#include <linux/uaccess.h>
80#include <linux/slab.h>
81#include <linux/vmalloc.h> 81#include <linux/vmalloc.h>
82#include <linux/miscdevice.h> 82#include <linux/kernel.h>
83#include <linux/spinlock.h> 83#include <linux/module.h>
84#include <linux/mm.h>
85#include <linux/fs.h>
86#include <linux/mutex.h> 84#include <linux/mutex.h>
85#include <linux/sched.h>
86#include <linux/init.h>
87#include <linux/slab.h>
87#include <linux/cpu.h> 88#include <linux/cpu.h>
88#include <linux/firmware.h> 89#include <linux/fs.h>
89#include <linux/platform_device.h> 90#include <linux/mm.h>
90 91
91#include <asm/msr.h>
92#include <asm/uaccess.h>
93#include <asm/processor.h>
94#include <asm/microcode.h> 92#include <asm/microcode.h>
93#include <asm/processor.h>
94#include <asm/msr.h>
95 95
96MODULE_DESCRIPTION("Microcode Update Driver"); 96MODULE_DESCRIPTION("Microcode Update Driver");
97MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); 97MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
98MODULE_LICENSE("GPL"); 98MODULE_LICENSE("GPL");
99 99
100#define MICROCODE_VERSION "2.00" 100#define MICROCODE_VERSION "2.00"
101 101
102static struct microcode_ops *microcode_ops; 102static struct microcode_ops *microcode_ops;
103 103
104/* no concurrent ->write()s are allowed on /dev/cpu/microcode */ 104/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
105static DEFINE_MUTEX(microcode_mutex); 105static DEFINE_MUTEX(microcode_mutex);
106 106
107struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; 107struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
108EXPORT_SYMBOL_GPL(ucode_cpu_info); 108EXPORT_SYMBOL_GPL(ucode_cpu_info);
109 109
110#ifdef CONFIG_MICROCODE_OLD_INTERFACE 110#ifdef CONFIG_MICROCODE_OLD_INTERFACE
111struct update_for_cpu {
112 const void __user *buf;
113 size_t size;
114};
115
116static long update_for_cpu(void *_ufc)
117{
118 struct update_for_cpu *ufc = _ufc;
119 int error;
120
121 error = microcode_ops->request_microcode_user(smp_processor_id(),
122 ufc->buf, ufc->size);
123 if (error < 0)
124 return error;
125 if (!error)
126 microcode_ops->apply_microcode(smp_processor_id());
127 return error;
128}
129
111static int do_microcode_update(const void __user *buf, size_t size) 130static int do_microcode_update(const void __user *buf, size_t size)
112{ 131{
113 cpumask_t old;
114 int error = 0; 132 int error = 0;
115 int cpu; 133 int cpu;
116 134 struct update_for_cpu ufc = { .buf = buf, .size = size };
117 old = current->cpus_allowed;
118 135
119 for_each_online_cpu(cpu) { 136 for_each_online_cpu(cpu) {
120 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 137 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
121 138
122 if (!uci->valid) 139 if (!uci->valid)
123 continue; 140 continue;
124 141 error = work_on_cpu(cpu, update_for_cpu, &ufc);
125 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
126 error = microcode_ops->request_microcode_user(cpu, buf, size);
127 if (error < 0) 142 if (error < 0)
128 goto out; 143 break;
129 if (!error)
130 microcode_ops->apply_microcode(cpu);
131 } 144 }
132out:
133 set_cpus_allowed_ptr(current, &old);
134 return error; 145 return error;
135} 146}
136 147
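
do_microcode_update() above is the first of several conversions from the "save cpus_allowed, rebind current, restore" idiom to work_on_cpu(), which runs a function on the target CPU in process context and returns its long result. A minimal sketch of the pattern, with hypothetical names (apply_update stands in for the real per-CPU operation):

#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/cpu.h>

struct my_args {			/* hypothetical */
	const void __user *buf;
	size_t size;
};

static long my_update_local(void *_args)	/* runs on the chosen CPU */
{
	struct my_args *args = _args;

	return apply_update(smp_processor_id(), args->buf, args->size);
}

static long my_update_all(const void __user *buf, size_t size)
{
	struct my_args args = { .buf = buf, .size = size };
	long err = 0;
	int cpu;

	/* callers typically bracket this with get_online_cpus() */
	for_each_online_cpu(cpu) {
		err = work_on_cpu(cpu, my_update_local, &args);
		if (err < 0)
			break;
	}
	return err;
}
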
@@ -198,18 +209,33 @@ static void microcode_dev_exit(void)
198 209
199MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); 210MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
200#else 211#else
201#define microcode_dev_init() 0 212#define microcode_dev_init() 0
202#define microcode_dev_exit() do { } while (0) 213#define microcode_dev_exit() do { } while (0)
203#endif 214#endif
204 215
205/* fake device for request_firmware */ 216/* fake device for request_firmware */
206static struct platform_device *microcode_pdev; 217static struct platform_device *microcode_pdev;
218
219static long reload_for_cpu(void *unused)
220{
221 struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id();
222 int err = 0;
223
224 mutex_lock(&microcode_mutex);
225 if (uci->valid) {
226 err = microcode_ops->request_microcode_fw(smp_processor_id(),
227 &microcode_pdev->dev);
228 if (!err)
229 microcode_ops->apply_microcode(smp_processor_id());
230 }
231 mutex_unlock(&microcode_mutex);
232 return err;
233}
207 234
208static ssize_t reload_store(struct sys_device *dev, 235static ssize_t reload_store(struct sys_device *dev,
209 struct sysdev_attribute *attr, 236 struct sysdev_attribute *attr,
210 const char *buf, size_t sz) 237 const char *buf, size_t sz)
211{ 238{
212 struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
213 char *end; 239 char *end;
214 unsigned long val = simple_strtoul(buf, &end, 0); 240 unsigned long val = simple_strtoul(buf, &end, 0);
215 int err = 0; 241 int err = 0;
@@ -218,21 +244,9 @@ static ssize_t reload_store(struct sys_device *dev,
218 if (end == buf) 244 if (end == buf)
219 return -EINVAL; 245 return -EINVAL;
220 if (val == 1) { 246 if (val == 1) {
221 cpumask_t old = current->cpus_allowed;
222
223 get_online_cpus(); 247 get_online_cpus();
224 if (cpu_online(cpu)) { 248 if (cpu_online(cpu))
225 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 249 err = work_on_cpu(cpu, reload_for_cpu, NULL);
226 mutex_lock(&microcode_mutex);
227 if (uci->valid) {
228 err = microcode_ops->request_microcode_fw(cpu,
229 &microcode_pdev->dev);
230 if (!err)
231 microcode_ops->apply_microcode(cpu);
232 }
233 mutex_unlock(&microcode_mutex);
234 set_cpus_allowed_ptr(current, &old);
235 }
236 put_online_cpus(); 250 put_online_cpus();
237 } 251 }
238 if (err) 252 if (err)
@@ -268,8 +282,8 @@ static struct attribute *mc_default_attrs[] = {
268}; 282};
269 283
270static struct attribute_group mc_attr_group = { 284static struct attribute_group mc_attr_group = {
271 .attrs = mc_default_attrs, 285 .attrs = mc_default_attrs,
272 .name = "microcode", 286 .name = "microcode",
273}; 287};
274 288
275static void __microcode_fini_cpu(int cpu) 289static void __microcode_fini_cpu(int cpu)
@@ -328,9 +342,9 @@ static int microcode_resume_cpu(int cpu)
328 return 0; 342 return 0;
329} 343}
330 344
331static void microcode_update_cpu(int cpu) 345static long microcode_update_cpu(void *unused)
332{ 346{
333 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 347 struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id();
334 int err = 0; 348 int err = 0;
335 349
336 /* 350 /*
@@ -338,30 +352,27 @@ static void microcode_update_cpu(int cpu)
338 * otherwise just request a firmware: 352 * otherwise just request a firmware:
339 */ 353 */
340 if (uci->valid) { 354 if (uci->valid) {
341 err = microcode_resume_cpu(cpu); 355 err = microcode_resume_cpu(smp_processor_id());
342 } else { 356 } else {
343 collect_cpu_info(cpu); 357 collect_cpu_info(smp_processor_id());
344 if (uci->valid && system_state == SYSTEM_RUNNING) 358 if (uci->valid && system_state == SYSTEM_RUNNING)
345 err = microcode_ops->request_microcode_fw(cpu, 359 err = microcode_ops->request_microcode_fw(
360 smp_processor_id(),
346 &microcode_pdev->dev); 361 &microcode_pdev->dev);
347 } 362 }
348 if (!err) 363 if (!err)
349 microcode_ops->apply_microcode(cpu); 364 microcode_ops->apply_microcode(smp_processor_id());
365 return err;
350} 366}
351 367
352static void microcode_init_cpu(int cpu) 368static int microcode_init_cpu(int cpu)
353{ 369{
354 cpumask_t old = current->cpus_allowed; 370 int err;
355
356 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
357 /* We should bind the task to the CPU */
358 BUG_ON(raw_smp_processor_id() != cpu);
359
360 mutex_lock(&microcode_mutex); 371 mutex_lock(&microcode_mutex);
361 microcode_update_cpu(cpu); 372 err = work_on_cpu(cpu, microcode_update_cpu, NULL);
362 mutex_unlock(&microcode_mutex); 373 mutex_unlock(&microcode_mutex);
363 374
364 set_cpus_allowed_ptr(current, &old); 375 return err;
365} 376}
366 377
367static int mc_sysdev_add(struct sys_device *sys_dev) 378static int mc_sysdev_add(struct sys_device *sys_dev)
@@ -379,8 +390,11 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
379 if (err) 390 if (err)
380 return err; 391 return err;
381 392
382 microcode_init_cpu(cpu); 393 err = microcode_init_cpu(cpu);
383 return 0; 394 if (err)
395 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
396
397 return err;
384} 398}
385 399
386static int mc_sysdev_remove(struct sys_device *sys_dev) 400static int mc_sysdev_remove(struct sys_device *sys_dev)
@@ -404,14 +418,14 @@ static int mc_sysdev_resume(struct sys_device *dev)
404 return 0; 418 return 0;
405 419
406 /* only CPU 0 will apply ucode here */ 420 /* only CPU 0 will apply ucode here */
407 microcode_update_cpu(0); 421 microcode_update_cpu(NULL);
408 return 0; 422 return 0;
409} 423}
410 424
411static struct sysdev_driver mc_sysdev_driver = { 425static struct sysdev_driver mc_sysdev_driver = {
412 .add = mc_sysdev_add, 426 .add = mc_sysdev_add,
413 .remove = mc_sysdev_remove, 427 .remove = mc_sysdev_remove,
414 .resume = mc_sysdev_resume, 428 .resume = mc_sysdev_resume,
415}; 429};
416 430
417static __cpuinit int 431static __cpuinit int
@@ -424,7 +438,9 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
424 switch (action) { 438 switch (action) {
425 case CPU_ONLINE: 439 case CPU_ONLINE:
426 case CPU_ONLINE_FROZEN: 440 case CPU_ONLINE_FROZEN:
427 microcode_init_cpu(cpu); 441 if (microcode_init_cpu(cpu))
442 printk(KERN_ERR "microcode: failed to init CPU%d\n",
443 cpu);
428 case CPU_DOWN_FAILED: 444 case CPU_DOWN_FAILED:
429 case CPU_DOWN_FAILED_FROZEN: 445 case CPU_DOWN_FAILED_FROZEN:
430 pr_debug("microcode: CPU%d added\n", cpu); 446 pr_debug("microcode: CPU%d added\n", cpu);
@@ -448,7 +464,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
448} 464}
449 465
450static struct notifier_block __refdata mc_cpu_notifier = { 466static struct notifier_block __refdata mc_cpu_notifier = {
451 .notifier_call = mc_cpu_callback, 467 .notifier_call = mc_cpu_callback,
452}; 468};
453 469
454static int __init microcode_init(void) 470static int __init microcode_init(void)
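
mc_cpu_callback() above now reports microcode_init_cpu() failures but still falls through to the CPU_DOWN_FAILED cases for the existing debug message. The general shape of such a hotplug notifier, sketched with hypothetical helpers:

#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit my_cpu_callback(struct notifier_block *nb,
				     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (my_init_cpu(cpu))		/* hypothetical */
			printk(KERN_ERR "failed to init CPU%u\n", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		my_fini_cpu(cpu);		/* hypothetical */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};
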
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 5e9f4fc51385..149b9ec7c1ab 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -70,28 +70,28 @@
70 * Fix sigmatch() macro to handle old CPUs with pf == 0. 70 * Fix sigmatch() macro to handle old CPUs with pf == 0.
71 * Thanks to Stuart Swales for pointing out this bug. 71 * Thanks to Stuart Swales for pointing out this bug.
72 */ 72 */
73#include <linux/platform_device.h>
73#include <linux/capability.h> 74#include <linux/capability.h>
74#include <linux/kernel.h> 75#include <linux/miscdevice.h>
75#include <linux/init.h> 76#include <linux/firmware.h>
76#include <linux/sched.h>
77#include <linux/smp_lock.h> 77#include <linux/smp_lock.h>
78#include <linux/spinlock.h>
78#include <linux/cpumask.h> 79#include <linux/cpumask.h>
79#include <linux/module.h> 80#include <linux/uaccess.h>
80#include <linux/slab.h>
81#include <linux/vmalloc.h> 81#include <linux/vmalloc.h>
82#include <linux/miscdevice.h> 82#include <linux/kernel.h>
83#include <linux/spinlock.h> 83#include <linux/module.h>
84#include <linux/mm.h>
85#include <linux/fs.h>
86#include <linux/mutex.h> 84#include <linux/mutex.h>
85#include <linux/sched.h>
86#include <linux/init.h>
87#include <linux/slab.h>
87#include <linux/cpu.h> 88#include <linux/cpu.h>
88#include <linux/firmware.h> 89#include <linux/fs.h>
89#include <linux/platform_device.h> 90#include <linux/mm.h>
90#include <linux/uaccess.h>
91 91
92#include <asm/msr.h>
93#include <asm/processor.h>
94#include <asm/microcode.h> 92#include <asm/microcode.h>
93#include <asm/processor.h>
94#include <asm/msr.h>
95 95
96MODULE_DESCRIPTION("Microcode Update Driver"); 96MODULE_DESCRIPTION("Microcode Update Driver");
97MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); 97MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -129,12 +129,13 @@ struct extended_sigtable {
129 struct extended_signature sigs[0]; 129 struct extended_signature sigs[0];
130}; 130};
131 131
132#define DEFAULT_UCODE_DATASIZE (2000) 132#define DEFAULT_UCODE_DATASIZE (2000)
133#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel)) 133#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
134#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) 134#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
135#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) 135#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
136#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) 136#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
137#define DWSIZE (sizeof(u32)) 137#define DWSIZE (sizeof(u32))
138
138#define get_totalsize(mc) \ 139#define get_totalsize(mc) \
139 (((struct microcode_intel *)mc)->hdr.totalsize ? \ 140 (((struct microcode_intel *)mc)->hdr.totalsize ? \
140 ((struct microcode_intel *)mc)->hdr.totalsize : \ 141 ((struct microcode_intel *)mc)->hdr.totalsize : \
@@ -197,30 +198,31 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
197} 198}
198 199
199static inline int 200static inline int
200update_match_revision(struct microcode_header_intel *mc_header, int rev) 201update_match_revision(struct microcode_header_intel *mc_header, int rev)
201{ 202{
202 return (mc_header->rev <= rev) ? 0 : 1; 203 return (mc_header->rev <= rev) ? 0 : 1;
203} 204}
204 205
205static int microcode_sanity_check(void *mc) 206static int microcode_sanity_check(void *mc)
206{ 207{
208 unsigned long total_size, data_size, ext_table_size;
207 struct microcode_header_intel *mc_header = mc; 209 struct microcode_header_intel *mc_header = mc;
208 struct extended_sigtable *ext_header = NULL; 210 struct extended_sigtable *ext_header = NULL;
209 struct extended_signature *ext_sig;
210 unsigned long total_size, data_size, ext_table_size;
211 int sum, orig_sum, ext_sigcount = 0, i; 211 int sum, orig_sum, ext_sigcount = 0, i;
212 struct extended_signature *ext_sig;
212 213
213 total_size = get_totalsize(mc_header); 214 total_size = get_totalsize(mc_header);
214 data_size = get_datasize(mc_header); 215 data_size = get_datasize(mc_header);
216
215 if (data_size + MC_HEADER_SIZE > total_size) { 217 if (data_size + MC_HEADER_SIZE > total_size) {
216 printk(KERN_ERR "microcode: error! " 218 printk(KERN_ERR "microcode: error! "
217 "Bad data size in microcode data file\n"); 219 "Bad data size in microcode data file\n");
218 return -EINVAL; 220 return -EINVAL;
219 } 221 }
220 222
221 if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { 223 if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
222 printk(KERN_ERR "microcode: error! " 224 printk(KERN_ERR "microcode: error! "
223 "Unknown microcode update format\n"); 225 "Unknown microcode update format\n");
224 return -EINVAL; 226 return -EINVAL;
225 } 227 }
226 ext_table_size = total_size - (MC_HEADER_SIZE + data_size); 228 ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
@@ -318,11 +320,15 @@ get_matching_microcode(struct cpu_signature *cpu_sig, void *mc, int rev)
318 320
319static void apply_microcode(int cpu) 321static void apply_microcode(int cpu)
320{ 322{
323 struct microcode_intel *mc_intel;
324 struct ucode_cpu_info *uci;
321 unsigned long flags; 325 unsigned long flags;
322 unsigned int val[2]; 326 unsigned int val[2];
323 int cpu_num = raw_smp_processor_id(); 327 int cpu_num;
324 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 328
325 struct microcode_intel *mc_intel = uci->mc; 329 cpu_num = raw_smp_processor_id();
330 uci = ucode_cpu_info + cpu;
331 mc_intel = uci->mc;
326 332
327 /* We should bind the task to the CPU */ 333 /* We should bind the task to the CPU */
328 BUG_ON(cpu_num != cpu); 334 BUG_ON(cpu_num != cpu);
@@ -348,15 +354,17 @@ static void apply_microcode(int cpu)
348 spin_unlock_irqrestore(&microcode_update_lock, flags); 354 spin_unlock_irqrestore(&microcode_update_lock, flags);
349 if (val[1] != mc_intel->hdr.rev) { 355 if (val[1] != mc_intel->hdr.rev) {
350 printk(KERN_ERR "microcode: CPU%d update from revision " 356 printk(KERN_ERR "microcode: CPU%d update from revision "
351 "0x%x to 0x%x failed\n", cpu_num, uci->cpu_sig.rev, val[1]); 357 "0x%x to 0x%x failed\n",
358 cpu_num, uci->cpu_sig.rev, val[1]);
352 return; 359 return;
353 } 360 }
354 printk(KERN_INFO "microcode: CPU%d updated from revision " 361 printk(KERN_INFO "microcode: CPU%d updated from revision "
355 "0x%x to 0x%x, date = %04x-%02x-%02x \n", 362 "0x%x to 0x%x, date = %04x-%02x-%02x \n",
356 cpu_num, uci->cpu_sig.rev, val[1], 363 cpu_num, uci->cpu_sig.rev, val[1],
357 mc_intel->hdr.date & 0xffff, 364 mc_intel->hdr.date & 0xffff,
358 mc_intel->hdr.date >> 24, 365 mc_intel->hdr.date >> 24,
359 (mc_intel->hdr.date >> 16) & 0xff); 366 (mc_intel->hdr.date >> 16) & 0xff);
367
360 uci->cpu_sig.rev = val[1]; 368 uci->cpu_sig.rev = val[1];
361} 369}
362 370
@@ -404,18 +412,23 @@ static int generic_load_microcode(int cpu, void *data, size_t size,
404 leftover -= mc_size; 412 leftover -= mc_size;
405 } 413 }
406 414
407 if (new_mc) { 415 if (!new_mc)
408 if (!leftover) { 416 goto out;
409 if (uci->mc) 417
410 vfree(uci->mc); 418 if (leftover) {
411 uci->mc = (struct microcode_intel *)new_mc; 419 vfree(new_mc);
412 pr_debug("microcode: CPU%d found a matching microcode update with" 420 goto out;
413 " version 0x%x (current=0x%x)\n",
414 cpu, new_rev, uci->cpu_sig.rev);
415 } else
416 vfree(new_mc);
417 } 421 }
418 422
423 if (uci->mc)
424 vfree(uci->mc);
425 uci->mc = (struct microcode_intel *)new_mc;
426
427 pr_debug("microcode: CPU%d found a matching microcode update with"
428 " version 0x%x (current=0x%x)\n",
429 cpu, new_rev, uci->cpu_sig.rev);
430
431 out:
419 return (int)leftover; 432 return (int)leftover;
420} 433}
421 434
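
The generic_load_microcode() restructuring above flattens the nested ifs into early exits and makes the ownership rule explicit: a freshly vmalloc'ed image is installed only if the whole input parsed cleanly, and exactly one of the old and new buffers is freed. The control flow, reduced to its skeleton:

	if (!new_mc)
		goto out;		/* nothing matched: keep the old image */

	if (leftover) {
		vfree(new_mc);		/* truncated input: discard partial image */
		goto out;
	}

	vfree(uci->mc);			/* vfree(NULL) is a no-op */
	uci->mc = (struct microcode_intel *)new_mc;
out:
	return (int)leftover;
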
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 156f87582c6c..25e28087a3ee 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -325,7 +325,7 @@ void stop_this_cpu(void *dummy)
325 /* 325 /*
326 * Remove this CPU: 326 * Remove this CPU:
327 */ 327 */
328 cpu_clear(smp_processor_id(), cpu_online_map); 328 set_cpu_online(smp_processor_id(), false);
329 disable_local_APIC(); 329 disable_local_APIC();
330 330
331 for (;;) { 331 for (;;) {
@@ -475,12 +475,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
475 return 1; 475 return 1;
476} 476}
477 477
478static cpumask_t c1e_mask = CPU_MASK_NONE; 478static cpumask_var_t c1e_mask;
479static int c1e_detected; 479static int c1e_detected;
480 480
481void c1e_remove_cpu(int cpu) 481void c1e_remove_cpu(int cpu)
482{ 482{
483 cpu_clear(cpu, c1e_mask); 483 if (c1e_mask != NULL)
484 cpumask_clear_cpu(cpu, c1e_mask);
484} 485}
485 486
486/* 487/*
@@ -509,8 +510,8 @@ static void c1e_idle(void)
509 if (c1e_detected) { 510 if (c1e_detected) {
510 int cpu = smp_processor_id(); 511 int cpu = smp_processor_id();
511 512
512 if (!cpu_isset(cpu, c1e_mask)) { 513 if (!cpumask_test_cpu(cpu, c1e_mask)) {
513 cpu_set(cpu, c1e_mask); 514 cpumask_set_cpu(cpu, c1e_mask);
514 /* 515 /*
515 * Force broadcast so ACPI can not interfere. Needs 516 * Force broadcast so ACPI can not interfere. Needs
516 * to run with interrupts enabled as it uses 517 * to run with interrupts enabled as it uses
@@ -562,6 +563,15 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
562 pm_idle = default_idle; 563 pm_idle = default_idle;
563} 564}
564 565
566void __init init_c1e_mask(void)
567{
568 /* If we're using c1e_idle, we need to allocate c1e_mask. */
569 if (pm_idle == c1e_idle) {
570 alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
571 cpumask_clear(c1e_mask);
572 }
573}
574
565static int __init idle_setup(char *str) 575static int __init idle_setup(char *str)
566{ 576{
567 if (!str) 577 if (!str)
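
The c1e_mask hunks above add a detail the other conversions do not need: c1e_remove_cpu() can be called before init_c1e_mask() has allocated the mask, so it must be NULL-checked. Under CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t really is a pointer that starts out NULL; with the option off it is a one-element array and the comparison is compiled away. Sketched with a hypothetical mask:

static cpumask_var_t lazy_mask;		/* hypothetical; NULL until allocated */

void lazy_mask_remove_cpu(int cpu)
{
	if (lazy_mask != NULL)		/* allocation may not have run yet */
		cpumask_clear_cpu(cpu, lazy_mask);
}
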
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ef7d10170c30..58d24ef917d8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -101,11 +101,11 @@ EXPORT_SYMBOL(smp_num_siblings);
101DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; 101DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
102 102
103/* representing HT siblings of each logical CPU */ 103/* representing HT siblings of each logical CPU */
104DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); 104DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
105EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 105EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
106 106
107/* representing HT and core siblings of each logical CPU */ 107/* representing HT and core siblings of each logical CPU */
108DEFINE_PER_CPU(cpumask_t, cpu_core_map); 108DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
109EXPORT_PER_CPU_SYMBOL(cpu_core_map); 109EXPORT_PER_CPU_SYMBOL(cpu_core_map);
110 110
111/* Per CPU bogomips and other parameters */ 111/* Per CPU bogomips and other parameters */
@@ -115,11 +115,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
115atomic_t init_deasserted; 115atomic_t init_deasserted;
116 116
117#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) 117#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
118
119/* which logical CPUs are on which nodes */
120cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
121 { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
122EXPORT_SYMBOL(node_to_cpumask_map);
123/* which node each logical CPU is on */ 118/* which node each logical CPU is on */
124int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; 119int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
125EXPORT_SYMBOL(cpu_to_node_map); 120EXPORT_SYMBOL(cpu_to_node_map);
@@ -128,7 +123,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
128static void map_cpu_to_node(int cpu, int node) 123static void map_cpu_to_node(int cpu, int node)
129{ 124{
130 printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); 125 printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
131 cpumask_set_cpu(cpu, &node_to_cpumask_map[node]); 126 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
132 cpu_to_node_map[cpu] = node; 127 cpu_to_node_map[cpu] = node;
133} 128}
134 129
@@ -139,7 +134,7 @@ static void unmap_cpu_to_node(int cpu)
139 134
140 printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); 135 printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
141 for (node = 0; node < MAX_NUMNODES; node++) 136 for (node = 0; node < MAX_NUMNODES; node++)
142 cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]); 137 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
143 cpu_to_node_map[cpu] = 0; 138 cpu_to_node_map[cpu] = 0;
144} 139}
145#else /* !(CONFIG_NUMA && CONFIG_X86_32) */ 140#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -301,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
301 __flush_tlb_all(); 296 __flush_tlb_all();
302#endif 297#endif
303 298
304 /* This must be done before setting cpu_online_map */ 299 /* This must be done before setting cpu_online_mask */
305 set_cpu_sibling_map(raw_smp_processor_id()); 300 set_cpu_sibling_map(raw_smp_processor_id());
306 wmb(); 301 wmb();
307 302
@@ -334,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused)
334 cpu_idle(); 329 cpu_idle();
335} 330}
336 331
332#ifdef CONFIG_CPUMASK_OFFSTACK
333/* In this case, llc_shared_map is a pointer to a cpumask. */
334static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
335 const struct cpuinfo_x86 *src)
336{
337 struct cpumask *llc = dst->llc_shared_map;
338 *dst = *src;
339 dst->llc_shared_map = llc;
340}
341#else
342static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
343 const struct cpuinfo_x86 *src)
344{
345 *dst = *src;
346}
347#endif /* CONFIG_CPUMASK_OFFSTACK */
348
337/* 349/*
338 * The bootstrap kernel entry code has set these up. Save them for 350 * The bootstrap kernel entry code has set these up. Save them for
339 * a given CPU 351 * a given CPU
@@ -343,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id)
343{ 355{
344 struct cpuinfo_x86 *c = &cpu_data(id); 356 struct cpuinfo_x86 *c = &cpu_data(id);
345 357
346 *c = boot_cpu_data; 358 copy_cpuinfo_x86(c, &boot_cpu_data);
347 c->cpu_index = id; 359 c->cpu_index = id;
348 if (id != 0) 360 if (id != 0)
349 identify_secondary_cpu(c); 361 identify_secondary_cpu(c);
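
copy_cpuinfo_x86() above exists because struct cpuinfo_x86 now embeds a cpumask_var_t (llc_shared_map). With CONFIG_CPUMASK_OFFSTACK=y that field is a pointer, and the plain structure assignment formerly done in smp_store_cpu_info() would have copied the boot CPU's pointer into every secondary CPU's cpuinfo, aliasing a single shared mask. The save-and-restore trick, reduced to a sketch with hypothetical types:

struct foo {				/* stands in for struct cpuinfo_x86 */
	int data;
	cpumask_var_t mask;		/* a pointer under CPUMASK_OFFSTACK */
};

static void copy_foo(struct foo *dst, const struct foo *src)
{
	struct cpumask *saved = dst->mask;

	*dst = *src;			/* copies src->mask too ... */
	dst->mask = saved;		/* ... so put dst's own bitmap back */
}
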
@@ -367,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
367 cpumask_set_cpu(cpu, cpu_sibling_mask(i)); 379 cpumask_set_cpu(cpu, cpu_sibling_mask(i));
368 cpumask_set_cpu(i, cpu_core_mask(cpu)); 380 cpumask_set_cpu(i, cpu_core_mask(cpu));
369 cpumask_set_cpu(cpu, cpu_core_mask(i)); 381 cpumask_set_cpu(cpu, cpu_core_mask(i));
370 cpumask_set_cpu(i, &c->llc_shared_map); 382 cpumask_set_cpu(i, c->llc_shared_map);
371 cpumask_set_cpu(cpu, &o->llc_shared_map); 383 cpumask_set_cpu(cpu, o->llc_shared_map);
372 } 384 }
373 } 385 }
374 } else { 386 } else {
375 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); 387 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
376 } 388 }
377 389
378 cpumask_set_cpu(cpu, &c->llc_shared_map); 390 cpumask_set_cpu(cpu, c->llc_shared_map);
379 391
380 if (current_cpu_data.x86_max_cores == 1) { 392 if (current_cpu_data.x86_max_cores == 1) {
381 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); 393 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -386,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
386 for_each_cpu(i, cpu_sibling_setup_mask) { 398 for_each_cpu(i, cpu_sibling_setup_mask) {
387 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && 399 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
388 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { 400 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
389 cpumask_set_cpu(i, &c->llc_shared_map); 401 cpumask_set_cpu(i, c->llc_shared_map);
390 cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map); 402 cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
391 } 403 }
392 if (c->phys_proc_id == cpu_data(i).phys_proc_id) { 404 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
393 cpumask_set_cpu(i, cpu_core_mask(cpu)); 405 cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -425,12 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
425 if (sched_mc_power_savings || sched_smt_power_savings) 437 if (sched_mc_power_savings || sched_smt_power_savings)
426 return cpu_core_mask(cpu); 438 return cpu_core_mask(cpu);
427 else 439 else
428 return &c->llc_shared_map; 440 return c->llc_shared_map;
429}
430
431cpumask_t cpu_coregroup_map(int cpu)
432{
433 return *cpu_coregroup_mask(cpu);
434} 441}
435 442
436static void impress_friends(void) 443static void impress_friends(void)
@@ -897,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
897 */ 904 */
898static __init void disable_smp(void) 905static __init void disable_smp(void)
899{ 906{
900 /* use the read/write pointers to the present and possible maps */ 907 init_cpu_present(cpumask_of(0));
901 cpumask_copy(&cpu_present_map, cpumask_of(0)); 908 init_cpu_possible(cpumask_of(0));
902 cpumask_copy(&cpu_possible_map, cpumask_of(0));
903 smpboot_clear_io_apic_irqs(); 909 smpboot_clear_io_apic_irqs();
904 910
905 if (smp_found_config) 911 if (smp_found_config)
@@ -1031,6 +1037,8 @@ static void __init smp_cpu_index_default(void)
1031 */ 1037 */
1032void __init native_smp_prepare_cpus(unsigned int max_cpus) 1038void __init native_smp_prepare_cpus(unsigned int max_cpus)
1033{ 1039{
1040 unsigned int i;
1041
1034 preempt_disable(); 1042 preempt_disable();
1035 smp_cpu_index_default(); 1043 smp_cpu_index_default();
1036 current_cpu_data = boot_cpu_data; 1044 current_cpu_data = boot_cpu_data;
@@ -1044,6 +1052,14 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1044 boot_cpu_logical_apicid = logical_smp_processor_id(); 1052 boot_cpu_logical_apicid = logical_smp_processor_id();
1045#endif 1053#endif
1046 current_thread_info()->cpu = 0; /* needed? */ 1054 current_thread_info()->cpu = 0; /* needed? */
1055 for_each_possible_cpu(i) {
1056 alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1057 alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1058 alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
1059 cpumask_clear(per_cpu(cpu_core_map, i));
1060 cpumask_clear(per_cpu(cpu_sibling_map, i));
1061 cpumask_clear(cpu_data(i).llc_shared_map);
1062 }
1047 set_cpu_sibling_map(0); 1063 set_cpu_sibling_map(0);
1048 1064
1049 enable_IR_x2apic(); 1065 enable_IR_x2apic();
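
The loop added to native_smp_prepare_cpus() above reflects that DEFINE_PER_CPU(cpumask_var_t, ...) only reserves per-CPU pointers; each CPU's bitmap must still be allocated and cleared before set_cpu_sibling_map() touches it. The same pattern in isolation (mask name hypothetical):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(cpumask_var_t, my_mask);	/* hypothetical */

static void __init alloc_my_masks(void)
{
	unsigned int i;

	/* boot-time allocation; failure is not handled, as above */
	for_each_possible_cpu(i) {
		alloc_cpumask_var(&per_cpu(my_mask, i), GFP_KERNEL);
		cpumask_clear(per_cpu(my_mask, i));
	}
}
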
@@ -1132,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
1132 1148
1133 1149
1134/* 1150/*
 1135 * cpu_possible_map should be static, it cannot change as CPUs 1151 * cpu_possible_mask should be static, it cannot change as CPUs
 1136 * are onlined, or offlined. The reason is per-cpu data-structures 1152 * are onlined, or offlined. The reason is per-cpu data-structures
 1137 * are allocated by some modules at init time, and don't expect to 1153 * are allocated by some modules at init time, and don't expect to
 1138 * do this dynamically on cpu arrival/departure. 1154 * do this dynamically on cpu arrival/departure.
 1139 * cpu_present_map on the other hand can change dynamically. 1155 * cpu_present_mask on the other hand can change dynamically.
 1140 * When cpu_hotplug is not compiled in, we resort to the current 1156 * When cpu_hotplug is not compiled in, we resort to the current
1141 * behaviour, which is cpu_possible == cpu_present. 1157 * behaviour, which is cpu_possible == cpu_present.
1142 * - Ashok Raj 1158 * - Ashok Raj
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 79c073247284..deb5ebb32c3b 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -275,6 +275,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
275 return NULL; 275 return NULL;
276} 276}
277 277
278static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
279
278/** 280/**
279 * uv_flush_tlb_others - globally purge translation cache of a virtual 281 * uv_flush_tlb_others - globally purge translation cache of a virtual
280 * address or all TLB's 282 * address or all TLB's
@@ -304,8 +306,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
304 struct mm_struct *mm, 306 struct mm_struct *mm,
305 unsigned long va, unsigned int cpu) 307 unsigned long va, unsigned int cpu)
306{ 308{
307 static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); 309 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
308 struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
309 int i; 310 int i;
310 int bit; 311 int bit;
311 int blade; 312 int blade;
@@ -755,6 +756,10 @@ static int __init uv_bau_init(void)
755 if (!is_uv_system()) 756 if (!is_uv_system())
756 return 0; 757 return 0;
757 758
759 for_each_possible_cpu(cur_cpu)
760 alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
761 GFP_KERNEL, cpu_to_node(cur_cpu));
762
758 uv_bau_retry_limit = 1; 763 uv_bau_retry_limit = 1;
759 uv_nshift = uv_hub_info->n_val; 764 uv_nshift = uv_hub_info->n_val;
760 uv_mmask = (1UL << uv_hub_info->n_val) - 1; 765 uv_mmask = (1UL << uv_hub_info->n_val) - 1;
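
tlb_uv.c uses the node-aware variant: alloc_cpumask_var_node() places each CPU's bitmap on that CPU's own NUMA node, which matters for a mask written in the TLB-shootdown path. A sketch of the idiom with a hypothetical per-CPU mask:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(cpumask_var_t, my_flush_mask);	/* hypothetical */

static int __init alloc_flush_masks(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (!alloc_cpumask_var_node(&per_cpu(my_flush_mask, cpu),
					    GFP_KERNEL, cpu_to_node(cpu)))
			return -ENOMEM;
	return 0;
}
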
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 08537747cb58..fdd30d08ab52 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
14mmiotrace-y := kmmio.o pf_in.o mmio-mod.o 14mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
15obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o 15obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
16 16
17obj-$(CONFIG_NUMA) += numa_$(BITS).o 17obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o
18obj-$(CONFIG_K8_NUMA) += k8topology_64.o 18obj-$(CONFIG_K8_NUMA) += k8topology_64.o
19obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o 19obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o
20 20
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 2c4baa88f2cb..c9342ed8b402 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -378,27 +378,34 @@ static void clear_trace_list(void)
378} 378}
379 379
380#ifdef CONFIG_HOTPLUG_CPU 380#ifdef CONFIG_HOTPLUG_CPU
381static cpumask_t downed_cpus; 381static cpumask_var_t downed_cpus;
382 382
383static void enter_uniprocessor(void) 383static void enter_uniprocessor(void)
384{ 384{
385 int cpu; 385 int cpu;
386 int err; 386 int err;
387 387
388 if (downed_cpus == NULL &&
389 !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
390 pr_notice(NAME "Failed to allocate mask\n");
391 goto out;
392 }
393
388 get_online_cpus(); 394 get_online_cpus();
389 downed_cpus = cpu_online_map; 395 cpumask_copy(downed_cpus, cpu_online_mask);
390 cpu_clear(first_cpu(cpu_online_map), downed_cpus); 396 cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
391 if (num_online_cpus() > 1) 397 if (num_online_cpus() > 1)
392 pr_notice(NAME "Disabling non-boot CPUs...\n"); 398 pr_notice(NAME "Disabling non-boot CPUs...\n");
393 put_online_cpus(); 399 put_online_cpus();
394 400
395 for_each_cpu_mask(cpu, downed_cpus) { 401 for_each_cpu(cpu, downed_cpus) {
396 err = cpu_down(cpu); 402 err = cpu_down(cpu);
397 if (!err) 403 if (!err)
398 pr_info(NAME "CPU%d is down.\n", cpu); 404 pr_info(NAME "CPU%d is down.\n", cpu);
399 else 405 else
400 pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err); 406 pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
401 } 407 }
408out:
402 if (num_online_cpus() > 1) 409 if (num_online_cpus() > 1)
403 pr_warning(NAME "multiple CPUs still online, " 410 pr_warning(NAME "multiple CPUs still online, "
404 "may miss events.\n"); 411 "may miss events.\n");
@@ -411,10 +418,10 @@ static void __ref leave_uniprocessor(void)
411 int cpu; 418 int cpu;
412 int err; 419 int err;
413 420
414 if (cpus_weight(downed_cpus) == 0) 421 if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
415 return; 422 return;
416 pr_notice(NAME "Re-enabling CPUs...\n"); 423 pr_notice(NAME "Re-enabling CPUs...\n");
417 for_each_cpu_mask(cpu, downed_cpus) { 424 for_each_cpu(cpu, downed_cpus) {
418 err = cpu_up(cpu); 425 err = cpu_up(cpu);
419 if (!err) 426 if (!err)
420 pr_info(NAME "enabled CPU%d.\n", cpu); 427 pr_info(NAME "enabled CPU%d.\n", cpu);
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
new file mode 100644
index 000000000000..550df481accd
--- /dev/null
+++ b/arch/x86/mm/numa.c
@@ -0,0 +1,67 @@
1/* Common code for 32 and 64-bit NUMA */
2#include <linux/topology.h>
3#include <linux/module.h>
4#include <linux/bootmem.h>
5
6#ifdef CONFIG_DEBUG_PER_CPU_MAPS
7# define DBG(x...) printk(KERN_DEBUG x)
8#else
9# define DBG(x...)
10#endif
11
12/*
13 * Which logical CPUs are on which nodes
14 */
15cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
16EXPORT_SYMBOL(node_to_cpumask_map);
17
18/*
19 * Allocate node_to_cpumask_map based on number of available nodes
20 * Requires node_possible_map to be valid.
21 *
 22 * Note: cpumask_of_node() is not valid until after this is done.
23 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
24 */
25void __init setup_node_to_cpumask_map(void)
26{
27 unsigned int node, num = 0;
28
29 /* setup nr_node_ids if not done yet */
30 if (nr_node_ids == MAX_NUMNODES) {
31 for_each_node_mask(node, node_possible_map)
32 num = node;
33 nr_node_ids = num + 1;
34 }
35
36 /* allocate the map */
37 for (node = 0; node < nr_node_ids; node++)
38 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
39
40 /* cpumask_of_node() will now work */
41 pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
42}
43
44#ifdef CONFIG_DEBUG_PER_CPU_MAPS
45/*
46 * Returns a pointer to the bitmask of CPUs on Node 'node'.
47 */
48const struct cpumask *cpumask_of_node(int node)
49{
50 if (node >= nr_node_ids) {
51 printk(KERN_WARNING
52 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
53 node, nr_node_ids);
54 dump_stack();
55 return cpu_none_mask;
56 }
57 if (node_to_cpumask_map[node] == NULL) {
58 printk(KERN_WARNING
59 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
60 node);
61 dump_stack();
62 return cpu_online_mask;
63 }
64 return node_to_cpumask_map[node];
65}
66EXPORT_SYMBOL(cpumask_of_node);
67#endif
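
setup_node_to_cpumask_map() above runs before the slab allocator is available, so it uses alloc_bootmem_cpumask_var(), which carves the bitmaps out of bootmem and does not return failure (bootmem panics on exhaustion rather than returning NULL). The idiom in isolation, with a hypothetical array:

#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>

static cpumask_var_t early_masks[MAX_NUMNODES];	/* hypothetical */

static void __init alloc_early_masks(void)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&early_masks[node]);
}
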
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 64c9cf043cdd..d73aaa892371 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -20,12 +20,6 @@
20#include <asm/acpi.h> 20#include <asm/acpi.h>
21#include <asm/k8.h> 21#include <asm/k8.h>
22 22
23#ifdef CONFIG_DEBUG_PER_CPU_MAPS
24# define DBG(x...) printk(KERN_DEBUG x)
25#else
26# define DBG(x...)
27#endif
28
29struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 23struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
30EXPORT_SYMBOL(node_data); 24EXPORT_SYMBOL(node_data);
31 25
@@ -49,12 +43,6 @@ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
49EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); 43EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
50 44
51/* 45/*
52 * Which logical CPUs are on which nodes
53 */
54cpumask_t *node_to_cpumask_map;
55EXPORT_SYMBOL(node_to_cpumask_map);
56
57/*
58 * Given a shift value, try to populate memnodemap[] 46 * Given a shift value, try to populate memnodemap[]
59 * Returns : 47 * Returns :
60 * 1 if OK 48 * 1 if OK
@@ -661,36 +649,6 @@ void __init init_cpu_to_node(void)
661#endif 649#endif
662 650
663 651
664/*
665 * Allocate node_to_cpumask_map based on number of available nodes
666 * Requires node_possible_map to be valid.
667 *
668 * Note: node_to_cpumask() is not valid until after this is done.
669 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
670 */
671void __init setup_node_to_cpumask_map(void)
672{
673 unsigned int node, num = 0;
674 cpumask_t *map;
675
676 /* setup nr_node_ids if not done yet */
677 if (nr_node_ids == MAX_NUMNODES) {
678 for_each_node_mask(node, node_possible_map)
679 num = node;
680 nr_node_ids = num + 1;
681 }
682
683 /* allocate the map */
684 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
685 DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
686
687 pr_debug("Node to cpumask map at %p for %d nodes\n",
688 map, nr_node_ids);
689
690 /* node_to_cpumask() will now work */
691 node_to_cpumask_map = map;
692}
693
694void __cpuinit numa_set_node(int cpu, int node) 652void __cpuinit numa_set_node(int cpu, int node)
695{ 653{
696 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); 654 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
@@ -723,12 +681,12 @@ void __cpuinit numa_clear_node(int cpu)
723 681
724void __cpuinit numa_add_cpu(int cpu) 682void __cpuinit numa_add_cpu(int cpu)
725{ 683{
726 cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 684 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
727} 685}
728 686
729void __cpuinit numa_remove_cpu(int cpu) 687void __cpuinit numa_remove_cpu(int cpu)
730{ 688{
731 cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 689 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
732} 690}
733 691
734#else /* CONFIG_DEBUG_PER_CPU_MAPS */ 692#else /* CONFIG_DEBUG_PER_CPU_MAPS */
@@ -739,20 +697,20 @@ void __cpuinit numa_remove_cpu(int cpu)
739static void __cpuinit numa_set_cpumask(int cpu, int enable) 697static void __cpuinit numa_set_cpumask(int cpu, int enable)
740{ 698{
741 int node = early_cpu_to_node(cpu); 699 int node = early_cpu_to_node(cpu);
742 cpumask_t *mask; 700 struct cpumask *mask;
743 char buf[64]; 701 char buf[64];
744 702
745 if (node_to_cpumask_map == NULL) { 703 mask = node_to_cpumask_map[node];
746 printk(KERN_ERR "node_to_cpumask_map NULL\n"); 704 if (mask == NULL) {
705 printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
747 dump_stack(); 706 dump_stack();
748 return; 707 return;
749 } 708 }
750 709
751 mask = &node_to_cpumask_map[node];
752 if (enable) 710 if (enable)
753 cpu_set(cpu, *mask); 711 cpumask_set_cpu(cpu, mask);
754 else 712 else
755 cpu_clear(cpu, *mask); 713 cpumask_clear_cpu(cpu, mask);
756 714
757 cpulist_scnprintf(buf, sizeof(buf), mask); 715 cpulist_scnprintf(buf, sizeof(buf), mask);
758 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 716 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
@@ -799,59 +757,6 @@ int early_cpu_to_node(int cpu)
799 return per_cpu(x86_cpu_to_node_map, cpu); 757 return per_cpu(x86_cpu_to_node_map, cpu);
800} 758}
801 759
802
803/* empty cpumask */
804static const cpumask_t cpu_mask_none;
805
806/*
807 * Returns a pointer to the bitmask of CPUs on Node 'node'.
808 */
809const cpumask_t *cpumask_of_node(int node)
810{
811 if (node_to_cpumask_map == NULL) {
812 printk(KERN_WARNING
813 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
814 node);
815 dump_stack();
816 return (const cpumask_t *)&cpu_online_map;
817 }
818 if (node >= nr_node_ids) {
819 printk(KERN_WARNING
820 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
821 node, nr_node_ids);
822 dump_stack();
823 return &cpu_mask_none;
824 }
825 return &node_to_cpumask_map[node];
826}
827EXPORT_SYMBOL(cpumask_of_node);
828
829/*
830 * Returns a bitmask of CPUs on Node 'node'.
831 *
832 * Side note: this function creates the returned cpumask on the stack
833 * so with a high NR_CPUS count, excessive stack space is used. The
834 * node_to_cpumask_ptr function should be used whenever possible.
835 */
836cpumask_t node_to_cpumask(int node)
837{
838 if (node_to_cpumask_map == NULL) {
839 printk(KERN_WARNING
840 "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
841 dump_stack();
842 return cpu_online_map;
843 }
844 if (node >= nr_node_ids) {
845 printk(KERN_WARNING
846 "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
847 node, nr_node_ids);
848 dump_stack();
849 return cpu_mask_none;
850 }
851 return node_to_cpumask_map[node];
852}
853EXPORT_SYMBOL(node_to_cpumask);
854
855/* 760/*
856 * --------- end of debug versions of the numa functions --------- 761 * --------- end of debug versions of the numa functions ---------
857 */ 762 */
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 4c4a51c90bc2..819b131fd752 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -380,7 +380,7 @@ static unsigned int get_stagger(void)
380{ 380{
381#ifdef CONFIG_SMP 381#ifdef CONFIG_SMP
382 int cpu = smp_processor_id(); 382 int cpu = smp_processor_id();
383 return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); 383 return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
384#endif 384#endif
385 return 0; 385 return 0;
386} 386}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8d470562ffc9..585a6e330837 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
158 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); 158 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
159 if (rc >= 0) { 159 if (rc >= 0) {
160 num_processors++; 160 num_processors++;
161 cpu_set(i, cpu_possible_map); 161 set_cpu_possible(i, true);
162 } 162 }
163 } 163 }
164} 164}
@@ -197,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
197 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { 197 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
198 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) 198 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
199 continue; 199 continue;
200 cpu_clear(cpu, cpu_possible_map); 200 set_cpu_possible(cpu, false);
201 } 201 }
202 202
203 for_each_possible_cpu (cpu) { 203 for_each_possible_cpu (cpu) {
@@ -210,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
210 if (IS_ERR(idle)) 210 if (IS_ERR(idle))
211 panic("failed fork for CPU %d", cpu); 211 panic("failed fork for CPU %d", cpu);
212 212
213 cpu_set(cpu, cpu_present_map); 213 set_cpu_present(cpu, true);
214 } 214 }
215} 215}
216 216
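
The xen/smp.c hunks, like the earlier stop_this_cpu() change, replace direct writes to cpu_possible_map/cpu_present_map/cpu_online_map with the set_cpu_*() accessors, which update the global masks through one place and let the underlying maps eventually become private to the core cpumask code. The mapping between old and new forms:

set_cpu_possible(cpu, true);	/* was: cpu_set(cpu, cpu_possible_map) */
set_cpu_possible(cpu, false);	/* was: cpu_clear(cpu, cpu_possible_map) */
set_cpu_present(cpu, true);	/* was: cpu_set(cpu, cpu_present_map) */
set_cpu_online(cpu, false);	/* was: cpu_clear(cpu, cpu_online_map) */
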