Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/smp.h | 1
-rw-r--r--  arch/alpha/kernel/irq.c | 2
-rw-r--r--  arch/alpha/kernel/process.c | 2
-rw-r--r--  arch/alpha/kernel/smp.c | 7
-rw-r--r--  arch/alpha/kernel/sys_dp264.c | 8
-rw-r--r--  arch/alpha/kernel/sys_titan.c | 4
-rw-r--r--  arch/arm/common/gic.c | 4
-rw-r--r--  arch/arm/kernel/irq.c | 2
-rw-r--r--  arch/arm/kernel/smp.c | 10
-rw-r--r--  arch/arm/mach-at91/at91rm9200_time.c | 3
-rw-r--r--  arch/arm/mach-at91/at91sam926x_time.c | 2
-rw-r--r--  arch/arm/mach-davinci/time.c | 2
-rw-r--r--  arch/arm/mach-imx/time.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/common.c | 2
-rw-r--r--  arch/arm/mach-msm/timer.c | 2
-rw-r--r--  arch/arm/mach-ns9xxx/time-ns9360.c | 2
-rw-r--r--  arch/arm/mach-omap1/time.c | 2
-rw-r--r--  arch/arm/mach-omap1/timer32k.c | 2
-rw-r--r--  arch/arm/mach-omap2/timer-gp.c | 2
-rw-r--r--  arch/arm/mach-pxa/time.c | 2
-rw-r--r--  arch/arm/mach-realview/core.c | 2
-rw-r--r--  arch/arm/mach-realview/localtimer.c | 4
-rw-r--r--  arch/arm/mach-sa1100/time.c | 2
-rw-r--r--  arch/arm/mach-versatile/core.c | 2
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.c | 4
-rw-r--r--  arch/arm/plat-mxc/time.c | 2
-rw-r--r--  arch/arm/plat-orion/time.c | 2
-rw-r--r--  arch/avr32/kernel/time.c | 2
-rw-r--r--  arch/blackfin/kernel/time-ts.c | 2
-rw-r--r--  arch/cris/arch-v32/kernel/irq.c | 4
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c | 4
-rw-r--r--  arch/cris/include/asm/smp.h | 1
-rw-r--r--  arch/ia64/hp/sim/hpsim_irq.c | 2
-rw-r--r--  arch/ia64/include/asm/smp.h | 1
-rw-r--r--  arch/ia64/kernel/iosapic.c | 12
-rw-r--r--  arch/ia64/kernel/irq.c | 9
-rw-r--r--  arch/ia64/kernel/msi_ia64.c | 12
-rw-r--r--  arch/ia64/kernel/smpboot.c | 10
-rw-r--r--  arch/ia64/kernel/topology.c | 2
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 6
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c | 7
-rw-r--r--  arch/m32r/Kconfig | 1
-rw-r--r--  arch/m32r/kernel/smpboot.c | 6
-rw-r--r--  arch/m68knommu/platform/coldfire/pit.c | 2
-rw-r--r--  arch/mips/include/asm/irq.h | 3
-rw-r--r--  arch/mips/include/asm/smp.h | 3
-rw-r--r--  arch/mips/jazz/irq.c | 2
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c | 4
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c | 2
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c | 2
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 2
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c | 4
-rw-r--r--  arch/mips/kernel/cevt-smtc.c | 2
-rw-r--r--  arch/mips/kernel/cevt-txx9.c | 2
-rw-r--r--  arch/mips/kernel/i8253.c | 2
-rw-r--r--  arch/mips/kernel/irq-gic.c | 6
-rw-r--r--  arch/mips/kernel/smp-cmp.c | 6
-rw-r--r--  arch/mips/kernel/smp-mt.c | 2
-rw-r--r--  arch/mips/kernel/smp.c | 7
-rw-r--r--  arch/mips/kernel/smtc.c | 6
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c | 6
-rw-r--r--  arch/mips/nxp/pnx8550/common/time.c | 1
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c | 6
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c | 2
-rw-r--r--  arch/mips/sibyte/bcm1480/irq.c | 8
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c | 8
-rw-r--r--  arch/mips/sibyte/sb1250/irq.c | 8
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c | 8
-rw-r--r--  arch/mips/sni/time.c | 2
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/kernel/irq.c | 6
-rw-r--r--  arch/parisc/kernel/smp.c | 15
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/smp.c | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 4
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 4
-rw-r--r--  arch/powerpc/sysdev/mpic.h | 2
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/kernel/smp.c | 6
-rw-r--r--  arch/s390/kernel/time.c | 2
-rw-r--r--  arch/sh/include/asm/smp.h | 2
-rw-r--r--  arch/sh/kernel/smp.c | 10
-rw-r--r--  arch/sh/kernel/timers/timer-broadcast.c | 2
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c | 2
-rw-r--r--  arch/sparc/include/asm/smp_32.h | 2
-rw-r--r--  arch/sparc/kernel/smp.c | 6
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 4
-rw-r--r--  arch/sparc64/kernel/irq.c | 11
-rw-r--r--  arch/sparc64/kernel/of_device.c | 2
-rw-r--r--  arch/sparc64/kernel/pci_msi.c | 2
-rw-r--r--  arch/sparc64/kernel/smp.c | 4
-rw-r--r--  arch/sparc64/kernel/time.c | 2
-rw-r--r--  arch/um/kernel/smp.c | 7
-rw-r--r--  arch/um/kernel/time.c | 2
-rw-r--r--  arch/x86/kernel/apic.c | 8
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 4
-rw-r--r--  arch/x86/kernel/hpet.c | 8
-rw-r--r--  arch/x86/kernel/i8253.c | 2
-rw-r--r--  arch/x86/kernel/io_apic.c | 95
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/irq_64.c | 2
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 2
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 2
-rw-r--r--  arch/x86/kernel/smpboot.c | 6
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r--  arch/x86/lguest/boot.c | 2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 7
-rw-r--r--  arch/x86/xen/time.c | 2
110 files changed, 213 insertions, 308 deletions
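
The hunks below repeatedly apply one pattern: irq-affinity callbacks and clockevent ->cpumask users stop taking or storing a cpumask_t by value and instead work with a const struct cpumask * (cpumask_of_cpu(cpu) becomes cpumask_of(cpu), first_cpu() becomes cpumask_first(), and so on). The stand-alone C sketch below only illustrates that by-value vs. by-pointer difference; struct cpumask, cpumask_of() and NR_CPUS here are simplified stand-ins, not the real <linux/cpumask.h> definitions.

    #include <stdio.h>

    #define NR_CPUS 8

    struct cpumask { unsigned long bits; };   /* stand-in for the kernel type */

    /* old style: the whole mask is copied on every call */
    static void old_set_affinity(unsigned int irq, struct cpumask mask)
    {
            printf("irq %u -> mask %#lx (copied %zu bytes)\n",
                   irq, mask.bits, sizeof(mask));
    }

    /* new style: a pointer to a shared, read-only mask */
    static void new_set_affinity(unsigned int irq, const struct cpumask *mask)
    {
            printf("irq %u -> mask %#lx (no copy)\n", irq, mask->bits);
    }

    /* simplified cpumask_of(): pointer to a static single-CPU mask */
    static const struct cpumask *cpumask_of(unsigned int cpu)
    {
            static struct cpumask masks[NR_CPUS];

            masks[cpu].bits = 1UL << cpu;
            return &masks[cpu];
    }

    int main(void)
    {
            struct cpumask m = { .bits = 1UL << 2 };

            old_set_affinity(10, m);                /* by-value copy */
            new_set_affinity(10, cpumask_of(2));    /* pointer, no copy */
            return 0;
    }

The motivation for the conversion is that cpumask_t scales with NR_CPUS, so passing or embedding it by value copies a potentially large structure, whereas a const pointer lets constant masks such as cpumask_of(cpu) be shared read-only.
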
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 544c69af8168..547e90951cec 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -45,7 +45,6 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
45#define raw_smp_processor_id() (current_thread_info()->cpu) 45#define raw_smp_processor_id() (current_thread_info()->cpu)
46 46
47extern int smp_num_cpus; 47extern int smp_num_cpus;
48#define cpu_possible_map cpu_present_map
49 48
50extern void arch_send_call_function_single_ipi(int cpu); 49extern void arch_send_call_function_single_ipi(int cpu);
51extern void arch_send_call_function_ipi(cpumask_t mask); 50extern void arch_send_call_function_ipi(cpumask_t mask);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c626a821cdcb..d0f1620007f7 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
55 last_cpu = cpu; 55 last_cpu = cpu;
56 56
57 irq_desc[irq].affinity = cpumask_of_cpu(cpu); 57 irq_desc[irq].affinity = cpumask_of_cpu(cpu);
58 irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu)); 58 irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
59 return 0; 59 return 0;
60} 60}
61#endif /* CONFIG_SMP */ 61#endif /* CONFIG_SMP */
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 351407e07e71..f238370c907d 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -94,6 +94,7 @@ common_shutdown_1(void *generic_ptr)
94 flags |= 0x00040000UL; /* "remain halted" */ 94 flags |= 0x00040000UL; /* "remain halted" */
95 *pflags = flags; 95 *pflags = flags;
96 cpu_clear(cpuid, cpu_present_map); 96 cpu_clear(cpuid, cpu_present_map);
97 cpu_clear(cpuid, cpu_possible_map);
97 halt(); 98 halt();
98 } 99 }
99#endif 100#endif
@@ -120,6 +121,7 @@ common_shutdown_1(void *generic_ptr)
120#ifdef CONFIG_SMP 121#ifdef CONFIG_SMP
121 /* Wait for the secondaries to halt. */ 122 /* Wait for the secondaries to halt. */
122 cpu_clear(boot_cpuid, cpu_present_map); 123 cpu_clear(boot_cpuid, cpu_present_map);
124 cpu_clear(boot_cpuid, cpu_possible_map);
123 while (cpus_weight(cpu_present_map)) 125 while (cpus_weight(cpu_present_map))
124 barrier(); 126 barrier();
125#endif 127#endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index cf7da10097bb..d953e510f68d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -70,11 +70,6 @@ enum ipi_message_type {
70/* Set to a secondary's cpuid when it comes online. */ 70/* Set to a secondary's cpuid when it comes online. */
71static int smp_secondary_alive __devinitdata = 0; 71static int smp_secondary_alive __devinitdata = 0;
72 72
73/* Which cpus ids came online. */
74cpumask_t cpu_online_map;
75
76EXPORT_SYMBOL(cpu_online_map);
77
78int smp_num_probed; /* Internal processor count */ 73int smp_num_probed; /* Internal processor count */
79int smp_num_cpus = 1; /* Number that came online. */ 74int smp_num_cpus = 1; /* Number that came online. */
80EXPORT_SYMBOL(smp_num_cpus); 75EXPORT_SYMBOL(smp_num_cpus);
@@ -440,6 +435,7 @@ setup_smp(void)
440 ((char *)cpubase + i*hwrpb->processor_size); 435 ((char *)cpubase + i*hwrpb->processor_size);
441 if ((cpu->flags & 0x1cc) == 0x1cc) { 436 if ((cpu->flags & 0x1cc) == 0x1cc) {
442 smp_num_probed++; 437 smp_num_probed++;
438 cpu_set(i, cpu_possible_map);
443 cpu_set(i, cpu_present_map); 439 cpu_set(i, cpu_present_map);
444 cpu->pal_revision = boot_cpu_palrev; 440 cpu->pal_revision = boot_cpu_palrev;
445 } 441 }
@@ -473,6 +469,7 @@ smp_prepare_cpus(unsigned int max_cpus)
473 469
474 /* Nothing to do on a UP box, or when told not to. */ 470 /* Nothing to do on a UP box, or when told not to. */
475 if (smp_num_probed == 1 || max_cpus == 0) { 471 if (smp_num_probed == 1 || max_cpus == 0) {
472 cpu_possible_map = cpumask_of_cpu(boot_cpuid);
476 cpu_present_map = cpumask_of_cpu(boot_cpuid); 473 cpu_present_map = cpumask_of_cpu(boot_cpuid);
477 printk(KERN_INFO "SMP mode deactivated.\n"); 474 printk(KERN_INFO "SMP mode deactivated.\n");
478 return; 475 return;
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index c71b0fd7a61f..ab44c164d9d4 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -177,19 +177,19 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
177} 177}
178 178
179static void 179static void
180dp264_set_affinity(unsigned int irq, cpumask_t affinity) 180dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
181{ 181{
182 spin_lock(&dp264_irq_lock); 182 spin_lock(&dp264_irq_lock);
183 cpu_set_irq_affinity(irq, affinity); 183 cpu_set_irq_affinity(irq, *affinity);
184 tsunami_update_irq_hw(cached_irq_mask); 184 tsunami_update_irq_hw(cached_irq_mask);
185 spin_unlock(&dp264_irq_lock); 185 spin_unlock(&dp264_irq_lock);
186} 186}
187 187
188static void 188static void
189clipper_set_affinity(unsigned int irq, cpumask_t affinity) 189clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
190{ 190{
191 spin_lock(&dp264_irq_lock); 191 spin_lock(&dp264_irq_lock);
192 cpu_set_irq_affinity(irq - 16, affinity); 192 cpu_set_irq_affinity(irq - 16, *affinity);
193 tsunami_update_irq_hw(cached_irq_mask); 193 tsunami_update_irq_hw(cached_irq_mask);
194 spin_unlock(&dp264_irq_lock); 194 spin_unlock(&dp264_irq_lock);
195} 195}
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 52c91ccc1648..27f840a4ad3d 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -158,10 +158,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
158} 158}
159 159
160static void 160static void
161titan_set_irq_affinity(unsigned int irq, cpumask_t affinity) 161titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
162{ 162{
163 spin_lock(&titan_irq_lock); 163 spin_lock(&titan_irq_lock);
164 titan_cpu_set_irq_affinity(irq - 16, affinity); 164 titan_cpu_set_irq_affinity(irq - 16, *affinity);
165 titan_update_irq_hw(titan_cached_irq_mask); 165 titan_update_irq_hw(titan_cached_irq_mask);
166 spin_unlock(&titan_irq_lock); 166 spin_unlock(&titan_irq_lock);
167} 167}
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 7fc9860a97d7..c6884ba1d5ed 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -109,11 +109,11 @@ static void gic_unmask_irq(unsigned int irq)
109} 109}
110 110
111#ifdef CONFIG_SMP 111#ifdef CONFIG_SMP
112static void gic_set_cpu(unsigned int irq, cpumask_t mask_val) 112static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
113{ 113{
114 void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3); 114 void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
115 unsigned int shift = (irq % 4) * 8; 115 unsigned int shift = (irq % 4) * 8;
116 unsigned int cpu = first_cpu(mask_val); 116 unsigned int cpu = cpumask_first(mask_val);
117 u32 val; 117 u32 val;
118 118
119 spin_lock(&irq_controller_lock); 119 spin_lock(&irq_controller_lock);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2f3eb795fa6e..7141cee1fab7 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -174,7 +174,7 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
174 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu); 174 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
175 175
176 spin_lock_irq(&desc->lock); 176 spin_lock_irq(&desc->lock);
177 desc->chip->set_affinity(irq, cpumask_of_cpu(cpu)); 177 desc->chip->set_affinity(irq, cpumask_of(cpu));
178 spin_unlock_irq(&desc->lock); 178 spin_unlock_irq(&desc->lock);
179} 179}
180 180
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e42a749a56dd..bd905c0a7365 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -34,16 +34,6 @@
34#include <asm/ptrace.h> 34#include <asm/ptrace.h>
35 35
36/* 36/*
37 * bitmask of present and online CPUs.
38 * The present bitmask indicates that the CPU is physically present.
39 * The online bitmask indicates that the CPU is up and running.
40 */
41cpumask_t cpu_possible_map;
42EXPORT_SYMBOL(cpu_possible_map);
43cpumask_t cpu_online_map;
44EXPORT_SYMBOL(cpu_online_map);
45
46/*
47 * as from 2.5, kernels no longer have an init_tasks structure 37 * as from 2.5, kernels no longer have an init_tasks structure
48 * so we need some other way of telling a new secondary core 38 * so we need some other way of telling a new secondary core
49 * where to place its SVC stack 39 * where to place its SVC stack
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index a72e798a2a40..72f51d39202c 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -169,7 +169,6 @@ static struct clock_event_device clkevt = {
169 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 169 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
170 .shift = 32, 170 .shift = 32,
171 .rating = 150, 171 .rating = 150,
172 .cpumask = CPU_MASK_CPU0,
173 .set_next_event = clkevt32k_next_event, 172 .set_next_event = clkevt32k_next_event,
174 .set_mode = clkevt32k_mode, 173 .set_mode = clkevt32k_mode,
175}; 174};
@@ -197,7 +196,7 @@ void __init at91rm9200_timer_init(void)
197 clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); 196 clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
198 clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); 197 clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
199 clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; 198 clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
200 clkevt.cpumask = cpumask_of_cpu(0); 199 clkevt.cpumask = cpumask_of(0);
201 clockevents_register_device(&clkevt); 200 clockevents_register_device(&clkevt);
202 201
203 /* register clocksource */ 202 /* register clocksource */
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 122fd77ed580..b63e1d5f1bad 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -91,7 +91,6 @@ static struct clock_event_device pit_clkevt = {
91 .features = CLOCK_EVT_FEAT_PERIODIC, 91 .features = CLOCK_EVT_FEAT_PERIODIC,
92 .shift = 32, 92 .shift = 32,
93 .rating = 100, 93 .rating = 100,
94 .cpumask = CPU_MASK_CPU0,
95 .set_mode = pit_clkevt_mode, 94 .set_mode = pit_clkevt_mode,
96}; 95};
97 96
@@ -173,6 +172,7 @@ static void __init at91sam926x_pit_init(void)
173 172
174 /* Set up and register clockevents */ 173 /* Set up and register clockevents */
175 pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift); 174 pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
175 pit_clkevt.cpumask = cpumask_of(0);
176 clockevents_register_device(&pit_clkevt); 176 clockevents_register_device(&pit_clkevt);
177} 177}
178 178
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 3b9a296b5c4b..f8bcd29d17a6 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -322,7 +322,7 @@ static void __init davinci_timer_init(void)
322 clockevent_davinci.min_delta_ns = 322 clockevent_davinci.min_delta_ns =
323 clockevent_delta2ns(1, &clockevent_davinci); 323 clockevent_delta2ns(1, &clockevent_davinci);
324 324
325 clockevent_davinci.cpumask = cpumask_of_cpu(0); 325 clockevent_davinci.cpumask = cpumask_of(0);
326 clockevents_register_device(&clockevent_davinci); 326 clockevents_register_device(&clockevent_davinci);
327} 327}
328 328
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index a11765f5f23b..aff0ebcfa847 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -184,7 +184,7 @@ static int __init imx_clockevent_init(unsigned long rate)
184 clockevent_imx.min_delta_ns = 184 clockevent_imx.min_delta_ns =
185 clockevent_delta2ns(0xf, &clockevent_imx); 185 clockevent_delta2ns(0xf, &clockevent_imx);
186 186
187 clockevent_imx.cpumask = cpumask_of_cpu(0); 187 clockevent_imx.cpumask = cpumask_of(0);
188 188
189 clockevents_register_device(&clockevent_imx); 189 clockevents_register_device(&clockevent_imx);
190 190
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 7766f469456b..f4656d2ac8a8 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -487,7 +487,7 @@ static int __init ixp4xx_clockevent_init(void)
487 clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx); 487 clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
488 clockevent_ixp4xx.min_delta_ns = 488 clockevent_ixp4xx.min_delta_ns =
489 clockevent_delta2ns(0xf, &clockevent_ixp4xx); 489 clockevent_delta2ns(0xf, &clockevent_ixp4xx);
490 clockevent_ixp4xx.cpumask = cpumask_of_cpu(0); 490 clockevent_ixp4xx.cpumask = cpumask_of(0);
491 491
492 clockevents_register_device(&clockevent_ixp4xx); 492 clockevents_register_device(&clockevent_ixp4xx);
493 return 0; 493 return 0;
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 345a14cb73c3..444d9c0f5ca6 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -182,7 +182,7 @@ static void __init msm_timer_init(void)
182 clockevent_delta2ns(0xf0000000 >> clock->shift, ce); 182 clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
183 /* 4 gets rounded down to 3 */ 183 /* 4 gets rounded down to 3 */
184 ce->min_delta_ns = clockevent_delta2ns(4, ce); 184 ce->min_delta_ns = clockevent_delta2ns(4, ce);
185 ce->cpumask = cpumask_of_cpu(0); 185 ce->cpumask = cpumask_of(0);
186 186
187 cs->mult = clocksource_hz2mult(clock->freq, cs->shift); 187 cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
188 res = clocksource_register(cs); 188 res = clocksource_register(cs);
diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c
index a63424d083d9..41df69721769 100644
--- a/arch/arm/mach-ns9xxx/time-ns9360.c
+++ b/arch/arm/mach-ns9xxx/time-ns9360.c
@@ -173,7 +173,7 @@ static void __init ns9360_timer_init(void)
173 ns9360_clockevent_device.min_delta_ns = 173 ns9360_clockevent_device.min_delta_ns =
174 clockevent_delta2ns(1, &ns9360_clockevent_device); 174 clockevent_delta2ns(1, &ns9360_clockevent_device);
175 175
176 ns9360_clockevent_device.cpumask = cpumask_of_cpu(0); 176 ns9360_clockevent_device.cpumask = cpumask_of(0);
177 clockevents_register_device(&ns9360_clockevent_device); 177 clockevents_register_device(&ns9360_clockevent_device);
178 178
179 setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT, 179 setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT,
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 2cf7e32bd293..495a32c287b4 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -173,7 +173,7 @@ static __init void omap_init_mpu_timer(unsigned long rate)
173 clockevent_mpu_timer1.min_delta_ns = 173 clockevent_mpu_timer1.min_delta_ns =
174 clockevent_delta2ns(1, &clockevent_mpu_timer1); 174 clockevent_delta2ns(1, &clockevent_mpu_timer1);
175 175
176 clockevent_mpu_timer1.cpumask = cpumask_of_cpu(0); 176 clockevent_mpu_timer1.cpumask = cpumask_of(0);
177 clockevents_register_device(&clockevent_mpu_timer1); 177 clockevents_register_device(&clockevent_mpu_timer1);
178} 178}
179 179
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 705367ece174..fd3f7396e162 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -187,7 +187,7 @@ static __init void omap_init_32k_timer(void)
187 clockevent_32k_timer.min_delta_ns = 187 clockevent_32k_timer.min_delta_ns =
188 clockevent_delta2ns(1, &clockevent_32k_timer); 188 clockevent_delta2ns(1, &clockevent_32k_timer);
189 189
190 clockevent_32k_timer.cpumask = cpumask_of_cpu(0); 190 clockevent_32k_timer.cpumask = cpumask_of(0);
191 clockevents_register_device(&clockevent_32k_timer); 191 clockevents_register_device(&clockevent_32k_timer);
192} 192}
193 193
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 589393bedade..ae6036300f60 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -120,7 +120,7 @@ static void __init omap2_gp_clockevent_init(void)
120 clockevent_gpt.min_delta_ns = 120 clockevent_gpt.min_delta_ns =
121 clockevent_delta2ns(1, &clockevent_gpt); 121 clockevent_delta2ns(1, &clockevent_gpt);
122 122
123 clockevent_gpt.cpumask = cpumask_of_cpu(0); 123 clockevent_gpt.cpumask = cpumask_of(0);
124 clockevents_register_device(&clockevent_gpt); 124 clockevents_register_device(&clockevent_gpt);
125} 125}
126 126
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index f8a9a62959e5..bf3c9a4aad50 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -122,7 +122,6 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
122 .features = CLOCK_EVT_FEAT_ONESHOT, 122 .features = CLOCK_EVT_FEAT_ONESHOT,
123 .shift = 32, 123 .shift = 32,
124 .rating = 200, 124 .rating = 200,
125 .cpumask = CPU_MASK_CPU0,
126 .set_next_event = pxa_osmr0_set_next_event, 125 .set_next_event = pxa_osmr0_set_next_event,
127 .set_mode = pxa_osmr0_set_mode, 126 .set_mode = pxa_osmr0_set_mode,
128}; 127};
@@ -170,6 +169,7 @@ static void __init pxa_timer_init(void)
170 clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0); 169 clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
171 ckevt_pxa_osmr0.min_delta_ns = 170 ckevt_pxa_osmr0.min_delta_ns =
172 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1; 171 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1;
172 ckevt_pxa_osmr0.cpumask = cpumask_of(0);
173 173
174 cksrc_pxa_oscr0.mult = 174 cksrc_pxa_oscr0.mult =
175 clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift); 175 clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift);
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 2f04d54711e7..b07cb9b7adb1 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -511,7 +511,7 @@ static struct clock_event_device timer0_clockevent = {
511 .set_mode = timer_set_mode, 511 .set_mode = timer_set_mode,
512 .set_next_event = timer_set_next_event, 512 .set_next_event = timer_set_next_event,
513 .rating = 300, 513 .rating = 300,
514 .cpumask = CPU_MASK_ALL, 514 .cpumask = cpu_all_mask,
515}; 515};
516 516
517static void __init realview_clockevents_init(unsigned int timer_irq) 517static void __init realview_clockevents_init(unsigned int timer_irq)
diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c
index 44d178cd5733..504961ef343c 100644
--- a/arch/arm/mach-realview/localtimer.c
+++ b/arch/arm/mach-realview/localtimer.c
@@ -161,7 +161,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
161 clk->set_mode = local_timer_set_mode; 161 clk->set_mode = local_timer_set_mode;
162 clk->set_next_event = local_timer_set_next_event; 162 clk->set_next_event = local_timer_set_next_event;
163 clk->irq = IRQ_LOCALTIMER; 163 clk->irq = IRQ_LOCALTIMER;
164 clk->cpumask = cpumask_of_cpu(cpu); 164 clk->cpumask = cpumask_of(cpu);
165 clk->shift = 20; 165 clk->shift = 20;
166 clk->mult = div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift); 166 clk->mult = div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift);
167 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); 167 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
@@ -199,7 +199,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
199 clk->rating = 200; 199 clk->rating = 200;
200 clk->set_mode = dummy_timer_set_mode; 200 clk->set_mode = dummy_timer_set_mode;
201 clk->broadcast = smp_timer_broadcast; 201 clk->broadcast = smp_timer_broadcast;
202 clk->cpumask = cpumask_of_cpu(cpu); 202 clk->cpumask = cpumask_of(cpu);
203 203
204 clockevents_register_device(clk); 204 clockevents_register_device(clk);
205} 205}
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 24c0a4bae850..1cac4ac0b4b8 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -73,7 +73,6 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
73 .features = CLOCK_EVT_FEAT_ONESHOT, 73 .features = CLOCK_EVT_FEAT_ONESHOT,
74 .shift = 32, 74 .shift = 32,
75 .rating = 200, 75 .rating = 200,
76 .cpumask = CPU_MASK_CPU0,
77 .set_next_event = sa1100_osmr0_set_next_event, 76 .set_next_event = sa1100_osmr0_set_next_event,
78 .set_mode = sa1100_osmr0_set_mode, 77 .set_mode = sa1100_osmr0_set_mode,
79}; 78};
@@ -110,6 +109,7 @@ static void __init sa1100_timer_init(void)
110 clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0); 109 clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0);
111 ckevt_sa1100_osmr0.min_delta_ns = 110 ckevt_sa1100_osmr0.min_delta_ns =
112 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1; 111 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1;
112 ckevt_sa1100_osmr0.cpumask = cpumask_of(0);
113 113
114 cksrc_sa1100_oscr.mult = 114 cksrc_sa1100_oscr.mult =
115 clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift); 115 clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 565e0ba0d67e..a3f1933434e2 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -965,7 +965,7 @@ static void __init versatile_timer_init(void)
965 timer0_clockevent.min_delta_ns = 965 timer0_clockevent.min_delta_ns =
966 clockevent_delta2ns(0xf, &timer0_clockevent); 966 clockevent_delta2ns(0xf, &timer0_clockevent);
967 967
968 timer0_clockevent.cpumask = cpumask_of_cpu(0); 968 timer0_clockevent.cpumask = cpumask_of(0);
969 clockevents_register_device(&timer0_clockevent); 969 clockevents_register_device(&timer0_clockevent);
970} 970}
971 971
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 4de366e8b4c5..6d6bd5899240 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -260,10 +260,10 @@ static void em_stop(void)
260static void em_route_irq(int irq, unsigned int cpu) 260static void em_route_irq(int irq, unsigned int cpu)
261{ 261{
262 struct irq_desc *desc = irq_desc + irq; 262 struct irq_desc *desc = irq_desc + irq;
263 cpumask_t mask = cpumask_of_cpu(cpu); 263 const struct cpumask *mask = cpumask_of(cpu);
264 264
265 spin_lock_irq(&desc->lock); 265 spin_lock_irq(&desc->lock);
266 desc->affinity = mask; 266 desc->affinity = *mask;
267 desc->chip->set_affinity(irq, mask); 267 desc->chip->set_affinity(irq, mask);
268 spin_unlock_irq(&desc->lock); 268 spin_unlock_irq(&desc->lock);
269} 269}
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index fd28f5194f71..758a1293bcfa 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -190,7 +190,7 @@ static int __init mxc_clockevent_init(void)
190 clockevent_mxc.min_delta_ns = 190 clockevent_mxc.min_delta_ns =
191 clockevent_delta2ns(0xff, &clockevent_mxc); 191 clockevent_delta2ns(0xff, &clockevent_mxc);
192 192
193 clockevent_mxc.cpumask = cpumask_of_cpu(0); 193 clockevent_mxc.cpumask = cpumask_of(0);
194 194
195 clockevents_register_device(&clockevent_mxc); 195 clockevents_register_device(&clockevent_mxc);
196 196
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index 544d6b327f3a..6fa2923e6dca 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -149,7 +149,6 @@ static struct clock_event_device orion_clkevt = {
149 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, 149 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
150 .shift = 32, 150 .shift = 32,
151 .rating = 300, 151 .rating = 300,
152 .cpumask = CPU_MASK_CPU0,
153 .set_next_event = orion_clkevt_next_event, 152 .set_next_event = orion_clkevt_next_event,
154 .set_mode = orion_clkevt_mode, 153 .set_mode = orion_clkevt_mode,
155}; 154};
@@ -199,5 +198,6 @@ void __init orion_time_init(unsigned int irq, unsigned int tclk)
199 orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift); 198 orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift);
200 orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt); 199 orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt);
201 orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt); 200 orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt);
201 orion_clkevt.cpumask = cpumask_of(0);
202 clockevents_register_device(&orion_clkevt); 202 clockevents_register_device(&orion_clkevt);
203} 203}
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 283481d74a5b..0ff46bf873b0 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -106,7 +106,6 @@ static struct clock_event_device comparator = {
106 .features = CLOCK_EVT_FEAT_ONESHOT, 106 .features = CLOCK_EVT_FEAT_ONESHOT,
107 .shift = 16, 107 .shift = 16,
108 .rating = 50, 108 .rating = 50,
109 .cpumask = CPU_MASK_CPU0,
110 .set_next_event = comparator_next_event, 109 .set_next_event = comparator_next_event,
111 .set_mode = comparator_mode, 110 .set_mode = comparator_mode,
112}; 111};
@@ -134,6 +133,7 @@ void __init time_init(void)
134 comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift); 133 comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
135 comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator); 134 comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
136 comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1; 135 comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
136 comparator.cpumask = cpumask_of(0);
137 137
138 sysreg_write(COMPARE, 0); 138 sysreg_write(COMPARE, 0);
139 timer_irqaction.dev_id = &comparator; 139 timer_irqaction.dev_id = &comparator;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index e887efc86c29..0ed2badfd746 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -162,7 +162,6 @@ static struct clock_event_device clockevent_bfin = {
162 .name = "bfin_core_timer", 162 .name = "bfin_core_timer",
163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
164 .shift = 32, 164 .shift = 32,
165 .cpumask = CPU_MASK_CPU0,
166 .set_next_event = bfin_timer_set_next_event, 165 .set_next_event = bfin_timer_set_next_event,
167 .set_mode = bfin_timer_set_mode, 166 .set_mode = bfin_timer_set_mode,
168}; 167};
@@ -193,6 +192,7 @@ static int __init bfin_clockevent_init(void)
193 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); 192 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
194 clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); 193 clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
195 clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); 194 clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
195 clockevent_bfin.cpumask = cpumask_of(0);
196 clockevents_register_device(&clockevent_bfin); 196 clockevents_register_device(&clockevent_bfin);
197 197
198 return 0; 198 return 0;
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 173c141ac9ba..295131fee710 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -325,11 +325,11 @@ static void end_crisv32_irq(unsigned int irq)
325{ 325{
326} 326}
327 327
328void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest) 328void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
329{ 329{
330 unsigned long flags; 330 unsigned long flags;
331 spin_lock_irqsave(&irq_lock, flags); 331 spin_lock_irqsave(&irq_lock, flags);
332 irq_allocations[irq - FIRST_IRQ].mask = dest; 332 irq_allocations[irq - FIRST_IRQ].mask = *dest;
333 spin_unlock_irqrestore(&irq_lock, flags); 333 spin_unlock_irqrestore(&irq_lock, flags);
334} 334}
335 335
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 52e16c6436f9..9dac17334640 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -29,11 +29,7 @@
29spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED}; 29spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
30 30
31/* CPU masks */ 31/* CPU masks */
32cpumask_t cpu_online_map = CPU_MASK_NONE;
33EXPORT_SYMBOL(cpu_online_map);
34cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 32cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
35cpumask_t cpu_possible_map;
36EXPORT_SYMBOL(cpu_possible_map);
37EXPORT_SYMBOL(phys_cpu_present_map); 33EXPORT_SYMBOL(phys_cpu_present_map);
38 34
39/* Variables used during SMP boot */ 35/* Variables used during SMP boot */
diff --git a/arch/cris/include/asm/smp.h b/arch/cris/include/asm/smp.h
index dba33aba3e95..c615a06dd757 100644
--- a/arch/cris/include/asm/smp.h
+++ b/arch/cris/include/asm/smp.h
@@ -4,7 +4,6 @@
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5 5
6extern cpumask_t phys_cpu_present_map; 6extern cpumask_t phys_cpu_present_map;
7extern cpumask_t cpu_possible_map;
8 7
9#define raw_smp_processor_id() (current_thread_info()->cpu) 8#define raw_smp_processor_id() (current_thread_info()->cpu)
10 9
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c2f58ff364e7..cc0a3182db3c 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq)
22} 22}
23 23
24static void 24static void
25hpsim_set_affinity_noop (unsigned int a, cpumask_t b) 25hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
26{ 26{
27} 27}
28 28
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 12d96e0cd513..21c402365d0e 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -57,7 +57,6 @@ extern struct smp_boot_data {
57 57
58extern char no_int_routing __devinitdata; 58extern char no_int_routing __devinitdata;
59 59
60extern cpumask_t cpu_online_map;
61extern cpumask_t cpu_core_map[NR_CPUS]; 60extern cpumask_t cpu_core_map[NR_CPUS];
62DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 61DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
63extern int smp_num_siblings; 62extern int smp_num_siblings;
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5c4674ae8aea..c8adecd5b416 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq)
330 330
331 331
332static void 332static void
333iosapic_set_affinity (unsigned int irq, cpumask_t mask) 333iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
334{ 334{
335#ifdef CONFIG_SMP 335#ifdef CONFIG_SMP
336 u32 high32, low32; 336 u32 high32, low32;
337 int dest, rte_index; 337 int cpu, dest, rte_index;
338 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; 338 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
339 struct iosapic_rte_info *rte; 339 struct iosapic_rte_info *rte;
340 struct iosapic *iosapic; 340 struct iosapic *iosapic;
341 341
342 irq &= (~IA64_IRQ_REDIRECTED); 342 irq &= (~IA64_IRQ_REDIRECTED);
343 343
344 cpus_and(mask, mask, cpu_online_map); 344 cpu = cpumask_first_and(cpu_online_mask, mask);
345 if (cpus_empty(mask)) 345 if (cpu >= nr_cpu_ids)
346 return; 346 return;
347 347
348 if (irq_prepare_move(irq, first_cpu(mask))) 348 if (irq_prepare_move(irq, cpu))
349 return; 349 return;
350 350
351 dest = cpu_physical_id(first_cpu(mask)); 351 dest = cpu_physical_id(cpu);
352 352
353 if (!iosapic_intr_info[irq].count) 353 if (!iosapic_intr_info[irq].count)
354 return; /* not an IOSAPIC interrupt */ 354 return; /* not an IOSAPIC interrupt */
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7fd18f54c056..0b6db53fedcf 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS];
133 */ 133 */
134static void migrate_irqs(void) 134static void migrate_irqs(void)
135{ 135{
136 cpumask_t mask;
137 irq_desc_t *desc; 136 irq_desc_t *desc;
138 int irq, new_cpu; 137 int irq, new_cpu;
139 138
@@ -152,15 +151,14 @@ static void migrate_irqs(void)
152 if (desc->status == IRQ_PER_CPU) 151 if (desc->status == IRQ_PER_CPU)
153 continue; 152 continue;
154 153
155 cpus_and(mask, irq_desc[irq].affinity, cpu_online_map); 154 if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
156 if (any_online_cpu(mask) == NR_CPUS) { 155 >= nr_cpu_ids) {
157 /* 156 /*
158 * Save it for phase 2 processing 157 * Save it for phase 2 processing
159 */ 158 */
160 vectors_in_migration[irq] = irq; 159 vectors_in_migration[irq] = irq;
161 160
162 new_cpu = any_online_cpu(cpu_online_map); 161 new_cpu = any_online_cpu(cpu_online_map);
163 mask = cpumask_of_cpu(new_cpu);
164 162
165 /* 163 /*
166 * Al three are essential, currently WARN_ON.. maybe panic? 164 * Al three are essential, currently WARN_ON.. maybe panic?
@@ -168,7 +166,8 @@ static void migrate_irqs(void)
168 if (desc->chip && desc->chip->disable && 166 if (desc->chip && desc->chip->disable &&
169 desc->chip->enable && desc->chip->set_affinity) { 167 desc->chip->enable && desc->chip->set_affinity) {
170 desc->chip->disable(irq); 168 desc->chip->disable(irq);
171 desc->chip->set_affinity(irq, mask); 169 desc->chip->set_affinity(irq,
170 cpumask_of(new_cpu));
172 desc->chip->enable(irq); 171 desc->chip->enable(irq);
173 } else { 172 } else {
174 WARN_ON((!(desc->chip) || !(desc->chip->disable) || 173 WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 702a09c13238..890339339035 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -49,11 +49,12 @@
49static struct irq_chip ia64_msi_chip; 49static struct irq_chip ia64_msi_chip;
50 50
51#ifdef CONFIG_SMP 51#ifdef CONFIG_SMP
52static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) 52static void ia64_set_msi_irq_affinity(unsigned int irq,
53 const cpumask_t *cpu_mask)
53{ 54{
54 struct msi_msg msg; 55 struct msi_msg msg;
55 u32 addr, data; 56 u32 addr, data;
56 int cpu = first_cpu(cpu_mask); 57 int cpu = first_cpu(*cpu_mask);
57 58
58 if (!cpu_online(cpu)) 59 if (!cpu_online(cpu))
59 return; 60 return;
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq)
166 167
167#ifdef CONFIG_DMAR 168#ifdef CONFIG_DMAR
168#ifdef CONFIG_SMP 169#ifdef CONFIG_SMP
169static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) 170static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
170{ 171{
171 struct irq_cfg *cfg = irq_cfg + irq; 172 struct irq_cfg *cfg = irq_cfg + irq;
172 struct msi_msg msg; 173 struct msi_msg msg;
173 int cpu = first_cpu(mask); 174 int cpu = cpumask_first(mask);
174
175 175
176 if (!cpu_online(cpu)) 176 if (!cpu_online(cpu))
177 return; 177 return;
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
187 msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); 187 msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
188 188
189 dmar_msi_write(irq, &msg); 189 dmar_msi_write(irq, &msg);
190 irq_desc[irq].affinity = mask; 190 irq_desc[irq].affinity = *mask;
191} 191}
192#endif /* CONFIG_SMP */ 192#endif /* CONFIG_SMP */
193 193
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1dcbb85fc4ee..11463994a7d5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu;
131 */ 131 */
132DEFINE_PER_CPU(int, cpu_state); 132DEFINE_PER_CPU(int, cpu_state);
133 133
134/* Bitmasks of currently online, and possible CPUs */
135cpumask_t cpu_online_map;
136EXPORT_SYMBOL(cpu_online_map);
137cpumask_t cpu_possible_map = CPU_MASK_NONE;
138EXPORT_SYMBOL(cpu_possible_map);
139
140cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; 134cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
141EXPORT_SYMBOL(cpu_core_map); 135EXPORT_SYMBOL(cpu_core_map);
142DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); 136DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
@@ -688,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu)
688{ 682{
689 int new_cpei_cpu; 683 int new_cpei_cpu;
690 irq_desc_t *desc = NULL; 684 irq_desc_t *desc = NULL;
691 cpumask_t mask; 685 const struct cpumask *mask;
692 int retval = 0; 686 int retval = 0;
693 687
694 /* 688 /*
@@ -701,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu)
701 * Now re-target the CPEI to a different processor 695 * Now re-target the CPEI to a different processor
702 */ 696 */
703 new_cpei_cpu = any_online_cpu(cpu_online_map); 697 new_cpei_cpu = any_online_cpu(cpu_online_map);
704 mask = cpumask_of_cpu(new_cpei_cpu); 698 mask = cpumask_of(new_cpei_cpu);
705 set_cpei_target_cpu(new_cpei_cpu); 699 set_cpei_target_cpu(new_cpei_cpu);
706 desc = irq_desc + ia64_cpe_irq; 700 desc = irq_desc + ia64_cpe_irq;
707 /* 701 /*
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index c75b914f2d6b..a8d61a3e9a94 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
219 cpumask_t shared_cpu_map; 219 cpumask_t shared_cpu_map;
220 220
221 cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map); 221 cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
222 len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map); 222 len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
223 len += sprintf(buf+len, "\n"); 223 len += sprintf(buf+len, "\n");
224 return len; 224 return len;
225} 225}
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0c66dbdd1d72..66fd705e82c0 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -227,14 +227,14 @@ finish_up:
227 return new_irq_info; 227 return new_irq_info;
228} 228}
229 229
230static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) 230static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
231{ 231{
232 struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; 232 struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
233 nasid_t nasid; 233 nasid_t nasid;
234 int slice; 234 int slice;
235 235
236 nasid = cpuid_to_nasid(first_cpu(mask)); 236 nasid = cpuid_to_nasid(cpumask_first(mask));
237 slice = cpuid_to_slice(first_cpu(mask)); 237 slice = cpuid_to_slice(cpumask_first(mask));
238 238
239 list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, 239 list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
240 sn_irq_lh[irq], list) 240 sn_irq_lh[irq], list)
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 83f190ffe350..ca553b0429ce 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
151} 151}
152 152
153#ifdef CONFIG_SMP 153#ifdef CONFIG_SMP
154static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) 154static void sn_set_msi_irq_affinity(unsigned int irq,
155 const struct cpumask *cpu_mask)
155{ 156{
156 struct msi_msg msg; 157 struct msi_msg msg;
157 int slice; 158 int slice;
@@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
164 struct sn_pcibus_provider *provider; 165 struct sn_pcibus_provider *provider;
165 unsigned int cpu; 166 unsigned int cpu;
166 167
167 cpu = first_cpu(cpu_mask); 168 cpu = cpumask_first(cpu_mask);
168 sn_irq_info = sn_msi_info[irq].sn_irq_info; 169 sn_irq_info = sn_msi_info[irq].sn_irq_info;
169 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) 170 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
170 return; 171 return;
@@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
204 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); 205 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
205 206
206 write_msi_msg(irq, &msg); 207 write_msi_msg(irq, &msg);
207 irq_desc[irq].affinity = cpu_mask; 208 irq_desc[irq].affinity = *cpu_mask;
208} 209}
209#endif /* CONFIG_SMP */ 210#endif /* CONFIG_SMP */
210 211
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 29047d5c259a..cabba332cc48 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
10 default y 10 default y
11 select HAVE_IDE 11 select HAVE_IDE
12 select HAVE_OPROFILE 12 select HAVE_OPROFILE
13 select INIT_ALL_POSSIBLE
13 14
14config SBUS 15config SBUS
15 bool 16 bool
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 39cb6da72dcb..0f06b3722e96 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -73,17 +73,11 @@ static unsigned int bsp_phys_id = -1;
73/* Bitmask of physically existing CPUs */ 73/* Bitmask of physically existing CPUs */
74physid_mask_t phys_cpu_present_map; 74physid_mask_t phys_cpu_present_map;
75 75
76/* Bitmask of currently online CPUs */
77cpumask_t cpu_online_map;
78EXPORT_SYMBOL(cpu_online_map);
79
80cpumask_t cpu_bootout_map; 76cpumask_t cpu_bootout_map;
81cpumask_t cpu_bootin_map; 77cpumask_t cpu_bootin_map;
82static cpumask_t cpu_callin_map; 78static cpumask_t cpu_callin_map;
83cpumask_t cpu_callout_map; 79cpumask_t cpu_callout_map;
84EXPORT_SYMBOL(cpu_callout_map); 80EXPORT_SYMBOL(cpu_callout_map);
85cpumask_t cpu_possible_map = CPU_MASK_ALL;
86EXPORT_SYMBOL(cpu_possible_map);
87 81
88/* Per CPU bogomips and other parameters */ 82/* Per CPU bogomips and other parameters */
89struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned; 83struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c
index c5b916700b22..2a12e7fa9748 100644
--- a/arch/m68knommu/platform/coldfire/pit.c
+++ b/arch/m68knommu/platform/coldfire/pit.c
@@ -156,7 +156,7 @@ void hw_timer_init(void)
156{ 156{
157 u32 imr; 157 u32 imr;
158 158
159 cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 159 cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id());
160 cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32); 160 cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32);
161 cf_pit_clockevent.max_delta_ns = 161 cf_pit_clockevent.max_delta_ns =
162 clockevent_delta2ns(0xFFFF, &cf_pit_clockevent); 162 clockevent_delta2ns(0xFFFF, &cf_pit_clockevent);
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index a58f0eecc68f..abc62aa744ac 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -49,7 +49,8 @@ static inline void smtc_im_ack_irq(unsigned int irq)
49#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 49#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
50#include <linux/cpumask.h> 50#include <linux/cpumask.h>
51 51
52extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity); 52extern void plat_set_irq_affinity(unsigned int irq,
53 const struct cpumask *affinity);
53extern void smtc_forward_irq(unsigned int irq); 54extern void smtc_forward_irq(unsigned int irq);
54 55
55/* 56/*
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 0ff5b523ea77..86557b5d1b3f 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -38,9 +38,6 @@ extern int __cpu_logical_map[NR_CPUS];
38#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ 38#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
39#define SMP_CALL_FUNCTION 0x2 39#define SMP_CALL_FUNCTION 0x2
40 40
41extern cpumask_t phys_cpu_present_map;
42#define cpu_possible_map phys_cpu_present_map
43
44extern void asmlinkage smp_bootstrap(void); 41extern void asmlinkage smp_bootstrap(void);
45 42
46/* 43/*
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index d7f8a782aae4..03965cb1b252 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -146,7 +146,7 @@ void __init plat_time_init(void)
146 146
147 BUG_ON(HZ != 100); 147 BUG_ON(HZ != 100);
148 148
149 cd->cpumask = cpumask_of_cpu(cpu); 149 cd->cpumask = cpumask_of(cpu);
150 clockevents_register_device(cd); 150 clockevents_register_device(cd);
151 action->dev_id = cd; 151 action->dev_id = cd;
152 setup_irq(JAZZ_TIMER_IRQ, action); 152 setup_irq(JAZZ_TIMER_IRQ, action);
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 0a57f86945f1..b820661678b0 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -126,7 +126,7 @@ void __cpuinit sb1480_clockevent_init(void)
126 cd->min_delta_ns = clockevent_delta2ns(2, cd); 126 cd->min_delta_ns = clockevent_delta2ns(2, cd);
127 cd->rating = 200; 127 cd->rating = 200;
128 cd->irq = irq; 128 cd->irq = irq;
129 cd->cpumask = cpumask_of_cpu(cpu); 129 cd->cpumask = cpumask_of(cpu);
130 cd->set_next_event = sibyte_next_event; 130 cd->set_next_event = sibyte_next_event;
131 cd->set_mode = sibyte_set_mode; 131 cd->set_mode = sibyte_set_mode;
132 clockevents_register_device(cd); 132 clockevents_register_device(cd);
@@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(void)
148 action->name = name; 148 action->name = name;
149 action->dev_id = cd; 149 action->dev_id = cd;
150 150
151 irq_set_affinity(irq, cpumask_of_cpu(cpu)); 151 irq_set_affinity(irq, cpumask_of(cpu));
152 setup_irq(irq, action); 152 setup_irq(irq, action);
153} 153}
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index df4acb68bfb5..1ada45ea0700 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -88,7 +88,6 @@ static void ds1287_event_handler(struct clock_event_device *dev)
88static struct clock_event_device ds1287_clockevent = { 88static struct clock_event_device ds1287_clockevent = {
89 .name = "ds1287", 89 .name = "ds1287",
90 .features = CLOCK_EVT_FEAT_PERIODIC, 90 .features = CLOCK_EVT_FEAT_PERIODIC,
91 .cpumask = CPU_MASK_CPU0,
92 .set_next_event = ds1287_set_next_event, 91 .set_next_event = ds1287_set_next_event,
93 .set_mode = ds1287_set_mode, 92 .set_mode = ds1287_set_mode,
94 .event_handler = ds1287_event_handler, 93 .event_handler = ds1287_event_handler,
@@ -122,6 +121,7 @@ int __init ds1287_clockevent_init(int irq)
122 clockevent_set_clock(cd, 32768); 121 clockevent_set_clock(cd, 32768);
123 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 122 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
124 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 123 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
124 cd->cpumask = cpumask_of(0);
125 125
126 clockevents_register_device(&ds1287_clockevent); 126 clockevents_register_device(&ds1287_clockevent);
127 127
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 6e2f58520afb..e9b787feedcb 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -96,7 +96,6 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
96static struct clock_event_device gt641xx_timer0_clockevent = { 96static struct clock_event_device gt641xx_timer0_clockevent = {
97 .name = "gt641xx-timer0", 97 .name = "gt641xx-timer0",
98 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 98 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
99 .cpumask = CPU_MASK_CPU0,
100 .irq = GT641XX_TIMER0_IRQ, 99 .irq = GT641XX_TIMER0_IRQ,
101 .set_next_event = gt641xx_timer0_set_next_event, 100 .set_next_event = gt641xx_timer0_set_next_event,
102 .set_mode = gt641xx_timer0_set_mode, 101 .set_mode = gt641xx_timer0_set_mode,
@@ -132,6 +131,7 @@ static int __init gt641xx_timer0_clockevent_init(void)
132 clockevent_set_clock(cd, gt641xx_base_clock); 131 clockevent_set_clock(cd, gt641xx_base_clock);
133 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 132 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 133 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
134 cd->cpumask = cpumask_of(0);
135 135
136 clockevents_register_device(&gt641xx_timer0_clockevent); 136 clockevents_register_device(&gt641xx_timer0_clockevent);
137 137
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 4a4c59f2737a..e1ec83b68031 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -195,7 +195,7 @@ int __cpuinit mips_clockevent_init(void)
195 195
196 cd->rating = 300; 196 cd->rating = 300;
197 cd->irq = irq; 197 cd->irq = irq;
198 cd->cpumask = cpumask_of_cpu(cpu); 198 cd->cpumask = cpumask_of(cpu);
199 cd->set_next_event = mips_next_event; 199 cd->set_next_event = mips_next_event;
200 cd->set_mode = mips_set_clock_mode; 200 cd->set_mode = mips_set_clock_mode;
201 cd->event_handler = mips_event_handler; 201 cd->event_handler = mips_event_handler;
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 63ac3ad462bc..a2eebaafda52 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -125,7 +125,7 @@ void __cpuinit sb1250_clockevent_init(void)
125 cd->min_delta_ns = clockevent_delta2ns(2, cd); 125 cd->min_delta_ns = clockevent_delta2ns(2, cd);
126 cd->rating = 200; 126 cd->rating = 200;
127 cd->irq = irq; 127 cd->irq = irq;
128 cd->cpumask = cpumask_of_cpu(cpu); 128 cd->cpumask = cpumask_of(cpu);
129 cd->set_next_event = sibyte_next_event; 129 cd->set_next_event = sibyte_next_event;
130 cd->set_mode = sibyte_set_mode; 130 cd->set_mode = sibyte_set_mode;
131 clockevents_register_device(cd); 131 clockevents_register_device(cd);
@@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(void)
147 action->name = name; 147 action->name = name;
148 action->dev_id = cd; 148 action->dev_id = cd;
149 149
150 irq_set_affinity(irq, cpumask_of_cpu(cpu)); 150 irq_set_affinity(irq, cpumask_of(cpu));
151 setup_irq(irq, action); 151 setup_irq(irq, action);
152} 152}
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 5162fe4b5952..6d45e24db5bf 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -292,7 +292,7 @@ int __cpuinit mips_clockevent_init(void)
292 292
293 cd->rating = 300; 293 cd->rating = 300;
294 cd->irq = irq; 294 cd->irq = irq;
295 cd->cpumask = cpumask_of_cpu(cpu); 295 cd->cpumask = cpumask_of(cpu);
296 cd->set_next_event = mips_next_event; 296 cd->set_next_event = mips_next_event;
297 cd->set_mode = mips_set_clock_mode; 297 cd->set_mode = mips_set_clock_mode;
298 cd->event_handler = mips_event_handler; 298 cd->event_handler = mips_event_handler;
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index b5fc4eb412d2..eccf7d6096bd 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -112,7 +112,6 @@ static struct clock_event_device txx9tmr_clock_event_device = {
112 .name = "TXx9", 112 .name = "TXx9",
113 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 113 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
114 .rating = 200, 114 .rating = 200,
115 .cpumask = CPU_MASK_CPU0,
116 .set_mode = txx9tmr_set_mode, 115 .set_mode = txx9tmr_set_mode,
117 .set_next_event = txx9tmr_set_next_event, 116 .set_next_event = txx9tmr_set_next_event,
118}; 117};
@@ -150,6 +149,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
150 clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); 149 clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
151 cd->min_delta_ns = clockevent_delta2ns(0xf, cd); 150 cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
152 cd->irq = irq; 151 cd->irq = irq;
152 cd->cpumask = cpumask_of(0),
153 clockevents_register_device(cd); 153 clockevents_register_device(cd);
154 setup_irq(irq, &txx9tmr_irq); 154 setup_irq(irq, &txx9tmr_irq);
155 printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n", 155 printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index b6ac55162b9a..f4d187825f96 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -115,7 +115,7 @@ void __init setup_pit_timer(void)
115 * Start pit with the boot cpu mask and make it global after the 115 * Start pit with the boot cpu mask and make it global after the
116 * IO_APIC has been initialized. 116 * IO_APIC has been initialized.
117 */ 117 */
118 cd->cpumask = cpumask_of_cpu(cpu); 118 cd->cpumask = cpumask_of(cpu);
119 clockevent_set_clock(cd, CLOCK_TICK_RATE); 119 clockevent_set_clock(cd, CLOCK_TICK_RATE);
120 cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); 120 cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
121 cd->min_delta_ns = clockevent_delta2ns(0xF, cd); 121 cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
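
Illustrative aside, not part of the patch: the clockevent hunks in this series all make the same change. The cpumask field of struct clock_event_device becomes a const struct cpumask *, so drivers assign the constant per-CPU mask from cpumask_of(cpu) instead of copying a cpumask_t from cpumask_of_cpu(cpu). A minimal sketch of a per-CPU registration under that assumption; the names example_clockevent and example_clockevent_init are made up, and the set_next_event/set_mode hooks a real driver needs are omitted.

/* Sketch only: per-CPU clockevent registration with the pointer-based API. */
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct clock_event_device, example_clockevent);

static void __cpuinit example_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd = &per_cpu(example_clockevent, cpu);

	cd->name     = "example";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;
	cd->rating   = 200;
	/* Old API copied a mask: cd->cpumask = cpumask_of_cpu(cpu);
	 * New API stores a pointer to the constant per-CPU mask. */
	cd->cpumask  = cpumask_of(cpu);
	clockevents_register_device(cd);
}
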
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index f0a4bb19e096..494a49a317e9 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq)
155 155
156static DEFINE_SPINLOCK(gic_lock); 156static DEFINE_SPINLOCK(gic_lock);
157 157
158static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) 158static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
159{ 159{
160 cpumask_t tmp = CPU_MASK_NONE; 160 cpumask_t tmp = CPU_MASK_NONE;
161 unsigned long flags; 161 unsigned long flags;
@@ -164,7 +164,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
164 pr_debug(KERN_DEBUG "%s called\n", __func__); 164 pr_debug(KERN_DEBUG "%s called\n", __func__);
165 irq -= _irqbase; 165 irq -= _irqbase;
166 166
167 cpus_and(tmp, cpumask, cpu_online_map); 167 cpumask_and(&tmp, cpumask, cpu_online_mask);
168 if (cpus_empty(tmp)) 168 if (cpus_empty(tmp))
169 return; 169 return;
170 170
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
187 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); 187 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
188 188
189 } 189 }
190 irq_desc[irq].affinity = cpumask; 190 irq_desc[irq].affinity = *cpumask;
191 spin_unlock_irqrestore(&gic_lock, flags); 191 spin_unlock_irqrestore(&gic_lock, flags);
192 192
193} 193}
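
Illustrative aside, not part of the patch: every irq-chip set_affinity hook touched here changes from taking a cpumask_t by value to taking a const struct cpumask *, and its body moves from cpus_and()/first_cpu()/cpus_weight() to the pointer-based cpumask_and()/cpumask_first()/cpumask_weight() helpers. A hedged sketch of the resulting shape follows; example_route_irq_to_cpu() is a made-up hardware hook, not a real kernel function.

/* Sketch only: shape of a set_affinity handler after the conversion. */
#include <linux/cpumask.h>
#include <linux/irq.h>

static void example_route_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	/* Hypothetical: program the controller's routing register here. */
}

static void example_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	cpumask_t tmp;

	/* Was: cpus_and(tmp, mask, cpu_online_map); if (cpus_empty(tmp)) return; */
	cpumask_and(&tmp, mask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return;

	example_route_irq_to_cpu(irq, cpumask_first(&tmp));	/* was first_cpu(tmp) */
	irq_desc[irq].affinity = *mask;	/* struct copy, as in the hunks above */
}
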
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index ca476c4f62a5..f27beca4b26d 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -51,10 +51,10 @@ static int __init allowcpus(char *str)
51 int len; 51 int len;
52 52
53 cpus_clear(cpu_allow_map); 53 cpus_clear(cpu_allow_map);
54 if (cpulist_parse(str, cpu_allow_map) == 0) { 54 if (cpulist_parse(str, &cpu_allow_map) == 0) {
55 cpu_set(0, cpu_allow_map); 55 cpu_set(0, cpu_allow_map);
56 cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map); 56 cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
57 len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map); 57 len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map);
58 buf[len] = '\0'; 58 buf[len] = '\0';
59 pr_debug("Allowable CPUs: %s\n", buf); 59 pr_debug("Allowable CPUs: %s\n", buf);
60 return 1; 60 return 1;
@@ -226,7 +226,7 @@ void __init cmp_smp_setup(void)
226 226
227 for (i = 1; i < NR_CPUS; i++) { 227 for (i = 1; i < NR_CPUS; i++) {
228 if (amon_cpu_avail(i)) { 228 if (amon_cpu_avail(i)) {
229 cpu_set(i, phys_cpu_present_map); 229 cpu_set(i, cpu_possible_map);
230 __cpu_number_map[i] = ++ncpu; 230 __cpu_number_map[i] = ++ncpu;
231 __cpu_logical_map[ncpu] = i; 231 __cpu_logical_map[ncpu] = i;
232 } 232 }
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 87a1816c1f45..6f7ee5ac46ee 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
70 write_vpe_c0_vpeconf0(tmp); 70 write_vpe_c0_vpeconf0(tmp);
71 71
72 /* Record this as available CPU */ 72 /* Record this as available CPU */
73 cpu_set(tc, phys_cpu_present_map); 73 cpu_set(tc, cpu_possible_map);
74 __cpu_number_map[tc] = ++ncpu; 74 __cpu_number_map[tc] = ++ncpu;
75 __cpu_logical_map[ncpu] = tc; 75 __cpu_logical_map[ncpu] = tc;
76 } 76 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 8bf88faf5afd..3da94704f816 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -44,15 +44,10 @@
44#include <asm/mipsmtregs.h> 44#include <asm/mipsmtregs.h>
45#endif /* CONFIG_MIPS_MT_SMTC */ 45#endif /* CONFIG_MIPS_MT_SMTC */
46 46
47cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
48volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 47volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
49cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
50int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 48int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
51int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 49int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
52 50
53EXPORT_SYMBOL(phys_cpu_present_map);
54EXPORT_SYMBOL(cpu_online_map);
55
56extern void cpu_idle(void); 51extern void cpu_idle(void);
57 52
58/* Number of TCs (or siblings in Intel speak) per CPU core */ 53/* Number of TCs (or siblings in Intel speak) per CPU core */
@@ -195,7 +190,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
195/* preload SMP state for boot cpu */ 190/* preload SMP state for boot cpu */
196void __devinit smp_prepare_boot_cpu(void) 191void __devinit smp_prepare_boot_cpu(void)
197{ 192{
198 cpu_set(0, phys_cpu_present_map); 193 cpu_set(0, cpu_possible_map);
199 cpu_set(0, cpu_online_map); 194 cpu_set(0, cpu_online_map);
200 cpu_set(0, cpu_callin_map); 195 cpu_set(0, cpu_callin_map);
201} 196}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 897fb2b4751c..b6cca01ff82b 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -290,7 +290,7 @@ static void smtc_configure_tlb(void)
290 * possibly leave some TCs/VPEs as "slave" processors. 290 * possibly leave some TCs/VPEs as "slave" processors.
291 * 291 *
292 * Use c0_MVPConf0 to find out how many TCs are available, setting up 292 * Use c0_MVPConf0 to find out how many TCs are available, setting up
293 * phys_cpu_present_map and the logical/physical mappings. 293 * cpu_possible_map and the logical/physical mappings.
294 */ 294 */
295 295
296int __init smtc_build_cpu_map(int start_cpu_slot) 296int __init smtc_build_cpu_map(int start_cpu_slot)
@@ -304,7 +304,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
304 */ 304 */
305 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 305 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
306 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { 306 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
307 cpu_set(i, phys_cpu_present_map); 307 cpu_set(i, cpu_possible_map);
308 __cpu_number_map[i] = i; 308 __cpu_number_map[i] = i;
309 __cpu_logical_map[i] = i; 309 __cpu_logical_map[i] = i;
310 } 310 }
@@ -521,7 +521,7 @@ void smtc_prepare_cpus(int cpus)
521 * Pull any physically present but unused TCs out of circulation. 521 * Pull any physically present but unused TCs out of circulation.
522 */ 522 */
523 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { 523 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
524 cpu_clear(tc, phys_cpu_present_map); 524 cpu_clear(tc, cpu_possible_map);
525 cpu_clear(tc, cpu_present_map); 525 cpu_clear(tc, cpu_present_map);
526 tc++; 526 tc++;
527 } 527 }
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index f84a46a8ae6e..aabd7274507b 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -114,9 +114,9 @@ struct plat_smp_ops msmtc_smp_ops = {
114 */ 114 */
115 115
116 116
117void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) 117void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
118{ 118{
119 cpumask_t tmask = affinity; 119 cpumask_t tmask = *affinity;
120 int cpu = 0; 120 int cpu = 0;
121 void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); 121 void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
122 122
@@ -139,7 +139,7 @@ void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
139 * be made to forward to an offline "CPU". 139 * be made to forward to an offline "CPU".
140 */ 140 */
141 141
142 for_each_cpu_mask(cpu, affinity) { 142 for_each_cpu(cpu, affinity) {
143 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) 143 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
144 cpu_clear(cpu, tmask); 144 cpu_clear(cpu, tmask);
145 } 145 }
diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c
index 62f495b57f93..cf293b279098 100644
--- a/arch/mips/nxp/pnx8550/common/time.c
+++ b/arch/mips/nxp/pnx8550/common/time.c
@@ -102,6 +102,7 @@ __init void plat_time_init(void)
102 unsigned int p; 102 unsigned int p;
103 unsigned int pow2p; 103 unsigned int pow2p;
104 104
105 pnx8xxx_clockevent.cpumask = cpu_none_mask;
105 clockevents_register_device(&pnx8xxx_clockevent); 106 clockevents_register_device(&pnx8xxx_clockevent);
106 clocksource_register(&pnx_clocksource); 107 clocksource_register(&pnx_clocksource);
107 108
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 3a7df647ca77..f78c29b68d77 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -141,7 +141,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
141} 141}
142 142
143/* 143/*
144 * Detect available CPUs, populate phys_cpu_present_map before smp_init 144 * Detect available CPUs, populate cpu_possible_map before smp_init
145 * 145 *
146 * We don't want to start the secondary CPU yet nor do we have a nice probing 146 * We don't want to start the secondary CPU yet nor do we have a nice probing
147 * feature in PMON so we just assume presence of the secondary core. 147 * feature in PMON so we just assume presence of the secondary core.
@@ -150,10 +150,10 @@ static void __init yos_smp_setup(void)
150{ 150{
151 int i; 151 int i;
152 152
153 cpus_clear(phys_cpu_present_map); 153 cpus_clear(cpu_possible_map);
154 154
155 for (i = 0; i < 2; i++) { 155 for (i = 0; i < 2; i++) {
156 cpu_set(i, phys_cpu_present_map); 156 cpu_set(i, cpu_possible_map);
157 __cpu_number_map[i] = i; 157 __cpu_number_map[i] = i;
158 __cpu_logical_map[i] = i; 158 __cpu_logical_map[i] = i;
159 } 159 }
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index ba5cdebeaf0d..5b47d6b65275 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
76 /* Only let it join in if it's marked enabled */ 76 /* Only let it join in if it's marked enabled */
77 if ((acpu->cpu_info.flags & KLINFO_ENABLE) && 77 if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
78 (tot_cpus_found != NR_CPUS)) { 78 (tot_cpus_found != NR_CPUS)) {
79 cpu_set(cpuid, phys_cpu_present_map); 79 cpu_set(cpuid, cpu_possible_map);
80 alloc_cpupda(cpuid, tot_cpus_found); 80 alloc_cpupda(cpuid, tot_cpus_found);
81 cpus_found++; 81 cpus_found++;
82 tot_cpus_found++; 82 tot_cpus_found++;
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 1327c2746fb7..f024057a35f8 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -134,7 +134,7 @@ void __cpuinit hub_rt_clock_event_init(void)
134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
135 cd->rating = 200; 135 cd->rating = 200;
136 cd->irq = irq; 136 cd->irq = irq;
137 cd->cpumask = cpumask_of_cpu(cpu); 137 cd->cpumask = cpumask_of(cpu);
138 cd->set_next_event = rt_next_event; 138 cd->set_next_event = rt_next_event;
139 cd->set_mode = rt_set_mode; 139 cd->set_mode = rt_set_mode;
140 clockevents_register_device(cd); 140 clockevents_register_device(cd);
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index a35818ed4263..12b465d404df 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq);
50static void disable_bcm1480_irq(unsigned int irq); 50static void disable_bcm1480_irq(unsigned int irq);
51static void ack_bcm1480_irq(unsigned int irq); 51static void ack_bcm1480_irq(unsigned int irq);
52#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
53static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask); 53static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
54#endif 54#endif
55 55
56#ifdef CONFIG_PCI 56#ifdef CONFIG_PCI
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
109} 109}
110 110
111#ifdef CONFIG_SMP 111#ifdef CONFIG_SMP
112static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) 112static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
113{ 113{
114 int i = 0, old_cpu, cpu, int_on, k; 114 int i = 0, old_cpu, cpu, int_on, k;
115 u64 cur_ints; 115 u64 cur_ints;
@@ -117,11 +117,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
117 unsigned long flags; 117 unsigned long flags;
118 unsigned int irq_dirty; 118 unsigned int irq_dirty;
119 119
120 if (cpus_weight(mask) != 1) { 120 if (cpumask_weight(mask) != 1) {
121 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); 121 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
122 return; 122 return;
123 } 123 }
124 i = first_cpu(mask); 124 i = cpumask_first(mask);
125 125
126 /* Convert logical CPU to physical CPU */ 126 /* Convert logical CPU to physical CPU */
127 cpu = cpu_logical_map(i); 127 cpu = cpu_logical_map(i);
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index bd9eeb43ed0e..dddfda8e8294 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -136,7 +136,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
136 136
137/* 137/*
138 * Use CFE to find out how many CPUs are available, setting up 138 * Use CFE to find out how many CPUs are available, setting up
139 * phys_cpu_present_map and the logical/physical mappings. 139 * cpu_possible_map and the logical/physical mappings.
140 * XXXKW will the boot CPU ever not be physical 0? 140 * XXXKW will the boot CPU ever not be physical 0?
141 * 141 *
142 * Common setup before any secondaries are started 142 * Common setup before any secondaries are started
@@ -145,14 +145,14 @@ static void __init bcm1480_smp_setup(void)
145{ 145{
146 int i, num; 146 int i, num;
147 147
148 cpus_clear(phys_cpu_present_map); 148 cpus_clear(cpu_possible_map);
149 cpu_set(0, phys_cpu_present_map); 149 cpu_set(0, cpu_possible_map);
150 __cpu_number_map[0] = 0; 150 __cpu_number_map[0] = 0;
151 __cpu_logical_map[0] = 0; 151 __cpu_logical_map[0] = 0;
152 152
153 for (i = 1, num = 0; i < NR_CPUS; i++) { 153 for (i = 1, num = 0; i < NR_CPUS; i++) {
154 if (cfe_cpu_stop(i) == 0) { 154 if (cfe_cpu_stop(i) == 0) {
155 cpu_set(i, phys_cpu_present_map); 155 cpu_set(i, cpu_possible_map);
156 __cpu_number_map[i] = ++num; 156 __cpu_number_map[i] = ++num;
157 __cpu_logical_map[num] = i; 157 __cpu_logical_map[num] = i;
158 } 158 }
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index a5158483986e..808ac2959b8c 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq);
50static void disable_sb1250_irq(unsigned int irq); 50static void disable_sb1250_irq(unsigned int irq);
51static void ack_sb1250_irq(unsigned int irq); 51static void ack_sb1250_irq(unsigned int irq);
52#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
53static void sb1250_set_affinity(unsigned int irq, cpumask_t mask); 53static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
54#endif 54#endif
55 55
56#ifdef CONFIG_SIBYTE_HAS_LDT 56#ifdef CONFIG_SIBYTE_HAS_LDT
@@ -103,16 +103,16 @@ void sb1250_unmask_irq(int cpu, int irq)
103} 103}
104 104
105#ifdef CONFIG_SMP 105#ifdef CONFIG_SMP
106static void sb1250_set_affinity(unsigned int irq, cpumask_t mask) 106static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
107{ 107{
108 int i = 0, old_cpu, cpu, int_on; 108 int i = 0, old_cpu, cpu, int_on;
109 u64 cur_ints; 109 u64 cur_ints;
110 struct irq_desc *desc = irq_desc + irq; 110 struct irq_desc *desc = irq_desc + irq;
111 unsigned long flags; 111 unsigned long flags;
112 112
113 i = first_cpu(mask); 113 i = cpumask_first(mask);
114 114
115 if (cpus_weight(mask) > 1) { 115 if (cpumask_weight(mask) > 1) {
116 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); 116 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
117 return; 117 return;
118 } 118 }
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 0734b933e969..5950a288a7da 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -124,7 +124,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
124 124
125/* 125/*
126 * Use CFE to find out how many CPUs are available, setting up 126 * Use CFE to find out how many CPUs are available, setting up
127 * phys_cpu_present_map and the logical/physical mappings. 127 * cpu_possible_map and the logical/physical mappings.
128 * XXXKW will the boot CPU ever not be physical 0? 128 * XXXKW will the boot CPU ever not be physical 0?
129 * 129 *
130 * Common setup before any secondaries are started 130 * Common setup before any secondaries are started
@@ -133,14 +133,14 @@ static void __init sb1250_smp_setup(void)
133{ 133{
134 int i, num; 134 int i, num;
135 135
136 cpus_clear(phys_cpu_present_map); 136 cpus_clear(cpu_possible_map);
137 cpu_set(0, phys_cpu_present_map); 137 cpu_set(0, cpu_possible_map);
138 __cpu_number_map[0] = 0; 138 __cpu_number_map[0] = 0;
139 __cpu_logical_map[0] = 0; 139 __cpu_logical_map[0] = 0;
140 140
141 for (i = 1, num = 0; i < NR_CPUS; i++) { 141 for (i = 1, num = 0; i < NR_CPUS; i++) {
142 if (cfe_cpu_stop(i) == 0) { 142 if (cfe_cpu_stop(i) == 0) {
143 cpu_set(i, phys_cpu_present_map); 143 cpu_set(i, cpu_possible_map);
144 __cpu_number_map[i] = ++num; 144 __cpu_number_map[i] = ++num;
145 __cpu_logical_map[num] = i; 145 __cpu_logical_map[num] = i;
146 } 146 }
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 796e3ce28720..69f5f88711cc 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -80,7 +80,7 @@ static void __init sni_a20r_timer_setup(void)
80 struct irqaction *action = &a20r_irqaction; 80 struct irqaction *action = &a20r_irqaction;
81 unsigned int cpu = smp_processor_id(); 81 unsigned int cpu = smp_processor_id();
82 82
83 cd->cpumask = cpumask_of_cpu(cpu); 83 cd->cpumask = cpumask_of(cpu);
84 clockevents_register_device(cd); 84 clockevents_register_device(cd);
85 action->dev_id = cd; 85 action->dev_id = cd;
86 setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction); 86 setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 644a70b1b04e..aacf11d33723 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,6 +11,7 @@ config PARISC
11 select HAVE_OPROFILE 11 select HAVE_OPROFILE
12 select RTC_CLASS 12 select RTC_CLASS
13 select RTC_DRV_PARISC 13 select RTC_DRV_PARISC
14 select INIT_ALL_POSSIBLE
14 help 15 help
15 The PA-RISC microprocessor is designed by Hewlett-Packard and used 16 The PA-RISC microprocessor is designed by Hewlett-Packard and used
16 in many of their workstations & servers (HP9000 700 and 800 series, 17 in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 23ef950df008..4cea935e2f99 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
131 return 0; 131 return 0;
132} 132}
133 133
134static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) 134static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
135{ 135{
136 if (cpu_check_affinity(irq, &dest)) 136 if (cpu_check_affinity(irq, dest))
137 return; 137 return;
138 138
139 irq_desc[irq].affinity = dest; 139 irq_desc[irq].affinity = *dest;
140} 140}
141#endif 141#endif
142 142
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index d47f3975c9c6..80bc000523fa 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -67,21 +67,6 @@ static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is boo
67 67
68static int parisc_max_cpus __read_mostly = 1; 68static int parisc_max_cpus __read_mostly = 1;
69 69
70/* online cpus are ones that we've managed to bring up completely
71 * possible cpus are all valid cpu
72 * present cpus are all detected cpu
73 *
74 * On startup we bring up the "possible" cpus. Since we discover
75 * CPUs later, we add them as hotplug, so the possible cpu mask is
76 * empty in the beginning.
77 */
78
79cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */
80cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */
81
82EXPORT_SYMBOL(cpu_online_map);
83EXPORT_SYMBOL(cpu_possible_map);
84
85DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; 70DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
86 71
87enum ipi_message_type { 72enum ipi_message_type {
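
Illustrative aside, not part of the patch: the parisc, s390, powerpc, sh, sparc and um hunks all stop defining and exporting their own cpu_online_map/cpu_possible_map. This assumes a companion change outside arch/ that centralizes those masks in the core kernel; roughly, and only as a sketch, the generic definitions gated by the INIT_ALL_POSSIBLE option that parisc and s390 now select would look like this.

/* Sketch of the assumed centralized definitions (companion change, not in this diff). */
#include <linux/cpumask.h>
#include <linux/module.h>

#ifdef CONFIG_INIT_ALL_POSSIBLE
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#else
cpumask_t cpu_possible_map __read_mostly;
#endif
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);
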
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac222d0ab12e..23b8b5e36f98 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map)
237 mask = map; 237 mask = map;
238 } 238 }
239 if (irq_desc[irq].chip->set_affinity) 239 if (irq_desc[irq].chip->set_affinity)
240 irq_desc[irq].chip->set_affinity(irq, mask); 240 irq_desc[irq].chip->set_affinity(irq, &mask);
241 else if (irq_desc[irq].action && !(warned++)) 241 else if (irq_desc[irq].action && !(warned++))
242 printk("Cannot set affinity for irq %i\n", irq); 242 printk("Cannot set affinity for irq %i\n", irq);
243 } 243 }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ff9f7010097d..d1165566f064 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -60,13 +60,9 @@
60int smp_hw_index[NR_CPUS]; 60int smp_hw_index[NR_CPUS];
61struct thread_info *secondary_ti; 61struct thread_info *secondary_ti;
62 62
63cpumask_t cpu_possible_map = CPU_MASK_NONE;
64cpumask_t cpu_online_map = CPU_MASK_NONE;
65DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 63DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
66DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; 64DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
67 65
68EXPORT_SYMBOL(cpu_online_map);
69EXPORT_SYMBOL(cpu_possible_map);
70EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 66EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
71EXPORT_PER_CPU_SYMBOL(cpu_core_map); 67EXPORT_PER_CPU_SYMBOL(cpu_core_map);
72 68
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e2ee66b5831d..6f39d35d6f55 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -869,7 +869,7 @@ static void register_decrementer_clockevent(int cpu)
869 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; 869 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
870 870
871 *dec = decrementer_clockevent; 871 *dec = decrementer_clockevent;
872 dec->cpumask = cpumask_of_cpu(cpu); 872 dec->cpumask = cpumask_of(cpu);
873 873
874 printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", 874 printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
875 dec->name, dec->mult, dec->shift, cpu); 875 dec->name, dec->mult, dec->shift, cpu);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index e1904774a70f..424b335a71c8 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq)
332 lpar_xirr_info_set((0xff << 24) | irq); 332 lpar_xirr_info_set((0xff << 24) | irq);
333} 333}
334 334
335static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) 335static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
336{ 336{
337 unsigned int irq; 337 unsigned int irq;
338 int status; 338 int status;
@@ -845,7 +845,7 @@ void xics_migrate_irqs_away(void)
845 845
846 /* Reset affinity to all cpus */ 846 /* Reset affinity to all cpus */
847 irq_desc[virq].affinity = CPU_MASK_ALL; 847 irq_desc[virq].affinity = CPU_MASK_ALL;
848 desc->chip->set_affinity(virq, CPU_MASK_ALL); 848 desc->chip->set_affinity(virq, cpu_all_mask);
849unlock: 849unlock:
850 spin_unlock_irqrestore(&desc->lock, flags); 850 spin_unlock_irqrestore(&desc->lock, flags);
851 } 851 }
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 1890fb085cde..5d7f9f0c93c3 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -817,7 +817,7 @@ static void mpic_end_ipi(unsigned int irq)
817 817
818#endif /* CONFIG_SMP */ 818#endif /* CONFIG_SMP */
819 819
820void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) 820void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
821{ 821{
822 struct mpic *mpic = mpic_from_irq(irq); 822 struct mpic *mpic = mpic_from_irq(irq);
823 unsigned int src = mpic_irq_to_hw(irq); 823 unsigned int src = mpic_irq_to_hw(irq);
@@ -829,7 +829,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
829 } else { 829 } else {
830 cpumask_t tmp; 830 cpumask_t tmp;
831 831
832 cpus_and(tmp, cpumask, cpu_online_map); 832 cpumask_and(&tmp, cpumask, cpu_online_mask);
833 833
834 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 834 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
835 mpic_physmask(cpus_addr(tmp)[0])); 835 mpic_physmask(cpus_addr(tmp)[0]));
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 6209c62a426d..3cef2af10f42 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
36 36
37extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); 37extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
38extern void mpic_set_vector(unsigned int virq, unsigned int vector); 38extern void mpic_set_vector(unsigned int virq, unsigned int vector);
39extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask); 39extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
40 40
41#endif /* _POWERPC_SYSDEV_MPIC_H */ 41#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8116a3328a19..b4aa5869c7f9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -75,6 +75,7 @@ config S390
75 select HAVE_KRETPROBES 75 select HAVE_KRETPROBES
76 select HAVE_KVM if 64BIT 76 select HAVE_KVM if 64BIT
77 select HAVE_ARCH_TRACEHOOK 77 select HAVE_ARCH_TRACEHOOK
78 select INIT_ALL_POSSIBLE
78 79
79source "init/Kconfig" 80source "init/Kconfig"
80 81
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b5595688a477..f03914b8ed2f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -52,12 +52,6 @@
52struct _lowcore *lowcore_ptr[NR_CPUS]; 52struct _lowcore *lowcore_ptr[NR_CPUS];
53EXPORT_SYMBOL(lowcore_ptr); 53EXPORT_SYMBOL(lowcore_ptr);
54 54
55cpumask_t cpu_online_map = CPU_MASK_NONE;
56EXPORT_SYMBOL(cpu_online_map);
57
58cpumask_t cpu_possible_map = CPU_MASK_ALL;
59EXPORT_SYMBOL(cpu_possible_map);
60
61static struct task_struct *current_set[NR_CPUS]; 55static struct task_struct *current_set[NR_CPUS];
62 56
63static u8 smp_cpu_type; 57static u8 smp_cpu_type;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index eccefbbff887..f5bd141c8443 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -154,7 +154,7 @@ void init_cpu_timer(void)
154 cd->min_delta_ns = 1; 154 cd->min_delta_ns = 1;
155 cd->max_delta_ns = LONG_MAX; 155 cd->max_delta_ns = LONG_MAX;
156 cd->rating = 400; 156 cd->rating = 400;
157 cd->cpumask = cpumask_of_cpu(cpu); 157 cd->cpumask = cpumask_of(cpu);
158 cd->set_next_event = s390_next_event; 158 cd->set_next_event = s390_next_event;
159 cd->set_mode = s390_set_mode; 159 cd->set_mode = s390_set_mode;
160 160
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 85b660c17eb0..c24e9c6a1736 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -31,7 +31,7 @@ enum {
31}; 31};
32 32
33void smp_message_recv(unsigned int msg); 33void smp_message_recv(unsigned int msg);
34void smp_timer_broadcast(cpumask_t mask); 34void smp_timer_broadcast(const struct cpumask *mask);
35 35
36void local_timer_interrupt(void); 36void local_timer_interrupt(void);
37void local_timer_setup(unsigned int cpu); 37void local_timer_setup(unsigned int cpu);
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 3c5ad1660bbc..8f4027412614 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -31,12 +31,6 @@
31int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 31int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
32int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 32int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
33 33
34cpumask_t cpu_possible_map;
35EXPORT_SYMBOL(cpu_possible_map);
36
37cpumask_t cpu_online_map;
38EXPORT_SYMBOL(cpu_online_map);
39
40static inline void __init smp_store_cpu_info(unsigned int cpu) 34static inline void __init smp_store_cpu_info(unsigned int cpu)
41{ 35{
42 struct sh_cpuinfo *c = cpu_data + cpu; 36 struct sh_cpuinfo *c = cpu_data + cpu;
@@ -190,11 +184,11 @@ void arch_send_call_function_single_ipi(int cpu)
190 plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); 184 plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
191} 185}
192 186
193void smp_timer_broadcast(cpumask_t mask) 187void smp_timer_broadcast(const struct cpumask *mask)
194{ 188{
195 int cpu; 189 int cpu;
196 190
197 for_each_cpu_mask(cpu, mask) 191 for_each_cpu(cpu, mask)
198 plat_send_ipi(cpu, SMP_MSG_TIMER); 192 plat_send_ipi(cpu, SMP_MSG_TIMER);
199} 193}
200 194
diff --git a/arch/sh/kernel/timers/timer-broadcast.c b/arch/sh/kernel/timers/timer-broadcast.c
index c2317635230f..96e8eaea1e62 100644
--- a/arch/sh/kernel/timers/timer-broadcast.c
+++ b/arch/sh/kernel/timers/timer-broadcast.c
@@ -51,7 +51,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
51 clk->mult = 1; 51 clk->mult = 1;
52 clk->set_mode = dummy_timer_set_mode; 52 clk->set_mode = dummy_timer_set_mode;
53 clk->broadcast = smp_timer_broadcast; 53 clk->broadcast = smp_timer_broadcast;
54 clk->cpumask = cpumask_of_cpu(cpu); 54 clk->cpumask = cpumask_of(cpu);
55 55
56 clockevents_register_device(clk); 56 clockevents_register_device(clk);
57} 57}
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 3c61ddd4d43e..0db3f9510336 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -263,7 +263,7 @@ static int tmu_timer_init(void)
263 tmu0_clockevent.min_delta_ns = 263 tmu0_clockevent.min_delta_ns =
264 clockevent_delta2ns(1, &tmu0_clockevent); 264 clockevent_delta2ns(1, &tmu0_clockevent);
265 265
266 tmu0_clockevent.cpumask = cpumask_of_cpu(0); 266 tmu0_clockevent.cpumask = cpumask_of(0);
267 267
268 clockevents_register_device(&tmu0_clockevent); 268 clockevents_register_device(&tmu0_clockevent);
269 269
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index a8180e546a48..8408d9d2a662 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -29,8 +29,6 @@
29 */ 29 */
30 30
31extern unsigned char boot_cpu_id; 31extern unsigned char boot_cpu_id;
32extern cpumask_t phys_cpu_present_map;
33#define cpu_possible_map phys_cpu_present_map
34 32
35typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, 33typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long); 34 unsigned long, unsigned long);
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index e396c1f17a92..1e5ac4e282e1 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -39,8 +39,6 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
39unsigned char boot_cpu_id = 0; 39unsigned char boot_cpu_id = 0;
40unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */ 40unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
41 41
42cpumask_t cpu_online_map = CPU_MASK_NONE;
43cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
44cpumask_t smp_commenced_mask = CPU_MASK_NONE; 42cpumask_t smp_commenced_mask = CPU_MASK_NONE;
45 43
46/* The only guaranteed locking primitive available on all Sparc 44/* The only guaranteed locking primitive available on all Sparc
@@ -334,7 +332,7 @@ void __init smp_setup_cpu_possible_map(void)
334 instance = 0; 332 instance = 0;
335 while (!cpu_find_by_instance(instance, NULL, &mid)) { 333 while (!cpu_find_by_instance(instance, NULL, &mid)) {
336 if (mid < NR_CPUS) { 334 if (mid < NR_CPUS) {
337 cpu_set(mid, phys_cpu_present_map); 335 cpu_set(mid, cpu_possible_map);
338 cpu_set(mid, cpu_present_map); 336 cpu_set(mid, cpu_present_map);
339 } 337 }
340 instance++; 338 instance++;
@@ -354,7 +352,7 @@ void __init smp_prepare_boot_cpu(void)
354 352
355 current_thread_info()->cpu = cpuid; 353 current_thread_info()->cpu = cpuid;
356 cpu_set(cpuid, cpu_online_map); 354 cpu_set(cpuid, cpu_online_map);
357 cpu_set(cpuid, phys_cpu_present_map); 355 cpu_set(cpuid, cpu_possible_map);
358} 356}
359 357
360int __cpuinit __cpu_up(unsigned int cpu) 358int __cpuinit __cpu_up(unsigned int cpu)
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index b0dfff848653..32d11a5fe3a8 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -113,10 +113,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
113#ifdef CONFIG_SMP 113#ifdef CONFIG_SMP
114/* IRQ implementation. */ 114/* IRQ implementation. */
115EXPORT_SYMBOL(synchronize_irq); 115EXPORT_SYMBOL(synchronize_irq);
116
117/* CPU online map and active count. */
118EXPORT_SYMBOL(cpu_online_map);
119EXPORT_SYMBOL(phys_cpu_present_map);
120#endif 116#endif
121 117
122EXPORT_SYMBOL(__udelay); 118EXPORT_SYMBOL(__udelay);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 52fc836f464d..4aaf18e83c8c 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
312 } 312 }
313} 313}
314 314
315static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask) 315static void sun4u_set_affinity(unsigned int virt_irq,
316 const struct cpumask *mask)
316{ 317{
317 sun4u_irq_enable(virt_irq); 318 sun4u_irq_enable(virt_irq);
318} 319}
@@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq)
362 ino, err); 363 ino, err);
363} 364}
364 365
365static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) 366static void sun4v_set_affinity(unsigned int virt_irq,
367 const struct cpumask *mask)
366{ 368{
367 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 369 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
368 unsigned long cpuid = irq_choose_cpu(virt_irq); 370 unsigned long cpuid = irq_choose_cpu(virt_irq);
@@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq)
429 dev_handle, dev_ino, err); 431 dev_handle, dev_ino, err);
430} 432}
431 433
432static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) 434static void sun4v_virt_set_affinity(unsigned int virt_irq,
435 const struct cpumask *mask)
433{ 436{
434 unsigned long cpuid, dev_handle, dev_ino; 437 unsigned long cpuid, dev_handle, dev_ino;
435 int err; 438 int err;
@@ -788,7 +791,7 @@ void fixup_irqs(void)
788 !(irq_desc[irq].status & IRQ_PER_CPU)) { 791 !(irq_desc[irq].status & IRQ_PER_CPU)) {
789 if (irq_desc[irq].chip->set_affinity) 792 if (irq_desc[irq].chip->set_affinity)
790 irq_desc[irq].chip->set_affinity(irq, 793 irq_desc[irq].chip->set_affinity(irq,
791 irq_desc[irq].affinity); 794 &irq_desc[irq].affinity);
792 } 795 }
793 spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 796 spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
794 } 797 }
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 0f616ae3246c..df2efb7fc14c 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -780,7 +780,7 @@ out:
780 if (nid != -1) { 780 if (nid != -1) {
781 cpumask_t numa_mask = node_to_cpumask(nid); 781 cpumask_t numa_mask = node_to_cpumask(nid);
782 782
783 irq_set_affinity(irq, numa_mask); 783 irq_set_affinity(irq, &numa_mask);
784 } 784 }
785 785
786 return irq; 786 return irq;
diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c
index 2e680f34f727..0d0cd815e83e 100644
--- a/arch/sparc64/kernel/pci_msi.c
+++ b/arch/sparc64/kernel/pci_msi.c
@@ -288,7 +288,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
288 if (nid != -1) { 288 if (nid != -1) {
289 cpumask_t numa_mask = node_to_cpumask(nid); 289 cpumask_t numa_mask = node_to_cpumask(nid);
290 290
291 irq_set_affinity(irq, numa_mask); 291 irq_set_affinity(irq, &numa_mask);
292 } 292 }
293 err = request_irq(irq, sparc64_msiq_interrupt, 0, 293 err = request_irq(irq, sparc64_msiq_interrupt, 0,
294 "MSIQ", 294 "MSIQ",
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index f500b0618bb0..a97b8822c22c 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -49,14 +49,10 @@
49 49
50int sparc64_multi_core __read_mostly; 50int sparc64_multi_core __read_mostly;
51 51
52cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
53cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
54DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 52DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
55cpumask_t cpu_core_map[NR_CPUS] __read_mostly = 53cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
56 { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 54 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
57 55
58EXPORT_SYMBOL(cpu_possible_map);
59EXPORT_SYMBOL(cpu_online_map);
60EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 56EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
61EXPORT_SYMBOL(cpu_core_map); 57EXPORT_SYMBOL(cpu_core_map);
62 58
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 141da3759091..9df8f095a8b1 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -763,7 +763,7 @@ void __devinit setup_sparc64_timer(void)
763 sevt = &__get_cpu_var(sparc64_events); 763 sevt = &__get_cpu_var(sparc64_events);
764 764
765 memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); 765 memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
766 sevt->cpumask = cpumask_of_cpu(smp_processor_id()); 766 sevt->cpumask = cpumask_of(smp_processor_id());
767 767
768 clockevents_register_device(sevt); 768 clockevents_register_device(sevt);
769} 769}
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 045772142844..98351c78bc81 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -25,13 +25,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
25#include "irq_user.h" 25#include "irq_user.h"
26#include "os.h" 26#include "os.h"
27 27
28/* CPU online map, set by smp_boot_cpus */
29cpumask_t cpu_online_map = CPU_MASK_NONE;
30cpumask_t cpu_possible_map = CPU_MASK_NONE;
31
32EXPORT_SYMBOL(cpu_online_map);
33EXPORT_SYMBOL(cpu_possible_map);
34
35/* Per CPU bogomips and other parameters 28/* Per CPU bogomips and other parameters
36 * The only piece used here is the ipi pipe, which is set before SMP is 29 * The only piece used here is the ipi pipe, which is set before SMP is
37 * started and never changed. 30 * started and never changed.
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 47f04f4a3464..b13a87a3ec95 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ static int itimer_next_event(unsigned long delta,
50static struct clock_event_device itimer_clockevent = { 50static struct clock_event_device itimer_clockevent = {
51 .name = "itimer", 51 .name = "itimer",
52 .rating = 250, 52 .rating = 250,
53 .cpumask = CPU_MASK_ALL, 53 .cpumask = cpu_all_mask,
54 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 54 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
55 .set_mode = itimer_set_mode, 55 .set_mode = itimer_set_mode,
56 .set_next_event = itimer_next_event, 56 .set_next_event = itimer_next_event,
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 16f94879b525..b2cef49f3085 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta,
141 struct clock_event_device *evt); 141 struct clock_event_device *evt);
142static void lapic_timer_setup(enum clock_event_mode mode, 142static void lapic_timer_setup(enum clock_event_mode mode,
143 struct clock_event_device *evt); 143 struct clock_event_device *evt);
144static void lapic_timer_broadcast(cpumask_t mask); 144static void lapic_timer_broadcast(const struct cpumask *mask);
145static void apic_pm_activate(void); 145static void apic_pm_activate(void);
146 146
147/* 147/*
@@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
453/* 453/*
454 * Local APIC timer broadcast function 454 * Local APIC timer broadcast function
455 */ 455 */
456static void lapic_timer_broadcast(cpumask_t mask) 456static void lapic_timer_broadcast(const struct cpumask *mask)
457{ 457{
458#ifdef CONFIG_SMP 458#ifdef CONFIG_SMP
459 send_IPI_mask(mask, LOCAL_TIMER_VECTOR); 459 send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
460#endif 460#endif
461} 461}
462 462
@@ -469,7 +469,7 @@ static void __cpuinit setup_APIC_timer(void)
469 struct clock_event_device *levt = &__get_cpu_var(lapic_events); 469 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
470 470
471 memcpy(levt, &lapic_clockevent, sizeof(*levt)); 471 memcpy(levt, &lapic_clockevent, sizeof(*levt));
472 levt->cpumask = cpumask_of_cpu(smp_processor_id()); 472 levt->cpumask = cpumask_of(smp_processor_id());
473 473
474 clockevents_register_device(levt); 474 clockevents_register_device(levt);
475} 475}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 3f46afbb1cf1..43ea612d3e9d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -626,8 +626,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
626 cpumask_t *mask = &this_leaf->shared_cpu_map; 626 cpumask_t *mask = &this_leaf->shared_cpu_map;
627 627
628 n = type? 628 n = type?
629 cpulist_scnprintf(buf, len-2, *mask): 629 cpulist_scnprintf(buf, len-2, mask) :
630 cpumask_scnprintf(buf, len-2, *mask); 630 cpumask_scnprintf(buf, len-2, mask);
631 buf[n++] = '\n'; 631 buf[n++] = '\n';
632 buf[n] = '\0'; 632 buf[n] = '\0';
633 } 633 }
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 067d8de913f6..e76d7e272974 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -246,7 +246,7 @@ static void hpet_legacy_clockevent_register(void)
246 * Start hpet with the boot cpu mask and make it 246 * Start hpet with the boot cpu mask and make it
247 * global after the IO_APIC has been initialized. 247 * global after the IO_APIC has been initialized.
248 */ 248 */
249 hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 249 hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
250 clockevents_register_device(&hpet_clockevent); 250 clockevents_register_device(&hpet_clockevent);
251 global_clock_event = &hpet_clockevent; 251 global_clock_event = &hpet_clockevent;
252 printk(KERN_DEBUG "hpet clockevent registered\n"); 252 printk(KERN_DEBUG "hpet clockevent registered\n");
@@ -301,7 +301,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
301 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); 301 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
302 hpet_setup_msi_irq(hdev->irq); 302 hpet_setup_msi_irq(hdev->irq);
303 disable_irq(hdev->irq); 303 disable_irq(hdev->irq);
304 irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); 304 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
305 enable_irq(hdev->irq); 305 enable_irq(hdev->irq);
306 } 306 }
307 break; 307 break;
@@ -449,7 +449,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
449 return -1; 449 return -1;
450 450
451 disable_irq(dev->irq); 451 disable_irq(dev->irq);
452 irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); 452 irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
453 enable_irq(dev->irq); 453 enable_irq(dev->irq);
454 454
455 printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", 455 printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
@@ -500,7 +500,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
500 /* 5 usec minimum reprogramming delta. */ 500 /* 5 usec minimum reprogramming delta. */
501 evt->min_delta_ns = 5000; 501 evt->min_delta_ns = 5000;
502 502
503 evt->cpumask = cpumask_of_cpu(hdev->cpu); 503 evt->cpumask = cpumask_of(hdev->cpu);
504 clockevents_register_device(evt); 504 clockevents_register_device(evt);
505} 505}
506 506
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c1b5e3ece1f2..10f92fb532f3 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -114,7 +114,7 @@ void __init setup_pit_timer(void)
114 * Start pit with the boot cpu mask and make it global after the 114 * Start pit with the boot cpu mask and make it global after the
115 * IO_APIC has been initialized. 115 * IO_APIC has been initialized.
116 */ 116 */
117 pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 117 pit_clockevent.cpumask = cpumask_of(smp_processor_id());
118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
119 pit_clockevent.shift); 119 pit_clockevent.shift);
120 pit_clockevent.max_delta_ns = 120 pit_clockevent.max_delta_ns =
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index a1a2e070f31a..d7f0993b8056 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -398,7 +398,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
398 398
399static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); 399static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
400 400
401static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) 401static void set_ioapic_affinity_irq_desc(struct irq_desc *desc,
402 const struct cpumask *mask)
402{ 403{
403 struct irq_cfg *cfg; 404 struct irq_cfg *cfg;
404 unsigned long flags; 405 unsigned long flags;
@@ -406,18 +407,17 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
406 cpumask_t tmp; 407 cpumask_t tmp;
407 unsigned int irq; 408 unsigned int irq;
408 409
409 cpus_and(tmp, mask, cpu_online_map); 410 if (!cpumask_intersects(mask, cpu_online_mask))
410 if (cpus_empty(tmp))
411 return; 411 return;
412 412
413 irq = desc->irq; 413 irq = desc->irq;
414 cfg = desc->chip_data; 414 cfg = desc->chip_data;
415 if (assign_irq_vector(irq, cfg, mask)) 415 if (assign_irq_vector(irq, cfg, *mask))
416 return; 416 return;
417 417
418 set_extra_move_desc(desc, mask); 418 set_extra_move_desc(desc, *mask);
419 419
420 cpus_and(tmp, cfg->domain, mask); 420 cpumask_and(&tmp, &cfg->domain, mask);
421 dest = cpu_mask_to_apicid(tmp); 421 dest = cpu_mask_to_apicid(tmp);
422 /* 422 /*
423 * Only the high 8 bits are valid. 423 * Only the high 8 bits are valid.
@@ -426,11 +426,12 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
426 426
427 spin_lock_irqsave(&ioapic_lock, flags); 427 spin_lock_irqsave(&ioapic_lock, flags);
428 __target_IO_APIC_irq(irq, dest, cfg); 428 __target_IO_APIC_irq(irq, dest, cfg);
429 desc->affinity = mask; 429 cpumask_copy(&desc->affinity, mask);
430 spin_unlock_irqrestore(&ioapic_lock, flags); 430 spin_unlock_irqrestore(&ioapic_lock, flags);
431} 431}
432 432
433static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) 433static void set_ioapic_affinity_irq(unsigned int irq,
434 const struct cpumask *mask)
434{ 435{
435 struct irq_desc *desc; 436 struct irq_desc *desc;
436 437
@@ -2272,7 +2273,7 @@ static void ir_irq_migration(struct work_struct *work)
2272 continue; 2273 continue;
2273 } 2274 }
2274 2275
2275 desc->chip->set_affinity(irq, desc->pending_mask); 2276 desc->chip->set_affinity(irq, &desc->pending_mask);
2276 spin_unlock_irqrestore(&desc->lock, flags); 2277 spin_unlock_irqrestore(&desc->lock, flags);
2277 } 2278 }
2278 } 2279 }
@@ -2281,18 +2282,20 @@ static void ir_irq_migration(struct work_struct *work)
2281/* 2282/*
2282 * Migrates the IRQ destination in the process context. 2283 * Migrates the IRQ destination in the process context.
2283 */ 2284 */
2284static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) 2285static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2286 const struct cpumask *mask)
2285{ 2287{
2286 if (desc->status & IRQ_LEVEL) { 2288 if (desc->status & IRQ_LEVEL) {
2287 desc->status |= IRQ_MOVE_PENDING; 2289 desc->status |= IRQ_MOVE_PENDING;
2288 desc->pending_mask = mask; 2290 cpumask_copy(&desc->pending_mask, mask);
2289 migrate_irq_remapped_level_desc(desc); 2291 migrate_irq_remapped_level_desc(desc);
2290 return; 2292 return;
2291 } 2293 }
2292 2294
2293 migrate_ioapic_irq_desc(desc, mask); 2295 migrate_ioapic_irq_desc(desc, mask);
2294} 2296}
2295static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) 2297static void set_ir_ioapic_affinity_irq(unsigned int irq,
2298 const struct cpumask *mask)
2296{ 2299{
2297 struct irq_desc *desc = irq_to_desc(irq); 2300 struct irq_desc *desc = irq_to_desc(irq);
2298 2301
@@ -3146,7 +3149,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3146} 3149}
3147 3150
3148#ifdef CONFIG_SMP 3151#ifdef CONFIG_SMP
3149static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3152static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3150{ 3153{
3151 struct irq_desc *desc = irq_to_desc(irq); 3154 struct irq_desc *desc = irq_to_desc(irq);
3152 struct irq_cfg *cfg; 3155 struct irq_cfg *cfg;
@@ -3154,17 +3157,16 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3154 unsigned int dest; 3157 unsigned int dest;
3155 cpumask_t tmp; 3158 cpumask_t tmp;
3156 3159
3157 cpus_and(tmp, mask, cpu_online_map); 3160 if (!cpumask_intersects(mask, cpu_online_mask))
3158 if (cpus_empty(tmp))
3159 return; 3161 return;
3160 3162
3161 cfg = desc->chip_data; 3163 cfg = desc->chip_data;
3162 if (assign_irq_vector(irq, cfg, mask)) 3164 if (assign_irq_vector(irq, cfg, *mask))
3163 return; 3165 return;
3164 3166
3165 set_extra_move_desc(desc, mask); 3167 set_extra_move_desc(desc, *mask);
3166 3168
3167 cpus_and(tmp, cfg->domain, mask); 3169 cpumask_and(&tmp, &cfg->domain, mask);
3168 dest = cpu_mask_to_apicid(tmp); 3170 dest = cpu_mask_to_apicid(tmp);
3169 3171
3170 read_msi_msg_desc(desc, &msg); 3172 read_msi_msg_desc(desc, &msg);
@@ -3175,14 +3177,15 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3175 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3177 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3176 3178
3177 write_msi_msg_desc(desc, &msg); 3179 write_msi_msg_desc(desc, &msg);
3178 desc->affinity = mask; 3180 cpumask_copy(&desc->affinity, mask);
3179} 3181}
3180#ifdef CONFIG_INTR_REMAP 3182#ifdef CONFIG_INTR_REMAP
3181/* 3183/*
3182 * Migrate the MSI irq to another cpumask. This migration is 3184 * Migrate the MSI irq to another cpumask. This migration is
3183 * done in the process context using interrupt-remapping hardware. 3185 * done in the process context using interrupt-remapping hardware.
3184 */ 3186 */
3185static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3187static void ir_set_msi_irq_affinity(unsigned int irq,
3188 const struct cpumask *mask)
3186{ 3189{
3187 struct irq_desc *desc = irq_to_desc(irq); 3190 struct irq_desc *desc = irq_to_desc(irq);
3188 struct irq_cfg *cfg; 3191 struct irq_cfg *cfg;
@@ -3190,20 +3193,19 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3190 cpumask_t tmp, cleanup_mask; 3193 cpumask_t tmp, cleanup_mask;
3191 struct irte irte; 3194 struct irte irte;
3192 3195
3193 cpus_and(tmp, mask, cpu_online_map); 3196 if (!cpumask_intersects(mask, cpu_online_mask))
3194 if (cpus_empty(tmp))
3195 return; 3197 return;
3196 3198
3197 if (get_irte(irq, &irte)) 3199 if (get_irte(irq, &irte))
3198 return; 3200 return;
3199 3201
3200 cfg = desc->chip_data; 3202 cfg = desc->chip_data;
3201 if (assign_irq_vector(irq, cfg, mask)) 3203 if (assign_irq_vector(irq, cfg, *mask))
3202 return; 3204 return;
3203 3205
 3204 set_extra_move_desc(desc, mask); 3206 set_extra_move_desc(desc, *mask);
3205 3207
3206 cpus_and(tmp, cfg->domain, mask); 3208 cpumask_and(&tmp, &cfg->domain, mask);
3207 dest = cpu_mask_to_apicid(tmp); 3209 dest = cpu_mask_to_apicid(tmp);
3208 3210
3209 irte.vector = cfg->vector; 3211 irte.vector = cfg->vector;
@@ -3226,7 +3228,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3226 cfg->move_in_progress = 0; 3228 cfg->move_in_progress = 0;
3227 } 3229 }
3228 3230
3229 desc->affinity = mask; 3231 cpumask_copy(&desc->affinity, mask);
3230} 3232}
3231 3233
3232#endif 3234#endif
@@ -3417,7 +3419,7 @@ void arch_teardown_msi_irq(unsigned int irq)
3417 3419
3418#ifdef CONFIG_DMAR 3420#ifdef CONFIG_DMAR
3419#ifdef CONFIG_SMP 3421#ifdef CONFIG_SMP
3420static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) 3422static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3421{ 3423{
3422 struct irq_desc *desc = irq_to_desc(irq); 3424 struct irq_desc *desc = irq_to_desc(irq);
3423 struct irq_cfg *cfg; 3425 struct irq_cfg *cfg;
@@ -3425,17 +3427,16 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3425 unsigned int dest; 3427 unsigned int dest;
3426 cpumask_t tmp; 3428 cpumask_t tmp;
3427 3429
3428 cpus_and(tmp, mask, cpu_online_map); 3430 if (!cpumask_intersects(mask, cpu_online_mask))
3429 if (cpus_empty(tmp))
3430 return; 3431 return;
3431 3432
3432 cfg = desc->chip_data; 3433 cfg = desc->chip_data;
3433 if (assign_irq_vector(irq, cfg, mask)) 3434 if (assign_irq_vector(irq, cfg, *mask))
3434 return; 3435 return;
3435 3436
3436 set_extra_move_desc(desc, mask); 3437 set_extra_move_desc(desc, *mask);
3437 3438
3438 cpus_and(tmp, cfg->domain, mask); 3439 cpumask_and(&tmp, &cfg->domain, mask);
3439 dest = cpu_mask_to_apicid(tmp); 3440 dest = cpu_mask_to_apicid(tmp);
3440 3441
3441 dmar_msi_read(irq, &msg); 3442 dmar_msi_read(irq, &msg);
@@ -3446,7 +3447,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3446 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3447 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3447 3448
3448 dmar_msi_write(irq, &msg); 3449 dmar_msi_write(irq, &msg);
3449 desc->affinity = mask; 3450 cpumask_copy(&desc->affinity, mask);
3450} 3451}
3451 3452
3452#endif /* CONFIG_SMP */ 3453#endif /* CONFIG_SMP */
@@ -3480,7 +3481,7 @@ int arch_setup_dmar_msi(unsigned int irq)
3480#ifdef CONFIG_HPET_TIMER 3481#ifdef CONFIG_HPET_TIMER
3481 3482
3482#ifdef CONFIG_SMP 3483#ifdef CONFIG_SMP
3483static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) 3484static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3484{ 3485{
3485 struct irq_desc *desc = irq_to_desc(irq); 3486 struct irq_desc *desc = irq_to_desc(irq);
3486 struct irq_cfg *cfg; 3487 struct irq_cfg *cfg;
@@ -3488,17 +3489,16 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3488 unsigned int dest; 3489 unsigned int dest;
3489 cpumask_t tmp; 3490 cpumask_t tmp;
3490 3491
3491 cpus_and(tmp, mask, cpu_online_map); 3492 if (!cpumask_intersects(mask, cpu_online_mask))
3492 if (cpus_empty(tmp))
3493 return; 3493 return;
3494 3494
3495 cfg = desc->chip_data; 3495 cfg = desc->chip_data;
3496 if (assign_irq_vector(irq, cfg, mask)) 3496 if (assign_irq_vector(irq, cfg, *mask))
3497 return; 3497 return;
3498 3498
3499 set_extra_move_desc(desc, mask); 3499 set_extra_move_desc(desc, *mask);
3500 3500
3501 cpus_and(tmp, cfg->domain, mask); 3501 cpumask_and(&tmp, &cfg->domain, mask);
3502 dest = cpu_mask_to_apicid(tmp); 3502 dest = cpu_mask_to_apicid(tmp);
3503 3503
3504 hpet_msi_read(irq, &msg); 3504 hpet_msi_read(irq, &msg);
@@ -3509,7 +3509,7 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	hpet_msi_write(irq, &msg);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -3564,28 +3564,27 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 	write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
 	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, *mask))
 		return;
 
-	set_extra_move_desc(desc, mask);
+	set_extra_move_desc(desc, *mask);
 
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif
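The *mask dereferences in the converted setters exist because helpers such as assign_irq_vector() and set_extra_move_desc() apparently still take a cpumask_t by value at this point in the series, so a caller that now holds a pointer has to dereference it (and copy the mask) at the call site. A minimal sketch of that bridging follows, with simplified stand-in types and helper bodies.

#include <stdio.h>

struct cpumask { unsigned long bits; };

/* Not yet converted: still takes the whole mask by value. */
static int assign_irq_vector(int irq, struct cpumask mask)
{
	printf("irq %d: picking a vector from mask 0x%lx\n", irq, mask.bits);
	return 0;
}

/* Already converted: receives the mask read-only, by reference. */
static void set_affinity(int irq, const struct cpumask *mask)
{
	/* Bridge to the unconverted helper by dereferencing (copies the mask). */
	if (assign_irq_vector(irq, *mask))
		return;
}

int main(void)
{
	struct cpumask wanted = { 0x2 };	/* CPU 1 */

	set_affinity(16, &wanted);
	return 0;
}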
@@ -3928,10 +3927,10 @@ void __init setup_ioapic_dest(void)
 
 #ifdef CONFIG_INTR_REMAP
 			if (intr_remapping_enabled)
-				set_ir_ioapic_affinity_irq_desc(desc, mask);
+				set_ir_ioapic_affinity_irq_desc(desc, &mask);
 			else
 #endif
-				set_ioapic_affinity_irq_desc(desc, mask);
 		}
 
 	}
+				set_ioapic_affinity_irq_desc(desc, &mask);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 119fc9c8ff7f..9cf9cbbf7a02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -253,7 +253,7 @@ void fixup_irqs(cpumask_t map)
 			mask = map;
 		}
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, &mask);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 900009c70591..27f2307b0a34 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -118,7 +118,7 @@ void fixup_irqs(cpumask_t map)
 			desc->chip->mask(irq);
 
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, &mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
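Both fixup_irqs() hunks show the caller side of the new signature: the function still computes a local cpumask_t, so once chip->set_affinity() takes a const struct cpumask * the call passes &mask instead of the mask value. A minimal sketch of that caller/callback shape, with simplified stand-in types (the real struct irq_chip has many more hooks).

#include <stdio.h>

struct cpumask { unsigned long bits; };

/* Stand-in for struct irq_chip: only the hook this diff touches. */
struct irq_chip {
	void (*set_affinity)(unsigned int irq, const struct cpumask *mask);
};

static void demo_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	printf("irq %u -> mask 0x%lx\n", irq, mask->bits);
}

static struct irq_chip chip = { .set_affinity = demo_set_affinity };

/* Caller keeps a local mask value and passes its address, as in fixup_irqs(). */
static void fixup_one_irq(unsigned int irq, struct cpumask map)
{
	struct cpumask mask = map;		/* fall back to the full map */

	if (chip.set_affinity)
		chip.set_affinity(irq, &mask);	/* previously passed the value */
}

int main(void)
{
	struct cpumask map = { 0xf };		/* CPUs 0-3 */

	fixup_one_irq(9, map);
	return 0;
}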
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 3b599518c322..c12314c9e86f 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = {
 	.set_mode = mfgpt_set_mode,
 	.set_next_event = mfgpt_next_event,
 	.rating = 250,
-	.cpumask = CPU_MASK_ALL,
+	.cpumask = cpu_all_mask,
 	.shift = 32
 };
 
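This hunk reflects the clockevent cpumask becoming a pointer: the static initializer names cpu_all_mask (a pointer to an all-CPUs mask) rather than the struct constant CPU_MASK_ALL. A minimal sketch of a pointer-valued cpumask field in a static initializer; the struct layout and the cpu_all_mask definition below are simplified stand-ins, not the kernel's.

#include <stdio.h>

struct cpumask { unsigned long bits; };

/* Stand-in for the kernel's all-CPUs mask and the pointer the code now uses. */
static const struct cpumask cpu_all_bits = { ~0UL };
#define cpu_all_mask (&cpu_all_bits)

/* Stand-in for struct clock_event_device: only the fields in this hunk. */
struct clock_event_device {
	int rating;
	const struct cpumask *cpumask;	/* was an embedded cpumask_t value */
	int shift;
};

static struct clock_event_device demo_clockevent = {
	.rating		= 250,
	.cpumask	= cpu_all_mask,	/* was: .cpumask = CPU_MASK_ALL */
	.shift		= 32,
};

int main(void)
{
	printf("clockevent may fire on mask 0x%lx\n",
	       demo_clockevent.cpumask->bits);
	return 0;
}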
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ae0c0d3bb770..1c2084291f97 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -282,7 +282,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 	else
 		cpu_clear(cpu, *mask);
 
-	cpulist_scnprintf(buf, sizeof(buf), *mask);
+	cpulist_scnprintf(buf, sizeof(buf), mask);
 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
 		enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
 }
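numa_set_cpumask() already holds a cpumask_t * here (note the cpu_clear(cpu, *mask) context line), so once cpulist_scnprintf() expects a const struct cpumask * the dereference is simply dropped. Below is a minimal model of a cpulist-style formatter taking the mask by pointer; the "0-2,4" output style loosely follows the kernel's but the implementation is a simplified stand-in.

#include <stdio.h>

#define NR_CPUS 8

struct cpumask { unsigned long bits; };

static int cpu_isset(int cpu, const struct cpumask *m)
{
	return (m->bits >> cpu) & 1UL;
}

/* Minimal "0-2,4"-style formatter taking the mask by pointer. */
static int cpulist_scnprintf(char *buf, size_t len, const struct cpumask *mask)
{
	int n = 0, cpu = 0;

	while (cpu < NR_CPUS) {
		if (!cpu_isset(cpu, mask)) {
			cpu++;
			continue;
		}
		int first = cpu;
		while (cpu + 1 < NR_CPUS && cpu_isset(cpu + 1, mask))
			cpu++;
		n += snprintf(buf + n, len - n, "%s%d", n ? "," : "", first);
		if (cpu > first)
			n += snprintf(buf + n, len - n, "-%d", cpu);
		cpu++;
	}
	return n;
}

int main(void)
{
	struct cpumask mask = { 0x17 };	/* CPUs 0, 1, 2 and 4 */
	char buf[64];

	cpulist_scnprintf(buf, sizeof(buf), &mask);	/* pointer, no dereference */
	printf("mask now %s\n", buf);	/* prints "mask now 0-2,4" */
	return 0;
}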
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0e9f446269f4..9d58134e0231 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,14 +102,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-/* bitmap of online cpus */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 254ee07f8635..c4c1f9e09402 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void)
 	/* Upper bound is clockevent's use of ulong for cycle deltas. */
 	evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt);
 	evt->min_delta_ns = clockevent_delta2ns(1, evt);
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 
 	printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n",
 		evt->name, evt->mult, evt->shift);
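With evt->cpumask now a pointer, the per-CPU clockevent points at cpumask_of(cpu), a const struct cpumask * describing just that CPU, where cpumask_of_cpu(cpu) used to produce a whole cpumask_t value; the lguest and xen hunks below make the same substitution. A minimal sketch of a cpumask_of()-style accessor, with a simplified stand-in mask type and lookup table (the kernel derives these pointers differently).

#include <stdio.h>

#define NR_CPUS 4

struct cpumask { unsigned long bits; };

/* One single-CPU mask per CPU; cpumask_of(cpu) hands out a pointer into it. */
static const struct cpumask one_cpu_masks[NR_CPUS] = {
	{ 1UL << 0 }, { 1UL << 1 }, { 1UL << 2 }, { 1UL << 3 },
};

static const struct cpumask *cpumask_of(int cpu)
{
	return &one_cpu_masks[cpu];
}

/* The field this diff touches: a pointer rather than an embedded mask. */
struct clock_event_device { const struct cpumask *cpumask; };

int main(void)
{
	struct clock_event_device evt;
	int cpu = 2;

	evt.cpumask = cpumask_of(cpu);	/* was: evt.cpumask = cpumask_of_cpu(cpu) */
	printf("clockevent bound to mask 0x%lx\n", evt.cpumask->bits);
	return 0;
}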
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a5d8e1ace1cf..104c8220a383 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -737,7 +737,7 @@ static void lguest_time_init(void)
 
 	/* We can't set cpumask in the initializer: damn C limitations! Set it
 	 * here and register our timer device. */
-	lguest_clockevent.cpumask = cpumask_of_cpu(0);
+	lguest_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&lguest_clockevent);
 
 	/* Finally, we unblock the timer interrupt. */
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 52145007bd7e..9c990185e9f2 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -63,11 +63,6 @@ static int voyager_extended_cpus = 1;
 /* Used for the invalidate map that's also checked in the spinlock */
 static volatile unsigned long smp_invalidate_needed;
 
-/* Bitmask of currently online CPUs - used by setup.c for
-   /proc/cpuinfo, visible externally but still physical */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
@@ -218,8 +213,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c9f7cda48ed7..65d75a6be0ba 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -437,7 +437,7 @@ void xen_setup_timer(int cpu)
 	evt = &per_cpu(xen_clock_events, cpu);
 	memcpy(evt, xen_clockevent, sizeof(*evt));
 
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 	evt->irq = irq;
 
 	setup_runstate_info(cpu);