author     Linus Torvalds <torvalds@linux-foundation.org>  2009-01-02 14:44:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-02 14:44:09 -0500
commit     b840d79631c882786925303c2b0f4fefc31845ed
tree       cda60a95d4507fe1321fc285af38982d7eb9693b /arch
parent     597b0d21626da4e6f09f132442caf0cc2b0eb47c
parent     c3d80000e3a812fe5a200d6bde755fbd7fa65481
Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
  x86: export vector_used_by_percpu_irq
  x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
  sched: nominate preferred wakeup cpu, fix
  x86: fix lguest used_vectors breakage, -v2
  x86: fix warning in arch/x86/kernel/io_apic.c
  sched: fix warning in kernel/sched.c
  sched: move test_sd_parent() to an SMP section of sched.h
  sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
  sched: activate active load balancing in new idle cpus
  sched: bias task wakeups to preferred semi-idle packages
  sched: nominate preferred wakeup cpu
  sched: favour lower logical cpu number for sched_mc balance
  sched: framework for sched_mc/smt_power_savings=N
  sched: convert BALANCE_FOR_xx_POWER to inline functions
  x86: use possible_cpus=NUM to extend the possible cpus allowed
  x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
  x86: update io_apic.c to the new cpumask code
  x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
  x86: xen: use smp_call_function_many()
  x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
  ...

Fixed up trivial conflict in kernel/time/tick-sched.c manually
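Most of the churn below follows one mechanical pattern. With NR_CPUS headed toward 4096, a cpumask_t is up to 512 bytes, so interfaces that passed it by value are converted to take const struct cpumask *, and on-stack masks built by cpumask_of_cpu() give way to cpumask_of(), which returns a pointer to a prebuilt mask. A minimal sketch of the before/after shape (illustrative only, not code from this merge; old_set_affinity/new_set_affinity are made-up names):

#include <linux/cpumask.h>

/* old style: the whole cpumask_t is copied onto the stack at every call */
static void old_set_affinity(unsigned int irq, cpumask_t mask)
{
	unsigned int cpu = first_cpu(mask);	/* scans the local copy */
	/* ... program the interrupt controller for cpu ... */
}

/* new style: only a pointer crosses the call boundary */
static void new_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	unsigned int cpu = cpumask_first(mask);	/* scans through the pointer */
	/* ... program the interrupt controller for cpu ... */
}

The clockevent hunks have the same shape: static .cpumask = CPU_MASK_CPU0 initializers disappear and a runtime assignment of cpumask_of(0) is added in the init function, since cpumask_of() is not a compile-time constant.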
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/include/asm/smp.h1
-rw-r--r--arch/alpha/kernel/irq.c2
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/smp.c7
-rw-r--r--arch/alpha/kernel/sys_dp264.c8
-rw-r--r--arch/alpha/kernel/sys_titan.c4
-rw-r--r--arch/arm/common/gic.c4
-rw-r--r--arch/arm/kernel/irq.c2
-rw-r--r--arch/arm/kernel/smp.c10
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c3
-rw-r--r--arch/arm/mach-at91/at91sam926x_time.c2
-rw-r--r--arch/arm/mach-davinci/time.c2
-rw-r--r--arch/arm/mach-imx/time.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c2
-rw-r--r--arch/arm/mach-msm/timer.c2
-rw-r--r--arch/arm/mach-ns9xxx/time-ns9360.c2
-rw-r--r--arch/arm/mach-omap1/time.c2
-rw-r--r--arch/arm/mach-omap1/timer32k.c2
-rw-r--r--arch/arm/mach-omap2/timer-gp.c2
-rw-r--r--arch/arm/mach-pxa/time.c2
-rw-r--r--arch/arm/mach-realview/core.c2
-rw-r--r--arch/arm/mach-realview/localtimer.c4
-rw-r--r--arch/arm/mach-sa1100/time.c2
-rw-r--r--arch/arm/mach-versatile/core.c2
-rw-r--r--arch/arm/oprofile/op_model_mpcore.c4
-rw-r--r--arch/arm/plat-mxc/time.c2
-rw-r--r--arch/arm/plat-orion/time.c2
-rw-r--r--arch/avr32/kernel/time.c2
-rw-r--r--arch/blackfin/kernel/time-ts.c2
-rw-r--r--arch/cris/arch-v32/kernel/irq.c4
-rw-r--r--arch/cris/arch-v32/kernel/smp.c4
-rw-r--r--arch/cris/include/asm/smp.h1
-rw-r--r--arch/ia64/hp/sim/hpsim_irq.c2
-rw-r--r--arch/ia64/include/asm/smp.h1
-rw-r--r--arch/ia64/include/asm/topology.h2
-rw-r--r--arch/ia64/kernel/iosapic.c12
-rw-r--r--arch/ia64/kernel/irq.c9
-rw-r--r--arch/ia64/kernel/msi_ia64.c12
-rw-r--r--arch/ia64/kernel/smpboot.c10
-rw-r--r--arch/ia64/kernel/topology.c2
-rw-r--r--arch/ia64/sn/kernel/irq.c6
-rw-r--r--arch/ia64/sn/kernel/msi_sn.c7
-rw-r--r--arch/m32r/Kconfig1
-rw-r--r--arch/m32r/kernel/smpboot.c6
-rw-r--r--arch/m68knommu/platform/coldfire/pit.c2
-rw-r--r--arch/mips/include/asm/irq.h3
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h1
-rw-r--r--arch/mips/include/asm/smp.h3
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/kernel/cevt-bcm1480.c4
-rw-r--r--arch/mips/kernel/cevt-ds1287.c2
-rw-r--r--arch/mips/kernel/cevt-gt641xx.c2
-rw-r--r--arch/mips/kernel/cevt-r4k.c2
-rw-r--r--arch/mips/kernel/cevt-sb1250.c4
-rw-r--r--arch/mips/kernel/cevt-smtc.c2
-rw-r--r--arch/mips/kernel/cevt-txx9.c2
-rw-r--r--arch/mips/kernel/i8253.c2
-rw-r--r--arch/mips/kernel/irq-gic.c6
-rw-r--r--arch/mips/kernel/smp-cmp.c6
-rw-r--r--arch/mips/kernel/smp-mt.c2
-rw-r--r--arch/mips/kernel/smp.c7
-rw-r--r--arch/mips/kernel/smtc.c6
-rw-r--r--arch/mips/mti-malta/malta-smtc.c6
-rw-r--r--arch/mips/nxp/pnx8550/common/time.c1
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c8
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c8
-rw-r--r--arch/mips/sibyte/sb1250/irq.c8
-rw-r--r--arch/mips/sibyte/sb1250/smp.c8
-rw-r--r--arch/mips/sni/time.c2
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/kernel/irq.c6
-rw-r--r--arch/parisc/kernel/smp.c15
-rw-r--r--arch/powerpc/include/asm/topology.h1
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/smp.c4
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/platforms/pseries/xics.c4
-rw-r--r--arch/powerpc/sysdev/mpic.c4
-rw-r--r--arch/powerpc/sysdev/mpic.h2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/kernel/smp.c6
-rw-r--r--arch/s390/kernel/time.c2
-rw-r--r--arch/sh/include/asm/smp.h2
-rw-r--r--arch/sh/include/asm/topology.h1
-rw-r--r--arch/sh/kernel/smp.c10
-rw-r--r--arch/sh/kernel/timers/timer-broadcast.c2
-rw-r--r--arch/sh/kernel/timers/timer-tmu.c2
-rw-r--r--arch/sparc/include/asm/smp_32.h2
-rw-r--r--arch/sparc/kernel/irq_64.c11
-rw-r--r--arch/sparc/kernel/of_device_64.c2
-rw-r--r--arch/sparc/kernel/pci_msi.c2
-rw-r--r--arch/sparc/kernel/smp_32.c6
-rw-r--r--arch/sparc/kernel/smp_64.c4
-rw-r--r--arch/sparc/kernel/sparc_ksyms_32.c4
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/um/kernel/smp.c7
-rw-r--r--arch/um/kernel/time.c2
-rw-r--r--arch/x86/Kconfig13
-rw-r--r--arch/x86/include/asm/bigsmp/apic.h32
-rw-r--r--arch/x86/include/asm/bigsmp/ipi.h13
-rw-r--r--arch/x86/include/asm/desc.h10
-rw-r--r--arch/x86/include/asm/es7000/apic.h82
-rw-r--r--arch/x86/include/asm/es7000/ipi.h12
-rw-r--r--arch/x86/include/asm/genapic_32.h13
-rw-r--r--arch/x86/include/asm/genapic_64.h14
-rw-r--r--arch/x86/include/asm/ipi.h23
-rw-r--r--arch/x86/include/asm/irq.h3
-rw-r--r--arch/x86/include/asm/mach-default/mach_apic.h28
-rw-r--r--arch/x86/include/asm/mach-default/mach_ipi.h18
-rw-r--r--arch/x86/include/asm/mach-generic/mach_apic.h1
-rw-r--r--arch/x86/include/asm/numaq/apic.h12
-rw-r--r--arch/x86/include/asm/numaq/ipi.h13
-rw-r--r--arch/x86/include/asm/smp.h6
-rw-r--r--arch/x86/include/asm/summit/apic.h55
-rw-r--r--arch/x86/include/asm/summit/ipi.h9
-rw-r--r--arch/x86/include/asm/topology.h2
-rw-r--r--arch/x86/kernel/apic.c34
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c45
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd_64.c108
-rw-r--r--arch/x86/kernel/genapic_flat_64.c107
-rw-r--r--arch/x86/kernel/genx2apic_cluster.c81
-rw-r--r--arch/x86/kernel/genx2apic_phys.c74
-rw-r--r--arch/x86/kernel/genx2apic_uv_x.c61
-rw-r--r--arch/x86/kernel/hpet.c8
-rw-r--r--arch/x86/kernel/i8253.c2
-rw-r--r--arch/x86/kernel/io_apic.c370
-rw-r--r--arch/x86/kernel/ipi.c28
-rw-r--r--arch/x86/kernel/irq.c3
-rw-r--r--arch/x86/kernel/irq_32.c13
-rw-r--r--arch/x86/kernel/irq_64.c15
-rw-r--r--arch/x86/kernel/irqinit_32.c16
-rw-r--r--arch/x86/kernel/irqinit_64.c13
-rw-r--r--arch/x86/kernel/mfgpt_32.c2
-rw-r--r--arch/x86/kernel/reboot.c5
-rw-r--r--arch/x86/kernel/setup_percpu.c19
-rw-r--r--arch/x86/kernel/smp.c8
-rw-r--r--arch/x86/kernel/smpboot.c33
-rw-r--r--arch/x86/kernel/tlb_32.c2
-rw-r--r--arch/x86/kernel/tlb_64.c2
-rw-r--r--arch/x86/kernel/traps.c12
-rw-r--r--arch/x86/kernel/vmiclock_32.c2
-rw-r--r--arch/x86/lguest/boot.c2
-rw-r--r--arch/x86/mach-generic/bigsmp.c5
-rw-r--r--arch/x86/mach-generic/es7000.c5
-rw-r--r--arch/x86/mach-generic/numaq.c5
-rw-r--r--arch/x86/mach-generic/summit.c5
-rw-r--r--arch/x86/mach-voyager/voyager_smp.c9
-rw-r--r--arch/x86/mm/numa_64.c4
-rw-r--r--arch/x86/mm/srat_64.c2
-rw-r--r--arch/x86/xen/mmu.c20
-rw-r--r--arch/x86/xen/smp.c27
-rw-r--r--arch/x86/xen/suspend.c3
-rw-r--r--arch/x86/xen/time.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
157 files changed, 1085 insertions, 810 deletions
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 544c69af8168..547e90951cec 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -45,7 +45,6 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern int smp_num_cpus;
-#define cpu_possible_map	cpu_present_map
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi(cpumask_t mask);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c626a821cdcb..d0f1620007f7 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	last_cpu = cpu;
 
 	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
-	irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 351407e07e71..f238370c907d 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -94,6 +94,7 @@ common_shutdown_1(void *generic_ptr)
 		flags |= 0x00040000UL; /* "remain halted" */
 		*pflags = flags;
 		cpu_clear(cpuid, cpu_present_map);
+		cpu_clear(cpuid, cpu_possible_map);
 		halt();
 	}
 #endif
@@ -120,6 +121,7 @@ common_shutdown_1(void *generic_ptr)
 #ifdef CONFIG_SMP
 	/* Wait for the secondaries to halt. */
 	cpu_clear(boot_cpuid, cpu_present_map);
+	cpu_clear(boot_cpuid, cpu_possible_map);
 	while (cpus_weight(cpu_present_map))
 		barrier();
 #endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index cf7da10097bb..d953e510f68d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -70,11 +70,6 @@ enum ipi_message_type {
 /* Set to a secondary's cpuid when it comes online.  */
 static int smp_secondary_alive __devinitdata = 0;
 
-/* Which cpus ids came online.  */
-cpumask_t cpu_online_map;
-
-EXPORT_SYMBOL(cpu_online_map);
-
 int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online.  */
 EXPORT_SYMBOL(smp_num_cpus);
@@ -440,6 +435,7 @@ setup_smp(void)
 			((char *)cpubase + i*hwrpb->processor_size);
 		if ((cpu->flags & 0x1cc) == 0x1cc) {
 			smp_num_probed++;
+			cpu_set(i, cpu_possible_map);
 			cpu_set(i, cpu_present_map);
 			cpu->pal_revision = boot_cpu_palrev;
 		}
@@ -473,6 +469,7 @@ smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Nothing to do on a UP box, or when told not to.  */
 	if (smp_num_probed == 1 || max_cpus == 0) {
+		cpu_possible_map = cpumask_of_cpu(boot_cpuid);
 		cpu_present_map = cpumask_of_cpu(boot_cpuid);
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		return;
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index c71b0fd7a61f..ab44c164d9d4 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -177,19 +177,19 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-dp264_set_affinity(unsigned int irq, cpumask_t affinity)
+dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, affinity);
+	cpu_set_irq_affinity(irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_set_affinity(unsigned int irq, cpumask_t affinity)
+clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, affinity);
+	cpu_set_irq_affinity(irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 52c91ccc1648..27f840a4ad3d 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -158,10 +158,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-titan_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
 	spin_lock(&titan_irq_lock);
-	titan_cpu_set_irq_affinity(irq - 16, affinity);
+	titan_cpu_set_irq_affinity(irq - 16, *affinity);
 	titan_update_irq_hw(titan_cached_irq_mask);
 	spin_unlock(&titan_irq_lock);
 }
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 7fc9860a97d7..c6884ba1d5ed 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -109,11 +109,11 @@ static void gic_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
+static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
 {
 	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
 	unsigned int shift = (irq % 4) * 8;
-	unsigned int cpu = first_cpu(mask_val);
+	unsigned int cpu = cpumask_first(mask_val);
 	u32 val;
 
 	spin_lock(&irq_controller_lock);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2f3eb795fa6e..7141cee1fab7 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -174,7 +174,7 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->chip->set_affinity(irq, cpumask_of_cpu(cpu));
+	desc->chip->set_affinity(irq, cpumask_of(cpu));
 	spin_unlock_irq(&desc->lock);
 }
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 019237d21622..55fa7ff96a3e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -34,16 +34,6 @@
 #include <asm/ptrace.h>
 
 /*
- * bitmask of present and online CPUs.
- * The present bitmask indicates that the CPU is physically present.
- * The online bitmask indicates that the CPU is up and running.
- */
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
-/*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index d140eae53ded..1ff1bda0a894 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -178,7 +178,6 @@ static struct clock_event_device clkevt = {
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.rating		= 150,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= clkevt32k_next_event,
 	.set_mode	= clkevt32k_mode,
 };
@@ -206,7 +205,7 @@ void __init at91rm9200_timer_init(void)
 	clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
 	clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
 	clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
-	clkevt.cpumask = cpumask_of_cpu(0);
+	clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&clkevt);
 
 	/* register clocksource */
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 122fd77ed580..b63e1d5f1bad 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -91,7 +91,6 @@ static struct clock_event_device pit_clkevt = {
 	.features	= CLOCK_EVT_FEAT_PERIODIC,
 	.shift		= 32,
 	.rating		= 100,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_mode	= pit_clkevt_mode,
 };
 
@@ -173,6 +172,7 @@ static void __init at91sam926x_pit_init(void)
 
 	/* Set up and register clockevents */
 	pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
+	pit_clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&pit_clkevt);
 }
 
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 3b9a296b5c4b..f8bcd29d17a6 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -322,7 +322,7 @@ static void __init davinci_timer_init(void)
 	clockevent_davinci.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_davinci);
 
-	clockevent_davinci.cpumask = cpumask_of_cpu(0);
+	clockevent_davinci.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_davinci);
 }
 
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index a11765f5f23b..aff0ebcfa847 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -184,7 +184,7 @@ static int __init imx_clockevent_init(unsigned long rate)
 	clockevent_imx.min_delta_ns =
 		clockevent_delta2ns(0xf, &clockevent_imx);
 
-	clockevent_imx.cpumask = cpumask_of_cpu(0);
+	clockevent_imx.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_imx);
 
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 7766f469456b..f4656d2ac8a8 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -487,7 +487,7 @@ static int __init ixp4xx_clockevent_init(void)
 		clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
 	clockevent_ixp4xx.min_delta_ns =
 		clockevent_delta2ns(0xf, &clockevent_ixp4xx);
-	clockevent_ixp4xx.cpumask = cpumask_of_cpu(0);
+	clockevent_ixp4xx.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_ixp4xx);
 	return 0;
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 345a14cb73c3..444d9c0f5ca6 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -182,7 +182,7 @@ static void __init msm_timer_init(void)
 		clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
 	/* 4 gets rounded down to 3 */
 	ce->min_delta_ns = clockevent_delta2ns(4, ce);
-	ce->cpumask = cpumask_of_cpu(0);
+	ce->cpumask = cpumask_of(0);
 
 	cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
 	res = clocksource_register(cs);
diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c
index a63424d083d9..41df69721769 100644
--- a/arch/arm/mach-ns9xxx/time-ns9360.c
+++ b/arch/arm/mach-ns9xxx/time-ns9360.c
@@ -173,7 +173,7 @@ static void __init ns9360_timer_init(void)
 	ns9360_clockevent_device.min_delta_ns =
 		clockevent_delta2ns(1, &ns9360_clockevent_device);
 
-	ns9360_clockevent_device.cpumask = cpumask_of_cpu(0);
+	ns9360_clockevent_device.cpumask = cpumask_of(0);
 	clockevents_register_device(&ns9360_clockevent_device);
 
 	setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT,
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 2cf7e32bd293..495a32c287b4 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -173,7 +173,7 @@ static __init void omap_init_mpu_timer(unsigned long rate)
 	clockevent_mpu_timer1.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_mpu_timer1);
 
-	clockevent_mpu_timer1.cpumask = cpumask_of_cpu(0);
+	clockevent_mpu_timer1.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_mpu_timer1);
 }
 
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 705367ece174..fd3f7396e162 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -187,7 +187,7 @@ static __init void omap_init_32k_timer(void)
 	clockevent_32k_timer.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_32k_timer);
 
-	clockevent_32k_timer.cpumask = cpumask_of_cpu(0);
+	clockevent_32k_timer.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_32k_timer);
 }
 
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 589393bedade..ae6036300f60 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -120,7 +120,7 @@ static void __init omap2_gp_clockevent_init(void)
 	clockevent_gpt.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_gpt);
 
-	clockevent_gpt.cpumask = cpumask_of_cpu(0);
+	clockevent_gpt.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_gpt);
 }
 
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 001624158519..95656a72268d 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -122,7 +122,6 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.rating		= 200,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= pxa_osmr0_set_next_event,
 	.set_mode	= pxa_osmr0_set_mode,
 };
@@ -163,6 +162,7 @@ static void __init pxa_timer_init(void)
 		clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
 	ckevt_pxa_osmr0.min_delta_ns =
 		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1;
+	ckevt_pxa_osmr0.cpumask = cpumask_of(0);
 
 	cksrc_pxa_oscr0.mult =
 		clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift);
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 5f1d55963ced..bd2aa4f16141 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -624,7 +624,7 @@ static struct clock_event_device timer0_clockevent = {
 	.set_mode	= timer_set_mode,
 	.set_next_event	= timer_set_next_event,
 	.rating		= 300,
-	.cpumask	= CPU_MASK_ALL,
+	.cpumask	= cpu_all_mask,
 };
 
 static void __init realview_clockevents_init(unsigned int timer_irq)
diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c
index 9019ef2e5611..67d6d9cc68b2 100644
--- a/arch/arm/mach-realview/localtimer.c
+++ b/arch/arm/mach-realview/localtimer.c
@@ -154,7 +154,7 @@ void __cpuinit local_timer_setup(void)
 	clk->set_mode		= local_timer_set_mode;
 	clk->set_next_event	= local_timer_set_next_event;
 	clk->irq		= IRQ_LOCALTIMER;
-	clk->cpumask		= cpumask_of_cpu(cpu);
+	clk->cpumask		= cpumask_of(cpu);
 	clk->shift		= 20;
 	clk->mult		= div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift);
 	clk->max_delta_ns	= clockevent_delta2ns(0xffffffff, clk);
@@ -193,7 +193,7 @@ void __cpuinit local_timer_setup(void)
 	clk->rating		= 200;
 	clk->set_mode		= dummy_timer_set_mode;
 	clk->broadcast		= smp_timer_broadcast;
-	clk->cpumask		= cpumask_of_cpu(cpu);
+	clk->cpumask		= cpumask_of(cpu);
 
 	clockevents_register_device(clk);
 }
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 8c5e727f3b75..711c0295c66f 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -73,7 +73,6 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.rating		= 200,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= sa1100_osmr0_set_next_event,
 	.set_mode	= sa1100_osmr0_set_mode,
 };
@@ -110,6 +109,7 @@ static void __init sa1100_timer_init(void)
 		clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0);
 	ckevt_sa1100_osmr0.min_delta_ns =
 		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1;
+	ckevt_sa1100_osmr0.cpumask = cpumask_of(0);
 
 	cksrc_sa1100_oscr.mult =
 		clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index df25aa138509..1c43494f5c42 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -1005,7 +1005,7 @@ static void __init versatile_timer_init(void)
 	timer0_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xf, &timer0_clockevent);
 
-	timer0_clockevent.cpumask = cpumask_of_cpu(0);
+	timer0_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&timer0_clockevent);
 }
 
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 4de366e8b4c5..6d6bd5899240 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -260,10 +260,10 @@ static void em_stop(void)
 static void em_route_irq(int irq, unsigned int cpu)
 {
 	struct irq_desc *desc = irq_desc + irq;
-	cpumask_t mask = cpumask_of_cpu(cpu);
+	const struct cpumask *mask = cpumask_of(cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->affinity = mask;
+	desc->affinity = *mask;
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index fd28f5194f71..758a1293bcfa 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -190,7 +190,7 @@ static int __init mxc_clockevent_init(void)
 	clockevent_mxc.min_delta_ns =
 		clockevent_delta2ns(0xff, &clockevent_mxc);
 
-	clockevent_mxc.cpumask = cpumask_of_cpu(0);
+	clockevent_mxc.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_mxc);
 
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index 544d6b327f3a..6fa2923e6dca 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -149,7 +149,6 @@ static struct clock_event_device orion_clkevt = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
 	.shift		= 32,
 	.rating		= 300,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= orion_clkevt_next_event,
 	.set_mode	= orion_clkevt_mode,
 };
@@ -199,5 +198,6 @@ void __init orion_time_init(unsigned int irq, unsigned int tclk)
 	orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift);
 	orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt);
 	orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt);
+	orion_clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&orion_clkevt);
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 283481d74a5b..0ff46bf873b0 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -106,7 +106,6 @@ static struct clock_event_device comparator = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 16,
 	.rating		= 50,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= comparator_next_event,
 	.set_mode	= comparator_mode,
 };
@@ -134,6 +133,7 @@ void __init time_init(void)
 	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
 	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
 	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
+	comparator.cpumask = cpumask_of(0);
 
 	sysreg_write(COMPARE, 0);
 	timer_irqaction.dev_id = &comparator;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index e887efc86c29..0ed2badfd746 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -162,7 +162,6 @@ static struct clock_event_device clockevent_bfin = {
 	.name		= "bfin_core_timer",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event = bfin_timer_set_next_event,
 	.set_mode	= bfin_timer_set_mode,
 };
@@ -193,6 +192,7 @@ static int __init bfin_clockevent_init(void)
 	clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
 	clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
 	clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
+	clockevent_bfin.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_bfin);
 
 	return 0;
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 173c141ac9ba..295131fee710 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -325,11 +325,11 @@ static void end_crisv32_irq(unsigned int irq)
 {
 }
 
-void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest)
+void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&irq_lock, flags);
-	irq_allocations[irq - FIRST_IRQ].mask = dest;
+	irq_allocations[irq - FIRST_IRQ].mask = *dest;
 	spin_unlock_irqrestore(&irq_lock, flags);
 }
 
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 52e16c6436f9..9dac17334640 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -29,11 +29,7 @@
 spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
 
 /* CPU masks */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */
diff --git a/arch/cris/include/asm/smp.h b/arch/cris/include/asm/smp.h
index dba33aba3e95..c615a06dd757 100644
--- a/arch/cris/include/asm/smp.h
+++ b/arch/cris/include/asm/smp.h
@@ -4,7 +4,6 @@
 #include <linux/cpumask.h>
 
 extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_possible_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c2f58ff364e7..cc0a3182db3c 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq)
 }
 
 static void
-hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
+hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
 {
 }
 
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 12d96e0cd513..21c402365d0e 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -57,7 +57,6 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __devinitdata;
 
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 35bcb641c9e5..a3cc9f65f954 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -55,7 +55,6 @@
 void build_cpu_to_node_map(void);
 
 #define SD_CPU_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\
@@ -80,7 +79,6 @@ void build_cpu_to_node_map(void);
 
 /* sched_domains SD_NODE_INIT for IA64 NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5c4674ae8aea..c8adecd5b416 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, cpumask_t mask)
+iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	u32 high32, low32;
-	int dest, rte_index;
+	int cpu, dest, rte_index;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	struct iosapic_rte_info *rte;
 	struct iosapic *iosapic;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 
-	cpus_and(mask, mask, cpu_online_map);
-	if (cpus_empty(mask))
+	cpu = cpumask_first_and(cpu_online_mask, mask);
+	if (cpu >= nr_cpu_ids)
 		return;
 
-	if (irq_prepare_move(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
-	dest = cpu_physical_id(first_cpu(mask));
+	dest = cpu_physical_id(cpu);
 
 	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt */
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7fd18f54c056..0b6db53fedcf 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS];
  */
 static void migrate_irqs(void)
 {
-	cpumask_t	mask;
 	irq_desc_t *desc;
 	int 		irq, new_cpu;
 
@@ -152,15 +151,14 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
 			 */
 			vectors_in_migration[irq] = irq;
 
 			new_cpu = any_online_cpu(cpu_online_map);
-			mask = cpumask_of_cpu(new_cpu);
 
 			/*
 			 * Al three are essential, currently WARN_ON.. maybe panic?
@@ -168,7 +166,8 @@ static void migrate_irqs(void)
 			if (desc->chip && desc->chip->disable &&
 			    desc->chip->enable && desc->chip->set_affinity) {
 				desc->chip->disable(irq);
-				desc->chip->set_affinity(irq, mask);
+				desc->chip->set_affinity(irq,
+							 cpumask_of(new_cpu));
 				desc->chip->enable(irq);
 			} else {
 				WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 702a09c13238..890339339035 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -49,11 +49,12 @@
 static struct irq_chip ia64_msi_chip;
 
 #ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void ia64_set_msi_irq_affinity(unsigned int irq,
+				      const cpumask_t *cpu_mask)
 {
 	struct msi_msg msg;
 	u32 addr, data;
-	int cpu = first_cpu(cpu_mask);
+	int cpu = first_cpu(*cpu_mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
-	int cpu = first_cpu(mask);
-
+	int cpu = cpumask_first(mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	irq_desc[irq].affinity = *mask;
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1dcbb85fc4ee..11463994a7d5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu;
  */
 DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
-
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
@@ -688,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu)
 {
 	int new_cpei_cpu;
 	irq_desc_t *desc = NULL;
-	cpumask_t 	mask;
+	const struct cpumask *mask;
 	int 		retval = 0;
 
 	/*
@@ -701,7 +695,7 @@
 		 * Now re-target the CPEI to a different processor
 		 */
 		new_cpei_cpu = any_online_cpu(cpu_online_map);
-		mask = cpumask_of_cpu(new_cpei_cpu);
+		mask = cpumask_of(new_cpei_cpu);
 		set_cpei_target_cpu(new_cpei_cpu);
 		desc = irq_desc + ia64_cpe_irq;
 		/*
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index c75b914f2d6b..a8d61a3e9a94 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
 	cpumask_t shared_cpu_map;
 
 	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
-	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
 	len += sprintf(buf+len, "\n");
 	return len;
 }
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0c66dbdd1d72..66fd705e82c0 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -227,14 +227,14 @@ finish_up:
 	return new_irq_info;
 }
 
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
 	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
 	nasid_t nasid;
 	int slice;
 
-	nasid = cpuid_to_nasid(first_cpu(mask));
-	slice = cpuid_to_slice(first_cpu(mask));
+	nasid = cpuid_to_nasid(cpumask_first(mask));
+	slice = cpuid_to_slice(cpumask_first(mask));
 
 	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
 				 sn_irq_lh[irq], list)
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 83f190ffe350..ca553b0429ce 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
 }
 
 #ifdef CONFIG_SMP
-static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void sn_set_msi_irq_affinity(unsigned int irq,
+				    const struct cpumask *cpu_mask)
 {
 	struct msi_msg msg;
 	int slice;
@@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	struct sn_pcibus_provider *provider;
 	unsigned int cpu;
 
-	cpu = first_cpu(cpu_mask);
+	cpu = cpumask_first(cpu_mask);
 	sn_irq_info = sn_msi_info[irq].sn_irq_info;
 	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
 		return;
@@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpu_mask;
+	irq_desc[irq].affinity = *cpu_mask;
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 29047d5c259a..cabba332cc48 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
 	default y
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select INIT_ALL_POSSIBLE
 
 config SBUS
 	bool
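For context on the one-line Kconfig change above: INIT_ALL_POSSIBLE tells the now-centralized cpumask code that this architecture cannot enumerate its CPUs early, so cpu_possible_map should start with every bit set; that is what lets per-arch map definitions (like the m32r ones deleted in the next hunk) go away. Roughly what the generic side does, sketched from memory of kernel/cpu.c in this series; treat the real file as authoritative:

#ifdef CONFIG_INIT_ALL_POSSIBLE
/* arch can't probe CPUs up front: assume every CPU may appear */
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
/* arch probes: start empty, set bits as CPUs are discovered */
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);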
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 39cb6da72dcb..0f06b3722e96 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -73,17 +73,11 @@ static unsigned int bsp_phys_id = -1;
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
 
-/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_bootout_map;
 cpumask_t cpu_bootin_map;
 static cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c
index c5b916700b22..2a12e7fa9748 100644
--- a/arch/m68knommu/platform/coldfire/pit.c
+++ b/arch/m68knommu/platform/coldfire/pit.c
@@ -156,7 +156,7 @@ void hw_timer_init(void)
 {
 	u32 imr;
 
-	cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+	cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id());
 	cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32);
 	cf_pit_clockevent.max_delta_ns =
 		clockevent_delta2ns(0xFFFF, &cf_pit_clockevent);
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index a58f0eecc68f..abc62aa744ac 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -49,7 +49,8 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 #include <linux/cpumask.h>
 
-extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
+extern void plat_set_irq_affinity(unsigned int irq,
+				  const struct cpumask *affinity);
 extern void smtc_forward_irq(unsigned int irq);
 
 /*
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 7785bec732f2..1fb959f98982 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -37,7 +37,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 
 /* sched_domains SD_NODE_INIT for SGI IP27 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 0ff5b523ea77..86557b5d1b3f 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -38,9 +38,6 @@ extern int __cpu_logical_map[NR_CPUS];
 #define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
 #define SMP_CALL_FUNCTION	0x2
 
-extern cpumask_t phys_cpu_present_map;
-#define cpu_possible_map	phys_cpu_present_map
-
 extern void asmlinkage smp_bootstrap(void);
 
 /*
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index d7f8a782aae4..03965cb1b252 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -146,7 +146,7 @@ void __init plat_time_init(void)
 
 	BUG_ON(HZ != 100);
 
-	cd->cpumask = cpumask_of_cpu(cpu);
+	cd->cpumask = cpumask_of(cpu);
 	clockevents_register_device(cd);
 	action->dev_id = cd;
 	setup_irq(JAZZ_TIMER_IRQ, action);
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 0a57f86945f1..b820661678b0 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -126,7 +126,7 @@ void __cpuinit sb1480_clockevent_init(void)
 	cd->min_delta_ns	= clockevent_delta2ns(2, cd);
 	cd->rating		= 200;
 	cd->irq			= irq;
-	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
 	cd->set_mode		= sibyte_set_mode;
 	clockevents_register_device(cd);
@@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(void)
 	action->name	= name;
 	action->dev_id	= cd;
 
-	irq_set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_set_affinity(irq, cpumask_of(cpu));
 	setup_irq(irq, action);
 }
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index df4acb68bfb5..1ada45ea0700 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -88,7 +88,6 @@ static void ds1287_event_handler(struct clock_event_device *dev)
 static struct clock_event_device ds1287_clockevent = {
 	.name		= "ds1287",
 	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= ds1287_set_next_event,
 	.set_mode	= ds1287_set_mode,
 	.event_handler	= ds1287_event_handler,
@@ -122,6 +121,7 @@ int __init ds1287_clockevent_init(int irq)
 	clockevent_set_clock(cd, 32768);
 	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
 	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+	cd->cpumask = cpumask_of(0);
 
 	clockevents_register_device(&ds1287_clockevent);
 
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 6e2f58520afb..e9b787feedcb 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -96,7 +96,6 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
 static struct clock_event_device gt641xx_timer0_clockevent = {
        .name           = "gt641xx-timer0",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-       .cpumask        = CPU_MASK_CPU0,
        .irq            = GT641XX_TIMER0_IRQ,
        .set_next_event = gt641xx_timer0_set_next_event,
        .set_mode       = gt641xx_timer0_set_mode,
@@ -132,6 +131,7 @@ static int __init gt641xx_timer0_clockevent_init(void)
        clockevent_set_clock(cd, gt641xx_base_clock);
        cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
        cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+       cd->cpumask = cpumask_of(0);
 
        clockevents_register_device(&gt641xx_timer0_clockevent);
 
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 4a4c59f2737a..e1ec83b68031 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -195,7 +195,7 @@ int __cpuinit mips_clockevent_init(void)
 
        cd->rating              = 300;
        cd->irq                 = irq;
-       cd->cpumask             = cpumask_of_cpu(cpu);
+       cd->cpumask             = cpumask_of(cpu);
        cd->set_next_event      = mips_next_event;
        cd->set_mode            = mips_set_clock_mode;
        cd->event_handler       = mips_event_handler;
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 63ac3ad462bc..a2eebaafda52 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -125,7 +125,7 @@ void __cpuinit sb1250_clockevent_init(void)
        cd->min_delta_ns        = clockevent_delta2ns(2, cd);
        cd->rating              = 200;
        cd->irq                 = irq;
-       cd->cpumask             = cpumask_of_cpu(cpu);
+       cd->cpumask             = cpumask_of(cpu);
        cd->set_next_event      = sibyte_next_event;
        cd->set_mode            = sibyte_set_mode;
        clockevents_register_device(cd);
@@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(void)
        action->name    = name;
        action->dev_id  = cd;
 
-       irq_set_affinity(irq, cpumask_of_cpu(cpu));
+       irq_set_affinity(irq, cpumask_of(cpu));
        setup_irq(irq, action);
 }
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 5162fe4b5952..6d45e24db5bf 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -292,7 +292,7 @@ int __cpuinit mips_clockevent_init(void)
 
        cd->rating              = 300;
        cd->irq                 = irq;
-       cd->cpumask             = cpumask_of_cpu(cpu);
+       cd->cpumask             = cpumask_of(cpu);
        cd->set_next_event      = mips_next_event;
        cd->set_mode            = mips_set_clock_mode;
        cd->event_handler       = mips_event_handler;
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index b5fc4eb412d2..eccf7d6096bd 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -112,7 +112,6 @@ static struct clock_event_device txx9tmr_clock_event_device = {
        .name           = "TXx9",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 200,
-       .cpumask        = CPU_MASK_CPU0,
        .set_mode       = txx9tmr_set_mode,
        .set_next_event = txx9tmr_set_next_event,
 };
@@ -150,6 +149,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
                clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
        cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
        cd->irq = irq;
+       cd->cpumask = cpumask_of(0);
        clockevents_register_device(cd);
        setup_irq(irq, &txx9tmr_irq);
        printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index b6ac55162b9a..f4d187825f96 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -115,7 +115,7 @@ void __init setup_pit_timer(void)
         * Start pit with the boot cpu mask and make it global after the
         * IO_APIC has been initialized.
         */
-       cd->cpumask = cpumask_of_cpu(cpu);
+       cd->cpumask = cpumask_of(cpu);
        clockevent_set_clock(cd, CLOCK_TICK_RATE);
        cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
        cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index f0a4bb19e096..494a49a317e9 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq)
 
 static DEFINE_SPINLOCK(gic_lock);
 
-static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
+static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
        cpumask_t       tmp = CPU_MASK_NONE;
        unsigned long   flags;
@@ -164,7 +164,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
        pr_debug(KERN_DEBUG "%s called\n", __func__);
        irq -= _irqbase;
 
-       cpus_and(tmp, cpumask, cpu_online_map);
+       cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return;
 
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
                set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
        }
-       irq_desc[irq].affinity = cpumask;
+       irq_desc[irq].affinity = *cpumask;
        spin_unlock_irqrestore(&gic_lock, flags);
 
 }
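The GIC change shows the irq_chip side of the series: ->set_affinity() handlers now receive a const struct cpumask * and use the pointer-based accessors (cpumask_and(), cpumask_first(), cpumask_weight()) in place of the old by-value cpus_*() macros. A condensed sketch of the converted handler shape; route_irq_to_cpu() is a made-up hardware hook, and the temporary mask stays on-stack exactly as in the GIC code:

#include <linux/cpumask.h>
#include <linux/irq.h>

static void my_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        cpumask_t tmp = CPU_MASK_NONE;

        cpumask_and(&tmp, mask, cpu_online_mask);
        if (cpus_empty(tmp))
                return;

        route_irq_to_cpu(irq, first_cpu(tmp));  /* hypothetical */
        irq_desc[irq].affinity = *mask;         /* record the requested mask */
}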
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index ca476c4f62a5..f27beca4b26d 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -51,10 +51,10 @@ static int __init allowcpus(char *str)
        int len;
 
        cpus_clear(cpu_allow_map);
-       if (cpulist_parse(str, cpu_allow_map) == 0) {
+       if (cpulist_parse(str, &cpu_allow_map) == 0) {
                cpu_set(0, cpu_allow_map);
                cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
-               len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
+               len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map);
                buf[len] = '\0';
                pr_debug("Allowable CPUs: %s\n", buf);
                return 1;
@@ -226,7 +226,7 @@ void __init cmp_smp_setup(void)
 
        for (i = 1; i < NR_CPUS; i++) {
                if (amon_cpu_avail(i)) {
-                       cpu_set(i, phys_cpu_present_map);
+                       cpu_set(i, cpu_possible_map);
                        __cpu_number_map[i]     = ++ncpu;
                        __cpu_logical_map[ncpu] = i;
                }
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 87a1816c1f45..6f7ee5ac46ee 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
                write_vpe_c0_vpeconf0(tmp);
 
                /* Record this as available CPU */
-               cpu_set(tc, phys_cpu_present_map);
+               cpu_set(tc, cpu_possible_map);
                __cpu_number_map[tc]    = ++ncpu;
                __cpu_logical_map[ncpu] = tc;
        }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 8bf88faf5afd..3da94704f816 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -44,15 +44,10 @@
 #include <asm/mipsmtregs.h>
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-cpumask_t phys_cpu_present_map;                /* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;     /* Bitmask of started secondaries */
-cpumask_t cpu_online_map;              /* Bitmask of currently online CPUs */
 int __cpu_number_map[NR_CPUS];         /* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];                /* Map logical to physical */
 
-EXPORT_SYMBOL(phys_cpu_present_map);
-EXPORT_SYMBOL(cpu_online_map);
-
 extern void cpu_idle(void);
 
 /* Number of TCs (or siblings in Intel speak) per CPU core */
@@ -195,7 +190,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
-       cpu_set(0, phys_cpu_present_map);
+       cpu_set(0, cpu_possible_map);
        cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_callin_map);
 }
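The MIPS SMP hunks retire the arch-private phys_cpu_present_map: platforms now mark CPUs directly in the generic cpu_possible_map, whose definition and EXPORT_SYMBOL move into common code (see the parisc and s390 Kconfig hunks further down). Roughly, a platform's setup now looks like the sketch below; the probe function is hypothetical:

#include <linux/cpumask.h>

static void __init my_smp_setup(void)
{
        int i;

        cpus_clear(cpu_possible_map);
        for (i = 0; i < NR_CPUS; i++) {
                if (platform_cpu_avail(i)) {    /* hypothetical probe */
                        cpu_set(i, cpu_possible_map);
                        __cpu_number_map[i] = i;
                        __cpu_logical_map[i] = i;
                }
        }
}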
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 897fb2b4751c..b6cca01ff82b 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -290,7 +290,7 @@ static void smtc_configure_tlb(void)
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
@@ -304,7 +304,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
         */
        ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
        for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
-               cpu_set(i, phys_cpu_present_map);
+               cpu_set(i, cpu_possible_map);
                __cpu_number_map[i] = i;
                __cpu_logical_map[i] = i;
        }
@@ -521,7 +521,7 @@ void smtc_prepare_cpus(int cpus)
         * Pull any physically present but unused TCs out of circulation.
         */
        while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
-               cpu_clear(tc, phys_cpu_present_map);
+               cpu_clear(tc, cpu_possible_map);
                cpu_clear(tc, cpu_present_map);
                tc++;
        }
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index f84a46a8ae6e..aabd7274507b 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -114,9 +114,9 @@ struct plat_smp_ops msmtc_smp_ops = {
  */
 
 
-void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-       cpumask_t tmask = affinity;
+       cpumask_t tmask = *affinity;
        int cpu = 0;
        void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,7 +139,7 @@ void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
         * be made to forward to an offline "CPU".
         */
 
-       for_each_cpu_mask(cpu, affinity) {
+       for_each_cpu(cpu, affinity) {
                if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
                        cpu_clear(cpu, tmask);
        }
diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c
index 62f495b57f93..cf293b279098 100644
--- a/arch/mips/nxp/pnx8550/common/time.c
+++ b/arch/mips/nxp/pnx8550/common/time.c
@@ -102,6 +102,7 @@ __init void plat_time_init(void)
        unsigned int p;
        unsigned int pow2p;
 
+       pnx8xxx_clockevent.cpumask = cpu_none_mask;
        clockevents_register_device(&pnx8xxx_clockevent);
        clocksource_register(&pnx_clocksource);
 
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 3a7df647ca77..f78c29b68d77 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -141,7 +141,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
 }
 
 /*
- * Detect available CPUs, populate phys_cpu_present_map before smp_init
+ * Detect available CPUs, populate cpu_possible_map before smp_init
  *
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
@@ -150,10 +150,10 @@ static void __init yos_smp_setup(void)
 {
        int i;
 
-       cpus_clear(phys_cpu_present_map);
+       cpus_clear(cpu_possible_map);
 
        for (i = 0; i < 2; i++) {
-               cpu_set(i, phys_cpu_present_map);
+               cpu_set(i, cpu_possible_map);
                __cpu_number_map[i]     = i;
                __cpu_logical_map[i]    = i;
        }
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index ba5cdebeaf0d..5b47d6b65275 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
                /* Only let it join in if it's marked enabled */
                if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
                    (tot_cpus_found != NR_CPUS)) {
-                       cpu_set(cpuid, phys_cpu_present_map);
+                       cpu_set(cpuid, cpu_possible_map);
                        alloc_cpupda(cpuid, tot_cpus_found);
                        cpus_found++;
                        tot_cpus_found++;
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 1327c2746fb7..f024057a35f8 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -134,7 +134,7 @@ void __cpuinit hub_rt_clock_event_init(void)
        cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
        cd->rating              = 200;
        cd->irq                 = irq;
-       cd->cpumask             = cpumask_of_cpu(cpu);
+       cd->cpumask             = cpumask_of(cpu);
        cd->set_next_event      = rt_next_event;
        cd->set_mode            = rt_set_mode;
        clockevents_register_device(cd);
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index a35818ed4263..12b465d404df 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq);
 static void disable_bcm1480_irq(unsigned int irq);
 static void ack_bcm1480_irq(unsigned int irq);
 #ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask);
+static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_PCI
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
 }
 
 #ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
+static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
        int i = 0, old_cpu, cpu, int_on, k;
        u64 cur_ints;
@@ -117,11 +117,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
        unsigned long flags;
        unsigned int irq_dirty;
 
-       if (cpus_weight(mask) != 1) {
+       if (cpumask_weight(mask) != 1) {
                printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
                return;
        }
-       i = first_cpu(mask);
+       i = cpumask_first(mask);
 
        /* Convert logical CPU to physical CPU */
        cpu = cpu_logical_map(i);
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index bd9eeb43ed0e..dddfda8e8294 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -136,7 +136,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -145,14 +145,14 @@ static void __init bcm1480_smp_setup(void)
 {
        int i, num;
 
-       cpus_clear(phys_cpu_present_map);
-       cpu_set(0, phys_cpu_present_map);
+       cpus_clear(cpu_possible_map);
+       cpu_set(0, cpu_possible_map);
        __cpu_number_map[0] = 0;
        __cpu_logical_map[0] = 0;
 
        for (i = 1, num = 0; i < NR_CPUS; i++) {
                if (cfe_cpu_stop(i) == 0) {
-                       cpu_set(i, phys_cpu_present_map);
+                       cpu_set(i, cpu_possible_map);
                        __cpu_number_map[i] = ++num;
                        __cpu_logical_map[num] = i;
                }
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index a5158483986e..808ac2959b8c 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq);
 static void disable_sb1250_irq(unsigned int irq);
 static void ack_sb1250_irq(unsigned int irq);
 #ifdef CONFIG_SMP
-static void sb1250_set_affinity(unsigned int irq, cpumask_t mask);
+static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SIBYTE_HAS_LDT
@@ -103,16 +103,16 @@ void sb1250_unmask_irq(int cpu, int irq)
 }
 
 #ifdef CONFIG_SMP
-static void sb1250_set_affinity(unsigned int irq, cpumask_t mask)
+static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
        int i = 0, old_cpu, cpu, int_on;
        u64 cur_ints;
        struct irq_desc *desc = irq_desc + irq;
        unsigned long flags;
 
-       i = first_cpu(mask);
+       i = cpumask_first(mask);
 
-       if (cpus_weight(mask) > 1) {
+       if (cpumask_weight(mask) > 1) {
                printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
                return;
        }
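Both Sibyte interrupt controllers can steer an IRQ at only one CPU, so their handlers validate the incoming pointer mask with cpumask_weight()/cpumask_first() before touching hardware. The guard clause, reduced to its essentials (a sketch, not the driver code):

#include <linux/cpumask.h>

static int pick_single_target(unsigned int irq, const struct cpumask *mask)
{
        if (cpumask_weight(mask) != 1) {
                printk("attempted to set irq affinity for irq %d to multiple CPUs\n",
                       irq);
                return -1;
        }
        return cpumask_first(mask);     /* logical CPU; map to physical next */
}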
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 0734b933e969..5950a288a7da 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -124,7 +124,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -133,14 +133,14 @@ static void __init sb1250_smp_setup(void)
 {
        int i, num;
 
-       cpus_clear(phys_cpu_present_map);
-       cpu_set(0, phys_cpu_present_map);
+       cpus_clear(cpu_possible_map);
+       cpu_set(0, cpu_possible_map);
        __cpu_number_map[0] = 0;
        __cpu_logical_map[0] = 0;
 
        for (i = 1, num = 0; i < NR_CPUS; i++) {
                if (cfe_cpu_stop(i) == 0) {
-                       cpu_set(i, phys_cpu_present_map);
+                       cpu_set(i, cpu_possible_map);
                        __cpu_number_map[i] = ++num;
                        __cpu_logical_map[num] = i;
                }
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 796e3ce28720..69f5f88711cc 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -80,7 +80,7 @@ static void __init sni_a20r_timer_setup(void)
        struct irqaction *action = &a20r_irqaction;
        unsigned int cpu = smp_processor_id();
 
-       cd->cpumask = cpumask_of_cpu(cpu);
+       cd->cpumask = cpumask_of(cpu);
        clockevents_register_device(cd);
        action->dev_id = cd;
        setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 644a70b1b04e..aacf11d33723 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,6 +11,7 @@ config PARISC
        select HAVE_OPROFILE
        select RTC_CLASS
        select RTC_DRV_PARISC
+       select INIT_ALL_POSSIBLE
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
          in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 23ef950df008..4cea935e2f99 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
        return 0;
 }
 
-static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-       if (cpu_check_affinity(irq, &dest))
+       if (cpu_check_affinity(irq, dest))
                return;
 
-       irq_desc[irq].affinity = dest;
+       irq_desc[irq].affinity = *dest;
 }
 #endif
 
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index d47f3975c9c6..80bc000523fa 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -67,21 +67,6 @@ static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is boo
 
 static int parisc_max_cpus __read_mostly = 1;
 
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
-
-cpumask_t cpu_online_map   __read_mostly = CPU_MASK_NONE;     /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;      /* Bitmap of Present CPUs */
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
 enum ipi_message_type {
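parisc discovers CPUs late and hotplugs them in, so it kept cpu_possible_map fully set; rather than each such arch defining its own copy, the new INIT_ALL_POSSIBLE Kconfig symbol (selected above, and by s390 below) asks the generic code to start the map out full. The core-side effect is approximately the following; the exact wording lives in kernel/cpu.c, not in this diff:

/* Approximate core definition under CONFIG_INIT_ALL_POSSIBLE. */
#ifdef CONFIG_INIT_ALL_POSSIBLE
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#else
cpumask_t cpu_possible_map __read_mostly;
#endif
EXPORT_SYMBOL(cpu_possible_map);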
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index c32da6f97999..373fca394a54 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -48,7 +48,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 
 /* sched_domains SD_NODE_INIT for PPC64 machines */
 #define SD_NODE_INIT (struct sched_domain) {           \
-       .span                   = CPU_MASK_NONE,        \
        .parent                 = NULL,                 \
        .child                  = NULL,                 \
        .groups                 = NULL,                 \
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac222d0ab12e..23b8b5e36f98 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map)
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
-                       irq_desc[irq].chip->set_affinity(irq, mask);
+                       irq_desc[irq].chip->set_affinity(irq, &mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ac3f721d235..65484b2200b3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -59,13 +59,9 @@
 
 struct thread_info *secondary_ti;
 
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-cpumask_t cpu_online_map = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
 
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e1f3a5140429..99f1ddd68582 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -844,7 +844,7 @@ static void register_decrementer_clockevent(int cpu)
        struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
 
        *dec = decrementer_clockevent;
-       dec->cpumask = cpumask_of_cpu(cpu);
+       dec->cpumask = cpumask_of(cpu);
 
        printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
               dec->name, dec->mult, dec->shift, cpu);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index f7a69021b7bf..84e058f1e1cc 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq)
        lpar_xirr_info_set((0xff << 24) | irq);
 }
 
-static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
+static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 {
        unsigned int irq;
        int status;
@@ -870,7 +870,7 @@ void xics_migrate_irqs_away(void)
 
                /* Reset affinity to all cpus */
                irq_desc[virq].affinity = CPU_MASK_ALL;
-               desc->chip->set_affinity(virq, CPU_MASK_ALL);
+               desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
        }
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index c82babb70074..3e0d89dcdba2 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -806,7 +806,7 @@ static void mpic_end_ipi(unsigned int irq)
 
 #endif /* CONFIG_SMP */
 
-void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
+void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
        struct mpic *mpic = mpic_from_irq(irq);
        unsigned int src = mpic_irq_to_hw(irq);
@@ -818,7 +818,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
        } else {
                cpumask_t tmp;
 
-               cpus_and(tmp, cpumask, cpu_online_map);
+               cpumask_and(&tmp, cpumask, cpu_online_mask);
 
                mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
                               mpic_physmask(cpus_addr(tmp)[0]));
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 6209c62a426d..3cef2af10f42 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
 
 extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
 extern void mpic_set_vector(unsigned int virq, unsigned int vector);
-extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 
 #endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8152fefc97b9..19577aeffd7b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -83,6 +83,7 @@ config S390
        select HAVE_KRETPROBES
        select HAVE_KVM if 64BIT
        select HAVE_ARCH_TRACEHOOK
+       select INIT_ALL_POSSIBLE
 
 source "init/Kconfig"
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fc78541dc57..3ed5c7a83c6c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -55,12 +55,6 @@
 struct _lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
-
 static struct task_struct *current_set[NR_CPUS];
 
 static u8 smp_cpu_type;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 5be981a36c3e..d649600df5b9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -160,7 +160,7 @@ void init_cpu_timer(void)
        cd->min_delta_ns        = 1;
        cd->max_delta_ns        = LONG_MAX;
        cd->rating              = 400;
-       cd->cpumask             = cpumask_of_cpu(cpu);
+       cd->cpumask             = cpumask_of(cpu);
        cd->set_next_event      = s390_next_event;
        cd->set_mode            = s390_set_mode;
 
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 85b660c17eb0..c24e9c6a1736 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -31,7 +31,7 @@ enum {
 };
 
 void smp_message_recv(unsigned int msg);
-void smp_timer_broadcast(cpumask_t mask);
+void smp_timer_broadcast(const struct cpumask *mask);
 
 void local_timer_interrupt(void);
 void local_timer_setup(unsigned int cpu);
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 95f0085e098a..279d9cc4a007 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -5,7 +5,6 @@
 
 /* sched_domains SD_NODE_INIT for sh machines */
 #define SD_NODE_INIT (struct sched_domain) {   \
-       .span                   = CPU_MASK_NONE,        \
        .parent                 = NULL,         \
        .child                  = NULL,         \
        .groups                 = NULL,         \
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 3c5ad1660bbc..8f4027412614 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -31,12 +31,6 @@
 int __cpu_number_map[NR_CPUS];         /* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];                /* Map logical to physical */
 
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
        struct sh_cpuinfo *c = cpu_data + cpu;
@@ -190,11 +184,11 @@ void arch_send_call_function_single_ipi(int cpu)
        plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
-void smp_timer_broadcast(cpumask_t mask)
+void smp_timer_broadcast(const struct cpumask *mask)
 {
        int cpu;
 
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu(cpu, mask)
                plat_send_ipi(cpu, SMP_MSG_TIMER);
 }
 
diff --git a/arch/sh/kernel/timers/timer-broadcast.c b/arch/sh/kernel/timers/timer-broadcast.c
index c2317635230f..96e8eaea1e62 100644
--- a/arch/sh/kernel/timers/timer-broadcast.c
+++ b/arch/sh/kernel/timers/timer-broadcast.c
@@ -51,7 +51,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
        clk->mult               = 1;
        clk->set_mode           = dummy_timer_set_mode;
        clk->broadcast          = smp_timer_broadcast;
-       clk->cpumask            = cpumask_of_cpu(cpu);
+       clk->cpumask            = cpumask_of(cpu);
 
        clockevents_register_device(clk);
 }
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 3c61ddd4d43e..0db3f9510336 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -263,7 +263,7 @@ static int tmu_timer_init(void)
        tmu0_clockevent.min_delta_ns =
                        clockevent_delta2ns(1, &tmu0_clockevent);
 
-       tmu0_clockevent.cpumask = cpumask_of_cpu(0);
+       tmu0_clockevent.cpumask = cpumask_of(0);
 
        clockevents_register_device(&tmu0_clockevent);
 
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index a8180e546a48..8408d9d2a662 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -29,8 +29,6 @@
  */
 
 extern unsigned char boot_cpu_id;
-extern cpumask_t phys_cpu_present_map;
-#define cpu_possible_map phys_cpu_present_map
 
 typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                       unsigned long, unsigned long);
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index a3ea2bcb95de..cab8e0286871 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
        }
 }
 
-static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4u_set_affinity(unsigned int virt_irq,
+                              const struct cpumask *mask)
 {
        sun4u_irq_enable(virt_irq);
 }
@@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq)
                       ino, err);
 }
 
-static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4v_set_affinity(unsigned int virt_irq,
+                              const struct cpumask *mask)
 {
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(virt_irq);
@@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq)
                       dev_handle, dev_ino, err);
 }
 
-static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4v_virt_set_affinity(unsigned int virt_irq,
+                                   const struct cpumask *mask)
 {
        unsigned long cpuid, dev_handle, dev_ino;
        int err;
@@ -851,7 +854,7 @@ void fixup_irqs(void)
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
-                                       irq_desc[irq].affinity);
+                                       &irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
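The sparc64 fixup_irqs() hunk is the caller-side counterpart of the signature change: instead of copying irq_desc[irq].affinity into the handler, callers pass its address, and the same '&' appears in the of_device and MSI hunks below. Reduced to a sketch:

#include <linux/irq.h>

/* Re-apply a recorded affinity through the new pointer interface. */
static void reapply_affinity(unsigned int irq)
{
        if (irq_desc[irq].chip->set_affinity)
                irq_desc[irq].chip->set_affinity(irq, &irq_desc[irq].affinity);
}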
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 46e231f7c5ce..322046cdf85f 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -780,7 +780,7 @@ out:
        if (nid != -1) {
                cpumask_t numa_mask = node_to_cpumask(nid);
 
-               irq_set_affinity(irq, numa_mask);
+               irq_set_affinity(irq, &numa_mask);
        }
 
        return irq;
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 2e680f34f727..0d0cd815e83e 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -288,7 +288,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
        if (nid != -1) {
                cpumask_t numa_mask = node_to_cpumask(nid);
 
-               irq_set_affinity(irq, numa_mask);
+               irq_set_affinity(irq, &numa_mask);
        }
        err = request_irq(irq, sparc64_msiq_interrupt, 0,
                          "MSIQ",
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e396c1f17a92..1e5ac4e282e1 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -39,8 +39,6 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
 unsigned char boot_cpu_id = 0;
 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* The only guaranteed locking primitive available on all Sparc
@@ -334,7 +332,7 @@ void __init smp_setup_cpu_possible_map(void)
        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
-                       cpu_set(mid, phys_cpu_present_map);
+                       cpu_set(mid, cpu_possible_map);
                        cpu_set(mid, cpu_present_map);
                }
                instance++;
@@ -354,7 +352,7 @@ void __init smp_prepare_boot_cpu(void)
 
        current_thread_info()->cpu = cpuid;
        cpu_set(cpuid, cpu_online_map);
-       cpu_set(cpuid, phys_cpu_present_map);
+       cpu_set(cpuid, cpu_possible_map);
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index bfe99d82d458..46329799f346 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -49,14 +49,10 @@
 
 int sparc64_multi_core __read_mostly;
 
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_online_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
index a4d45fc29b21..e1e97639231b 100644
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ b/arch/sparc/kernel/sparc_ksyms_32.c
@@ -112,10 +112,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 #ifdef CONFIG_SMP
 /* IRQ implementation. */
 EXPORT_SYMBOL(synchronize_irq);
-
-/* CPU online map and active count. */
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
 #endif
 
 EXPORT_SYMBOL(__udelay);
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 141da3759091..9df8f095a8b1 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -763,7 +763,7 @@ void __devinit setup_sparc64_timer(void)
        sevt = &__get_cpu_var(sparc64_events);
 
        memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
-       sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+       sevt->cpumask = cpumask_of(smp_processor_id());
 
        clockevents_register_device(sevt);
 }
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 045772142844..98351c78bc81 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -25,13 +25,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 #include "irq_user.h"
 #include "os.h"
 
-/* CPU online map, set by smp_boot_cpus */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
 /* Per CPU bogomips and other parameters
  * The only piece used here is the ipi pipe, which is set before SMP is
  * started and never changed.
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 47f04f4a3464..b13a87a3ec95 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ static int itimer_next_event(unsigned long delta,
 static struct clock_event_device itimer_clockevent = {
        .name           = "itimer",
        .rating         = 250,
-       .cpumask        = CPU_MASK_ALL,
+       .cpumask        = cpu_all_mask,
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .set_mode       = itimer_set_mode,
        .set_next_event = itimer_next_event,
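For devices that may fire on any CPU, a pointer to the shared constant cpu_all_mask replaces an embedded CPU_MASK_ALL value; since cpu_all_mask is the address of a global bitmap, it is still usable in a static initializer, as the UML itimer above shows. An illustrative definition (device name hypothetical):

#include <linux/clockchips.h>
#include <linux/cpumask.h>

static struct clock_event_device any_cpu_clockevent = {
        .name           = "any-cpu-timer",
        .rating         = 100,
        .cpumask        = cpu_all_mask, /* const struct cpumask *, link-time constant */
        .features       = CLOCK_EVT_FEAT_PERIODIC,
};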
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0f44add3e0b7..249d1e0824b5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -601,19 +601,20 @@ config IOMMU_HELPER
 
 config MAXSMP
        bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-       depends on X86_64 && SMP && BROKEN
+       depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+       select CPUMASK_OFFSTACK
        default n
        help
          Configure maximum number of CPUS and NUMA Nodes for this architecture.
          If unsure, say N.
 
 config NR_CPUS
-       int "Maximum number of CPUs (2-512)" if !MAXSMP
-       range 2 512
-       depends on SMP
+       int "Maximum number of CPUs" if SMP && !MAXSMP
+       range 2 512 if SMP && !MAXSMP
+       default "1" if !SMP
        default "4096" if MAXSMP
-       default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-       default "8"
+       default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
+       default "8" if SMP
        help
          This allows you to specify the maximum number of CPUs which this
          kernel will support.  The maximum supported value is 512 and the
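MAXSMP now selects CPUMASK_OFFSTACK, under which cpumask_var_t becomes a real pointer and temporary masks are heap-allocated, keeping NR_CPUS=4096 bitmaps (512 bytes each) off the kernel stack. The standard usage pattern, sketched with a hypothetical per-CPU callback:

#include <linux/cpumask.h>
#include <linux/slab.h>

static int visit_common_cpus(const struct cpumask *a, const struct cpumask *b)
{
        cpumask_var_t tmp;      /* pointer with CPUMASK_OFFSTACK=y, array otherwise */
        int cpu;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(tmp, a, b);
        for_each_cpu(cpu, tmp)
                visit_cpu(cpu);         /* hypothetical callback */

        free_cpumask_var(tmp);
        return 0;
}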
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index ce547f24a1cd..d8dd9f537911 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
        return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-       return cpu_online_map;
+       return &cpu_online_map;
 #else
-       return cpumask_of_cpu(0);
+       return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-       if (mps_cpu < NR_CPUS)
+       if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
        return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-       if (cpu >= NR_CPUS)
+       if (cpu >= nr_cpu_ids)
                return BAD_APICID;
        return cpu_physical_id(cpu);
 }
@@ -119,16 +119,34 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
        int cpu;
        int apicid;
 
-       cpu = first_cpu(cpumask);
+       cpu = first_cpu(*cpumask);
        apicid = cpu_to_logical_apicid(cpu);
        return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                 const struct cpumask *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_and(cpu, cpumask, andmask)
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       if (cpu < nr_cpu_ids)
+               return cpu_to_logical_apicid(cpu);
+
+       return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
        return cpuid_apic >> index_msb;
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
index 9404c535b7ec..27fcd01b3ae6 100644
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ b/arch/x86/include/asm/bigsmp/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
        send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-       cpumask_t mask = cpu_online_map;
-       cpu_clear(smp_processor_id(), mask);
-
-       if (!cpus_empty(mask))
-               send_IPI_mask(mask, vector);
+       send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-       send_IPI_mask(cpu_online_map, vector);
+       send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
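The old send_IPI_allbutself() copied the whole online map onto the stack just to clear one bit; the new send_IPI_mask_allbutself() takes a pointer and skips the sender while walking it. Its behaviour is roughly the following; the per-CPU send primitive is a stand-in for the real arch code, not an actual kernel function:

#include <linux/cpumask.h>
#include <linux/smp.h>

void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                if (cpu != this_cpu)
                        __send_IPI_one(cpu, vector);    /* hypothetical primitive */
}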
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e6b82b17b072..dc27705f5443 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr)
        _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
 }
 
-#define SYS_VECTOR_FREE                0
-#define SYS_VECTOR_ALLOCED     1
-
 extern int first_system_vector;
-extern char system_vectors[];
+/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
+extern unsigned long used_vectors[];
 
 static inline void alloc_system_vector(int vector)
 {
-       if (system_vectors[vector] == SYS_VECTOR_FREE) {
-               system_vectors[vector] = SYS_VECTOR_ALLOCED;
+       if (!test_bit(vector, used_vectors)) {
+               set_bit(vector, used_vectors);
                if (first_system_vector > vector)
                        first_system_vector = vector;
        } else
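used_vectors replaces the one-byte-per-vector system_vectors[] with a bitmap: test_bit()/set_bit() operate on an array of unsigned long, one bit per vector, and the same bitmap doubles as the "not managed by per-CPU vector_irq" marker that the new comment mentions. The matching definition is along these lines (a sketch; the real one sits in the x86 interrupt code):

#include <linux/bitops.h>

/* Expands to: unsigned long used_vectors[BITS_TO_LONGS(NR_VECTORS)]; */
DECLARE_BITMAP(used_vectors, NR_VECTORS);

static void mark_vector_used(int vector)
{
        if (!test_bit(vector, used_vectors))
                set_bit(vector, used_vectors);
}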
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index e24ef876915f..51ac1230294e 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
        return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-       return CPU_MASK_ALL;
+       return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-       return cpumask_of_cpu(smp_processor_id());
+       return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER         (APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
        int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
        printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
                (apic_version[apic] == 0x14) ?
-               "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+               "Physical Cluster" : "Logical Cluster",
+               nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
        if (!mps_cpu)
                return boot_cpu_physical_apicid;
-       else if (mps_cpu < NR_CPUS)
+       else if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
        else
                return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-       if (cpu >= NR_CPUS)
+       if (cpu >= nr_cpu_ids)
                return BAD_APICID;
        return (int)cpu_2_logical_apicid[cpu];
 #else
        return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
        return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
        int num_bits_set;
        int cpus_found = 0;
        int cpu;
        int apicid;
 
-       num_bits_set = cpus_weight(cpumask);
+       num_bits_set = cpumask_weight(cpumask);
        /* Return id to all */
        if (num_bits_set == NR_CPUS)
                return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
161 * The cpus in the mask must all be on the apic cluster. If are not 163 * The cpus in the mask must all be on the apic cluster. If are not
162 * on the same apicid cluster return default value of TARGET_CPUS. 164 * on the same apicid cluster return default value of TARGET_CPUS.
163 */ 165 */
164 cpu = first_cpu(cpumask); 166 cpu = cpumask_first(cpumask);
165 apicid = cpu_to_logical_apicid(cpu); 167 apicid = cpu_to_logical_apicid(cpu);
166 while (cpus_found < num_bits_set) { 168 while (cpus_found < num_bits_set) {
167 if (cpu_isset(cpu, cpumask)) { 169 if (cpumask_test_cpu(cpu, cpumask)) {
168 int new_apicid = cpu_to_logical_apicid(cpu); 170 int new_apicid = cpu_to_logical_apicid(cpu);
169 if (apicid_cluster(apicid) != 171 if (apicid_cluster(apicid) !=
170 apicid_cluster(new_apicid)){ 172 apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
179 return apicid; 181 return apicid;
180} 182}
181 183
182static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 184static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
183{ 185{
184 int num_bits_set; 186 int num_bits_set;
185 int cpus_found = 0; 187 int cpus_found = 0;
186 int cpu; 188 int cpu;
187 int apicid; 189 int apicid;
188 190
189 num_bits_set = cpus_weight(cpumask); 191 num_bits_set = cpus_weight(*cpumask);
190 /* Return id to all */ 192 /* Return id to all */
191 if (num_bits_set == NR_CPUS) 193 if (num_bits_set == NR_CPUS)
192 return cpu_to_logical_apicid(0); 194 return cpu_to_logical_apicid(0);
@@ -194,10 +196,52 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
194 * The cpus in the mask must all be on the apic cluster. If are not 196 * The cpus in the mask must all be on the apic cluster. If are not
195 * on the same apicid cluster return default value of TARGET_CPUS. 197 * on the same apicid cluster return default value of TARGET_CPUS.
196 */ 198 */
197 cpu = first_cpu(cpumask); 199 cpu = first_cpu(*cpumask);
200 apicid = cpu_to_logical_apicid(cpu);
201 while (cpus_found < num_bits_set) {
202 if (cpu_isset(cpu, *cpumask)) {
203 int new_apicid = cpu_to_logical_apicid(cpu);
204 if (apicid_cluster(apicid) !=
205 apicid_cluster(new_apicid)){
206 printk ("%s: Not a valid mask!\n", __func__);
207 return cpu_to_logical_apicid(0);
208 }
209 apicid = new_apicid;
210 cpus_found++;
211 }
212 cpu++;
213 }
214 return apicid;
215}
216
217
218static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
219 const struct cpumask *andmask)
220{
221 int num_bits_set;
222 int cpus_found = 0;
223 int cpu;
224 int apicid = cpu_to_logical_apicid(0);
225 cpumask_var_t cpumask;
226
227 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
228 return apicid;
229
230 cpumask_and(cpumask, inmask, andmask);
231 cpumask_and(cpumask, cpumask, cpu_online_mask);
232
233 num_bits_set = cpumask_weight(cpumask);
234 /* Return id to all */
235 if (num_bits_set == NR_CPUS)
236 goto exit;
237 /*
238 * The cpus in the mask must all be on the apic cluster. If are not
239 * on the same apicid cluster return default value of TARGET_CPUS.
240 */
241 cpu = cpumask_first(cpumask);
198 apicid = cpu_to_logical_apicid(cpu); 242 apicid = cpu_to_logical_apicid(cpu);
199 while (cpus_found < num_bits_set) { 243 while (cpus_found < num_bits_set) {
200 if (cpu_isset(cpu, cpumask)) { 244 if (cpumask_test_cpu(cpu, cpumask)) {
201 int new_apicid = cpu_to_logical_apicid(cpu); 245 int new_apicid = cpu_to_logical_apicid(cpu);
202 if (apicid_cluster(apicid) != 246 if (apicid_cluster(apicid) !=
203 apicid_cluster(new_apicid)){ 247 apicid_cluster(new_apicid)){
@@ -209,6 +253,8 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
209 } 253 }
210 cpu++; 254 cpu++;
211 } 255 }
256exit:
257 free_cpumask_var(cpumask);
212 return apicid; 258 return apicid;
213} 259}
214 260
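The hunk above is the recurring shape of this whole series: the by-value cpumask_t walk (cpus_weight/first_cpu/cpu_isset) becomes a pointer walk (cpumask_weight/cpumask_first/cpumask_test_cpu), and the new _and variant intersects the two requested masks with cpu_online_mask before scanning. Below is a standalone model of the cluster-validation scan, with a plain unsigned long standing in for struct cpumask and (id & 0xF0) standing in for apicid_cluster(); both stand-ins are assumptions for illustration only, not code from this patch.

#include <stdio.h>

static unsigned int mask_to_apicid(unsigned long mask,
				   const unsigned char *cpu_to_apicid)
{
	unsigned int apicid = cpu_to_apicid[0];	/* fallback, like cpu 0 */
	int cpu, seen = 0;

	for (cpu = 0; cpu < 8 * (int)sizeof(mask); cpu++) {
		if (!(mask & (1UL << cpu)))
			continue;
		/* every cpu in the mask must share one apic cluster */
		if (seen++ &&
		    (apicid & 0xF0) != (cpu_to_apicid[cpu] & 0xF0)) {
			printf("not a valid mask\n");
			return cpu_to_apicid[0];
		}
		apicid = cpu_to_apicid[cpu];
	}
	return apicid;
}

int main(void)
{
	unsigned char id[8] = { 0x10, 0x11, 0x12, 0x13, 0x20, 0x21, 0x22, 0x23 };

	printf("%#x\n", mask_to_apicid(0x3, id));	/* same cluster -> 0x11 */
	printf("%#x\n", mask_to_apicid(0x18, id));	/* crosses clusters -> 0x10 */
	return 0;
}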
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
index 632a955fcc0a..7e8ed24d4b8a 100644
--- a/arch/x86/include/asm/es7000/ipi.h
+++ b/arch/x86/include/asm/es7000/ipi.h
@@ -1,24 +1,22 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 0ac17d33a8c7..746f37a7963a 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,12 +57,16 @@ struct genapic {
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
@@ -114,6 +118,7 @@ struct genapic {
 	APICFUNC(get_apic_id) \
 	.apic_id_mask = APIC_ID_MASK, \
 	APICFUNC(cpu_mask_to_apicid) \
+	APICFUNC(cpu_mask_to_apicid_and) \
 	APICFUNC(vector_allocation_domain) \
 	APICFUNC(acpi_madt_oem_check) \
 	IPIFUNC(send_IPI_mask) \
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 2cae011668b7..adf32fb56aa6 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H
 
+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,20 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	const struct cpumask *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
 	unsigned int (*phys_pkg_id)(int index_msb);
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index f89dffb28aa9..c745a306f7d3 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+					  int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
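send_IPI_mask_allbutself exists so callers no longer copy the whole online map onto the stack just to clear their own bit; with NR_CPUS=4096 a cpumask_t is 512 bytes, which is exactly what this merge is keeping off kernel stacks. Below is a runnable model of the skip-self walk, where printf() stands in for __send_IPI_dest_field() and a plain word for the cpumask; both stand-ins are assumed for illustration.

#include <stdio.h>

static void send_allbutself(unsigned long mask, int self, int vector)
{
	int cpu;

	/* iterate set bits, skipping the sending cpu itself */
	for (cpu = 0; cpu < 8 * (int)sizeof(mask); cpu++)
		if ((mask & (1UL << cpu)) && cpu != self)
			printf("IPI vector %#x -> cpu %d\n", vector, cpu);
}

int main(void)
{
	send_allbutself(0x1f, 2, 0xfd);	/* cpus 0-4, skip cpu 2 */
	return 0;
}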
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 28e409fc73f3..592688ed04d3 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -33,7 +33,7 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
 #endif
 
 extern unsigned int do_IRQ(struct pt_regs *regs);
@@ -42,5 +42,6 @@ extern void native_init_IRQ(void);
 
 /* Interrupt vector management */
 extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
+extern int vector_used_by_percpu_irq(unsigned int vector);
 
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index 6cb3a467e067..cc09cbbee27e 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return cpu_online_mask;
 #else
-	return cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void)
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define phys_pkg_id (genapic->phys_pkg_id)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
@@ -61,9 +62,19 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	unsigned long mask1 = cpumask_bits(cpumask)[0];
+	unsigned long mask2 = cpumask_bits(andmask)[0];
+	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+	return (unsigned int)(mask1 & mask2 & mask3);
 }
 
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +99,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -98,8 +109,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
 
@@ -131,7 +141,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
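In flat logical mode each of the first eight CPUs owns one bit of the destination byte, so the cpu_mask_to_apicid_and added above reduces to ANDing the low words of the two requested masks and the online mask. A minimal check of that arithmetic with made-up mask values:

#include <assert.h>

int main(void)
{
	unsigned long irq_mask = 0x0c;	/* cpus 2,3 requested */
	unsigned long affinity = 0x0a;	/* cpus 1,3 allowed   */
	unsigned long online   = 0xff;	/* cpus 0-7 online    */

	/* only cpu 3 survives all three masks */
	assert((unsigned int)(irq_mask & affinity & online) == 0x08);
	return 0;
}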
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
index fabca01ebacf..191312d155da 100644
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h
@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast || vector == NMI_VECTOR) {
-		cpumask_t mask = cpu_online_map;
-
-		cpu_clear(smp_processor_id(), mask);
-		send_IPI_mask(mask, vector);
-	} else
+	if (no_broadcast || vector == NMI_VECTOR)
+		send_IPI_mask_allbutself(cpu_online_mask, vector);
+	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
 
 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(cpu_online_map, vector);
+		send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
index e430f47df667..48553e958ad5 100644
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ b/arch/x86/include/asm/mach-generic/mach_apic.h
@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index 0bf2a06b7a4e..c80f00d29965 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
 #define NO_BALANCE_IRQ (1)
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+	return (int) 0xF;
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
 {
 	return (int) 0xF;
 }
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
index 935588d286cf..a8374c652778 100644
--- a/arch/x86/include/asm/numaq/ipi.h
+++ b/arch/x86/include/asm/numaq/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index d12811ce51d9..830b9fcb6427 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -60,7 +60,7 @@ struct smp_ops {
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
 
-	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_ipi)(const struct cpumask *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
 
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 
 static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_ops.send_call_func_ipi(mask);
+	smp_ops.send_call_func_ipi(&mask);
 }
 
 void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 9b3070f1c2ac..99327d1be49f 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -14,13 +14,13 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0. IRQ balancing will spread load
 	 */
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -170,6 +170,49 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
+						  const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = 0xFF;
+	cpumask_var_t cpumask;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return (int) 0xFF;
+
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
+
+	num_bits_set = cpumask_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == nr_cpu_ids)
+		goto exit;
+	/*
+	 * The cpus in the mask must all be on the apic cluster. If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first(cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
+				return 0xFF;
+			}
+			apicid = apicid | new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+exit:
+	free_cpumask_var(cpumask);
+	return apicid;
+}
+
 /* cpuid returns the value latched in the HW at reset, not the APIC ID
  * register's value. For any box whose BIOS changes APIC IDs, like
  * clustered APIC systems, we must use hard_smp_processor_id.
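Like es7000, summit cannot afford a cpumask_t on the stack here, so the new _and helper goes through cpumask_var_t: allocate off-stack with GFP_ATOMIC, intersect, and free on every exit path. A minimal kernel-style sketch of just that allocate/and/free skeleton follows; the and_weight() helper and its zero fallback are illustrative assumptions, not code from this patch.

#include <linux/cpumask.h>
#include <linux/gfp.h>

static unsigned int and_weight(const struct cpumask *a,
			       const struct cpumask *b)
{
	cpumask_var_t tmp;
	unsigned int w = 0;	/* assumed fallback when allocation fails */

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return w;

	/* intersect the two masks, then restrict to online cpus */
	cpumask_and(tmp, a, b);
	cpumask_and(tmp, tmp, cpu_online_mask);
	w = cpumask_weight(tmp);

	free_cpumask_var(tmp);	/* every exit path must free */
	return w;
}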
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
index 53bd1e7bd7b4..a8a2c24f50cc 100644
--- a/arch/x86/include/asm/summit/ipi.h
+++ b/arch/x86/include/asm/summit/ipi.h
@@ -1,9 +1,10 @@
 #ifndef __ASM_SUMMIT_IPI_H
 #define __ASM_SUMMIT_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index ff386ff50ed7..79e31e9dcdda 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -226,6 +226,8 @@ extern cpumask_t cpu_coregroup_map(int cpu);
 #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers yes
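The new topology_core_cpumask()/topology_thread_cpumask() macros return pointers into the per-cpu maps, so schedulers and drivers can iterate siblings without copying a cpumask_t by value. A hypothetical usage sketch; count_thread_siblings() is illustrative and not part of the patch (cpumask_weight() would do the same in one call).

#include <linux/cpumask.h>
#include <linux/topology.h>

static unsigned int count_thread_siblings(int cpu)
{
	unsigned int n = 0;
	int sib;

	/* walk the sibling mask through the pointer, no copy made */
	for_each_cpu(sib, topology_thread_cpumask(cpu))
		n++;
	return n;
}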
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b5229affb953..6b7f824db160 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -119,8 +119,6 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
 int first_system_vector = 0xfe;
 
-char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
-
 /*
  * Debug level, exported for io_apic.c
  */
@@ -142,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(cpumask_t mask);
+static void lapic_timer_broadcast(const cpumask_t *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -455,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(cpumask_t mask)
+static void lapic_timer_broadcast(const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
 	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
@@ -471,7 +469,7 @@ static void __cpuinit setup_APIC_timer(void)
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
-	levt->cpumask = cpumask_of_cpu(smp_processor_id());
+	levt->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(levt);
 }
@@ -1807,28 +1805,32 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 void __cpuinit generic_processor_info(int apicid, int version)
 {
 	int cpu;
-	cpumask_t tmp_map;
 
 	/*
 	 * Validate version
 	 */
 	if (version == 0x0) {
 		pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
 			   "fixing up to 0x10. (tell your hw vendor)\n",
 				version);
 		version = 0x10;
 	}
 	apic_version[apicid] = version;
 
-	if (num_processors >= NR_CPUS) {
-		pr_warning("WARNING: NR_CPUS limit of %i reached."
-			   " Processor ignored.\n", NR_CPUS);
+	if (num_processors >= nr_cpu_ids) {
+		int max = nr_cpu_ids;
+		int thiscpu = max + disabled_cpus;
+
+		pr_warning(
+			"ACPI: NR_CPUS/possible_cpus limit of %i reached."
+			" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+
+		disabled_cpus++;
 		return;
 	}
 
 	num_processors++;
-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
+	cpu = cpumask_next_zero(-1, cpu_present_mask);
 
 	physid_set(apicid, phys_cpu_present_map);
 	if (apicid == boot_cpu_physical_apicid) {
@@ -1878,8 +1880,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	}
 #endif
 
-	cpu_set(cpu, cpu_possible_map);
-	cpu_set(cpu, cpu_present_map);
+	set_cpu_possible(cpu, true);
+	set_cpu_present(cpu, true);
 }
 
 #ifdef CONFIG_X86_64
@@ -2081,7 +2083,7 @@ __cpuinit int apic_is_clustered_box(void)
 	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		/* are we being called early in kernel startup? */
 		if (bios_cpu_apicid) {
 			id = bios_cpu_apicid[i];
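generic_processor_info() used to build the complement of cpu_present_map in a stack cpumask_t just to find the first unset bit; cpumask_next_zero(-1, cpu_present_mask) finds it directly. The same idea in a runnable userspace model over a plain word:

#include <stdio.h>

/* find the first zero bit without materializing the complement mask */
static int first_zero_bit(unsigned long mask)
{
	int bit;

	for (bit = 0; bit < 8 * (int)sizeof(mask); bit++)
		if (!(mask & (1UL << bit)))
			return bit;
	return -1;
}

int main(void)
{
	printf("%d\n", first_zero_bit(0x17));	/* bits 0,1,2,4 set -> 3 */
	return 0;
}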
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 68b5d8681cbb..c6ecda64f5f1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -534,31 +534,16 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static void get_cpu_leaves(void *_retval)
 {
-	struct _cpuid4_info *this_leaf;
-	unsigned long j;
-	int retval;
-	cpumask_t oldmask;
-
-	if (num_cache_leaves == 0)
-		return -ENOENT;
-
-	per_cpu(cpuid4_info, cpu) = kzalloc(
-	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (per_cpu(cpuid4_info, cpu) == NULL)
-		return -ENOMEM;
-
-	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (retval)
-		goto out;
+	int j, *retval = _retval, cpu = smp_processor_id();
 
 	/* Do cpuid and store the results */
 	for (j = 0; j < num_cache_leaves; j++) {
+		struct _cpuid4_info *this_leaf;
 		this_leaf = CPUID4_INFO_IDX(cpu, j);
-		retval = cpuid4_cache_lookup(j, this_leaf);
-		if (unlikely(retval < 0)) {
+		*retval = cpuid4_cache_lookup(j, this_leaf);
+		if (unlikely(*retval < 0)) {
 			int i;
 
 			for (i = 0; i < j; i++)
@@ -567,9 +552,21 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
-	set_cpus_allowed_ptr(current, &oldmask);
+}
+
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
+{
+	int retval;
+
+	if (num_cache_leaves == 0)
+		return -ENOENT;
+
+	per_cpu(cpuid4_info, cpu) = kzalloc(
+	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
+	if (per_cpu(cpuid4_info, cpu) == NULL)
+		return -ENOMEM;
 
-out:
+	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
 	if (retval) {
 		kfree(per_cpu(cpuid4_info, cpu));
 		per_cpu(cpuid4_info, cpu) = NULL;
@@ -626,8 +623,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 		cpumask_t *mask = &this_leaf->shared_cpu_map;
 
 		n = type?
-		    cpulist_scnprintf(buf, len-2, *mask):
-		    cpumask_scnprintf(buf, len-2, *mask);
+		    cpulist_scnprintf(buf, len-2, mask) :
+		    cpumask_scnprintf(buf, len-2, mask);
 		buf[n++] = '\n';
 		buf[n] = '\0';
 	}
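The old detect_cache_attributes() migrated current onto the target CPU with set_cpus_allowed_ptr() and migrated back afterwards; the rewrite keeps the caller where it is and runs the CPUID probe remotely. A kernel-style sketch of that smp_call_function_single() pattern; probe_local() and probe_on() are illustrative stand-ins, not code from this patch.

#include <linux/smp.h>

static void probe_local(void *_ret)
{
	int *ret = _ret;

	*ret = smp_processor_id();	/* runs on the target cpu */
}

static int probe_on(int cpu)
{
	int ret = -1;

	/* wait=true: 'ret' has been written by the time this returns */
	smp_call_function_single(cpu, probe_local, &ret, true);
	return ret;
}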
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 748c8f9e7a05..a5a5e0530370 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
  * CPU Initialization
  */
 
+struct thresh_restart {
+	struct threshold_block *b;
+	int reset;
+	u16 old_limit;
+};
+
 /* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_block *b,
-				   int reset, u16 old_limit)
+static long threshold_restart_bank(void *_tr)
 {
+	struct thresh_restart *tr = _tr;
 	u32 mci_misc_hi, mci_misc_lo;
 
-	rdmsr(b->address, mci_misc_lo, mci_misc_hi);
+	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
 
-	if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
-		reset = 1;	/* limit cannot be lower than err count */
+	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+		tr->reset = 1;	/* limit cannot be lower than err count */
 
-	if (reset) {		/* reset err count and overflow bit */
+	if (tr->reset) {	/* reset err count and overflow bit */
 		mci_misc_hi =
 		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
-		    (THRESHOLD_MAX - b->threshold_limit);
-	} else if (old_limit) {	/* change limit w/o reset */
+		    (THRESHOLD_MAX - tr->b->threshold_limit);
+	} else if (tr->old_limit) {	/* change limit w/o reset */
 		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
-		    (old_limit - b->threshold_limit);
+		    (tr->old_limit - tr->b->threshold_limit);
 		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
 		    (new_count & THRESHOLD_MAX);
 	}
 
-	b->interrupt_enable ?
+	tr->b->interrupt_enable ?
 	    (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
 	    (mci_misc_hi &= ~MASK_INT_TYPE_HI);
 
 	mci_misc_hi |= MASK_COUNT_EN_HI;
-	wrmsr(b->address, mci_misc_lo, mci_misc_hi);
+	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	return 0;
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
@@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 	unsigned int cpu = smp_processor_id();
 	u8 lvt_off;
 	u32 low = 0, high = 0, address = 0;
+	struct thresh_restart tr;
 
 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 			wrmsr(address, low, high);
 
 			threshold_defaults.address = address;
-			threshold_restart_bank(&threshold_defaults, 0, 0);
+			tr.b = &threshold_defaults;
+			tr.reset = 0;
+			tr.old_limit = 0;
+			threshold_restart_bank(&tr);
 		}
 	}
 }
@@ -251,20 +262,6 @@ struct threshold_attr {
 	ssize_t(*store) (struct threshold_block *, const char *, size_t count);
 };
 
-static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
-					   cpumask_t *newmask)
-{
-	*oldmask = current->cpus_allowed;
-	cpus_clear(*newmask);
-	cpu_set(cpu, *newmask);
-	set_cpus_allowed_ptr(current, newmask);
-}
-
-static void affinity_restore(const cpumask_t *oldmask)
-{
-	set_cpus_allowed_ptr(current, oldmask);
-}
-
 #define SHOW_FIELDS(name) \
 static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
 { \
@@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
 				      const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask, newmask;
+	struct thresh_restart tr;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
 	b->interrupt_enable = !!new;
 
-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 0, 0);
-	affinity_restore(&oldmask);
+	tr.b = b;
+	tr.reset = 0;
+	tr.old_limit = 0;
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 
 	return end - buf;
 }
@@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 				     const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask, newmask;
-	u16 old;
+	struct thresh_restart tr;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
@@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 		new = THRESHOLD_MAX;
 	if (new < 1)
 		new = 1;
-	old = b->threshold_limit;
+	tr.old_limit = b->threshold_limit;
 	b->threshold_limit = new;
+	tr.b = b;
+	tr.reset = 0;
 
-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 0, old);
-	affinity_restore(&oldmask);
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 
 	return end - buf;
 }
 
-static ssize_t show_error_count(struct threshold_block *b, char *buf)
+static long local_error_count(void *_b)
 {
-	u32 high, low;
-	cpumask_t oldmask, newmask;
-	affinity_set(b->cpu, &oldmask, &newmask);
+	struct threshold_block *b = _b;
+	u32 low, high;
+
 	rdmsr(b->address, low, high);
-	affinity_restore(&oldmask);
-	return sprintf(buf, "%x\n",
-		       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
+	return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
+}
+
+static ssize_t show_error_count(struct threshold_block *b, char *buf)
+{
+	return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b));
 }
 
 static ssize_t store_error_count(struct threshold_block *b,
 				 const char *buf, size_t count)
 {
-	cpumask_t oldmask, newmask;
-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 1, 0);
-	affinity_restore(&oldmask);
+	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
+
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 	return 1;
 }
 
@@ -463,12 +462,19 @@ out_free:
 	return err;
 }
 
+static long local_allocate_threshold_blocks(void *_bank)
+{
+	unsigned int *bank = _bank;
+
+	return allocate_threshold_blocks(smp_processor_id(), *bank, 0,
+					 MSR_IA32_MC0_MISC + *bank * 4);
+}
+
 /* symlinks sibling shared banks to first core. first core owns dir/files. */
 static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	int i, err = 0;
 	struct threshold_bank *b = NULL;
-	cpumask_t oldmask, newmask;
 	char name[32];
 
 	sprintf(name, "threshold_bank%i", bank);
@@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
-	affinity_set(cpu, &oldmask, &newmask);
-	err = allocate_threshold_blocks(cpu, bank, 0,
-					MSR_IA32_MC0_MISC + bank * 4);
-	affinity_restore(&oldmask);
-
+	err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank);
 	if (err)
 		goto out_free;
 
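threshold_restart_bank() must execute on the CPU that owns the bank's MSRs, and work_on_cpu() passes exactly one void * to a long (*fn)(void *) — hence struct thresh_restart bundling what used to be three parameters. A sketch of that bundling idiom; do_restart() and its return value are illustrative stand-ins, while struct threshold_block is the file's own type.

#include <linux/workqueue.h>

struct restart_args {
	struct threshold_block *b;
	int reset;
	u16 old_limit;
};

static long do_restart(void *_args)
{
	struct restart_args *args = _args;

	/* runs on args->b->cpu; safe to touch that bank's MSRs here */
	return args->reset ? 1 : 0;
}

/* caller side, synchronous:
 *	struct restart_args args = { .b = b, .reset = 1, .old_limit = 0 };
 *	work_on_cpu(b->cpu, do_restart, &args);
 */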
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c0262791bda4..34185488e4fb 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static cpumask_t flat_target_cpus(void)
+static const struct cpumask *flat_target_cpus(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
-static cpumask_t flat_vector_allocation_domain(int cpu)
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /*
@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
+static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
-	unsigned long mask = cpus_addr(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
 	local_irq_restore(flags);
 }
 
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
+{
+	unsigned long mask = cpumask_bits(cpumask)[0];
+
+	_flat_send_IPI_mask(mask, vector);
+}
+
+static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+					  int vector)
+{
+	unsigned long mask = cpumask_bits(cpumask)[0];
+	int cpu = smp_processor_id();
+
+	if (cpu < BITS_PER_LONG)
+		clear_bit(cpu, &mask);
+	_flat_send_IPI_mask(mask, vector);
+}
+
 static void flat_send_IPI_allbutself(int vector)
 {
+	int cpu = smp_processor_id();
 #ifdef CONFIG_HOTPLUG_CPU
 	int hotplug = 1;
 #else
 	int hotplug = 0;
 #endif
 	if (hotplug || vector == NMI_VECTOR) {
-		cpumask_t allbutme = cpu_online_map;
+		if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
+			unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
-		cpu_clear(smp_processor_id(), allbutme);
+			if (cpu < BITS_PER_LONG)
+				clear_bit(cpu, &mask);
 
-		if (!cpus_empty(allbutme))
-			flat_send_IPI_mask(allbutme, vector);
+			_flat_send_IPI_mask(mask, vector);
+		}
 	} else if (num_online_cpus() > 1) {
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
 	}
@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
 	if (vector == NMI_VECTOR)
-		flat_send_IPI_mask(cpu_online_map, vector);
+		flat_send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void)
 	return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+}
+
+static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						const struct cpumask *andmask)
 {
-	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
+	unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+	unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
+
+	return mask1 & mask2;
 }
 
 static unsigned int phys_pkg_id(int index_msb)
@@ -157,8 +186,10 @@ struct genapic apic_flat = {
 	.send_IPI_all = flat_send_IPI_all,
 	.send_IPI_allbutself = flat_send_IPI_allbutself,
 	.send_IPI_mask = flat_send_IPI_mask,
+	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
 	.phys_pkg_id = phys_pkg_id,
 	.get_apic_id = get_apic_id,
 	.set_apic_id = set_apic_id,
@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static cpumask_t physflat_target_cpus(void)
+static const struct cpumask *physflat_target_cpus(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
-static cpumask_t physflat_vector_allocation_domain(int cpu)
+static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	return cpumask_of_cpu(cpu);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
-static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
+static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
 }
 
-static void physflat_send_IPI_allbutself(int vector)
+static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+					      int vector)
 {
-	cpumask_t allbutme = cpu_online_map;
+	send_IPI_mask_allbutself(cpumask, vector);
+}
 
-	cpu_clear(smp_processor_id(), allbutme);
-	physflat_send_IPI_mask(allbutme, vector);
+static void physflat_send_IPI_allbutself(int vector)
+{
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(cpu_online_map, vector);
+	physflat_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -224,13 +259,31 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
 		return BAD_APICID;
 }
 
+static unsigned int
+physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+				const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	if (cpu < nr_cpu_ids)
+		return per_cpu(x86_cpu_to_apicid, cpu);
+	return BAD_APICID;
+}
+
 struct genapic apic_physflat = {
 	.name = "physical flat",
 	.acpi_madt_oem_check = physflat_acpi_madt_oem_check,
@@ -243,8 +296,10 @@ struct genapic apic_physflat = {
 	.send_IPI_all = physflat_send_IPI_all,
 	.send_IPI_allbutself = physflat_send_IPI_allbutself,
 	.send_IPI_mask = physflat_send_IPI_mask,
+	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
 	.phys_pkg_id = phys_pkg_id,
 	.get_apic_id = get_apic_id,
 	.set_apic_id = set_apic_id,
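physflat_cpu_mask_to_apicid_and above picks the first CPU present in both masks that is also online, which is what for_each_cpu_and() plus the online test amounts to. A runnable userspace model with made-up mask words:

#include <stdio.h>

int main(void)
{
	unsigned long a = 0xf0, b = 0x30, online = 0xef;
	unsigned long both = a & b & online;	/* intersection of all three */
	int cpu;

	for (cpu = 0; cpu < 8 * (int)sizeof(both); cpu++)
		if (both & (1UL << cpu))
			break;
	/* prints 5: cpus 4,5 are in both masks, cpu 4 is offline */
	printf("first cpu: %d\n", cpu < 8 * (int)sizeof(both) ? cpu : -1);
	return 0;
}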
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f6a2c8eb48a6..6ce497cc372d 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
22 22
23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
24 24
25static cpumask_t x2apic_target_cpus(void) 25static const struct cpumask *x2apic_target_cpus(void)
26{ 26{
27 return cpumask_of_cpu(0); 27 return cpumask_of(0);
28} 28}
29 29
30/* 30/*
31 * for now each logical cpu is in its own vector allocation domain. 31 * for now each logical cpu is in its own vector allocation domain.
32 */ 32 */
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
39 38
40static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 39static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
56 * at once. We have 16 cpu's in a cluster. This will minimize IPI register 55 * at once. We have 16 cpu's in a cluster. This will minimize IPI register
57 * writes. 56 * writes.
58 */ 57 */
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask(query_cpu, mask) {
-		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-				       vector, APIC_DEST_LOGICAL);
-	}
+	for_each_cpu(query_cpu, mask)
+		__x2apic_send_IPI_dest(
+			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+			vector, APIC_DEST_LOGICAL);
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
-	cpumask_t mask = cpu_online_map;
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
 
-	cpu_clear(smp_processor_id(), mask);
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+				vector, APIC_DEST_LOGICAL);
+	local_irq_restore(flags);
+}
+
+static void x2apic_send_IPI_allbutself(int vector)
+{
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
 
-	if (!cpus_empty(mask))
-		x2apic_send_IPI_mask(mask, vector);
+	local_irq_save(flags);
+	for_each_online_cpu(query_cpu)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+				vector, APIC_DEST_LOGICAL);
+	local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
86 106
87static int x2apic_apic_id_registered(void) 107static int x2apic_apic_id_registered(void)
@@ -89,21 +109,38 @@ static int x2apic_apic_id_registered(void)
89 return 1; 109 return 1;
90} 110}
91 111
92static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
93{ 113{
94 int cpu; 114 int cpu;
95 115
96 /* 116 /*
97 * We're using fixed IRQ delivery, can only return one phys APIC ID. 117 * We're using fixed IRQ delivery, can only return one logical APIC ID.
98 * May as well be the first. 118 * May as well be the first.
99 */ 119 */
100 cpu = first_cpu(cpumask); 120 cpu = cpumask_first(cpumask);
101 if ((unsigned)cpu < NR_CPUS) 121 if ((unsigned)cpu < nr_cpu_ids)
102 return per_cpu(x86_cpu_to_logical_apicid, cpu); 122 return per_cpu(x86_cpu_to_logical_apicid, cpu);
103 else 123 else
104 return BAD_APICID; 124 return BAD_APICID;
105} 125}
106 126
127static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
128 const struct cpumask *andmask)
129{
130 int cpu;
131
132 /*
133 * We're using fixed IRQ delivery, can only return one logical APIC ID.
134 * May as well be the first.
135 */
136 for_each_cpu_and(cpu, cpumask, andmask)
137 if (cpumask_test_cpu(cpu, cpu_online_mask))
138 break;
139 if (cpu < nr_cpu_ids)
140 return per_cpu(x86_cpu_to_logical_apicid, cpu);
141 return BAD_APICID;
142}
143
107static unsigned int get_apic_id(unsigned long x) 144static unsigned int get_apic_id(unsigned long x)
108{ 145{
109 unsigned int id; 146 unsigned int id;
@@ -150,8 +187,10 @@ struct genapic apic_x2apic_cluster = {
150 .send_IPI_all = x2apic_send_IPI_all, 187 .send_IPI_all = x2apic_send_IPI_all,
151 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 188 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
152 .send_IPI_mask = x2apic_send_IPI_mask, 189 .send_IPI_mask = x2apic_send_IPI_mask,
190 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
153 .send_IPI_self = x2apic_send_IPI_self, 191 .send_IPI_self = x2apic_send_IPI_self,
154 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 192 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
193 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
155 .phys_pkg_id = phys_pkg_id, 194 .phys_pkg_id = phys_pkg_id,
156 .get_apic_id = get_apic_id, 195 .get_apic_id = get_apic_id,
157 .set_apic_id = set_apic_id, 196 .set_apic_id = set_apic_id,
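
Both new "allbutself" paths above avoid the old copy-and-clear idiom. At NR_CPUS=4096 a cpumask_t is 4096/8 = 512 bytes, so `cpumask_t mask = cpu_online_map` put half a kilobyte on the stack in the IPI path; the replacement walks the mask under local_irq_save() and simply skips the sender. A userspace sketch of the two shapes (send_one() is a stub standing in for the ICR write; everything here is illustrative, not kernel code):

#include <stdio.h>

#define NR_CPUS 4096
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

static void send_one(int cpu, int vector)
{
	printf("vector 0x%x -> cpu %d\n", vector, cpu);	/* ICR write stub */
}

/* Old shape: copy the online map, clear self, walk the copy. */
static void allbutself_copy(const cpumask_t *online, int self, int vector)
{
	cpumask_t mask = *online;	/* 512-byte copy on the stack */

	mask.bits[self / BITS_PER_LONG] &= ~(1UL << (self % BITS_PER_LONG));
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask.bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)))
			send_one(cpu, vector);
}

/* New shape: no copy; walk the mask directly and skip the sender. */
static void allbutself_skip(const cpumask_t *online, int self, int vector)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != self &&
		    (online->bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG))))
			send_one(cpu, vector);
}

int main(void)
{
	cpumask_t online = { .bits = { 0x17 } };	/* cpus 0,1,2,4 online */

	allbutself_copy(&online, 2, 0xf0);
	allbutself_skip(&online, 2, 0xf0);
	return 0;
}
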
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index d042211768b7..62895cf315ff 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
29 29
30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
31 31
32static cpumask_t x2apic_target_cpus(void) 32static const struct cpumask *x2apic_target_cpus(void)
33{ 33{
34 return cpumask_of_cpu(0); 34 return cpumask_of(0);
35} 35}
36 36
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
43 42
44static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 43static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
54 x2apic_icr_write(cfg, apicid); 53 x2apic_icr_write(cfg, apicid);
55} 54}
56 55
57static void x2apic_send_IPI_mask(cpumask_t mask, int vector) 56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
58{ 57{
59 unsigned long flags; 58 unsigned long flags;
60 unsigned long query_cpu; 59 unsigned long query_cpu;
61 60
62 local_irq_save(flags); 61 local_irq_save(flags);
63 for_each_cpu_mask(query_cpu, mask) { 62 for_each_cpu(query_cpu, mask) {
64 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), 63 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
65 vector, APIC_DEST_PHYSICAL); 64 vector, APIC_DEST_PHYSICAL);
66 } 65 }
67 local_irq_restore(flags); 66 local_irq_restore(flags);
68} 67}
69 68
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
-	cpumask_t mask = cpu_online_map;
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask) {
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	}
+	local_irq_restore(flags);
+}
 
-	cpu_clear(smp_processor_id(), mask);
+static void x2apic_send_IPI_allbutself(int vector)
+{
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
 
-	if (!cpus_empty(mask))
-		x2apic_send_IPI_mask(mask, vector);
+	local_irq_save(flags);
+	for_each_online_cpu(query_cpu)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
 }
79 100
80static void x2apic_send_IPI_all(int vector) 101static void x2apic_send_IPI_all(int vector)
81{ 102{
82 x2apic_send_IPI_mask(cpu_online_map, vector); 103 x2apic_send_IPI_mask(cpu_online_mask, vector);
83} 104}
84 105
85static int x2apic_apic_id_registered(void) 106static int x2apic_apic_id_registered(void)
@@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void)
87 return 1; 108 return 1;
88} 109}
89 110
90static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 111static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
91{ 112{
92 int cpu; 113 int cpu;
93 114
@@ -95,13 +116,30 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
95 * We're using fixed IRQ delivery, can only return one phys APIC ID. 116 * We're using fixed IRQ delivery, can only return one phys APIC ID.
96 * May as well be the first. 117 * May as well be the first.
97 */ 118 */
98 cpu = first_cpu(cpumask); 119 cpu = cpumask_first(cpumask);
99 if ((unsigned)cpu < NR_CPUS) 120 if ((unsigned)cpu < nr_cpu_ids)
100 return per_cpu(x86_cpu_to_apicid, cpu); 121 return per_cpu(x86_cpu_to_apicid, cpu);
101 else 122 else
102 return BAD_APICID; 123 return BAD_APICID;
103} 124}
104 125
126static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
127 const struct cpumask *andmask)
128{
129 int cpu;
130
131 /*
132 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first.
134 */
135 for_each_cpu_and(cpu, cpumask, andmask)
136 if (cpumask_test_cpu(cpu, cpu_online_mask))
137 break;
138 if (cpu < nr_cpu_ids)
139 return per_cpu(x86_cpu_to_apicid, cpu);
140 return BAD_APICID;
141}
142
105static unsigned int get_apic_id(unsigned long x) 143static unsigned int get_apic_id(unsigned long x)
106{ 144{
107 unsigned int id; 145 unsigned int id;
@@ -145,8 +183,10 @@ struct genapic apic_x2apic_phys = {
145 .send_IPI_all = x2apic_send_IPI_all, 183 .send_IPI_all = x2apic_send_IPI_all,
146 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 184 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
147 .send_IPI_mask = x2apic_send_IPI_mask, 185 .send_IPI_mask = x2apic_send_IPI_mask,
186 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
148 .send_IPI_self = x2apic_send_IPI_self, 187 .send_IPI_self = x2apic_send_IPI_self,
149 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 188 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
189 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
150 .phys_pkg_id = phys_pkg_id, 190 .phys_pkg_id = phys_pkg_id,
151 .get_apic_id = get_apic_id, 191 .get_apic_id = get_apic_id,
152 .set_apic_id = set_apic_id, 192 .set_apic_id = set_apic_id,
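
x2apic_vector_allocation_domain() above shows the other recurring conversion in this series: callbacks that used to return a cpumask_t by value now fill a caller-provided struct cpumask *. Returning the struct means building the 512-byte result in a local and copying it out at every call site; the out-parameter form writes it once into storage the caller already owns (__assign_irq_vector() below passes its tmp_mask). The same change in miniature, with illustrative names and a plain bitmap struct:

#include <string.h>

#define NR_CPUS 4096
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* Before: build the 512-byte result in a local and copy it out. */
static cpumask_t domain_by_value(int cpu)
{
	cpumask_t domain;

	memset(&domain, 0, sizeof(domain));
	domain.bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
	return domain;			/* whole-struct copy on return */
}

/* After: fill storage the caller already owns; nothing is copied back. */
static void domain_by_pointer(int cpu, cpumask_t *retmask)
{
	memset(retmask, 0, sizeof(*retmask));
	retmask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

int main(void)
{
	cpumask_t a = domain_by_value(3);
	cpumask_t b;

	domain_by_pointer(3, &b);
	return memcmp(&a, &b, sizeof(a)) != 0;	/* 0: identical masks */
}
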
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index dece17289731..b193e082f6ce 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -79,16 +79,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
79 79
80/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 80/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
81 81
82static cpumask_t uv_target_cpus(void) 82static const struct cpumask *uv_target_cpus(void)
83{ 83{
84 return cpumask_of_cpu(0); 84 return cpumask_of(0);
85} 85}
86 86
-static cpumask_t uv_vector_allocation_domain(int cpu)
+static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
93 92
94int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) 93int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -127,28 +126,37 @@ static void uv_send_IPI_one(int cpu, int vector)
127 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 126 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
128} 127}
129 128
-static void uv_send_IPI_mask(cpumask_t mask, int vector)
+static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned int cpu;
 
-	for_each_possible_cpu(cpu)
-		if (cpu_isset(cpu, mask))
-			uv_send_IPI_one(cpu, vector);
+	for_each_cpu(cpu, mask)
+		uv_send_IPI_one(cpu, vector);
+}
+
+static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	for_each_cpu(cpu, mask)
+		if (cpu != this_cpu)
+			uv_send_IPI_one(cpu, vector);
 }
 
 static void uv_send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-
-	cpu_clear(smp_processor_id(), mask);
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
 
-	if (!cpus_empty(mask))
-		uv_send_IPI_mask(mask, vector);
+	for_each_online_cpu(cpu)
+		if (cpu != this_cpu)
+			uv_send_IPI_one(cpu, vector);
 }
148 156
149static void uv_send_IPI_all(int vector) 157static void uv_send_IPI_all(int vector)
150{ 158{
151 uv_send_IPI_mask(cpu_online_map, vector); 159 uv_send_IPI_mask(cpu_online_mask, vector);
152} 160}
153 161
154static int uv_apic_id_registered(void) 162static int uv_apic_id_registered(void)
@@ -160,7 +168,7 @@ static void uv_init_apic_ldr(void)
160{ 168{
161} 169}
162 170
163static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) 171static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
164{ 172{
165 int cpu; 173 int cpu;
166 174
@@ -168,13 +176,30 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
168 * We're using fixed IRQ delivery, can only return one phys APIC ID. 176 * We're using fixed IRQ delivery, can only return one phys APIC ID.
169 * May as well be the first. 177 * May as well be the first.
170 */ 178 */
171 cpu = first_cpu(cpumask); 179 cpu = cpumask_first(cpumask);
172 if ((unsigned)cpu < nr_cpu_ids) 180 if ((unsigned)cpu < nr_cpu_ids)
173 return per_cpu(x86_cpu_to_apicid, cpu); 181 return per_cpu(x86_cpu_to_apicid, cpu);
174 else 182 else
175 return BAD_APICID; 183 return BAD_APICID;
176} 184}
177 185
186static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
187 const struct cpumask *andmask)
188{
189 int cpu;
190
191 /*
192 * We're using fixed IRQ delivery, can only return one phys APIC ID.
193 * May as well be the first.
194 */
195 for_each_cpu_and(cpu, cpumask, andmask)
196 if (cpumask_test_cpu(cpu, cpu_online_mask))
197 break;
198 if (cpu < nr_cpu_ids)
199 return per_cpu(x86_cpu_to_apicid, cpu);
200 return BAD_APICID;
201}
202
178static unsigned int get_apic_id(unsigned long x) 203static unsigned int get_apic_id(unsigned long x)
179{ 204{
180 unsigned int id; 205 unsigned int id;
@@ -222,8 +247,10 @@ struct genapic apic_x2apic_uv_x = {
222 .send_IPI_all = uv_send_IPI_all, 247 .send_IPI_all = uv_send_IPI_all,
223 .send_IPI_allbutself = uv_send_IPI_allbutself, 248 .send_IPI_allbutself = uv_send_IPI_allbutself,
224 .send_IPI_mask = uv_send_IPI_mask, 249 .send_IPI_mask = uv_send_IPI_mask,
250 .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
225 .send_IPI_self = uv_send_IPI_self, 251 .send_IPI_self = uv_send_IPI_self,
226 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, 252 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
253 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
227 .phys_pkg_id = phys_pkg_id, 254 .phys_pkg_id = phys_pkg_id,
228 .get_apic_id = get_apic_id, 255 .get_apic_id = get_apic_id,
229 .set_apic_id = set_apic_id, 256 .set_apic_id = set_apic_id,
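
uv_send_IPI_mask() previously visited every possible cpu and tested membership with cpu_isset(); for_each_cpu() instead jumps from set bit to set bit, which is the difference between O(nr_cpu_ids) tests and work proportional to the mask's population. A toy single-word version of that find-next-bit iteration (__builtin_ctzll is a GCC/Clang builtin; the kernel's find_next_bit() handles multi-word bitmaps):

#include <stdio.h>

/* Toy for_each_cpu(): yield set-bit positions of one 64-bit word in
 * order, without probing every position.  Returns 64 when exhausted. */
static int next_cpu(unsigned long long mask, int prev)
{
	unsigned long long rest;

	if (prev >= 63)
		return 64;
	rest = mask >> (prev + 1) << (prev + 1);	/* drop bits <= prev */
	return rest ? __builtin_ctzll(rest) : 64;
}

int main(void)
{
	unsigned long long mask = 0x10006ULL;		/* cpus 1, 2, 16 */

	for (int cpu = next_cpu(mask, -1); cpu < 64; cpu = next_cpu(mask, cpu))
		printf("IPI -> cpu %d\n", cpu);
	return 0;
}
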
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 845ea097383e..cd759ad90690 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -248,7 +248,7 @@ static void hpet_legacy_clockevent_register(void)
248 * Start hpet with the boot cpu mask and make it 248 * Start hpet with the boot cpu mask and make it
249 * global after the IO_APIC has been initialized. 249 * global after the IO_APIC has been initialized.
250 */ 250 */
251 hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 251 hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
252 clockevents_register_device(&hpet_clockevent); 252 clockevents_register_device(&hpet_clockevent);
253 global_clock_event = &hpet_clockevent; 253 global_clock_event = &hpet_clockevent;
254 printk(KERN_DEBUG "hpet clockevent registered\n"); 254 printk(KERN_DEBUG "hpet clockevent registered\n");
@@ -303,7 +303,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
303 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); 303 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
304 hpet_setup_msi_irq(hdev->irq); 304 hpet_setup_msi_irq(hdev->irq);
305 disable_irq(hdev->irq); 305 disable_irq(hdev->irq);
306 irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); 306 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
307 enable_irq(hdev->irq); 307 enable_irq(hdev->irq);
308 } 308 }
309 break; 309 break;
@@ -451,7 +451,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
451 return -1; 451 return -1;
452 452
453 disable_irq(dev->irq); 453 disable_irq(dev->irq);
454 irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); 454 irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
455 enable_irq(dev->irq); 455 enable_irq(dev->irq);
456 456
457 printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", 457 printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
@@ -502,7 +502,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
502 /* 5 usec minimum reprogramming delta. */ 502 /* 5 usec minimum reprogramming delta. */
503 evt->min_delta_ns = 5000; 503 evt->min_delta_ns = 5000;
504 504
505 evt->cpumask = cpumask_of_cpu(hdev->cpu); 505 evt->cpumask = cpumask_of(hdev->cpu);
506 clockevents_register_device(evt); 506 clockevents_register_device(evt);
507} 507}
508 508
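
The hpet (and, below, i8253) hunks are mechanical: clockevent cpumask fields and irq_set_affinity() now take a const struct cpumask *, and cpumask_of(cpu) hands back a const pointer into a constant table rather than materializing a cpumask_t by value the way cpumask_of_cpu() did. The singleton-table idea in miniature, sized down to 8 cpus for illustration:

#include <stdio.h>

#define NR_CPUS 8
typedef struct { unsigned long bits[1]; } cpumask_t;

/* One constant single-bit mask per cpu, shared read-only by all callers. */
static const cpumask_t cpu_bit[NR_CPUS] = {
	{ { 1UL << 0 } }, { { 1UL << 1 } }, { { 1UL << 2 } }, { { 1UL << 3 } },
	{ { 1UL << 4 } }, { { 1UL << 5 } }, { { 1UL << 6 } }, { { 1UL << 7 } },
};

static const cpumask_t *cpumask_of(int cpu)
{
	return &cpu_bit[cpu];	/* a pointer, not a fresh cpumask_t value */
}

int main(void)
{
	printf("mask for cpu 5: 0x%lx\n", cpumask_of(5)->bits[0]);
	return 0;
}
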
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c1b5e3ece1f2..10f92fb532f3 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -114,7 +114,7 @@ void __init setup_pit_timer(void)
114 * Start pit with the boot cpu mask and make it global after the 114 * Start pit with the boot cpu mask and make it global after the
115 * IO_APIC has been initialized. 115 * IO_APIC has been initialized.
116 */ 116 */
117 pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 117 pit_clockevent.cpumask = cpumask_of(smp_processor_id());
118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
119 pit_clockevent.shift); 119 pit_clockevent.shift);
120 pit_clockevent.max_delta_ns = 120 pit_clockevent.max_delta_ns =
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 74917658b004..62ecfc991e1e 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
136 136
137struct irq_cfg { 137struct irq_cfg {
138 struct irq_pin_list *irq_2_pin; 138 struct irq_pin_list *irq_2_pin;
139 cpumask_t domain; 139 cpumask_var_t domain;
140 cpumask_t old_domain; 140 cpumask_var_t old_domain;
141 unsigned move_cleanup_count; 141 unsigned move_cleanup_count;
142 u8 vector; 142 u8 vector;
143 u8 move_in_progress : 1; 143 u8 move_in_progress : 1;
@@ -152,22 +152,22 @@ static struct irq_cfg irq_cfgx[] = {
152#else 152#else
153static struct irq_cfg irq_cfgx[NR_IRQS] = { 153static struct irq_cfg irq_cfgx[NR_IRQS] = {
154#endif 154#endif
155 [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, 155 [0] = { .vector = IRQ0_VECTOR, },
156 [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, 156 [1] = { .vector = IRQ1_VECTOR, },
157 [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, 157 [2] = { .vector = IRQ2_VECTOR, },
158 [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, 158 [3] = { .vector = IRQ3_VECTOR, },
159 [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, 159 [4] = { .vector = IRQ4_VECTOR, },
160 [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, 160 [5] = { .vector = IRQ5_VECTOR, },
161 [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, 161 [6] = { .vector = IRQ6_VECTOR, },
162 [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, 162 [7] = { .vector = IRQ7_VECTOR, },
163 [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, 163 [8] = { .vector = IRQ8_VECTOR, },
164 [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, 164 [9] = { .vector = IRQ9_VECTOR, },
165 [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, 165 [10] = { .vector = IRQ10_VECTOR, },
166 [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, 166 [11] = { .vector = IRQ11_VECTOR, },
167 [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, 167 [12] = { .vector = IRQ12_VECTOR, },
168 [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, 168 [13] = { .vector = IRQ13_VECTOR, },
169 [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, 169 [14] = { .vector = IRQ14_VECTOR, },
170 [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, 170 [15] = { .vector = IRQ15_VECTOR, },
171}; 171};
172 172
173int __init arch_early_irq_init(void) 173int __init arch_early_irq_init(void)
@@ -183,6 +183,10 @@ int __init arch_early_irq_init(void)
183 for (i = 0; i < count; i++) { 183 for (i = 0; i < count; i++) {
184 desc = irq_to_desc(i); 184 desc = irq_to_desc(i);
185 desc->chip_data = &cfg[i]; 185 desc->chip_data = &cfg[i];
186 alloc_bootmem_cpumask_var(&cfg[i].domain);
187 alloc_bootmem_cpumask_var(&cfg[i].old_domain);
188 if (i < NR_IRQS_LEGACY)
189 cpumask_setall(cfg[i].domain);
186 } 190 }
187 191
188 return 0; 192 return 0;
@@ -209,6 +213,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
209 node = cpu_to_node(cpu); 213 node = cpu_to_node(cpu);
210 214
211 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 215 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
216 if (cfg) {
217 /* FIXME: needs alloc_cpumask_var_node() */
218 if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
219 kfree(cfg);
220 cfg = NULL;
221 } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
222 free_cpumask_var(cfg->domain);
223 kfree(cfg);
224 cfg = NULL;
225 } else {
226 cpumask_clear(cfg->domain);
227 cpumask_clear(cfg->old_domain);
228 }
229 }
212 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); 230 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
213 231
214 return cfg; 232 return cfg;
@@ -333,13 +351,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
333 } 351 }
334} 352}
335 353
-static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg = desc->chip_data;
 
 	if (!cfg->move_in_progress) {
 		/* it means that domain is not changed */
-		if (!cpus_intersects(desc->affinity, mask))
+		if (!cpumask_intersects(&desc->affinity, mask))
 			cfg->move_desc_pending = 1;
 	}
 }
@@ -354,7 +373,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
354#endif 373#endif
355 374
356#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC 375#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
-static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static inline void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
358{ 378{
359} 379}
360#endif 380#endif
@@ -485,6 +505,26 @@ static void ioapic_mask_entry(int apic, int pin)
485} 505}
486 506
487#ifdef CONFIG_SMP 507#ifdef CONFIG_SMP
508static void send_cleanup_vector(struct irq_cfg *cfg)
509{
510 cpumask_var_t cleanup_mask;
511
512 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
513 unsigned int i;
514 cfg->move_cleanup_count = 0;
515 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
516 cfg->move_cleanup_count++;
517 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
518 send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
519 } else {
520 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
521 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
522 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
523 free_cpumask_var(cleanup_mask);
524 }
525 cfg->move_in_progress = 0;
526}
527
488static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 528static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
489{ 529{
490 int apic, pin; 530 int apic, pin;
@@ -520,41 +560,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
520 } 560 }
521} 561}
522 562
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
-static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+/*
+ * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
+ * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ */
+static unsigned int
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	unsigned long flags;
-	unsigned int dest;
-	cpumask_t tmp;
 	unsigned int irq;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		return;
+	if (!cpumask_intersects(mask, cpu_online_mask))
+		return BAD_APICID;
 
 	irq = desc->irq;
 	cfg = desc->chip_data;
 	if (assign_irq_vector(irq, cfg, mask))
-		return;
+		return BAD_APICID;
 
+	cpumask_and(&desc->affinity, cfg->domain, mask);
 	set_extra_move_desc(desc, mask);
+	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+}
 
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
-	/*
-	 * Only the high 8 bits are valid.
-	 */
-	dest = SET_APIC_LOGICAL_ID(dest);
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+	struct irq_cfg *cfg;
+	unsigned long flags;
+	unsigned int dest;
+	unsigned int irq;
+
+	irq = desc->irq;
+	cfg = desc->chip_data;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__target_IO_APIC_irq(irq, dest, cfg);
-	desc->affinity = mask;
+	dest = set_desc_affinity(desc, mask);
+	if (dest != BAD_APICID) {
+		/* Only the high 8 bits are valid. */
+		dest = SET_APIC_LOGICAL_ID(dest);
+		__target_IO_APIC_irq(irq, dest, cfg);
+	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
558{ 612{
559 struct irq_desc *desc; 613 struct irq_desc *desc;
560 614
@@ -1222,7 +1276,8 @@ void unlock_vector_lock(void)
1222 spin_unlock(&vector_lock); 1276 spin_unlock(&vector_lock);
1223} 1277}
1224 1278
-static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1226{ 1281{
1227 /* 1282 /*
1228 * NOTE! The local APIC isn't very good at handling 1283 * NOTE! The local APIC isn't very good at handling
@@ -1237,49 +1292,49 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
1237 */ 1292 */
1238 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1293 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1239 unsigned int old_vector; 1294 unsigned int old_vector;
-	int cpu;
+	int cpu, err;
+	cpumask_var_t tmp_mask;
 
 	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
 		return -EBUSY;
 
-	/* Only try and allocate irqs on cpus that are present */
-	cpus_and(mask, mask, cpu_online_map);
+	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+		return -ENOMEM;
 
 	old_vector = cfg->vector;
 	if (old_vector) {
-		cpumask_t tmp;
-		cpus_and(tmp, cfg->domain, mask);
-		if (!cpus_empty(tmp))
+		cpumask_and(tmp_mask, mask, cpu_online_mask);
+		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+		if (!cpumask_empty(tmp_mask)) {
+			free_cpumask_var(tmp_mask);
 			return 0;
+		}
 	}
 
-	for_each_cpu_mask_nr(cpu, mask) {
-		cpumask_t domain, new_mask;
+	/* Only try and allocate irqs on cpus that are present */
+	err = -ENOSPC;
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		int new_cpu;
 		int vector, offset;
 
-		domain = vector_allocation_domain(cpu);
-		cpus_and(new_mask, domain, cpu_online_map);
+		vector_allocation_domain(cpu, tmp_mask);
 
 		vector = current_vector;
 		offset = current_offset;
 next:
 		vector += 8;
 		if (vector >= first_system_vector) {
-			/* If we run out of vectors on large boxen, must share them. */
+			/* If out of vectors on large boxen, must share them. */
 			offset = (offset + 1) % 8;
 			vector = FIRST_DEVICE_VECTOR + offset;
 		}
 		if (unlikely(current_vector == vector))
 			continue;
-#ifdef CONFIG_X86_64
-		if (vector == IA32_SYSCALL_VECTOR)
-			goto next;
-#else
-		if (vector == SYSCALL_VECTOR)
+
+		if (test_bit(vector, used_vectors))
 			goto next;
-#endif
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -1287,18 +1342,21 @@ next:
 		current_offset = offset;
 		if (old_vector) {
 			cfg->move_in_progress = 1;
-			cfg->old_domain = cfg->domain;
+			cpumask_copy(cfg->old_domain, cfg->domain);
 		}
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
-		cfg->domain = domain;
-		return 0;
+		cpumask_copy(cfg->domain, tmp_mask);
+		err = 0;
+		break;
 	}
-	return -ENOSPC;
+	free_cpumask_var(tmp_mask);
+	return err;
 }
1300 1357
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1302{ 1360{
1303 int err; 1361 int err;
1304 unsigned long flags; 1362 unsigned long flags;
@@ -1311,23 +1369,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
1311 1369
 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-	cpumask_t mask;
 	int cpu, vector;
 
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
-	cpus_clear(cfg->domain);
+	cpumask_clear(cfg->domain);
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	cpus_and(mask, cfg->old_domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1331 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1386 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1332 vector++) { 1387 vector++) {
1333 if (per_cpu(vector_irq, cpu)[vector] != irq) 1388 if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1350,7 +1405,7 @@ void __setup_vector_irq(int cpu)
1350 /* Mark the inuse vectors */ 1405 /* Mark the inuse vectors */
1351 for_each_irq_desc(irq, desc) { 1406 for_each_irq_desc(irq, desc) {
1352 cfg = desc->chip_data; 1407 cfg = desc->chip_data;
1353 if (!cpu_isset(cpu, cfg->domain)) 1408 if (!cpumask_test_cpu(cpu, cfg->domain))
1354 continue; 1409 continue;
1355 vector = cfg->vector; 1410 vector = cfg->vector;
1356 per_cpu(vector_irq, cpu)[vector] = irq; 1411 per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1362,7 +1417,7 @@ void __setup_vector_irq(int cpu)
1362 continue; 1417 continue;
1363 1418
1364 cfg = irq_cfg(irq); 1419 cfg = irq_cfg(irq);
1365 if (!cpu_isset(cpu, cfg->domain)) 1420 if (!cpumask_test_cpu(cpu, cfg->domain))
1366 per_cpu(vector_irq, cpu)[vector] = -1; 1421 per_cpu(vector_irq, cpu)[vector] = -1;
1367 } 1422 }
1368} 1423}
@@ -1498,18 +1553,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1498{ 1553{
1499 struct irq_cfg *cfg; 1554 struct irq_cfg *cfg;
1500 struct IO_APIC_route_entry entry; 1555 struct IO_APIC_route_entry entry;
-	cpumask_t mask;
+	unsigned int dest;
 
 	if (!IO_APIC_IRQ(irq))
 		return;
 
 	cfg = desc->chip_data;
 
-	mask = TARGET_CPUS;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, TARGET_CPUS))
 		return;
 
-	cpus_and(mask, cfg->domain, mask);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
1513 1567
1514 apic_printk(APIC_VERBOSE,KERN_DEBUG 1568 apic_printk(APIC_VERBOSE,KERN_DEBUG
1515 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1569 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1519,8 +1573,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1519 1573
1520 1574
 	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-			       cpu_mask_to_apicid(mask), trigger, polarity,
-			       cfg->vector)) {
+			       dest, trigger, polarity, cfg->vector)) {
1524 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1577 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1525 mp_ioapics[apic].mp_apicid, pin); 1578 mp_ioapics[apic].mp_apicid, pin);
1526 __clear_irq_vector(irq, cfg); 1579 __clear_irq_vector(irq, cfg);
@@ -2240,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2240 unsigned long flags; 2293 unsigned long flags;
2241 2294
2242 spin_lock_irqsave(&vector_lock, flags); 2295 spin_lock_irqsave(&vector_lock, flags);
2243 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); 2296 send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2244 spin_unlock_irqrestore(&vector_lock, flags); 2297 spin_unlock_irqrestore(&vector_lock, flags);
2245 2298
2246 return 1; 2299 return 1;
@@ -2289,18 +2342,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2289 * as simple as edge triggered migration and we can do the irq migration 2342 * as simple as edge triggered migration and we can do the irq migration
2290 * with a simple atomic update to IO-APIC RTE. 2343 * with a simple atomic update to IO-APIC RTE.
2291 */ 2344 */
-static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
 	unsigned int irq;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
2305 2357
2306 irq = desc->irq; 2358 irq = desc->irq;
@@ -2313,8 +2365,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
2313 2365
2314 set_extra_move_desc(desc, mask); 2366 set_extra_move_desc(desc, mask);
2315 2367
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid_and(cfg->domain, mask);
2318 2369
2319 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2370 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2320 if (modify_ioapic_rte) { 2371 if (modify_ioapic_rte) {
@@ -2331,14 +2382,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
2331 */ 2382 */
2332 modify_irte(irq, &irte); 2383 modify_irte(irq, &irte);
2333 2384
-	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
 
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
2342} 2389}
2343 2390
2344static int migrate_irq_remapped_level_desc(struct irq_desc *desc) 2391static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2360,11 +2407,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2360 } 2407 }
2361 2408
2362 /* everthing is clear. we have right of way */ 2409 /* everthing is clear. we have right of way */
2363 migrate_ioapic_irq_desc(desc, desc->pending_mask); 2410 migrate_ioapic_irq_desc(desc, &desc->pending_mask);
2364 2411
2365 ret = 0; 2412 ret = 0;
2366 desc->status &= ~IRQ_MOVE_PENDING; 2413 desc->status &= ~IRQ_MOVE_PENDING;
2367 cpus_clear(desc->pending_mask); 2414 cpumask_clear(&desc->pending_mask);
2368 2415
2369unmask: 2416unmask:
2370 unmask_IO_APIC_irq_desc(desc); 2417 unmask_IO_APIC_irq_desc(desc);
@@ -2389,7 +2436,7 @@ static void ir_irq_migration(struct work_struct *work)
2389 continue; 2436 continue;
2390 } 2437 }
2391 2438
2392 desc->chip->set_affinity(irq, desc->pending_mask); 2439 desc->chip->set_affinity(irq, &desc->pending_mask);
2393 spin_unlock_irqrestore(&desc->lock, flags); 2440 spin_unlock_irqrestore(&desc->lock, flags);
2394 } 2441 }
2395 } 2442 }
@@ -2398,18 +2445,20 @@ static void ir_irq_migration(struct work_struct *work)
2398/* 2445/*
2399 * Migrates the IRQ destination in the process context. 2446 * Migrates the IRQ destination in the process context.
2400 */ 2447 */
-static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
+					    const struct cpumask *mask)
 {
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
-		desc->pending_mask = mask;
+		cpumask_copy(&desc->pending_mask, mask);
 		migrate_irq_remapped_level_desc(desc);
 		return;
 	}
 
 	migrate_ioapic_irq_desc(desc, mask);
 }
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+				       const struct cpumask *mask)
2413{ 2462{
2414 struct irq_desc *desc = irq_to_desc(irq); 2463 struct irq_desc *desc = irq_to_desc(irq);
2415 2464
@@ -2444,7 +2493,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2444 if (!cfg->move_cleanup_count) 2493 if (!cfg->move_cleanup_count)
2445 goto unlock; 2494 goto unlock;
2446 2495
2447 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) 2496 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2448 goto unlock; 2497 goto unlock;
2449 2498
2450 __get_cpu_var(vector_irq)[vector] = -1; 2499 __get_cpu_var(vector_irq)[vector] = -1;
@@ -2481,20 +2530,14 @@ static void irq_complete_move(struct irq_desc **descp)
2481 2530
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
-	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-		cpumask_t cleanup_mask;
-
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-		*descp = desc = move_irq_desc(desc, me);
-		/* get the new one */
-		cfg = desc->chip_data;
+	*descp = desc = move_irq_desc(desc, me);
+	/* get the new one */
+	cfg = desc->chip_data;
 #endif
 
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+		send_cleanup_vector(cfg);
 }
2499#else 2542#else
2500static inline void irq_complete_move(struct irq_desc **descp) {} 2543static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -3216,16 +3259,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3216 struct irq_cfg *cfg; 3259 struct irq_cfg *cfg;
3217 int err; 3260 int err;
3218 unsigned dest; 3261 unsigned dest;
-	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
-	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, cfg, tmp);
+	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (err)
 		return err;
 
-	cpus_and(tmp, cfg->domain, tmp);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
3229 3269
3230#ifdef CONFIG_INTR_REMAP 3270#ifdef CONFIG_INTR_REMAP
3231 if (irq_remapped(irq)) { 3271 if (irq_remapped(irq)) {
@@ -3279,26 +3319,18 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3279} 3319}
3280 3320
3281#ifdef CONFIG_SMP 3321#ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
3302 3334
3303 read_msi_msg_desc(desc, &msg); 3335 read_msi_msg_desc(desc, &msg);
3304 3336
@@ -3308,37 +3340,27 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3308 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3340 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3309 3341
3310 write_msi_msg_desc(desc, &msg); 3342 write_msi_msg_desc(desc, &msg);
-	desc->affinity = mask;
 }
3313#ifdef CONFIG_INTR_REMAP 3344#ifdef CONFIG_INTR_REMAP
3314/* 3345/*
3315 * Migrate the MSI irq to another cpumask. This migration is 3346 * Migrate the MSI irq to another cpumask. This migration is
3316 * done in the process context using interrupt-remapping hardware. 3347 * done in the process context using interrupt-remapping hardware.
3317 */ 3348 */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void
+ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_cfg *cfg;
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned int dest;
-	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		return;
-
 	if (get_irte(irq, &irte))
 		return;
 
-	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
-
3342 irte.vector = cfg->vector; 3364 irte.vector = cfg->vector;
3343 irte.dest_id = IRTE_DEST(dest); 3365 irte.dest_id = IRTE_DEST(dest);
3344 3366
@@ -3352,14 +3374,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3352 * at the new destination. So, time to cleanup the previous 3374 * at the new destination. So, time to cleanup the previous
3353 * vector allocation. 3375 * vector allocation.
3354 */ 3376 */
-	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
-
-	desc->affinity = mask;
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
 }
3364 3380
3365#endif 3381#endif
@@ -3550,26 +3566,18 @@ void arch_teardown_msi_irq(unsigned int irq)
3550 3566
3551#ifdef CONFIG_DMAR 3567#ifdef CONFIG_DMAR
3552#ifdef CONFIG_SMP 3568#ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
3573 3581
3574 dmar_msi_read(irq, &msg); 3582 dmar_msi_read(irq, &msg);
3575 3583
@@ -3579,7 +3587,6 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3579 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3587 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3580 3588
3581 dmar_msi_write(irq, &msg); 3589 dmar_msi_write(irq, &msg);
-	desc->affinity = mask;
 }
3584 3591
3585#endif /* CONFIG_SMP */ 3592#endif /* CONFIG_SMP */
@@ -3613,26 +3620,18 @@ int arch_setup_dmar_msi(unsigned int irq)
3613#ifdef CONFIG_HPET_TIMER 3620#ifdef CONFIG_HPET_TIMER
3614 3621
3615#ifdef CONFIG_SMP 3622#ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
3636 3635
3637 hpet_msi_read(irq, &msg); 3636 hpet_msi_read(irq, &msg);
3638 3637
@@ -3642,7 +3641,6 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3642 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3641 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3643 3642
3644 hpet_msi_write(irq, &msg); 3643 hpet_msi_write(irq, &msg);
-	desc->affinity = mask;
 }
3647 3645
3648#endif /* CONFIG_SMP */ 3646#endif /* CONFIG_SMP */
@@ -3697,28 +3695,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3697 write_ht_irq_msg(irq, &msg); 3695 write_ht_irq_msg(irq, &msg);
3698} 3696}
3699 3697
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	desc->affinity = mask;
 }
3723 3712
3724#endif 3713#endif
@@ -3738,17 +3727,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3738{ 3727{
3739 struct irq_cfg *cfg; 3728 struct irq_cfg *cfg;
3740 int err; 3729 int err;
-	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
-	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, cfg, tmp);
+	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (!err) {
 		struct ht_irq_msg msg;
 		unsigned dest;
 
-		cpus_and(tmp, cfg->domain, tmp);
-		dest = cpu_mask_to_apicid(tmp);
+		dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
3752 3738
3753 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3739 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3754 3740
@@ -3784,7 +3770,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3784int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, 3770int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3785 unsigned long mmr_offset) 3771 unsigned long mmr_offset)
3786{ 3772{
3787 const cpumask_t *eligible_cpu = get_cpu_mask(cpu); 3773 const struct cpumask *eligible_cpu = cpumask_of(cpu);
3788 struct irq_cfg *cfg; 3774 struct irq_cfg *cfg;
3789 int mmr_pnode; 3775 int mmr_pnode;
3790 unsigned long mmr_value; 3776 unsigned long mmr_value;
@@ -3794,7 +3780,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3794 3780
3795 cfg = irq_cfg(irq); 3781 cfg = irq_cfg(irq);
3796 3782
3797 err = assign_irq_vector(irq, cfg, *eligible_cpu); 3783 err = assign_irq_vector(irq, cfg, eligible_cpu);
3798 if (err != 0) 3784 if (err != 0)
3799 return err; 3785 return err;
3800 3786
@@ -3813,7 +3799,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3813 entry->polarity = 0; 3799 entry->polarity = 0;
3814 entry->trigger = 0; 3800 entry->trigger = 0;
3815 entry->mask = 0; 3801 entry->mask = 0;
3816 entry->dest = cpu_mask_to_apicid(*eligible_cpu); 3802 entry->dest = cpu_mask_to_apicid(eligible_cpu);
3817 3803
3818 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3804 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3819 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 3805 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -4024,7 +4010,7 @@ void __init setup_ioapic_dest(void)
4024 int pin, ioapic, irq, irq_entry; 4010 int pin, ioapic, irq, irq_entry;
4025 struct irq_desc *desc; 4011 struct irq_desc *desc;
4026 struct irq_cfg *cfg; 4012 struct irq_cfg *cfg;
4027 cpumask_t mask; 4013 const struct cpumask *mask;
4028 4014
4029 if (skip_ioapic_setup == 1) 4015 if (skip_ioapic_setup == 1)
4030 return; 4016 return;
@@ -4055,7 +4041,7 @@ void __init setup_ioapic_dest(void)
4055 */ 4041 */
4056 if (desc->status & 4042 if (desc->status &
4057 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 4043 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4058 mask = desc->affinity; 4044 mask = &desc->affinity;
4059 else 4045 else
4060 mask = TARGET_CPUS; 4046 mask = TARGET_CPUS;
4061 4047
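
The io_apic.c conversion is the bulk of this merge: irq_cfg's domain/old_domain become cpumask_var_t, so every allocation site grows an unwind path (get_one_free_irq_cfg() frees domain if old_domain fails) and send_cleanup_vector() grows a slow fallback for when no temporary mask can be allocated. A userspace sketch of that paired alloc/unwind discipline, with calloc()/free() standing in for alloc_cpumask_var()/free_cpumask_var():

#include <stdlib.h>

struct cfg {
	unsigned long *domain;
	unsigned long *old_domain;
	int vector;
};

#define MASK_WORDS 64	/* NR_CPUS=4096 / 64 bits per word */

/* Mirrors get_one_free_irq_cfg(): unwind the first allocation if the
 * second one fails, so the caller sees either a whole object or NULL. */
static struct cfg *cfg_alloc(void)
{
	struct cfg *cfg = calloc(1, sizeof(*cfg));

	if (!cfg)
		return NULL;
	cfg->domain = calloc(MASK_WORDS, sizeof(unsigned long));
	if (!cfg->domain) {
		free(cfg);
		return NULL;
	}
	cfg->old_domain = calloc(MASK_WORDS, sizeof(unsigned long));
	if (!cfg->old_domain) {
		free(cfg->domain);
		free(cfg);
		return NULL;
	}
	return cfg;
}

int main(void)
{
	struct cfg *cfg = cfg_alloc();

	if (!cfg)
		return 1;
	free(cfg->old_domain);
	free(cfg->domain);
	free(cfg);
	return 0;
}
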
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index f1c688e46f35..285bbf8831fa 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
116/* 116/*
117 * This is only used on smaller machines. 117 * This is only used on smaller machines.
118 */ 118 */
119void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) 119void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
120{ 120{
121 unsigned long mask = cpus_addr(cpumask)[0]; 121 unsigned long mask = cpumask_bits(cpumask)[0];
122 unsigned long flags; 122 unsigned long flags;
123 123
124 local_irq_save(flags); 124 local_irq_save(flags);
125 WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); 125 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
126 __send_IPI_dest_field(mask, vector); 126 __send_IPI_dest_field(mask, vector);
127 local_irq_restore(flags); 127 local_irq_restore(flags);
128} 128}
129 129
130void send_IPI_mask_sequence(cpumask_t mask, int vector) 130void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
131{ 131{
132 unsigned long flags; 132 unsigned long flags;
133 unsigned int query_cpu; 133 unsigned int query_cpu;
@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
139 */ 139 */
140 140
141 local_irq_save(flags); 141 local_irq_save(flags);
-	for_each_possible_cpu(query_cpu) {
-		if (cpu_isset(query_cpu, mask)) {
-			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-					      vector);
-		}
-	}
+	for_each_cpu(query_cpu, mask)
+		__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
+	local_irq_restore(flags);
+}
+
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
+					      vector);
 	local_irq_restore(flags);
 }
150 162
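
send_IPI_mask_bitmask() above keeps its old trick: per its comment it is only used on smaller machines, so the whole online mask fits in the first bitmap word, and the function can take cpumask_bits(cpumask)[0] directly, WARNing if any bit falls outside the online mask. The subset check in isolation, as a small runnable sketch:

#include <stdio.h>

/* The check behind the WARN_ON() above: any bit of the request that is
 * not also set in the online mask is a bug. */
static int warn_on_offline_bits(unsigned long mask, unsigned long online)
{
	unsigned long stray = mask & ~online;

	if (stray)
		fprintf(stderr, "WARN: IPI to offline cpus: 0x%lx\n", stray);
	return stray != 0;
}

int main(void)
{
	warn_on_offline_bits(0x06, 0x07);	/* ok: cpus 1,2 are online */
	warn_on_offline_bits(0x18, 0x07);	/* warns: cpus 3,4 offline */
	return 0;
}
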
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3f1d9d18df67..bce53e1352a0 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -9,6 +9,7 @@
9#include <asm/apic.h> 9#include <asm/apic.h>
10#include <asm/io_apic.h> 10#include <asm/io_apic.h>
11#include <asm/smp.h> 11#include <asm/smp.h>
12#include <asm/irq.h>
12 13
13atomic_t irq_err_count; 14atomic_t irq_err_count;
14 15
@@ -190,3 +191,5 @@ u64 arch_irq_stat(void)
190#endif 191#endif
191 return sum; 192 return sum;
192} 193}
194
195EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 119fc9c8ff7f..9dc5588f336a 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs)
233#ifdef CONFIG_HOTPLUG_CPU 233#ifdef CONFIG_HOTPLUG_CPU
234#include <mach_apic.h> 234#include <mach_apic.h>
235 235
-void fixup_irqs(cpumask_t map)
+/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
+void fixup_irqs(void)
 {
 	unsigned int irq;
 	static int warned;
 	struct irq_desc *desc;
 
 	for_each_irq_desc(irq, desc) {
-		cpumask_t mask;
+		const struct cpumask *affinity;
 
 		if (!desc)
 			continue;
 		if (irq == 2)
 			continue;
 
-		cpus_and(mask, desc->affinity, map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		affinity = &desc->affinity;
+		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
-			mask = map;
+			affinity = cpu_all_mask;
 		}
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, affinity);
257 else if (desc->action && !(warned++)) 258 else if (desc->action && !(warned++))
258 printk("Cannot set affinity for irq %i\n", irq); 259 printk("Cannot set affinity for irq %i\n", irq);
259 } 260 }
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index a174a217eb1a..6383d50f82ea 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -80,16 +80,17 @@ asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
+/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
+void fixup_irqs(void)
 {
 	unsigned int irq;
 	static int warned;
 	struct irq_desc *desc;
 
 	for_each_irq_desc(irq, desc) {
-		cpumask_t mask;
 		int break_affinity = 0;
 		int set_affinity = 1;
+		const struct cpumask *affinity;
 
 		if (!desc)
 			continue;
@@ -99,23 +100,23 @@ void fixup_irqs(cpumask_t map)
 		/* interrupt's are disabled at this point */
 		spin_lock(&desc->lock);
 
+		affinity = &desc->affinity;
 		if (!irq_has_action(irq) ||
-		    cpus_equal(desc->affinity, map)) {
+		    cpumask_equal(affinity, cpu_online_mask)) {
 			spin_unlock(&desc->lock);
 			continue;
 		}
 
-		cpus_and(mask, desc->affinity, map);
-		if (cpus_empty(mask)) {
+		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			break_affinity = 1;
-			mask = map;
+			affinity = cpu_all_mask;
 		}
 
 		if (desc->chip->mask)
 			desc->chip->mask(irq);
 
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, affinity);
 		else if (!(warned++))
 			set_affinity = 0;
 
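Both fixup_irqs() variants now answer "does this irq still have an online CPU in its affinity?" with cpumask_any_and(), which returns the first CPU in the intersection of two masks, or a value >= nr_cpu_ids when the intersection is empty. That replaces the old pattern of materializing the intersection in a temporary cpumask_t. A sketch of the idiom, with names taken from the hunks above:

	/* empty-intersection test without a temporary mask;
	 * the scan stops at the first online CPU found */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
		affinity = cpu_all_mask;	/* break affinity */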
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 203384ed2b5d..84723295f88a 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -110,6 +110,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
 };
 
+int vector_used_by_percpu_irq(unsigned int vector)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (per_cpu(vector_irq, cpu)[vector] != -1)
+			return 1;
+	}
+
+	return 0;
+}
+
 /* Overridden in paravirt.c */
 void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
 
@@ -146,10 +158,12 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
 	/* IPI for single call function */
-	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
+	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+			call_function_single_interrupt);
 
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 6190e6ef546c..31ebfe38e96c 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -69,6 +69,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
 };
 
+int vector_used_by_percpu_irq(unsigned int vector)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (per_cpu(vector_irq, cpu)[vector] != -1)
+			return 1;
+	}
+
+	return 0;
+}
+
 void __init init_ISA_irqs(void)
 {
 	int i;
@@ -121,6 +133,7 @@ static void __init smp_intr_init(void)
 
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
 #endif
 }
 
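With vector_used_by_percpu_irq() exported and IRQ_MOVE_CLEANUP_VECTOR recorded in used_vectors, a subsystem probing for a free vector can respect both kinds of reservation. A hypothetical sketch of such a probe (illustrative only; no caller in this patch looks exactly like this):

	int vector;

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (test_bit(vector, used_vectors))
			continue;	/* reserved at init time */
		if (vector_used_by_percpu_irq(vector))
			continue;	/* in use on some online CPU */
		break;			/* vector is free to claim */
	}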
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 3b599518c322..c12314c9e86f 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = {
 	.set_mode = mfgpt_set_mode,
 	.set_next_event = mfgpt_next_event,
 	.rating = 250,
-	.cpumask = CPU_MASK_ALL,
+	.cpumask = cpu_all_mask,
 	.shift = 32
 };
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 72e0e4e712d6..39643b1df061 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -650,10 +650,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 
 static void smp_send_nmi_allbutself(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(safe_smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, NMI_VECTOR);
+	send_IPI_allbutself(NMI_VECTOR);
 }
 
 static struct notifier_block crash_nmi_nb = {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ae0c0d3bb770..0b63b08e7530 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void)
 		old_size = PERCPU_ENOUGH_ROOM;
 	align = max_t(unsigned long, PAGE_SIZE, align);
 	size = roundup(old_size, align);
+
+	printk(KERN_INFO
+		"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
+
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 	       size);
 
@@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void)
168 "cpu %d has no node %d or node-local memory\n", 173 "cpu %d has no node %d or node-local memory\n",
169 cpu, node); 174 cpu, node);
170 if (ptr) 175 if (ptr)
171 printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", 176 printk(KERN_DEBUG
177 "per cpu data for cpu%d at %016lx\n",
172 cpu, __pa(ptr)); 178 cpu, __pa(ptr));
173 } 179 }
174 else { 180 else {
175 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 181 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
176 __pa(MAX_DMA_ADDRESS)); 182 __pa(MAX_DMA_ADDRESS));
177 if (ptr) 183 if (ptr)
178 printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", 184 printk(KERN_DEBUG
179 cpu, node, __pa(ptr)); 185 "per cpu data for cpu%d on node%d "
186 "at %016lx\n",
187 cpu, node, __pa(ptr));
180 } 188 }
181#endif 189#endif
182 per_cpu_offset(cpu) = ptr - __per_cpu_start; 190 per_cpu_offset(cpu) = ptr - __per_cpu_start;
183 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 191 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
184 } 192 }
185 193
186 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
187 NR_CPUS, nr_cpu_ids, nr_node_ids);
188
189 /* Setup percpu data maps */ 194 /* Setup percpu data maps */
190 setup_per_cpu_maps(); 195 setup_per_cpu_maps();
191 196
@@ -282,7 +287,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 	else
 		cpu_clear(cpu, *mask);
 
-	cpulist_scnprintf(buf, sizeof(buf), *mask);
+	cpulist_scnprintf(buf, sizeof(buf), mask);
 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
 		enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
 }
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 7e558db362c1..beea2649a240 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
-	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-void native_send_call_func_ipi(cpumask_t mask)
+void native_send_call_func_ipi(const struct cpumask *mask)
 {
 	cpumask_t allbutself;
 
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
-	if (cpus_equal(mask, allbutself) &&
+	if (cpus_equal(*mask, allbutself) &&
 	    cpus_equal(cpu_online_map, cpu_callout_map))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f8500c969442..31869bf5fabd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,14 +102,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-/* bitmap of online cpus */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -1260,6 +1254,15 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 	check_nmi_watchdog();
 }
 
+static int __initdata setup_possible_cpus = -1;
+static int __init _setup_possible_cpus(char *str)
+{
+	get_option(&str, &setup_possible_cpus);
+	return 0;
+}
+early_param("possible_cpus", _setup_possible_cpus);
+
+
 /*
  * cpu_possible_map should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
@@ -1272,7 +1275,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
  *
  * Three ways to find out the number of additional hotplug CPUs:
  * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
- * - The user can overwrite it with additional_cpus=NUM
+ * - The user can overwrite it with possible_cpus=NUM
  * - Otherwise don't reserve additional CPUs.
  * We do this because additional CPUs waste a lot of memory.
  * -AK
@@ -1285,9 +1288,17 @@ __init void prefill_possible_map(void)
 	if (!num_processors)
 		num_processors = 1;
 
-	possible = num_processors + disabled_cpus;
-	if (possible > NR_CPUS)
-		possible = NR_CPUS;
+	if (setup_possible_cpus == -1)
+		possible = num_processors + disabled_cpus;
+	else
+		possible = setup_possible_cpus;
+
+	if (possible > CONFIG_NR_CPUS) {
+		printk(KERN_WARNING
+			"%d Processors exceeds NR_CPUS limit of %d\n",
+			possible, CONFIG_NR_CPUS);
+		possible = CONFIG_NR_CPUS;
+	}
 
 	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
 		possible, max_t(int, possible - num_processors, 0));
@@ -1352,7 +1363,7 @@ void cpu_disable_common(void)
 	lock_vector_lock();
 	remove_cpu_from_maps(cpu);
 	unlock_vector_lock();
-	fixup_irqs(cpu_online_map);
+	fixup_irqs();
 }
 
 int native_cpu_disable(void)
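The new early_param lets the possible-CPU count be forced from the kernel command line instead of being derived only from ACPI/MP-table data; this is what the merge entry "x86: use possible_cpus=NUM to extend the possible cpus allowed" refers to. Example boot parameter (the value 8 is illustrative):

	possible_cpus=8

Anything above CONFIG_NR_CPUS is clamped, with the warning added in prefill_possible_map() above.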
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index 8da059f949be..ce5054642247 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -163,7 +163,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
 
 	while (!cpus_empty(flush_cpumask))
 		/* nothing. lockup detection does not belong here */
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 29887d7081a9..f8be6f1d2e48 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 
 	while (!cpus_empty(f->flush_cpumask))
 		cpu_relax();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 141907ab6e22..2d1f4c7e4052 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -72,9 +72,6 @@
 
 #include "cpu/mcheck/mce.h"
 
-DECLARE_BITMAP(used_vectors, NR_VECTORS);
-EXPORT_SYMBOL_GPL(used_vectors);
-
 asmlinkage int system_call(void);
 
 /* Do we ignore FPU interrupts ? */
@@ -89,6 +86,9 @@ gate_desc idt_table[256]
 	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
 #endif
 
+DECLARE_BITMAP(used_vectors, NR_VECTORS);
+EXPORT_SYMBOL_GPL(used_vectors);
+
 static int ignore_nmis;
 
 static inline void conditional_sti(struct pt_regs *regs)
@@ -941,9 +941,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
-#ifdef CONFIG_X86_32
 	int i;
-#endif
 
 #ifdef CONFIG_EISA
 	void __iomem *p = early_ioremap(0x0FFFD9, 4);
@@ -1000,11 +998,15 @@ void __init trap_init(void)
 	}
 
 	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
+#endif
 
 	/* Reserve all the builtin and the syscall vector: */
 	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
 		set_bit(i, used_vectors);
 
+#ifdef CONFIG_X86_64
+	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
+#else
 	set_bit(SYSCALL_VECTOR, used_vectors);
 #endif
 	/*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 254ee07f8635..c4c1f9e09402 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void)
 	/* Upper bound is clockevent's use of ulong for cycle deltas. */
 	evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt);
 	evt->min_delta_ns = clockevent_delta2ns(1, evt);
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 
 	printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n",
 	       evt->name, evt->mult, evt->shift);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 50a779264bb1..a7ed208f81e3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -738,7 +738,7 @@ static void lguest_time_init(void)
 
 	/* We can't set cpumask in the initializer: damn C limitations! Set it
 	 * here and register our timer device. */
-	lguest_clockevent.cpumask = cpumask_of_cpu(0);
+	lguest_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&lguest_clockevent);
 
 	/* Finally, we unblock the timer interrupt. */
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index 3624a364b7f3..bc4c7840b2a8 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ }
 };
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	return cpumask_of_cpu(cpu);
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 static int probe_bigsmp(void)
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 7b4e6d0d1690..4ba5ccaa1584 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 }
 #endif
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
 }
 
 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 71a309b122e6..511d7941364f 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
 }
 
 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 2c6d234e0009..2821ffc188b5 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -24,7 +24,7 @@ static int probe_summit(void)
 	return 0;
 }
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
 }
 
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
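bigsmp, es7000, numaq and summit all receive the same mechanical change: returning a cpumask_t by value copies the whole NR_CPUS-bit mask through the return slot, so the vector_allocation_domain() hook now fills a caller-provided mask instead. The shape of the conversion, in general form (signatures taken from the hunks above):

	/* before: the mask travels by value through the return */
	static cpumask_t vector_allocation_domain(int cpu);

	/* after: the caller owns the storage; the hook only fills it */
	static void vector_allocation_domain(int cpu, cpumask_t *retmask);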
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 52145007bd7e..a5bc05492b1e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -63,11 +63,6 @@ static int voyager_extended_cpus = 1;
 /* Used for the invalidate map that's also checked in the spinlock */
 static volatile unsigned long smp_invalidate_needed;
 
-/* Bitmask of currently online CPUs - used by setup.c for
-   /proc/cpuinfo, visible externally but still physical */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
@@ -218,8 +213,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -679,7 +672,7 @@ void __init smp_boot_cpus(void)
 
 	/* loop over all the extended VIC CPUs and boot them. The
 	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
 			continue;
 		do_boot_cpu(i);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index cebcbf152d46..71a14f89f89e 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -278,7 +278,7 @@ void __init numa_init_array(void)
 	int rr, i;
 
 	rr = first_node(node_online_map);
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
 	memnodemap[0] = 0;
 	node_set_online(0);
 	node_set(0, node_possible_map);
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < nr_cpu_ids; i++)
 		numa_set_node(i, 0);
 	e820_register_active_regions(0, start_pfn, last_pfn);
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 51c0a2fc14fe..09737c8af074 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		if (!node_online(i))
 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		int node = early_cpu_to_node(i);
 
 		if (node == NUMA_NO_NODE)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 773d68d3e912..503c240e26c7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1082,7 +1082,7 @@ static void drop_other_mm_ref(void *info)
 
 static void xen_drop_mm_ref(struct mm_struct *mm)
 {
-	cpumask_t mask;
+	cpumask_var_t mask;
 	unsigned cpu;
 
 	if (current->active_mm == mm) {
@@ -1094,7 +1094,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	}
 
 	/* Get the "official" set of cpus referring to our pagetable. */
-	mask = mm->cpu_vm_mask;
+	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
+		for_each_online_cpu(cpu) {
+			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
+				continue;
+			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
+		}
+		return;
+	}
+	cpumask_copy(mask, &mm->cpu_vm_mask);
 
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed
@@ -1103,11 +1112,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	   if needed. */
 	for_each_online_cpu(cpu) {
 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
-			cpu_set(cpu, mask);
+			cpumask_set_cpu(cpu, mask);
 	}
 
-	if (!cpus_empty(mask))
-		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+	if (!cpumask_empty(mask))
+		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
+	free_cpumask_var(mask);
 }
 #else
 static void xen_drop_mm_ref(struct mm_struct *mm)
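xen_drop_mm_ref() shows the full cpumask_var_t discipline: with CONFIG_CPUMASK_OFFSTACK=y the mask lives off-stack, so it must be allocated, the allocation may fail (here the failure path degrades to per-CPU smp_call_function_single() calls), and it must be freed. A condensed sketch of the lifecycle, under the same assumption that allocation can fail:

	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return;			/* or take a slower fallback path */
	cpumask_copy(tmp, cpu_online_mask);
	/* ... operate on tmp ... */
	free_cpumask_var(tmp);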
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index acd9b6705e02..c44e2069c7c7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -33,7 +33,7 @@
33#include "xen-ops.h" 33#include "xen-ops.h"
34#include "mmu.h" 34#include "mmu.h"
35 35
36cpumask_t xen_cpu_initialized_map; 36cpumask_var_t xen_cpu_initialized_map;
37 37
38static DEFINE_PER_CPU(int, resched_irq); 38static DEFINE_PER_CPU(int, resched_irq);
39static DEFINE_PER_CPU(int, callfunc_irq); 39static DEFINE_PER_CPU(int, callfunc_irq);
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	if (xen_smp_intr_init(0))
 		BUG();
 
-	xen_cpu_initialized_map = cpumask_of_cpu(0);
+	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+		panic("could not allocate xen_cpu_initialized_map\n");
+
+	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
 
 	/* Restrict the possible_map according to max_cpus. */
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
+		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
 		cpu_clear(cpu, cpu_possible_map);
 	}
@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	struct vcpu_guest_context *ctxt;
 	struct desc_struct *gdt;
 
-	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
+	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
 		return 0;
 
 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+			      enum ipi_vector vector)
 {
 	unsigned cpu;
 
-	cpus_and(mask, mask, cpu_online_map);
-
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, mask, cpu_online_mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
-static void xen_smp_send_call_function_ipi(cpumask_t mask)
+static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;
 
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+	xen_send_IPI_mask(cpumask_of(cpu),
+			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 2a234db5949b..212ffe012b76 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled)
 			pfn_to_mfn(xen_start_info->console.domU.mfn);
 	} else {
 #ifdef CONFIG_SMP
-		xen_cpu_initialized_map = cpu_online_map;
+		BUG_ON(xen_cpu_initialized_map == NULL);
+		cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
 #endif
 		xen_vcpu_restore();
 	}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c9f7cda48ed7..65d75a6be0ba 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -437,7 +437,7 @@ void xen_setup_timer(int cpu)
 	evt = &per_cpu(xen_clock_events, cpu);
 	memcpy(evt, xen_clockevent, sizeof(*evt));
 
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 	evt->irq = irq;
 
 	setup_runstate_info(cpu);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9e1afae8461f..c1f8faf0a2c5 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void);
 __cpuinit void xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 
-extern cpumask_t xen_cpu_initialized_map;
+extern cpumask_var_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
 #endif