-rw-r--r--  Documentation/cputopology.txt                 |  48
-rw-r--r--  arch/alpha/include/asm/smp.h                  |   1
-rw-r--r--  arch/alpha/include/asm/topology.h             |  17
-rw-r--r--  arch/alpha/kernel/irq.c                       |   2
-rw-r--r--  arch/alpha/kernel/process.c                   |   2
-rw-r--r--  arch/alpha/kernel/setup.c                     |   5
-rw-r--r--  arch/alpha/kernel/smp.c                       |   7
-rw-r--r--  arch/alpha/kernel/sys_dp264.c                 |   8
-rw-r--r--  arch/alpha/kernel/sys_titan.c                 |   4
-rw-r--r--  arch/arm/common/gic.c                         |   4
-rw-r--r--  arch/arm/kernel/irq.c                         |   2
-rw-r--r--  arch/arm/kernel/smp.c                         |  10
-rw-r--r--  arch/arm/mach-at91/at91rm9200_time.c          |   3
-rw-r--r--  arch/arm/mach-at91/at91sam926x_time.c         |   2
-rw-r--r--  arch/arm/mach-davinci/time.c                  |   2
-rw-r--r--  arch/arm/mach-imx/time.c                      |   2
-rw-r--r--  arch/arm/mach-ixp4xx/common.c                 |   2
-rw-r--r--  arch/arm/mach-msm/timer.c                     |   2
-rw-r--r--  arch/arm/mach-ns9xxx/time-ns9360.c            |   2
-rw-r--r--  arch/arm/mach-omap1/time.c                    |   2
-rw-r--r--  arch/arm/mach-omap1/timer32k.c                |   2
-rw-r--r--  arch/arm/mach-omap2/timer-gp.c                |   2
-rw-r--r--  arch/arm/mach-pxa/time.c                      |   2
-rw-r--r--  arch/arm/mach-realview/core.c                 |   2
-rw-r--r--  arch/arm/mach-realview/localtimer.c           |   4
-rw-r--r--  arch/arm/mach-sa1100/time.c                   |   2
-rw-r--r--  arch/arm/mach-versatile/core.c                |   2
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.c           |   4
-rw-r--r--  arch/arm/plat-mxc/time.c                      |   2
-rw-r--r--  arch/arm/plat-orion/time.c                    |   2
-rw-r--r--  arch/avr32/kernel/time.c                      |   2
-rw-r--r--  arch/blackfin/kernel/time-ts.c                |   2
-rw-r--r--  arch/cris/arch-v32/kernel/irq.c               |   4
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c               |   4
-rw-r--r--  arch/cris/include/asm/smp.h                   |   1
-rw-r--r--  arch/ia64/hp/sim/hpsim_irq.c                  |   2
-rw-r--r--  arch/ia64/include/asm/smp.h                   |   1
-rw-r--r--  arch/ia64/include/asm/topology.h              |   7
-rw-r--r--  arch/ia64/kernel/acpi.c                       |   2
-rw-r--r--  arch/ia64/kernel/iosapic.c                    |  35
-rw-r--r--  arch/ia64/kernel/irq.c                        |   9
-rw-r--r--  arch/ia64/kernel/msi_ia64.c                   |  12
-rw-r--r--  arch/ia64/kernel/smpboot.c                    |  10
-rw-r--r--  arch/ia64/kernel/topology.c                   |   2
-rw-r--r--  arch/ia64/sn/kernel/irq.c                     |   6
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c                  |   7
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c           |  27
-rw-r--r--  arch/m32r/Kconfig                             |   1
-rw-r--r--  arch/m32r/kernel/smpboot.c                    |   6
-rw-r--r--  arch/m68knommu/platform/coldfire/pit.c        |   2
-rw-r--r--  arch/mips/include/asm/irq.h                   |   3
-rw-r--r--  arch/mips/include/asm/mach-ip27/topology.h    |   4
-rw-r--r--  arch/mips/include/asm/smp.h                   |   3
-rw-r--r--  arch/mips/jazz/irq.c                          |   2
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c               |   4
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c                |   2
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c               |   2
-rw-r--r--  arch/mips/kernel/cevt-r4k.c                   |   2
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c                |   4
-rw-r--r--  arch/mips/kernel/cevt-smtc.c                  |   2
-rw-r--r--  arch/mips/kernel/cevt-txx9.c                  |   2
-rw-r--r--  arch/mips/kernel/i8253.c                      |   2
-rw-r--r--  arch/mips/kernel/irq-gic.c                    |   6
-rw-r--r--  arch/mips/kernel/smp-cmp.c                    |   6
-rw-r--r--  arch/mips/kernel/smp-mt.c                     |   2
-rw-r--r--  arch/mips/kernel/smp.c                        |   7
-rw-r--r--  arch/mips/kernel/smtc.c                       |   6
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c              |   6
-rw-r--r--  arch/mips/nxp/pnx8550/common/time.c           |   1
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c           |   6
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c                 |   2
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c               |   2
-rw-r--r--  arch/mips/sibyte/bcm1480/irq.c                |   8
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c                |   8
-rw-r--r--  arch/mips/sibyte/sb1250/irq.c                 |   8
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c                 |   8
-rw-r--r--  arch/mips/sni/time.c                          |   2
-rw-r--r--  arch/parisc/Kconfig                           |   1
-rw-r--r--  arch/parisc/kernel/irq.c                      |   6
-rw-r--r--  arch/parisc/kernel/smp.c                      |  15
-rw-r--r--  arch/powerpc/include/asm/topology.h           |  10
-rw-r--r--  arch/powerpc/kernel/irq.c                     |   2
-rw-r--r--  arch/powerpc/kernel/smp.c                     |   4
-rw-r--r--  arch/powerpc/kernel/time.c                    |   2
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.c  |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     |   4
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c         |   4
-rw-r--r--  arch/powerpc/sysdev/mpic.c                    |   4
-rw-r--r--  arch/powerpc/sysdev/mpic.h                    |   2
-rw-r--r--  arch/s390/Kconfig                             |   1
-rw-r--r--  arch/s390/include/asm/topology.h              |   1
-rw-r--r--  arch/s390/kernel/smp.c                        |   6
-rw-r--r--  arch/s390/kernel/time.c                       |   2
-rw-r--r--  arch/s390/kernel/topology.c                   |   5
-rw-r--r--  arch/sh/include/asm/smp.h                     |   2
-rw-r--r--  arch/sh/include/asm/topology.h                |   1
-rw-r--r--  arch/sh/kernel/smp.c                          |  10
-rw-r--r--  arch/sh/kernel/timers/timer-broadcast.c       |   2
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c             |   2
-rw-r--r--  arch/sparc/include/asm/smp_32.h               |   2
-rw-r--r--  arch/sparc/include/asm/topology_64.h          |  11
-rw-r--r--  arch/sparc/kernel/irq_64.c                    |  11
-rw-r--r--  arch/sparc/kernel/of_device_64.c              |   4
-rw-r--r--  arch/sparc/kernel/pci_msi.c                   |   4
-rw-r--r--  arch/sparc/kernel/smp_32.c                    |   6
-rw-r--r--  arch/sparc/kernel/smp_64.c                    |   4
-rw-r--r--  arch/sparc/kernel/sparc_ksyms_32.c            |   4
-rw-r--r--  arch/sparc/kernel/time_64.c                   |   2
-rw-r--r--  arch/um/kernel/smp.c                          |   7
-rw-r--r--  arch/um/kernel/time.c                         |   2
-rw-r--r--  arch/x86/include/asm/pci.h                    |  10
-rw-r--r--  arch/x86/include/asm/topology.h               |  36
-rw-r--r--  arch/x86/kernel/apic.c                        |   8
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c         |   4
-rw-r--r--  arch/x86/kernel/hpet.c                        |   8
-rw-r--r--  arch/x86/kernel/i8253.c                       |   2
-rw-r--r--  arch/x86/kernel/io_apic.c                     |  78
-rw-r--r--  arch/x86/kernel/irq_32.c                      |   2
-rw-r--r--  arch/x86/kernel/irq_64.c                      |   2
-rw-r--r--  arch/x86/kernel/mfgpt_32.c                    |   2
-rw-r--r--  arch/x86/kernel/setup_percpu.c                |  10
-rw-r--r--  arch/x86/kernel/smpboot.c                     |  17
-rw-r--r--  arch/x86/kernel/vmiclock_32.c                 |   2
-rw-r--r--  arch/x86/lguest/boot.c                        |   2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c           |   7
-rw-r--r--  arch/x86/xen/time.c                           |   2
-rw-r--r--  block/blk.h                                   |   4
-rw-r--r--  drivers/base/cpu.c                            |  46
-rw-r--r--  drivers/base/node.c                           |   4
-rw-r--r--  drivers/base/topology.c                       |   4
-rw-r--r--  drivers/clocksource/tcb_clksrc.c              |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c         |  17
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  |   8
-rw-r--r--  drivers/parisc/iosapic.c                      |   7
-rw-r--r--  drivers/pci/pci-sysfs.c                       |   4
-rw-r--r--  drivers/pci/probe.c                           |   4
-rw-r--r--  drivers/xen/events.c                          |   6
-rw-r--r--  fs/seq_file.c                                 |   3
-rw-r--r--  include/asm-generic/topology.h                |  14
-rw-r--r--  include/asm-m32r/smp.h                        |   2
-rw-r--r--  include/linux/bitmap.h                        |  35
-rw-r--r--  include/linux/clockchips.h                    |   4
-rw-r--r--  include/linux/cpumask.h                       | 284
-rw-r--r--  include/linux/interrupt.h                     |   4
-rw-r--r--  include/linux/irq.h                           |   3
-rw-r--r--  include/linux/seq_file.h                      |   7
-rw-r--r--  include/linux/smp.h                           |  18
-rw-r--r--  include/linux/threads.h                       |  16
-rw-r--r--  init/Kconfig                                  |   9
-rw-r--r--  kernel/cpu.c                                  |  97
-rw-r--r--  kernel/cpuset.c                               |   4
-rw-r--r--  kernel/irq/chip.c                             |   2
-rw-r--r--  kernel/irq/manage.c                           |  22
-rw-r--r--  kernel/irq/migration.c                        |  14
-rw-r--r--  kernel/irq/proc.c                             |  29
-rw-r--r--  kernel/profile.c                              |   4
-rw-r--r--  kernel/sched.c                                |  10
-rw-r--r--  kernel/sched_stats.h                          |   2
-rw-r--r--  kernel/smp.c                                  | 143
-rw-r--r--  kernel/taskstats.c                            |   2
-rw-r--r--  kernel/time/clockevents.c                     |   2
-rw-r--r--  kernel/time/tick-broadcast.c                  |   2
-rw-r--r--  kernel/time/tick-common.c                     |  12
-rw-r--r--  kernel/trace/trace.c                          |   4
-rw-r--r--  lib/Kconfig                                   |   7
-rw-r--r--  lib/cpumask.c                                 |  54
-rw-r--r--  mm/slub.c                                     |   2
167 files changed, 902 insertions(+), 759 deletions(-)
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index bd699da24666..45932ec21cee 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -31,3 +31,51 @@ not defined by include/asm-XXX/topology.h:
 2) core_id: 0
 3) thread_siblings: just the given CPU
 4) core_siblings: just the given CPU
+
+Additionally, cpu topology information is provided under
+/sys/devices/system/cpu and includes these files. The internal
+source for the output is in brackets ("[]").
+
+    kernel_max: the maximum cpu index allowed by the kernel configuration.
+		[NR_CPUS-1]
+
+    offline:	cpus that are not online because they have been
+		HOTPLUGGED off (see cpu-hotplug.txt) or exceed the limit
+		of cpus allowed by the kernel configuration (kernel_max
+		above). [~cpu_online_mask + cpus >= NR_CPUS]
+
+    online:	cpus that are online and being scheduled [cpu_online_mask]
+
+    possible:	cpus that have been allocated resources and can be
+		brought online if they are present. [cpu_possible_mask]
+
+    present:	cpus that have been identified as being present in the
+		system. [cpu_present_mask]
+
+The format for the above output is compatible with cpulist_parse()
+[see <linux/cpumask.h>]. Some examples follow.
+
+In this example, there are 64 cpus in the system but cpus 32-63 exceed
+the kernel max which is limited to 0..31 by the NR_CPUS config option
+being 32. Note also that cpus 2 and 4-31 are not online but could be
+brought online as they are both present and possible.
+
+     kernel_max: 31
+        offline: 2,4-31,32-63
+         online: 0-1,3
+       possible: 0-31
+        present: 0-31
+
+In this example, the NR_CPUS config option is 128, but the kernel was
+started with possible_cpus=144. There are 4 cpus in the system and cpu2
+was manually taken offline (and is the only cpu that can be brought
+online.)
+
+     kernel_max: 127
+        offline: 2,4-127,128-143
+         online: 0-1,3
+       possible: 0-127
+        present: 0-3
+
+See cpu-hotplug.txt for the possible_cpus=NUM kernel start parameter
+as well as more information on the various cpumask's.
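The cpulist format documented above is easy to consume outside the kernel as well. As a minimal userspace sketch (hypothetical example, not part of this patch; the sysfs path and the "N" / "N-M" token assumptions follow the documentation text):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read a sysfs cpulist and print one line per cpu id it names. */
int main(void)
{
	char buf[4096];
	FILE *f = fopen("/sys/devices/system/cpu/online", "r");

	if (!f || !fgets(buf, sizeof(buf), f))
		return 1;
	for (char *tok = strtok(buf, ",\n"); tok; tok = strtok(NULL, ",\n")) {
		int lo, hi;

		/* Each token is either a single cpu "N" or a range "N-M". */
		if (sscanf(tok, "%d-%d", &lo, &hi) != 2)
			hi = lo = atoi(tok);
		for (int cpu = lo; cpu <= hi; cpu++)
			printf("cpu%d\n", cpu);
	}
	fclose(f);
	return 0;
}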
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 544c69af8168..547e90951cec 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -45,7 +45,6 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern int smp_num_cpus;
-#define cpu_possible_map	cpu_present_map
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi(cpumask_t mask);
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 149532e162c4..b4f284c72ff3 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -39,7 +39,24 @@ static inline cpumask_t node_to_cpumask(int node)
 	return node_cpu_mask;
 }
 
+extern struct cpumask node_to_cpumask_map[];
+/* FIXME: This is dumb, recalculating every time. But simple. */
+static const struct cpumask *cpumask_of_node(int node)
+{
+	int cpu;
+
+	cpumask_clear(&node_to_cpumask_map[node]);
+
+	for_each_online_cpu(cpu) {
+		if (cpu_to_node(cpu) == node)
+			cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+	}
+
+	return &node_to_cpumask_map[node];
+}
+
 #define pcibus_to_cpumask(bus)	(cpu_online_map)
+#define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 #endif /* !CONFIG_NUMA */
 # include <asm-generic/topology.h>
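Because cpumask_of_node() returns a const pointer rather than a cpumask_t value, callers can walk a node's CPUs without copying an NR_CPUS-bit mask onto the stack. A hedged sketch of such a caller (illustrative only, not from this patch; it assumes <linux/cpumask.h> and the topology headers are in scope):

/* Hypothetical helper: count the online CPUs on a NUMA node. */
static int count_online_cpus_on_node(int node)
{
	int cpu, n = 0;

	/* Iterate the intersection of the node mask and the online mask. */
	for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
		n++;
	return n;
}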
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c626a821cdcb..d0f1620007f7 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	last_cpu = cpu;
 
 	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
-	irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 351407e07e71..f238370c907d 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -94,6 +94,7 @@ common_shutdown_1(void *generic_ptr)
 		flags |= 0x00040000UL; /* "remain halted" */
 		*pflags = flags;
 		cpu_clear(cpuid, cpu_present_map);
+		cpu_clear(cpuid, cpu_possible_map);
 		halt();
 	}
 #endif
@@ -120,6 +121,7 @@ common_shutdown_1(void *generic_ptr)
 #ifdef CONFIG_SMP
 	/* Wait for the secondaries to halt. */
 	cpu_clear(boot_cpuid, cpu_present_map);
+	cpu_clear(boot_cpuid, cpu_possible_map);
 	while (cpus_weight(cpu_present_map))
 		barrier();
 #endif
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index a449e999027c..02bee6983ce2 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -79,6 +79,11 @@ int alpha_l3_cacheshape;
 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
 #endif
 
+#ifdef CONFIG_NUMA
+struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_to_cpumask_map);
+#endif
+
 /* Which processor we booted from. */
 int boot_cpuid;
 
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index cf7da10097bb..d953e510f68d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -70,11 +70,6 @@ enum ipi_message_type {
 /* Set to a secondary's cpuid when it comes online. */
 static int smp_secondary_alive __devinitdata = 0;
 
-/* Which cpus ids came online. */
-cpumask_t cpu_online_map;
-
-EXPORT_SYMBOL(cpu_online_map);
-
 int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online. */
 EXPORT_SYMBOL(smp_num_cpus);
@@ -440,6 +435,7 @@ setup_smp(void)
 			((char *)cpubase + i*hwrpb->processor_size);
 		if ((cpu->flags & 0x1cc) == 0x1cc) {
 			smp_num_probed++;
+			cpu_set(i, cpu_possible_map);
 			cpu_set(i, cpu_present_map);
 			cpu->pal_revision = boot_cpu_palrev;
 		}
@@ -473,6 +469,7 @@ smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Nothing to do on a UP box, or when told not to. */
 	if (smp_num_probed == 1 || max_cpus == 0) {
+		cpu_possible_map = cpumask_of_cpu(boot_cpuid);
 		cpu_present_map = cpumask_of_cpu(boot_cpuid);
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		return;
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index c71b0fd7a61f..ab44c164d9d4 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -177,19 +177,19 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-dp264_set_affinity(unsigned int irq, cpumask_t affinity)
+dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, affinity);
+	cpu_set_irq_affinity(irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_set_affinity(unsigned int irq, cpumask_t affinity)
+clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, affinity);
+	cpu_set_irq_affinity(irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 52c91ccc1648..27f840a4ad3d 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -158,10 +158,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-titan_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
 	spin_lock(&titan_irq_lock);
-	titan_cpu_set_irq_affinity(irq - 16, affinity);
+	titan_cpu_set_irq_affinity(irq - 16, *affinity);
 	titan_update_irq_hw(titan_cached_irq_mask);
 	spin_unlock(&titan_irq_lock);
 }
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 7fc9860a97d7..c6884ba1d5ed 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -109,11 +109,11 @@ static void gic_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
+static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
 {
 	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
 	unsigned int shift = (irq % 4) * 8;
-	unsigned int cpu = first_cpu(mask_val);
+	unsigned int cpu = cpumask_first(mask_val);
 	u32 val;
 
 	spin_lock(&irq_controller_lock);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2f3eb795fa6e..7141cee1fab7 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -174,7 +174,7 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->chip->set_affinity(irq, cpumask_of_cpu(cpu));
+	desc->chip->set_affinity(irq, cpumask_of(cpu));
 	spin_unlock_irq(&desc->lock);
 }
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 019237d21622..55fa7ff96a3e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -34,16 +34,6 @@
 #include <asm/ptrace.h>
 
 /*
- * bitmask of present and online CPUs.
- * The present bitmask indicates that the CPU is physically present.
- * The online bitmask indicates that the CPU is up and running.
- */
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
-/*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index d140eae53ded..1ff1bda0a894 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -178,7 +178,6 @@ static struct clock_event_device clkevt = {
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift = 32,
 	.rating = 150,
-	.cpumask = CPU_MASK_CPU0,
 	.set_next_event = clkevt32k_next_event,
 	.set_mode = clkevt32k_mode,
 };
@@ -206,7 +205,7 @@ void __init at91rm9200_timer_init(void)
 	clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
 	clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
 	clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
-	clkevt.cpumask = cpumask_of_cpu(0);
+	clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&clkevt);
 
 	/* register clocksource */
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 122fd77ed580..b63e1d5f1bad 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -91,7 +91,6 @@ static struct clock_event_device pit_clkevt = {
 	.features = CLOCK_EVT_FEAT_PERIODIC,
 	.shift = 32,
 	.rating = 100,
-	.cpumask = CPU_MASK_CPU0,
 	.set_mode = pit_clkevt_mode,
 };
 
@@ -173,6 +172,7 @@ static void __init at91sam926x_pit_init(void)
 
 	/* Set up and register clockevents */
 	pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
+	pit_clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&pit_clkevt);
 }
 
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 3b9a296b5c4b..f8bcd29d17a6 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -322,7 +322,7 @@ static void __init davinci_timer_init(void)
 	clockevent_davinci.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_davinci);
 
-	clockevent_davinci.cpumask = cpumask_of_cpu(0);
+	clockevent_davinci.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_davinci);
 }
 
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index a11765f5f23b..aff0ebcfa847 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -184,7 +184,7 @@ static int __init imx_clockevent_init(unsigned long rate)
 	clockevent_imx.min_delta_ns =
 		clockevent_delta2ns(0xf, &clockevent_imx);
 
-	clockevent_imx.cpumask = cpumask_of_cpu(0);
+	clockevent_imx.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_imx);
 
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 7766f469456b..f4656d2ac8a8 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -487,7 +487,7 @@ static int __init ixp4xx_clockevent_init(void)
 		clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
 	clockevent_ixp4xx.min_delta_ns =
 		clockevent_delta2ns(0xf, &clockevent_ixp4xx);
-	clockevent_ixp4xx.cpumask = cpumask_of_cpu(0);
+	clockevent_ixp4xx.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_ixp4xx);
 	return 0;
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 345a14cb73c3..444d9c0f5ca6 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -182,7 +182,7 @@ static void __init msm_timer_init(void)
 		clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
 	/* 4 gets rounded down to 3 */
 	ce->min_delta_ns = clockevent_delta2ns(4, ce);
-	ce->cpumask = cpumask_of_cpu(0);
+	ce->cpumask = cpumask_of(0);
 
 	cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
 	res = clocksource_register(cs);
diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c
index a63424d083d9..41df69721769 100644
--- a/arch/arm/mach-ns9xxx/time-ns9360.c
+++ b/arch/arm/mach-ns9xxx/time-ns9360.c
@@ -173,7 +173,7 @@ static void __init ns9360_timer_init(void)
 	ns9360_clockevent_device.min_delta_ns =
 		clockevent_delta2ns(1, &ns9360_clockevent_device);
 
-	ns9360_clockevent_device.cpumask = cpumask_of_cpu(0);
+	ns9360_clockevent_device.cpumask = cpumask_of(0);
 	clockevents_register_device(&ns9360_clockevent_device);
 
 	setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT,
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 2cf7e32bd293..495a32c287b4 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -173,7 +173,7 @@ static __init void omap_init_mpu_timer(unsigned long rate)
 	clockevent_mpu_timer1.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_mpu_timer1);
 
-	clockevent_mpu_timer1.cpumask = cpumask_of_cpu(0);
+	clockevent_mpu_timer1.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_mpu_timer1);
 }
 
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 705367ece174..fd3f7396e162 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -187,7 +187,7 @@ static __init void omap_init_32k_timer(void)
 	clockevent_32k_timer.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_32k_timer);
 
-	clockevent_32k_timer.cpumask = cpumask_of_cpu(0);
+	clockevent_32k_timer.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_32k_timer);
 }
 
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 589393bedade..ae6036300f60 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -120,7 +120,7 @@ static void __init omap2_gp_clockevent_init(void)
 	clockevent_gpt.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_gpt);
 
-	clockevent_gpt.cpumask = cpumask_of_cpu(0);
+	clockevent_gpt.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_gpt);
 }
 
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 001624158519..95656a72268d 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -122,7 +122,6 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
 	.features = CLOCK_EVT_FEAT_ONESHOT,
 	.shift = 32,
 	.rating = 200,
-	.cpumask = CPU_MASK_CPU0,
 	.set_next_event = pxa_osmr0_set_next_event,
 	.set_mode = pxa_osmr0_set_mode,
 };
@@ -163,6 +162,7 @@ static void __init pxa_timer_init(void)
 		clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
 	ckevt_pxa_osmr0.min_delta_ns =
 		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1;
+	ckevt_pxa_osmr0.cpumask = cpumask_of(0);
 
 	cksrc_pxa_oscr0.mult =
 		clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift);
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 5f1d55963ced..bd2aa4f16141 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -624,7 +624,7 @@ static struct clock_event_device timer0_clockevent = {
 	.set_mode = timer_set_mode,
 	.set_next_event = timer_set_next_event,
 	.rating = 300,
-	.cpumask = CPU_MASK_ALL,
+	.cpumask = cpu_all_mask,
 };
 
 static void __init realview_clockevents_init(unsigned int timer_irq)
diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c
index 9019ef2e5611..67d6d9cc68b2 100644
--- a/arch/arm/mach-realview/localtimer.c
+++ b/arch/arm/mach-realview/localtimer.c
@@ -154,7 +154,7 @@ void __cpuinit local_timer_setup(void)
 	clk->set_mode = local_timer_set_mode;
 	clk->set_next_event = local_timer_set_next_event;
 	clk->irq = IRQ_LOCALTIMER;
-	clk->cpumask = cpumask_of_cpu(cpu);
+	clk->cpumask = cpumask_of(cpu);
 	clk->shift = 20;
 	clk->mult = div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift);
 	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
@@ -193,7 +193,7 @@ void __cpuinit local_timer_setup(void)
 	clk->rating = 200;
 	clk->set_mode = dummy_timer_set_mode;
 	clk->broadcast = smp_timer_broadcast;
-	clk->cpumask = cpumask_of_cpu(cpu);
+	clk->cpumask = cpumask_of(cpu);
 
 	clockevents_register_device(clk);
 }
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 8c5e727f3b75..711c0295c66f 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -73,7 +73,6 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
 	.features = CLOCK_EVT_FEAT_ONESHOT,
 	.shift = 32,
 	.rating = 200,
-	.cpumask = CPU_MASK_CPU0,
 	.set_next_event = sa1100_osmr0_set_next_event,
 	.set_mode = sa1100_osmr0_set_mode,
 };
@@ -110,6 +109,7 @@ static void __init sa1100_timer_init(void)
 		clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0);
 	ckevt_sa1100_osmr0.min_delta_ns =
 		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1;
+	ckevt_sa1100_osmr0.cpumask = cpumask_of(0);
 
 	cksrc_sa1100_oscr.mult =
 		clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index df25aa138509..1c43494f5c42 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -1005,7 +1005,7 @@ static void __init versatile_timer_init(void)
 	timer0_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xf, &timer0_clockevent);
 
-	timer0_clockevent.cpumask = cpumask_of_cpu(0);
+	timer0_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&timer0_clockevent);
 }
 
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 4de366e8b4c5..6d6bd5899240 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -260,10 +260,10 @@ static void em_stop(void)
 static void em_route_irq(int irq, unsigned int cpu)
 {
 	struct irq_desc *desc = irq_desc + irq;
-	cpumask_t mask = cpumask_of_cpu(cpu);
+	const struct cpumask *mask = cpumask_of(cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->affinity = mask;
+	desc->affinity = *mask;
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index fd28f5194f71..758a1293bcfa 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -190,7 +190,7 @@ static int __init mxc_clockevent_init(void)
 	clockevent_mxc.min_delta_ns =
 		clockevent_delta2ns(0xff, &clockevent_mxc);
 
-	clockevent_mxc.cpumask = cpumask_of_cpu(0);
+	clockevent_mxc.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_mxc);
 
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index 544d6b327f3a..6fa2923e6dca 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -149,7 +149,6 @@ static struct clock_event_device orion_clkevt = {
 	.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
 	.shift = 32,
 	.rating = 300,
-	.cpumask = CPU_MASK_CPU0,
 	.set_next_event = orion_clkevt_next_event,
 	.set_mode = orion_clkevt_mode,
 };
@@ -199,5 +198,6 @@ void __init orion_time_init(unsigned int irq, unsigned int tclk)
 	orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift);
 	orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt);
 	orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt);
+	orion_clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&orion_clkevt);
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 283481d74a5b..0ff46bf873b0 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -106,7 +106,6 @@ static struct clock_event_device comparator = {
 	.features = CLOCK_EVT_FEAT_ONESHOT,
 	.shift = 16,
 	.rating = 50,
-	.cpumask = CPU_MASK_CPU0,
 	.set_next_event = comparator_next_event,
 	.set_mode = comparator_mode,
 };
@@ -134,6 +133,7 @@ void __init time_init(void)
 	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
 	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
 	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
+	comparator.cpumask = cpumask_of(0);
 
 	sysreg_write(COMPARE, 0);
 	timer_irqaction.dev_id = &comparator;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index e887efc86c29..0ed2badfd746 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -162,7 +162,6 @@ static struct clock_event_device clockevent_bfin = {
 	.name = "bfin_core_timer",
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift = 32,
-	.cpumask = CPU_MASK_CPU0,
 	.set_next_event = bfin_timer_set_next_event,
 	.set_mode = bfin_timer_set_mode,
 };
@@ -193,6 +192,7 @@ static int __init bfin_clockevent_init(void)
 	clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
 	clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
 	clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
+	clockevent_bfin.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_bfin);
 
 	return 0;
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 173c141ac9ba..295131fee710 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -325,11 +325,11 @@ static void end_crisv32_irq(unsigned int irq)
 {
 }
 
-void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest)
+void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&irq_lock, flags);
-	irq_allocations[irq - FIRST_IRQ].mask = dest;
+	irq_allocations[irq - FIRST_IRQ].mask = *dest;
 	spin_unlock_irqrestore(&irq_lock, flags);
 }
 
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 52e16c6436f9..9dac17334640 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -29,11 +29,7 @@
 spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
 
 /* CPU masks */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */
diff --git a/arch/cris/include/asm/smp.h b/arch/cris/include/asm/smp.h
index dba33aba3e95..c615a06dd757 100644
--- a/arch/cris/include/asm/smp.h
+++ b/arch/cris/include/asm/smp.h
@@ -4,7 +4,6 @@
 #include <linux/cpumask.h>
 
 extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_possible_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c2f58ff364e7..cc0a3182db3c 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq)
 }
 
 static void
-hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
+hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
 {
 }
 
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 12d96e0cd513..21c402365d0e 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -57,7 +57,6 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __devinitdata;
 
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 35bcb641c9e5..66f0f1ef9e7f 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -34,6 +34,7 @@
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define node_to_cpumask(node) (node_to_cpu_mask[node])
+#define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
@@ -45,7 +46,7 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
  */
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
 
 /*
  * Determines the node for a given pci bus
@@ -121,6 +122,10 @@ extern void arch_fix_phys_package_id(int num, u32 slot);
 	node_to_cpumask(pcibus_to_node(bus)) \
 )
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?	\
+				 cpu_all_mask :			\
+				 cpumask_from_node(pcibus_to_node(bus)))
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bd7acc71e8a9..54ae373e6e22 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -1001,7 +1001,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5c4674ae8aea..5cfd3d91001a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, cpumask_t mask)
+iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	u32 high32, low32;
-	int dest, rte_index;
+	int cpu, dest, rte_index;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	struct iosapic_rte_info *rte;
 	struct iosapic *iosapic;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 
-	cpus_and(mask, mask, cpu_online_map);
-	if (cpus_empty(mask))
+	cpu = cpumask_first_and(cpu_online_mask, mask);
+	if (cpu >= nr_cpu_ids)
 		return;
 
-	if (irq_prepare_move(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
-	dest = cpu_physical_id(first_cpu(mask));
+	dest = cpu_physical_id(cpu);
 
 	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt */
@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 
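The iosapic conversion above captures the new target-selection idiom used throughout the series: instead of AND-ing two masks into a temporary cpumask_t and testing it for emptiness, the code intersects lazily with cpumask_first_and() and treats nr_cpu_ids as the "no CPU found" sentinel. A hedged sketch of the idiom in isolation (illustrative only; pick_online_target is a hypothetical name):

/* Return the first online CPU in *mask, or -1 if the intersection is empty. */
static int pick_online_target(const struct cpumask *mask)
{
	int cpu = cpumask_first_and(cpu_online_mask, mask);

	return cpu < nr_cpu_ids ? cpu : -1;	/* nr_cpu_ids means "none" */
}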
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7fd18f54c056..0b6db53fedcf 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS];
  */
 static void migrate_irqs(void)
 {
-	cpumask_t mask;
 	irq_desc_t *desc;
 	int irq, new_cpu;
 
@@ -152,15 +151,14 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
 			 */
 			vectors_in_migration[irq] = irq;
 
 			new_cpu = any_online_cpu(cpu_online_map);
-			mask = cpumask_of_cpu(new_cpu);
 
 			/*
 			 * Al three are essential, currently WARN_ON.. maybe panic?
@@ -168,7 +166,8 @@ static void migrate_irqs(void)
 			if (desc->chip && desc->chip->disable &&
 			    desc->chip->enable && desc->chip->set_affinity) {
 				desc->chip->disable(irq);
-				desc->chip->set_affinity(irq, mask);
+				desc->chip->set_affinity(irq,
+							 cpumask_of(new_cpu));
 				desc->chip->enable(irq);
 			} else {
 				WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 702a09c13238..890339339035 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -49,11 +49,12 @@
 static struct irq_chip ia64_msi_chip;
 
 #ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void ia64_set_msi_irq_affinity(unsigned int irq,
+				      const cpumask_t *cpu_mask)
 {
 	struct msi_msg msg;
 	u32 addr, data;
-	int cpu = first_cpu(cpu_mask);
+	int cpu = first_cpu(*cpu_mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
-	int cpu = first_cpu(mask);
-
+	int cpu = cpumask_first(mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	irq_desc[irq].affinity = *mask;
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1dcbb85fc4ee..11463994a7d5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu;
  */
 DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
-
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
@@ -688,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu)
 {
 	int new_cpei_cpu;
 	irq_desc_t *desc = NULL;
-	cpumask_t mask;
+	const struct cpumask *mask;
 	int retval = 0;
 
 	/*
@@ -701,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu)
 		 * Now re-target the CPEI to a different processor
 		 */
 		new_cpei_cpu = any_online_cpu(cpu_online_map);
-		mask = cpumask_of_cpu(new_cpei_cpu);
+		mask = cpumask_of(new_cpei_cpu);
 		set_cpei_target_cpu(new_cpei_cpu);
 		desc = irq_desc + ia64_cpe_irq;
 		/*
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index c75b914f2d6b..a8d61a3e9a94 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
 	cpumask_t shared_cpu_map;
 
 	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
-	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
 	len += sprintf(buf+len, "\n");
 	return len;
 }
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0c66dbdd1d72..66fd705e82c0 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -227,14 +227,14 @@ finish_up:
 	return new_irq_info;
 }
 
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
 	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
 	nasid_t nasid;
 	int slice;
 
-	nasid = cpuid_to_nasid(first_cpu(mask));
-	slice = cpuid_to_slice(first_cpu(mask));
+	nasid = cpuid_to_nasid(cpumask_first(mask));
+	slice = cpuid_to_slice(cpumask_first(mask));
 
 	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
 				 sn_irq_lh[irq], list)
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 83f190ffe350..ca553b0429ce 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
 }
 
 #ifdef CONFIG_SMP
-static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void sn_set_msi_irq_affinity(unsigned int irq,
+				    const struct cpumask *cpu_mask)
 {
 	struct msi_msg msg;
 	int slice;
@@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	struct sn_pcibus_provider *provider;
 	unsigned int cpu;
 
-	cpu = first_cpu(cpu_mask);
+	cpu = cpumask_first(cpu_mask);
 	sn_irq_info = sn_msi_info[irq].sn_irq_info;
 	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
 		return;
@@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpu_mask;
+	irq_desc[irq].affinity = *cpu_mask;
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 636588e7e068..be339477f906 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -385,7 +385,6 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	int j;
 	const char *slabname;
 	int ordinal;
-	cpumask_t cpumask;
 	char slice;
 	struct cpuinfo_ia64 *c;
 	struct sn_hwperf_port_info *ptdata;
@@ -473,23 +472,21 @@ static int sn_topology_show(struct seq_file *s, void *d)
 		 * CPUs on this node, if any
 		 */
 		if (!SN_HWPERF_IS_IONODE(obj)) {
-			cpumask = node_to_cpumask(ordinal);
-			for_each_online_cpu(i) {
-				if (cpu_isset(i, cpumask)) {
-					slice = 'a' + cpuid_to_slice(i);
-					c = cpu_data(i);
-					seq_printf(s, "cpu %d %s%c local"
-						" freq %luMHz, arch ia64",
-						i, obj->location, slice,
-						c->proc_freq / 1000000);
-					for_each_online_cpu(j) {
-						seq_printf(s, j ? ":%d" : ", dist %d",
-							node_distance(
+			for_each_cpu_and(i, cpu_online_mask,
+					 cpumask_of_node(ordinal)) {
+				slice = 'a' + cpuid_to_slice(i);
+				c = cpu_data(i);
+				seq_printf(s, "cpu %d %s%c local"
+					" freq %luMHz, arch ia64",
+					i, obj->location, slice,
+					c->proc_freq / 1000000);
+				for_each_online_cpu(j) {
+					seq_printf(s, j ? ":%d" : ", dist %d",
+						node_distance(
 							cpu_to_node(i),
 							cpu_to_node(j)));
-					}
-					seq_putc(s, '\n');
 				}
+				seq_putc(s, '\n');
 			}
 		}
 	}
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 29047d5c259a..cabba332cc48 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
10 default y 10 default y
11 select HAVE_IDE 11 select HAVE_IDE
12 select HAVE_OPROFILE 12 select HAVE_OPROFILE
13 select INIT_ALL_POSSIBLE
13 14
14config SBUS 15config SBUS
15 bool 16 bool
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 39cb6da72dcb..0f06b3722e96 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -73,17 +73,11 @@ static unsigned int bsp_phys_id = -1;
73/* Bitmask of physically existing CPUs */ 73/* Bitmask of physically existing CPUs */
74physid_mask_t phys_cpu_present_map; 74physid_mask_t phys_cpu_present_map;
75 75
76/* Bitmask of currently online CPUs */
77cpumask_t cpu_online_map;
78EXPORT_SYMBOL(cpu_online_map);
79
80cpumask_t cpu_bootout_map; 76cpumask_t cpu_bootout_map;
81cpumask_t cpu_bootin_map; 77cpumask_t cpu_bootin_map;
82static cpumask_t cpu_callin_map; 78static cpumask_t cpu_callin_map;
83cpumask_t cpu_callout_map; 79cpumask_t cpu_callout_map;
84EXPORT_SYMBOL(cpu_callout_map); 80EXPORT_SYMBOL(cpu_callout_map);
85cpumask_t cpu_possible_map = CPU_MASK_ALL;
86EXPORT_SYMBOL(cpu_possible_map);
87 81
88/* Per CPU bogomips and other parameters */ 82/* Per CPU bogomips and other parameters */
89struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned; 83struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
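
Selecting INIT_ALL_POSSIBLE in the m32r Kconfig is what allows smpboot.c to drop its private cpu_online_map and cpu_possible_map definitions: both maps now live in common code, and the Kconfig symbol only chooses the initial value of the possible map. The centralized definitions look roughly like this (paraphrased from kernel/cpu.c of this series; not part of this diff):

    /* kernel/cpu.c, approximate shape */
    #ifdef CONFIG_INIT_ALL_POSSIBLE
    cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
    #else
    cpumask_t cpu_possible_map __read_mostly;
    #endif
    EXPORT_SYMBOL(cpu_possible_map);

    cpumask_t cpu_online_map __read_mostly;
    EXPORT_SYMBOL(cpu_online_map);

The same Kconfig select appears below for parisc and s390, whose smp.c files lose the equivalent definitions.
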
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c
index c5b916700b22..2a12e7fa9748 100644
--- a/arch/m68knommu/platform/coldfire/pit.c
+++ b/arch/m68knommu/platform/coldfire/pit.c
@@ -156,7 +156,7 @@ void hw_timer_init(void)
156{ 156{
157 u32 imr; 157 u32 imr;
158 158
159 cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 159 cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id());
160 cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32); 160 cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32);
161 cf_pit_clockevent.max_delta_ns = 161 cf_pit_clockevent.max_delta_ns =
162 clockevent_delta2ns(0xFFFF, &cf_pit_clockevent); 162 clockevent_delta2ns(0xFFFF, &cf_pit_clockevent);
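
struct clock_event_device's cpumask field is now a const struct cpumask * rather than an embedded cpumask_t, which is why every timer driver in this series swaps cpumask_of_cpu(cpu), a struct returned by value, for cpumask_of(cpu), a pointer into a shared constant table. A sketch of the resulting registration pattern:

    #include <linux/clockchips.h>
    #include <linux/cpumask.h>

    static struct clock_event_device my_clockevent = {
            .name     = "example",
            .features = CLOCK_EVT_FEAT_PERIODIC,
            /*
             * .cpumask is assigned at runtime: cpumask_of(cpu) is not a
             * constant expression, so CPU_MASK_CPU0-style static
             * initializers have to go (see the ds1287, gt641xx and txx9
             * hunks below).
             */
    };

    static void my_timer_init(unsigned int cpu)
    {
            my_clockevent.cpumask = cpumask_of(cpu); /* pointer, no copy */
            clockevents_register_device(&my_clockevent);
    }
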
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index a58f0eecc68f..abc62aa744ac 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -49,7 +49,8 @@ static inline void smtc_im_ack_irq(unsigned int irq)
49#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 49#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
50#include <linux/cpumask.h> 50#include <linux/cpumask.h>
51 51
52extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity); 52extern void plat_set_irq_affinity(unsigned int irq,
53 const struct cpumask *affinity);
53extern void smtc_forward_irq(unsigned int irq); 54extern void smtc_forward_irq(unsigned int irq);
54 55
55/* 56/*
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 7785bec732f2..c1c3f5b2f18f 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -25,11 +25,13 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
25#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) 25#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
26#define parent_node(node) (node) 26#define parent_node(node) (node)
27#define node_to_cpumask(node) (hub_data(node)->h_cpus) 27#define node_to_cpumask(node) (hub_data(node)->h_cpus)
28#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node))) 28#define cpumask_of_node(node) (&hub_data(node)->h_cpus)
29#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
29struct pci_bus; 30struct pci_bus;
30extern int pcibus_to_node(struct pci_bus *); 31extern int pcibus_to_node(struct pci_bus *);
31 32
32#define pcibus_to_cpumask(bus) (cpu_online_map) 33#define pcibus_to_cpumask(bus) (cpu_online_map)
34#define cpumask_of_pcibus(bus) (cpu_online_mask)
33 35
34extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; 36extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
35 37
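
The ip27 topology.h hunk is typical of the header changes in this series: pointer-returning variants (cpumask_of_node, cpumask_of_pcibus) are added beside the old value-returning macros so that callers can be converted incrementally. For orientation, the correspondences assumed by the conversions in this diff:

    /* struct-by-value API                 const-pointer API              */
    /* cpus_and(dst, a, b)             ->  cpumask_and(&dst, &a, &b)      */
    /* cpus_empty(m)                   ->  cpumask_empty(&m)              */
    /* cpus_weight(m)                  ->  cpumask_weight(&m)             */
    /* cpus_intersects(a, b)           ->  cpumask_intersects(&a, &b)     */
    /* first_cpu(m)                    ->  cpumask_first(&m)              */
    /* for_each_cpu_mask(cpu, m)       ->  for_each_cpu(cpu, &m)          */
    /* cpumask_of_cpu(cpu)             ->  cpumask_of(cpu)                */
    /* node_to_cpumask(node)           ->  cpumask_of_node(node)          */
    /* CPU_MASK_ALL, cpu_online_map    ->  cpu_all_mask, cpu_online_mask  */
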
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 0ff5b523ea77..86557b5d1b3f 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -38,9 +38,6 @@ extern int __cpu_logical_map[NR_CPUS];
38#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ 38#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
39#define SMP_CALL_FUNCTION 0x2 39#define SMP_CALL_FUNCTION 0x2
40 40
41extern cpumask_t phys_cpu_present_map;
42#define cpu_possible_map phys_cpu_present_map
43
44extern void asmlinkage smp_bootstrap(void); 41extern void asmlinkage smp_bootstrap(void);
45 42
46/* 43/*
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index d7f8a782aae4..03965cb1b252 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -146,7 +146,7 @@ void __init plat_time_init(void)
146 146
147 BUG_ON(HZ != 100); 147 BUG_ON(HZ != 100);
148 148
149 cd->cpumask = cpumask_of_cpu(cpu); 149 cd->cpumask = cpumask_of(cpu);
150 clockevents_register_device(cd); 150 clockevents_register_device(cd);
151 action->dev_id = cd; 151 action->dev_id = cd;
152 setup_irq(JAZZ_TIMER_IRQ, action); 152 setup_irq(JAZZ_TIMER_IRQ, action);
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 0a57f86945f1..b820661678b0 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -126,7 +126,7 @@ void __cpuinit sb1480_clockevent_init(void)
126 cd->min_delta_ns = clockevent_delta2ns(2, cd); 126 cd->min_delta_ns = clockevent_delta2ns(2, cd);
127 cd->rating = 200; 127 cd->rating = 200;
128 cd->irq = irq; 128 cd->irq = irq;
129 cd->cpumask = cpumask_of_cpu(cpu); 129 cd->cpumask = cpumask_of(cpu);
130 cd->set_next_event = sibyte_next_event; 130 cd->set_next_event = sibyte_next_event;
131 cd->set_mode = sibyte_set_mode; 131 cd->set_mode = sibyte_set_mode;
132 clockevents_register_device(cd); 132 clockevents_register_device(cd);
@@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(void)
148 action->name = name; 148 action->name = name;
149 action->dev_id = cd; 149 action->dev_id = cd;
150 150
151 irq_set_affinity(irq, cpumask_of_cpu(cpu)); 151 irq_set_affinity(irq, cpumask_of(cpu));
152 setup_irq(irq, action); 152 setup_irq(irq, action);
153} 153}
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index df4acb68bfb5..1ada45ea0700 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -88,7 +88,6 @@ static void ds1287_event_handler(struct clock_event_device *dev)
88static struct clock_event_device ds1287_clockevent = { 88static struct clock_event_device ds1287_clockevent = {
89 .name = "ds1287", 89 .name = "ds1287",
90 .features = CLOCK_EVT_FEAT_PERIODIC, 90 .features = CLOCK_EVT_FEAT_PERIODIC,
91 .cpumask = CPU_MASK_CPU0,
92 .set_next_event = ds1287_set_next_event, 91 .set_next_event = ds1287_set_next_event,
93 .set_mode = ds1287_set_mode, 92 .set_mode = ds1287_set_mode,
94 .event_handler = ds1287_event_handler, 93 .event_handler = ds1287_event_handler,
@@ -122,6 +121,7 @@ int __init ds1287_clockevent_init(int irq)
122 clockevent_set_clock(cd, 32768); 121 clockevent_set_clock(cd, 32768);
123 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 122 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
124 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 123 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
124 cd->cpumask = cpumask_of(0);
125 125
126 clockevents_register_device(&ds1287_clockevent); 126 clockevents_register_device(&ds1287_clockevent);
127 127
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 6e2f58520afb..e9b787feedcb 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -96,7 +96,6 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
96static struct clock_event_device gt641xx_timer0_clockevent = { 96static struct clock_event_device gt641xx_timer0_clockevent = {
97 .name = "gt641xx-timer0", 97 .name = "gt641xx-timer0",
98 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 98 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
99 .cpumask = CPU_MASK_CPU0,
100 .irq = GT641XX_TIMER0_IRQ, 99 .irq = GT641XX_TIMER0_IRQ,
101 .set_next_event = gt641xx_timer0_set_next_event, 100 .set_next_event = gt641xx_timer0_set_next_event,
102 .set_mode = gt641xx_timer0_set_mode, 101 .set_mode = gt641xx_timer0_set_mode,
@@ -132,6 +131,7 @@ static int __init gt641xx_timer0_clockevent_init(void)
132 clockevent_set_clock(cd, gt641xx_base_clock); 131 clockevent_set_clock(cd, gt641xx_base_clock);
133 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 132 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 133 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
134 cd->cpumask = cpumask_of(0);
135 135
136 clockevents_register_device(&gt641xx_timer0_clockevent); 136 clockevents_register_device(&gt641xx_timer0_clockevent);
137 137
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 4a4c59f2737a..e1ec83b68031 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -195,7 +195,7 @@ int __cpuinit mips_clockevent_init(void)
195 195
196 cd->rating = 300; 196 cd->rating = 300;
197 cd->irq = irq; 197 cd->irq = irq;
198 cd->cpumask = cpumask_of_cpu(cpu); 198 cd->cpumask = cpumask_of(cpu);
199 cd->set_next_event = mips_next_event; 199 cd->set_next_event = mips_next_event;
200 cd->set_mode = mips_set_clock_mode; 200 cd->set_mode = mips_set_clock_mode;
201 cd->event_handler = mips_event_handler; 201 cd->event_handler = mips_event_handler;
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 63ac3ad462bc..a2eebaafda52 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -125,7 +125,7 @@ void __cpuinit sb1250_clockevent_init(void)
125 cd->min_delta_ns = clockevent_delta2ns(2, cd); 125 cd->min_delta_ns = clockevent_delta2ns(2, cd);
126 cd->rating = 200; 126 cd->rating = 200;
127 cd->irq = irq; 127 cd->irq = irq;
128 cd->cpumask = cpumask_of_cpu(cpu); 128 cd->cpumask = cpumask_of(cpu);
129 cd->set_next_event = sibyte_next_event; 129 cd->set_next_event = sibyte_next_event;
130 cd->set_mode = sibyte_set_mode; 130 cd->set_mode = sibyte_set_mode;
131 clockevents_register_device(cd); 131 clockevents_register_device(cd);
@@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(void)
147 action->name = name; 147 action->name = name;
148 action->dev_id = cd; 148 action->dev_id = cd;
149 149
150 irq_set_affinity(irq, cpumask_of_cpu(cpu)); 150 irq_set_affinity(irq, cpumask_of(cpu));
151 setup_irq(irq, action); 151 setup_irq(irq, action);
152} 152}
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 5162fe4b5952..6d45e24db5bf 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -292,7 +292,7 @@ int __cpuinit mips_clockevent_init(void)
292 292
293 cd->rating = 300; 293 cd->rating = 300;
294 cd->irq = irq; 294 cd->irq = irq;
295 cd->cpumask = cpumask_of_cpu(cpu); 295 cd->cpumask = cpumask_of(cpu);
296 cd->set_next_event = mips_next_event; 296 cd->set_next_event = mips_next_event;
297 cd->set_mode = mips_set_clock_mode; 297 cd->set_mode = mips_set_clock_mode;
298 cd->event_handler = mips_event_handler; 298 cd->event_handler = mips_event_handler;
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index b5fc4eb412d2..eccf7d6096bd 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -112,7 +112,6 @@ static struct clock_event_device txx9tmr_clock_event_device = {
112 .name = "TXx9", 112 .name = "TXx9",
113 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 113 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
114 .rating = 200, 114 .rating = 200,
115 .cpumask = CPU_MASK_CPU0,
116 .set_mode = txx9tmr_set_mode, 115 .set_mode = txx9tmr_set_mode,
117 .set_next_event = txx9tmr_set_next_event, 116 .set_next_event = txx9tmr_set_next_event,
118}; 117};
@@ -150,6 +149,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
150 clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); 149 clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
151 cd->min_delta_ns = clockevent_delta2ns(0xf, cd); 150 cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
152 cd->irq = irq; 151 cd->irq = irq;
 152 cd->cpumask = cpumask_of(0);
153 clockevents_register_device(cd); 153 clockevents_register_device(cd);
154 setup_irq(irq, &txx9tmr_irq); 154 setup_irq(irq, &txx9tmr_irq);
155 printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n", 155 printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index b6ac55162b9a..f4d187825f96 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -115,7 +115,7 @@ void __init setup_pit_timer(void)
115 * Start pit with the boot cpu mask and make it global after the 115 * Start pit with the boot cpu mask and make it global after the
116 * IO_APIC has been initialized. 116 * IO_APIC has been initialized.
117 */ 117 */
118 cd->cpumask = cpumask_of_cpu(cpu); 118 cd->cpumask = cpumask_of(cpu);
119 clockevent_set_clock(cd, CLOCK_TICK_RATE); 119 clockevent_set_clock(cd, CLOCK_TICK_RATE);
120 cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); 120 cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
121 cd->min_delta_ns = clockevent_delta2ns(0xF, cd); 121 cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index f0a4bb19e096..494a49a317e9 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq)
155 155
156static DEFINE_SPINLOCK(gic_lock); 156static DEFINE_SPINLOCK(gic_lock);
157 157
158static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) 158static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
159{ 159{
160 cpumask_t tmp = CPU_MASK_NONE; 160 cpumask_t tmp = CPU_MASK_NONE;
161 unsigned long flags; 161 unsigned long flags;
@@ -164,7 +164,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
164 pr_debug(KERN_DEBUG "%s called\n", __func__); 164 pr_debug(KERN_DEBUG "%s called\n", __func__);
165 irq -= _irqbase; 165 irq -= _irqbase;
166 166
167 cpus_and(tmp, cpumask, cpu_online_map); 167 cpumask_and(&tmp, cpumask, cpu_online_mask);
168 if (cpus_empty(tmp)) 168 if (cpus_empty(tmp))
169 return; 169 return;
170 170
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
187 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); 187 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
188 188
189 } 189 }
190 irq_desc[irq].affinity = cpumask; 190 irq_desc[irq].affinity = *cpumask;
191 spin_unlock_irqrestore(&gic_lock, flags); 191 spin_unlock_irqrestore(&gic_lock, flags);
192 192
193} 193}
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index ca476c4f62a5..f27beca4b26d 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -51,10 +51,10 @@ static int __init allowcpus(char *str)
51 int len; 51 int len;
52 52
53 cpus_clear(cpu_allow_map); 53 cpus_clear(cpu_allow_map);
54 if (cpulist_parse(str, cpu_allow_map) == 0) { 54 if (cpulist_parse(str, &cpu_allow_map) == 0) {
55 cpu_set(0, cpu_allow_map); 55 cpu_set(0, cpu_allow_map);
56 cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map); 56 cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
57 len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map); 57 len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map);
58 buf[len] = '\0'; 58 buf[len] = '\0';
59 pr_debug("Allowable CPUs: %s\n", buf); 59 pr_debug("Allowable CPUs: %s\n", buf);
60 return 1; 60 return 1;
@@ -226,7 +226,7 @@ void __init cmp_smp_setup(void)
226 226
227 for (i = 1; i < NR_CPUS; i++) { 227 for (i = 1; i < NR_CPUS; i++) {
228 if (amon_cpu_avail(i)) { 228 if (amon_cpu_avail(i)) {
229 cpu_set(i, phys_cpu_present_map); 229 cpu_set(i, cpu_possible_map);
230 __cpu_number_map[i] = ++ncpu; 230 __cpu_number_map[i] = ++ncpu;
231 __cpu_logical_map[ncpu] = i; 231 __cpu_logical_map[ncpu] = i;
232 } 232 }
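
allowcpus() above picks up the pointer-taking parse and format helpers: cpulist_parse() and cpulist_scnprintf() now receive a struct cpumask * (their old forms were macros that took the mask by name). Usage, sketched with a hypothetical CPU list string:

    static void example_parse(void)
    {
            char buf[64];
            cpumask_t allowed;
            int len;

            if (cpulist_parse("0-3,8", &allowed) == 0) {
                    len = cpulist_scnprintf(buf, sizeof(buf) - 1, &allowed);
                    buf[len] = '\0';
                    pr_debug("Allowable CPUs: %s\n", buf);
            }
    }
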
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 87a1816c1f45..6f7ee5ac46ee 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
70 write_vpe_c0_vpeconf0(tmp); 70 write_vpe_c0_vpeconf0(tmp);
71 71
72 /* Record this as available CPU */ 72 /* Record this as available CPU */
73 cpu_set(tc, phys_cpu_present_map); 73 cpu_set(tc, cpu_possible_map);
74 __cpu_number_map[tc] = ++ncpu; 74 __cpu_number_map[tc] = ++ncpu;
75 __cpu_logical_map[ncpu] = tc; 75 __cpu_logical_map[ncpu] = tc;
76 } 76 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 8bf88faf5afd..3da94704f816 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -44,15 +44,10 @@
44#include <asm/mipsmtregs.h> 44#include <asm/mipsmtregs.h>
45#endif /* CONFIG_MIPS_MT_SMTC */ 45#endif /* CONFIG_MIPS_MT_SMTC */
46 46
47cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
48volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 47volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
49cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
50int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 48int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
51int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 49int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
52 50
53EXPORT_SYMBOL(phys_cpu_present_map);
54EXPORT_SYMBOL(cpu_online_map);
55
56extern void cpu_idle(void); 51extern void cpu_idle(void);
57 52
58/* Number of TCs (or siblings in Intel speak) per CPU core */ 53/* Number of TCs (or siblings in Intel speak) per CPU core */
@@ -195,7 +190,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
195/* preload SMP state for boot cpu */ 190/* preload SMP state for boot cpu */
196void __devinit smp_prepare_boot_cpu(void) 191void __devinit smp_prepare_boot_cpu(void)
197{ 192{
198 cpu_set(0, phys_cpu_present_map); 193 cpu_set(0, cpu_possible_map);
199 cpu_set(0, cpu_online_map); 194 cpu_set(0, cpu_online_map);
200 cpu_set(0, cpu_callin_map); 195 cpu_set(0, cpu_callin_map);
201} 196}
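
With cpu_possible_map defined in common code, MIPS no longer needs phys_cpu_present_map (formerly aliased to cpu_possible_map by the #define deleted from asm/smp.h above); platform probe code sets bits in the generic map directly. The smp-cmp, smp-mt, smtc, yosemite and sibyte hunks in this diff all reduce to this shape (example_cpu_present() is a hypothetical probe):

    static void example_smp_setup(void)
    {
            int i;

            cpus_clear(cpu_possible_map);   /* by-value ops still work:
                                               cpumask_t is unchanged */
            for (i = 0; i < NR_CPUS; i++)
                    if (example_cpu_present(i))
                            cpu_set(i, cpu_possible_map);
    }
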
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 897fb2b4751c..b6cca01ff82b 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -290,7 +290,7 @@ static void smtc_configure_tlb(void)
290 * possibly leave some TCs/VPEs as "slave" processors. 290 * possibly leave some TCs/VPEs as "slave" processors.
291 * 291 *
292 * Use c0_MVPConf0 to find out how many TCs are available, setting up 292 * Use c0_MVPConf0 to find out how many TCs are available, setting up
293 * phys_cpu_present_map and the logical/physical mappings. 293 * cpu_possible_map and the logical/physical mappings.
294 */ 294 */
295 295
296int __init smtc_build_cpu_map(int start_cpu_slot) 296int __init smtc_build_cpu_map(int start_cpu_slot)
@@ -304,7 +304,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
304 */ 304 */
305 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 305 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
306 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { 306 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
307 cpu_set(i, phys_cpu_present_map); 307 cpu_set(i, cpu_possible_map);
308 __cpu_number_map[i] = i; 308 __cpu_number_map[i] = i;
309 __cpu_logical_map[i] = i; 309 __cpu_logical_map[i] = i;
310 } 310 }
@@ -521,7 +521,7 @@ void smtc_prepare_cpus(int cpus)
521 * Pull any physically present but unused TCs out of circulation. 521 * Pull any physically present but unused TCs out of circulation.
522 */ 522 */
523 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { 523 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
524 cpu_clear(tc, phys_cpu_present_map); 524 cpu_clear(tc, cpu_possible_map);
525 cpu_clear(tc, cpu_present_map); 525 cpu_clear(tc, cpu_present_map);
526 tc++; 526 tc++;
527 } 527 }
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index f84a46a8ae6e..aabd7274507b 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -114,9 +114,9 @@ struct plat_smp_ops msmtc_smp_ops = {
114 */ 114 */
115 115
116 116
117void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) 117void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
118{ 118{
119 cpumask_t tmask = affinity; 119 cpumask_t tmask = *affinity;
120 int cpu = 0; 120 int cpu = 0;
121 void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); 121 void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
122 122
@@ -139,7 +139,7 @@ void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
139 * be made to forward to an offline "CPU". 139 * be made to forward to an offline "CPU".
140 */ 140 */
141 141
142 for_each_cpu_mask(cpu, affinity) { 142 for_each_cpu(cpu, affinity) {
143 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) 143 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
144 cpu_clear(cpu, tmask); 144 cpu_clear(cpu, tmask);
145 } 145 }
diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c
index 62f495b57f93..cf293b279098 100644
--- a/arch/mips/nxp/pnx8550/common/time.c
+++ b/arch/mips/nxp/pnx8550/common/time.c
@@ -102,6 +102,7 @@ __init void plat_time_init(void)
102 unsigned int p; 102 unsigned int p;
103 unsigned int pow2p; 103 unsigned int pow2p;
104 104
105 pnx8xxx_clockevent.cpumask = cpu_none_mask;
105 clockevents_register_device(&pnx8xxx_clockevent); 106 clockevents_register_device(&pnx8xxx_clockevent);
106 clocksource_register(&pnx_clocksource); 107 clocksource_register(&pnx_clocksource);
107 108
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 3a7df647ca77..f78c29b68d77 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -141,7 +141,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
141} 141}
142 142
143/* 143/*
144 * Detect available CPUs, populate phys_cpu_present_map before smp_init 144 * Detect available CPUs, populate cpu_possible_map before smp_init
145 * 145 *
146 * We don't want to start the secondary CPU yet nor do we have a nice probing 146 * We don't want to start the secondary CPU yet nor do we have a nice probing
147 * feature in PMON so we just assume presence of the secondary core. 147 * feature in PMON so we just assume presence of the secondary core.
@@ -150,10 +150,10 @@ static void __init yos_smp_setup(void)
150{ 150{
151 int i; 151 int i;
152 152
153 cpus_clear(phys_cpu_present_map); 153 cpus_clear(cpu_possible_map);
154 154
155 for (i = 0; i < 2; i++) { 155 for (i = 0; i < 2; i++) {
156 cpu_set(i, phys_cpu_present_map); 156 cpu_set(i, cpu_possible_map);
157 __cpu_number_map[i] = i; 157 __cpu_number_map[i] = i;
158 __cpu_logical_map[i] = i; 158 __cpu_logical_map[i] = i;
159 } 159 }
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index ba5cdebeaf0d..5b47d6b65275 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
76 /* Only let it join in if it's marked enabled */ 76 /* Only let it join in if it's marked enabled */
77 if ((acpu->cpu_info.flags & KLINFO_ENABLE) && 77 if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
78 (tot_cpus_found != NR_CPUS)) { 78 (tot_cpus_found != NR_CPUS)) {
79 cpu_set(cpuid, phys_cpu_present_map); 79 cpu_set(cpuid, cpu_possible_map);
80 alloc_cpupda(cpuid, tot_cpus_found); 80 alloc_cpupda(cpuid, tot_cpus_found);
81 cpus_found++; 81 cpus_found++;
82 tot_cpus_found++; 82 tot_cpus_found++;
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 1327c2746fb7..f024057a35f8 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -134,7 +134,7 @@ void __cpuinit hub_rt_clock_event_init(void)
134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
135 cd->rating = 200; 135 cd->rating = 200;
136 cd->irq = irq; 136 cd->irq = irq;
137 cd->cpumask = cpumask_of_cpu(cpu); 137 cd->cpumask = cpumask_of(cpu);
138 cd->set_next_event = rt_next_event; 138 cd->set_next_event = rt_next_event;
139 cd->set_mode = rt_set_mode; 139 cd->set_mode = rt_set_mode;
140 clockevents_register_device(cd); 140 clockevents_register_device(cd);
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index a35818ed4263..12b465d404df 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq);
50static void disable_bcm1480_irq(unsigned int irq); 50static void disable_bcm1480_irq(unsigned int irq);
51static void ack_bcm1480_irq(unsigned int irq); 51static void ack_bcm1480_irq(unsigned int irq);
52#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
53static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask); 53static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
54#endif 54#endif
55 55
56#ifdef CONFIG_PCI 56#ifdef CONFIG_PCI
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
109} 109}
110 110
111#ifdef CONFIG_SMP 111#ifdef CONFIG_SMP
112static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) 112static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
113{ 113{
114 int i = 0, old_cpu, cpu, int_on, k; 114 int i = 0, old_cpu, cpu, int_on, k;
115 u64 cur_ints; 115 u64 cur_ints;
@@ -117,11 +117,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
117 unsigned long flags; 117 unsigned long flags;
118 unsigned int irq_dirty; 118 unsigned int irq_dirty;
119 119
120 if (cpus_weight(mask) != 1) { 120 if (cpumask_weight(mask) != 1) {
121 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); 121 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
122 return; 122 return;
123 } 123 }
124 i = first_cpu(mask); 124 i = cpumask_first(mask);
125 125
126 /* Convert logical CPU to physical CPU */ 126 /* Convert logical CPU to physical CPU */
127 cpu = cpu_logical_map(i); 127 cpu = cpu_logical_map(i);
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index bd9eeb43ed0e..dddfda8e8294 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -136,7 +136,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
136 136
137/* 137/*
138 * Use CFE to find out how many CPUs are available, setting up 138 * Use CFE to find out how many CPUs are available, setting up
139 * phys_cpu_present_map and the logical/physical mappings. 139 * cpu_possible_map and the logical/physical mappings.
140 * XXXKW will the boot CPU ever not be physical 0? 140 * XXXKW will the boot CPU ever not be physical 0?
141 * 141 *
142 * Common setup before any secondaries are started 142 * Common setup before any secondaries are started
@@ -145,14 +145,14 @@ static void __init bcm1480_smp_setup(void)
145{ 145{
146 int i, num; 146 int i, num;
147 147
148 cpus_clear(phys_cpu_present_map); 148 cpus_clear(cpu_possible_map);
149 cpu_set(0, phys_cpu_present_map); 149 cpu_set(0, cpu_possible_map);
150 __cpu_number_map[0] = 0; 150 __cpu_number_map[0] = 0;
151 __cpu_logical_map[0] = 0; 151 __cpu_logical_map[0] = 0;
152 152
153 for (i = 1, num = 0; i < NR_CPUS; i++) { 153 for (i = 1, num = 0; i < NR_CPUS; i++) {
154 if (cfe_cpu_stop(i) == 0) { 154 if (cfe_cpu_stop(i) == 0) {
155 cpu_set(i, phys_cpu_present_map); 155 cpu_set(i, cpu_possible_map);
156 __cpu_number_map[i] = ++num; 156 __cpu_number_map[i] = ++num;
157 __cpu_logical_map[num] = i; 157 __cpu_logical_map[num] = i;
158 } 158 }
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index a5158483986e..808ac2959b8c 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq);
50static void disable_sb1250_irq(unsigned int irq); 50static void disable_sb1250_irq(unsigned int irq);
51static void ack_sb1250_irq(unsigned int irq); 51static void ack_sb1250_irq(unsigned int irq);
52#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
53static void sb1250_set_affinity(unsigned int irq, cpumask_t mask); 53static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
54#endif 54#endif
55 55
56#ifdef CONFIG_SIBYTE_HAS_LDT 56#ifdef CONFIG_SIBYTE_HAS_LDT
@@ -103,16 +103,16 @@ void sb1250_unmask_irq(int cpu, int irq)
103} 103}
104 104
105#ifdef CONFIG_SMP 105#ifdef CONFIG_SMP
106static void sb1250_set_affinity(unsigned int irq, cpumask_t mask) 106static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
107{ 107{
108 int i = 0, old_cpu, cpu, int_on; 108 int i = 0, old_cpu, cpu, int_on;
109 u64 cur_ints; 109 u64 cur_ints;
110 struct irq_desc *desc = irq_desc + irq; 110 struct irq_desc *desc = irq_desc + irq;
111 unsigned long flags; 111 unsigned long flags;
112 112
113 i = first_cpu(mask); 113 i = cpumask_first(mask);
114 114
115 if (cpus_weight(mask) > 1) { 115 if (cpumask_weight(mask) > 1) {
116 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); 116 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
117 return; 117 return;
118 } 118 }
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 0734b933e969..5950a288a7da 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -124,7 +124,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
124 124
125/* 125/*
126 * Use CFE to find out how many CPUs are available, setting up 126 * Use CFE to find out how many CPUs are available, setting up
127 * phys_cpu_present_map and the logical/physical mappings. 127 * cpu_possible_map and the logical/physical mappings.
128 * XXXKW will the boot CPU ever not be physical 0? 128 * XXXKW will the boot CPU ever not be physical 0?
129 * 129 *
130 * Common setup before any secondaries are started 130 * Common setup before any secondaries are started
@@ -133,14 +133,14 @@ static void __init sb1250_smp_setup(void)
133{ 133{
134 int i, num; 134 int i, num;
135 135
136 cpus_clear(phys_cpu_present_map); 136 cpus_clear(cpu_possible_map);
137 cpu_set(0, phys_cpu_present_map); 137 cpu_set(0, cpu_possible_map);
138 __cpu_number_map[0] = 0; 138 __cpu_number_map[0] = 0;
139 __cpu_logical_map[0] = 0; 139 __cpu_logical_map[0] = 0;
140 140
141 for (i = 1, num = 0; i < NR_CPUS; i++) { 141 for (i = 1, num = 0; i < NR_CPUS; i++) {
142 if (cfe_cpu_stop(i) == 0) { 142 if (cfe_cpu_stop(i) == 0) {
143 cpu_set(i, phys_cpu_present_map); 143 cpu_set(i, cpu_possible_map);
144 __cpu_number_map[i] = ++num; 144 __cpu_number_map[i] = ++num;
145 __cpu_logical_map[num] = i; 145 __cpu_logical_map[num] = i;
146 } 146 }
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 796e3ce28720..69f5f88711cc 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -80,7 +80,7 @@ static void __init sni_a20r_timer_setup(void)
80 struct irqaction *action = &a20r_irqaction; 80 struct irqaction *action = &a20r_irqaction;
81 unsigned int cpu = smp_processor_id(); 81 unsigned int cpu = smp_processor_id();
82 82
83 cd->cpumask = cpumask_of_cpu(cpu); 83 cd->cpumask = cpumask_of(cpu);
84 clockevents_register_device(cd); 84 clockevents_register_device(cd);
85 action->dev_id = cd; 85 action->dev_id = cd;
86 setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction); 86 setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 644a70b1b04e..aacf11d33723 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,6 +11,7 @@ config PARISC
11 select HAVE_OPROFILE 11 select HAVE_OPROFILE
12 select RTC_CLASS 12 select RTC_CLASS
13 select RTC_DRV_PARISC 13 select RTC_DRV_PARISC
14 select INIT_ALL_POSSIBLE
14 help 15 help
15 The PA-RISC microprocessor is designed by Hewlett-Packard and used 16 The PA-RISC microprocessor is designed by Hewlett-Packard and used
16 in many of their workstations & servers (HP9000 700 and 800 series, 17 in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 23ef950df008..4cea935e2f99 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
131 return 0; 131 return 0;
132} 132}
133 133
134static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) 134static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
135{ 135{
136 if (cpu_check_affinity(irq, &dest)) 136 if (cpu_check_affinity(irq, dest))
137 return; 137 return;
138 138
139 irq_desc[irq].affinity = dest; 139 irq_desc[irq].affinity = *dest;
140} 140}
141#endif 141#endif
142 142
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index d47f3975c9c6..80bc000523fa 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -67,21 +67,6 @@ static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is boo
67 67
68static int parisc_max_cpus __read_mostly = 1; 68static int parisc_max_cpus __read_mostly = 1;
69 69
70/* online cpus are ones that we've managed to bring up completely
71 * possible cpus are all valid cpu
72 * present cpus are all detected cpu
73 *
74 * On startup we bring up the "possible" cpus. Since we discover
75 * CPUs later, we add them as hotplug, so the possible cpu mask is
76 * empty in the beginning.
77 */
78
79cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */
80cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */
81
82EXPORT_SYMBOL(cpu_online_map);
83EXPORT_SYMBOL(cpu_possible_map);
84
85DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; 70DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
86 71
87enum ipi_message_type { 72enum ipi_message_type {
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index c32da6f97999..bcf25c2b8d21 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
22 return numa_cpumask_lookup_table[node]; 22 return numa_cpumask_lookup_table[node];
23} 23}
24 24
25#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
26
25static inline int node_to_first_cpu(int node) 27static inline int node_to_first_cpu(int node)
26{ 28{
27 cpumask_t tmp; 29 return cpumask_first(cpumask_of_node(node));
28 tmp = node_to_cpumask(node);
29 return first_cpu(tmp);
30} 30}
31 31
32int of_node_to_nid(struct device_node *device); 32int of_node_to_nid(struct device_node *device);
@@ -46,6 +46,10 @@ static inline int pcibus_to_node(struct pci_bus *bus)
46 node_to_cpumask(pcibus_to_node(bus)) \ 46 node_to_cpumask(pcibus_to_node(bus)) \
47 ) 47 )
48 48
49#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
50 cpu_all_mask : \
51 cpumask_of_node(pcibus_to_node(bus)))
52
49/* sched_domains SD_NODE_INIT for PPC64 machines */ 53/* sched_domains SD_NODE_INIT for PPC64 machines */
50#define SD_NODE_INIT (struct sched_domain) { \ 54#define SD_NODE_INIT (struct sched_domain) { \
51 .span = CPU_MASK_NONE, \ 55 .span = CPU_MASK_NONE, \
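
The new powerpc cpumask_of_pcibus() keeps the established fallback semantics: a bus with no known node (-1) maps to every CPU, otherwise to the CPUs of its NUMA node. A sketch of the intended use, choosing an online CPU near a device (pick_irq_cpu() is hypothetical):

    static int pick_irq_cpu(struct pci_bus *bus)
    {
            const struct cpumask *mask = cpumask_of_pcibus(bus);
            int cpu;

            /* first online CPU local to the bus, if any */
            for_each_cpu_and(cpu, mask, cpu_online_mask)
                    return cpu;

            return cpumask_first(cpu_online_mask);
    }
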
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac222d0ab12e..23b8b5e36f98 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map)
237 mask = map; 237 mask = map;
238 } 238 }
239 if (irq_desc[irq].chip->set_affinity) 239 if (irq_desc[irq].chip->set_affinity)
240 irq_desc[irq].chip->set_affinity(irq, mask); 240 irq_desc[irq].chip->set_affinity(irq, &mask);
241 else if (irq_desc[irq].action && !(warned++)) 241 else if (irq_desc[irq].action && !(warned++))
242 printk("Cannot set affinity for irq %i\n", irq); 242 printk("Cannot set affinity for irq %i\n", irq);
243 } 243 }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ac3f721d235..65484b2200b3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -59,13 +59,9 @@
59 59
60struct thread_info *secondary_ti; 60struct thread_info *secondary_ti;
61 61
62cpumask_t cpu_possible_map = CPU_MASK_NONE;
63cpumask_t cpu_online_map = CPU_MASK_NONE;
64DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 62DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
65DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; 63DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
66 64
67EXPORT_SYMBOL(cpu_online_map);
68EXPORT_SYMBOL(cpu_possible_map);
69EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 65EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
70EXPORT_PER_CPU_SYMBOL(cpu_core_map); 66EXPORT_PER_CPU_SYMBOL(cpu_core_map);
71 67
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e1f3a5140429..99f1ddd68582 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -844,7 +844,7 @@ static void register_decrementer_clockevent(int cpu)
844 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; 844 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
845 845
846 *dec = decrementer_clockevent; 846 *dec = decrementer_clockevent;
847 dec->cpumask = cpumask_of_cpu(cpu); 847 dec->cpumask = cpumask_of(cpu);
848 848
849 printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", 849 printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
850 dec->name, dec->mult, dec->shift, cpu); 850 dec->name, dec->mult, dec->shift, cpu);
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 906a0a2a9fe1..1410443731eb 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -80,10 +80,10 @@ static void cpu_affinity_set(struct spu *spu, int cpu)
80 u64 route; 80 u64 route;
81 81
82 if (nr_cpus_node(spu->node)) { 82 if (nr_cpus_node(spu->node)) {
83 cpumask_t spumask = node_to_cpumask(spu->node); 83 const struct cpumask *spumask = cpumask_of_node(spu->node),
84 cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu)); 84 *cpumask = cpumask_of_node(cpu_to_node(cpu));
85 85
86 if (!cpus_intersects(spumask, cpumask)) 86 if (!cpumask_intersects(spumask, cpumask))
87 return; 87 return;
88 } 88 }
89 89
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2ad914c47493..6a0ad196aeb3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -166,9 +166,9 @@ void spu_update_sched_info(struct spu_context *ctx)
166static int __node_allowed(struct spu_context *ctx, int node) 166static int __node_allowed(struct spu_context *ctx, int node)
167{ 167{
168 if (nr_cpus_node(node)) { 168 if (nr_cpus_node(node)) {
169 cpumask_t mask = node_to_cpumask(node); 169 const struct cpumask *mask = cpumask_of_node(node);
170 170
171 if (cpus_intersects(mask, ctx->cpus_allowed)) 171 if (cpumask_intersects(mask, &ctx->cpus_allowed))
172 return 1; 172 return 1;
173 } 173 }
174 174
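
The two cell hunks (spu_priv1_mmio.c and spufs/sched.c) remove the most expensive pattern of the old API, where each node check copied one or two NR_CPUS-bit masks onto the stack before intersecting them. Reduced to essentials, the converted test mirrors __node_allowed() above:

    static int node_allowed(struct spu_context *ctx, int node)
    {
            /* both operands by pointer; no stack copies */
            return nr_cpus_node(node) &&
                   cpumask_intersects(cpumask_of_node(node),
                                      &ctx->cpus_allowed);
    }
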
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index f7a69021b7bf..84e058f1e1cc 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq)
332 lpar_xirr_info_set((0xff << 24) | irq); 332 lpar_xirr_info_set((0xff << 24) | irq);
333} 333}
334 334
335static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) 335static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
336{ 336{
337 unsigned int irq; 337 unsigned int irq;
338 int status; 338 int status;
@@ -870,7 +870,7 @@ void xics_migrate_irqs_away(void)
870 870
871 /* Reset affinity to all cpus */ 871 /* Reset affinity to all cpus */
872 irq_desc[virq].affinity = CPU_MASK_ALL; 872 irq_desc[virq].affinity = CPU_MASK_ALL;
873 desc->chip->set_affinity(virq, CPU_MASK_ALL); 873 desc->chip->set_affinity(virq, cpu_all_mask);
874unlock: 874unlock:
875 spin_unlock_irqrestore(&desc->lock, flags); 875 spin_unlock_irqrestore(&desc->lock, flags);
876 } 876 }
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index c82babb70074..3e0d89dcdba2 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -806,7 +806,7 @@ static void mpic_end_ipi(unsigned int irq)
806 806
807#endif /* CONFIG_SMP */ 807#endif /* CONFIG_SMP */
808 808
809void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) 809void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
810{ 810{
811 struct mpic *mpic = mpic_from_irq(irq); 811 struct mpic *mpic = mpic_from_irq(irq);
812 unsigned int src = mpic_irq_to_hw(irq); 812 unsigned int src = mpic_irq_to_hw(irq);
@@ -818,7 +818,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
818 } else { 818 } else {
819 cpumask_t tmp; 819 cpumask_t tmp;
820 820
821 cpus_and(tmp, cpumask, cpu_online_map); 821 cpumask_and(&tmp, cpumask, cpu_online_mask);
822 822
823 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 823 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
824 mpic_physmask(cpus_addr(tmp)[0])); 824 mpic_physmask(cpus_addr(tmp)[0]));
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 6209c62a426d..3cef2af10f42 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
36 36
37extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); 37extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
38extern void mpic_set_vector(unsigned int virq, unsigned int vector); 38extern void mpic_set_vector(unsigned int virq, unsigned int vector);
39extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask); 39extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
40 40
41#endif /* _POWERPC_SYSDEV_MPIC_H */ 41#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8152fefc97b9..19577aeffd7b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -83,6 +83,7 @@ config S390
83 select HAVE_KRETPROBES 83 select HAVE_KRETPROBES
84 select HAVE_KVM if 64BIT 84 select HAVE_KVM if 64BIT
85 select HAVE_ARCH_TRACEHOOK 85 select HAVE_ARCH_TRACEHOOK
86 select INIT_ALL_POSSIBLE
86 87
87source "init/Kconfig" 88source "init/Kconfig"
88 89
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index d96c91643458..fff4a86c602a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -6,6 +6,7 @@
6#define mc_capable() (1) 6#define mc_capable() (1)
7 7
8cpumask_t cpu_coregroup_map(unsigned int cpu); 8cpumask_t cpu_coregroup_map(unsigned int cpu);
9const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
9 10
10extern cpumask_t cpu_core_map[NR_CPUS]; 11extern cpumask_t cpu_core_map[NR_CPUS];
11 12
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fc78541dc57..3ed5c7a83c6c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -55,12 +55,6 @@
55struct _lowcore *lowcore_ptr[NR_CPUS]; 55struct _lowcore *lowcore_ptr[NR_CPUS];
56EXPORT_SYMBOL(lowcore_ptr); 56EXPORT_SYMBOL(lowcore_ptr);
57 57
58cpumask_t cpu_online_map = CPU_MASK_NONE;
59EXPORT_SYMBOL(cpu_online_map);
60
61cpumask_t cpu_possible_map = CPU_MASK_ALL;
62EXPORT_SYMBOL(cpu_possible_map);
63
64static struct task_struct *current_set[NR_CPUS]; 58static struct task_struct *current_set[NR_CPUS];
65 59
66static u8 smp_cpu_type; 60static u8 smp_cpu_type;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 5be981a36c3e..d649600df5b9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -160,7 +160,7 @@ void init_cpu_timer(void)
160 cd->min_delta_ns = 1; 160 cd->min_delta_ns = 1;
161 cd->max_delta_ns = LONG_MAX; 161 cd->max_delta_ns = LONG_MAX;
162 cd->rating = 400; 162 cd->rating = 400;
163 cd->cpumask = cpumask_of_cpu(cpu); 163 cd->cpumask = cpumask_of(cpu);
164 cd->set_next_event = s390_next_event; 164 cd->set_next_event = s390_next_event;
165 cd->set_mode = s390_set_mode; 165 cd->set_mode = s390_set_mode;
166 166
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 90e9ba11eba1..cc362c9ea8f1 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -97,6 +97,11 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
97 return mask; 97 return mask;
98} 98}
99 99
100const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
101{
102 return &cpu_core_map[cpu];
103}
104
100static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) 105static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
101{ 106{
102 unsigned int cpu; 107 unsigned int cpu;
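
The s390 change demonstrates the transition strategy used where a by-value accessor is part of a wider interface: cpu_coregroup_mask() is added alongside cpu_coregroup_map() rather than replacing it, so callers can migrate one at a time. The pair, reduced to essentials:

    cpumask_t cpu_coregroup_map(unsigned int cpu)   /* old: returns a copy */
    {
            return cpu_core_map[cpu];
    }

    const struct cpumask *cpu_coregroup_mask(unsigned int cpu)   /* new */
    {
            return &cpu_core_map[cpu];
    }
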
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 85b660c17eb0..c24e9c6a1736 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -31,7 +31,7 @@ enum {
31}; 31};
32 32
33void smp_message_recv(unsigned int msg); 33void smp_message_recv(unsigned int msg);
34void smp_timer_broadcast(cpumask_t mask); 34void smp_timer_broadcast(const struct cpumask *mask);
35 35
36void local_timer_interrupt(void); 36void local_timer_interrupt(void);
37void local_timer_setup(unsigned int cpu); 37void local_timer_setup(unsigned int cpu);
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 95f0085e098a..9aa160d0efe5 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -33,6 +33,7 @@
33#define parent_node(node) ((void)(node),0) 33#define parent_node(node) ((void)(node),0)
34 34
35#define node_to_cpumask(node) ((void)node, cpu_online_map) 35#define node_to_cpumask(node) ((void)node, cpu_online_map)
36#define cpumask_of_node(node) ((void)node, cpu_online_mask)
36#define node_to_first_cpu(node) ((void)(node),0) 37#define node_to_first_cpu(node) ((void)(node),0)
37 38
38#define pcibus_to_node(bus) ((void)(bus), -1) 39#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 3c5ad1660bbc..8f4027412614 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -31,12 +31,6 @@
31int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 31int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
32int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 32int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
33 33
34cpumask_t cpu_possible_map;
35EXPORT_SYMBOL(cpu_possible_map);
36
37cpumask_t cpu_online_map;
38EXPORT_SYMBOL(cpu_online_map);
39
40static inline void __init smp_store_cpu_info(unsigned int cpu) 34static inline void __init smp_store_cpu_info(unsigned int cpu)
41{ 35{
42 struct sh_cpuinfo *c = cpu_data + cpu; 36 struct sh_cpuinfo *c = cpu_data + cpu;
@@ -190,11 +184,11 @@ void arch_send_call_function_single_ipi(int cpu)
190 plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); 184 plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
191} 185}
192 186
193void smp_timer_broadcast(cpumask_t mask) 187void smp_timer_broadcast(const struct cpumask *mask)
194{ 188{
195 int cpu; 189 int cpu;
196 190
197 for_each_cpu_mask(cpu, mask) 191 for_each_cpu(cpu, mask)
198 plat_send_ipi(cpu, SMP_MSG_TIMER); 192 plat_send_ipi(cpu, SMP_MSG_TIMER);
199} 193}
200 194
diff --git a/arch/sh/kernel/timers/timer-broadcast.c b/arch/sh/kernel/timers/timer-broadcast.c
index c2317635230f..96e8eaea1e62 100644
--- a/arch/sh/kernel/timers/timer-broadcast.c
+++ b/arch/sh/kernel/timers/timer-broadcast.c
@@ -51,7 +51,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
51 clk->mult = 1; 51 clk->mult = 1;
52 clk->set_mode = dummy_timer_set_mode; 52 clk->set_mode = dummy_timer_set_mode;
53 clk->broadcast = smp_timer_broadcast; 53 clk->broadcast = smp_timer_broadcast;
54 clk->cpumask = cpumask_of_cpu(cpu); 54 clk->cpumask = cpumask_of(cpu);
55 55
56 clockevents_register_device(clk); 56 clockevents_register_device(clk);
57} 57}
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 3c61ddd4d43e..0db3f9510336 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -263,7 +263,7 @@ static int tmu_timer_init(void)
263 tmu0_clockevent.min_delta_ns = 263 tmu0_clockevent.min_delta_ns =
264 clockevent_delta2ns(1, &tmu0_clockevent); 264 clockevent_delta2ns(1, &tmu0_clockevent);
265 265
266 tmu0_clockevent.cpumask = cpumask_of_cpu(0); 266 tmu0_clockevent.cpumask = cpumask_of(0);
267 267
268 clockevents_register_device(&tmu0_clockevent); 268 clockevents_register_device(&tmu0_clockevent);
269 269
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index a8180e546a48..8408d9d2a662 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -29,8 +29,6 @@
29 */ 29 */
30 30
31extern unsigned char boot_cpu_id; 31extern unsigned char boot_cpu_id;
32extern cpumask_t phys_cpu_present_map;
33#define cpu_possible_map phys_cpu_present_map
34 32
35typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, 33typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long); 34 unsigned long, unsigned long);
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 001c04027c82..3270cfb1ab53 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -16,8 +16,12 @@ static inline cpumask_t node_to_cpumask(int node)
16{ 16{
17 return numa_cpumask_lookup_table[node]; 17 return numa_cpumask_lookup_table[node];
18} 18}
19#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
19 20
20/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 21/*
22 * Returns a pointer to the cpumask of CPUs on Node 'node'.
23 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
24 */
21#define node_to_cpumask_ptr(v, node) \ 25#define node_to_cpumask_ptr(v, node) \
22 cpumask_t *v = &(numa_cpumask_lookup_table[node]) 26 cpumask_t *v = &(numa_cpumask_lookup_table[node])
23 27
@@ -26,9 +30,7 @@ static inline cpumask_t node_to_cpumask(int node)
26 30
27static inline int node_to_first_cpu(int node) 31static inline int node_to_first_cpu(int node)
28{ 32{
29 cpumask_t tmp; 33 return cpumask_first(cpumask_of_node(node));
30 tmp = node_to_cpumask(node);
31 return first_cpu(tmp);
32} 34}
33 35
34struct pci_bus; 36struct pci_bus;
@@ -82,5 +84,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
82#endif /* CONFIG_SMP */ 84#endif /* CONFIG_SMP */
83 85
84#define cpu_coregroup_map(cpu) (cpu_core_map[cpu]) 86#define cpu_coregroup_map(cpu) (cpu_core_map[cpu])
87#define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu])
85 88
86#endif /* _ASM_SPARC64_TOPOLOGY_H */ 89#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index a3ea2bcb95de..cab8e0286871 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
312 } 312 }
313} 313}
314 314
315static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask) 315static void sun4u_set_affinity(unsigned int virt_irq,
316 const struct cpumask *mask)
316{ 317{
317 sun4u_irq_enable(virt_irq); 318 sun4u_irq_enable(virt_irq);
318} 319}
@@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq)
362 ino, err); 363 ino, err);
363} 364}
364 365
365static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) 366static void sun4v_set_affinity(unsigned int virt_irq,
367 const struct cpumask *mask)
366{ 368{
367 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 369 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
368 unsigned long cpuid = irq_choose_cpu(virt_irq); 370 unsigned long cpuid = irq_choose_cpu(virt_irq);
@@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq)
429 dev_handle, dev_ino, err); 431 dev_handle, dev_ino, err);
430} 432}
431 433
432static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) 434static void sun4v_virt_set_affinity(unsigned int virt_irq,
435 const struct cpumask *mask)
433{ 436{
434 unsigned long cpuid, dev_handle, dev_ino; 437 unsigned long cpuid, dev_handle, dev_ino;
435 int err; 438 int err;
@@ -851,7 +854,7 @@ void fixup_irqs(void)
851 !(irq_desc[irq].status & IRQ_PER_CPU)) { 854 !(irq_desc[irq].status & IRQ_PER_CPU)) {
852 if (irq_desc[irq].chip->set_affinity) 855 if (irq_desc[irq].chip->set_affinity)
853 irq_desc[irq].chip->set_affinity(irq, 856 irq_desc[irq].chip->set_affinity(irq,
854 irq_desc[irq].affinity); 857 &irq_desc[irq].affinity);
855 } 858 }
856 spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 859 spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
857 } 860 }
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 46e231f7c5ce..4873f28905b0 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -778,9 +778,9 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
778out: 778out:
779 nid = of_node_to_nid(dp); 779 nid = of_node_to_nid(dp);
780 if (nid != -1) { 780 if (nid != -1) {
781 cpumask_t numa_mask = node_to_cpumask(nid); 781 cpumask_t numa_mask = *cpumask_of_node(nid);
782 782
783 irq_set_affinity(irq, numa_mask); 783 irq_set_affinity(irq, &numa_mask);
784 } 784 }
785 785
786 return irq; 786 return irq;
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 2e680f34f727..4ef282e81912 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -286,9 +286,9 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
286 286
287 nid = pbm->numa_node; 287 nid = pbm->numa_node;
288 if (nid != -1) { 288 if (nid != -1) {
289 cpumask_t numa_mask = node_to_cpumask(nid); 289 cpumask_t numa_mask = *cpumask_of_node(nid);
290 290
291 irq_set_affinity(irq, numa_mask); 291 irq_set_affinity(irq, &numa_mask);
292 } 292 }
293 err = request_irq(irq, sparc64_msiq_interrupt, 0, 293 err = request_irq(irq, sparc64_msiq_interrupt, 0,
294 "MSIQ", 294 "MSIQ",
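
In both sparc hunks above, the minimal conversion dereferences cpumask_of_node(nid) into a local cpumask_t and passes its address, preserving the old copy semantics exactly. Since irq_set_affinity() now accepts a const pointer, the local copy looks like a candidate for a later cleanup, roughly (an observation, not a change made by this series):

    nid = pbm->numa_node;
    if (nid != -1)
            irq_set_affinity(irq, cpumask_of_node(nid));
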
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e396c1f17a92..1e5ac4e282e1 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -39,8 +39,6 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
39unsigned char boot_cpu_id = 0; 39unsigned char boot_cpu_id = 0;
40unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */ 40unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
41 41
42cpumask_t cpu_online_map = CPU_MASK_NONE;
43cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
44cpumask_t smp_commenced_mask = CPU_MASK_NONE; 42cpumask_t smp_commenced_mask = CPU_MASK_NONE;
45 43
46/* The only guaranteed locking primitive available on all Sparc 44/* The only guaranteed locking primitive available on all Sparc
@@ -334,7 +332,7 @@ void __init smp_setup_cpu_possible_map(void)
334 instance = 0; 332 instance = 0;
335 while (!cpu_find_by_instance(instance, NULL, &mid)) { 333 while (!cpu_find_by_instance(instance, NULL, &mid)) {
336 if (mid < NR_CPUS) { 334 if (mid < NR_CPUS) {
337 cpu_set(mid, phys_cpu_present_map); 335 cpu_set(mid, cpu_possible_map);
338 cpu_set(mid, cpu_present_map); 336 cpu_set(mid, cpu_present_map);
339 } 337 }
340 instance++; 338 instance++;
@@ -354,7 +352,7 @@ void __init smp_prepare_boot_cpu(void)
354 352
355 current_thread_info()->cpu = cpuid; 353 current_thread_info()->cpu = cpuid;
356 cpu_set(cpuid, cpu_online_map); 354 cpu_set(cpuid, cpu_online_map);
357 cpu_set(cpuid, phys_cpu_present_map); 355 cpu_set(cpuid, cpu_possible_map);
358} 356}
359 357
360int __cpuinit __cpu_up(unsigned int cpu) 358int __cpuinit __cpu_up(unsigned int cpu)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index bfe99d82d458..46329799f346 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -49,14 +49,10 @@
 
 int sparc64_multi_core __read_mostly;
 
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_online_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
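
sparc64 here (and UM, x86, and Voyager later in this patch) stops defining and exporting its own cpu_possible_map and cpu_online_map; the canonical maps now live in common kernel code, and architecture code only marks bits in them. A hedged sketch, assuming the set_cpu_*() accessors this cpumask rework introduces (they do not appear in these hunks):

    /* sketch, not from this patch: arch boot code marks CPUs in the
     * common maps instead of owning private copies of them */
    static void __init sketch_register_cpu(unsigned int cpu)
    {
        set_cpu_possible(cpu, true);    /* may ever be brought up */
        set_cpu_present(cpu, true);     /* physically present now */
    }
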
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
index a4d45fc29b21..e1e97639231b 100644
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ b/arch/sparc/kernel/sparc_ksyms_32.c
@@ -112,10 +112,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 #ifdef CONFIG_SMP
 /* IRQ implementation. */
 EXPORT_SYMBOL(synchronize_irq);
-
-/* CPU online map and active count. */
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
 #endif
 
 EXPORT_SYMBOL(__udelay);
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 141da3759091..9df8f095a8b1 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -763,7 +763,7 @@ void __devinit setup_sparc64_timer(void)
 	sevt = &__get_cpu_var(sparc64_events);
 
 	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
-	sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+	sevt->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(sevt);
 }
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 045772142844..98351c78bc81 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -25,13 +25,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 #include "irq_user.h"
 #include "os.h"
 
-/* CPU online map, set by smp_boot_cpus */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
 /* Per CPU bogomips and other parameters
  * The only piece used here is the ipi pipe, which is set before SMP is
  * started and never changed.
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 47f04f4a3464..b13a87a3ec95 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ static int itimer_next_event(unsigned long delta,
 static struct clock_event_device itimer_clockevent = {
 	.name = "itimer",
 	.rating = 250,
-	.cpumask = CPU_MASK_ALL,
+	.cpumask = cpu_all_mask,
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_mode = itimer_set_mode,
 	.set_next_event = itimer_next_event,
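
clock_event_device.cpumask becomes a const struct cpumask pointer in this series, so static initializers switch from the CPU_MASK_ALL value to the shared cpu_all_mask pointer, and per-CPU timers point at cpumask_of() at registration time. A hedged sketch of both styles seen in these hunks:

    /* per-CPU timer: cpumask_of() returns the address of a static
     * single-bit mask, so nothing is copied */
    evt->cpumask = cpumask_of(smp_processor_id());
    clockevents_register_device(evt);

    /* device not tied to one CPU: share the constant all-CPUs mask */
    global_evt.cpumask = cpu_all_mask;
    clockevents_register_device(&global_evt);
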
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 66834c41c049..a977de23cb4d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -102,9 +102,9 @@ extern void pci_iommu_alloc(void);
 
 #ifdef CONFIG_NUMA
 /* Returns the node based on pci bus */
-static inline int __pcibus_to_node(struct pci_bus *bus)
+static inline int __pcibus_to_node(const struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
+	const struct pci_sysdata *sd = bus->sysdata;
 
 	return sd->node;
 }
@@ -113,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
 {
 	return node_to_cpumask(__pcibus_to_node(bus));
 }
+
+static inline const struct cpumask *
+cpumask_of_pcibus(const struct pci_bus *bus)
+{
+	return cpumask_of_node(__pcibus_to_node(bus));
+}
 #endif
 
 #endif /* _ASM_X86_PCI_H */
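
The new cpumask_of_pcibus() hands back the CPUs local to a PCI bus as a pointer, where __pcibus_to_cpumask() returned a full cpumask_t by value. A hedged usage sketch (the device and irq below are placeholders, not from the patch):

    /* sketch: steer a device's irq toward CPUs near its bus */
    static void sketch_set_local_affinity(struct pci_dev *pdev,
                                          unsigned int irq)
    {
        const struct cpumask *local = cpumask_of_pcibus(pdev->bus);

        irq_set_affinity(irq, local);   /* read-only use, no copy */
    }
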
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index ff386ff50ed7..168203c0c316 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu)
  *
  * Side note: this function creates the returned cpumask on the stack
  * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
+ * cpumask_of_node function should be used whenever possible.
  */
 static inline cpumask_t node_to_cpumask(int node)
 {
 	return node_to_cpumask_map[node];
 }
 
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return &node_to_cpumask_map[node];
+}
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *_node_to_cpumask_ptr(int node);
+extern const cpumask_t *cpumask_of_node(int node);
 extern cpumask_t node_to_cpumask(int node);
 
 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */
@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu)
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &node_to_cpumask_map[node];
 }
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node) \
-	const cpumask_t *v = _node_to_cpumask_ptr(node)
+	const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node) \
-	v = _node_to_cpumask_ptr(node)
+	v = cpumask_of_node(node)
 
 #endif /* CONFIG_X86_64 */
 
@@ -187,7 +196,7 @@ extern int __node_distance(int, int);
 #define cpu_to_node(cpu) 0
 #define early_cpu_to_node(cpu) 0
 
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &cpu_online_map;
 }
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node) \
-	const cpumask_t *v = _node_to_cpumask_ptr(node)
+	const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node) \
-	v = _node_to_cpumask_ptr(node)
+	v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
@@ -214,12 +226,12 @@ static inline int node_to_first_cpu(int node)
 /* Returns the number of the first CPU on Node 'node'. */
 static inline int node_to_first_cpu(int node)
 {
-	node_to_cpumask_ptr(mask, node);
-	return first_cpu(*mask);
+	return cpumask_first(cpumask_of_node(node));
 }
 #endif
 
 extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
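
With cpumask_of_node() in place, the deprecated node_to_cpumask_ptr() macro pair reduces to a plain expression, as the node_to_first_cpu() hunk above shows. A condensed before/after:

    /* before: macro declares a hidden local pointer variable */
    node_to_cpumask_ptr(mask, node);
    return first_cpu(*mask);

    /* after: one expression, no stack copy of the node mask */
    return cpumask_first(cpumask_of_node(node));
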
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b5229affb953..6107b41da9a5 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -142,7 +142,7 @@ static int lapic_next_event(unsigned long delta,
 				   struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(cpumask_t mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -455,10 +455,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(cpumask_t mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+	send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -471,7 +471,7 @@ static void __cpuinit setup_APIC_timer(void)
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
-	levt->cpumask = cpumask_of_cpu(smp_processor_id());
+	levt->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(levt);
 }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 68b5d8681cbb..15cf14e9bf26 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -626,8 +626,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 		cpumask_t *mask = &this_leaf->shared_cpu_map;
 
 		n = type?
-			cpulist_scnprintf(buf, len-2, *mask):
-			cpumask_scnprintf(buf, len-2, *mask);
+			cpulist_scnprintf(buf, len-2, mask) :
+			cpumask_scnprintf(buf, len-2, mask);
 		buf[n++] = '\n';
 		buf[n] = '\0';
 	}
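
cpumask_scnprintf() and cpulist_scnprintf() now take the mask by const pointer rather than by value; every caller in this patch simply drops the dereference (the same one-character change repeats in drivers/base/{cpu,node,topology}.c and drivers/pci below). A hedged sketch of the pointer-based signatures in use:

    /* sketch: format a mask as "0-3,8" (list) or as hex words (mask) */
    static ssize_t sketch_show_mask(const struct cpumask *mask,
                                    char *buf, size_t len, int as_list)
    {
        int n = as_list ?
            cpulist_scnprintf(buf, len - 2, mask) :
            cpumask_scnprintf(buf, len - 2, mask);

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
    }
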
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 845ea097383e..cd759ad90690 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -248,7 +248,7 @@ static void hpet_legacy_clockevent_register(void)
 	 * Start hpet with the boot cpu mask and make it
 	 * global after the IO_APIC has been initialized.
 	 */
-	hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
 	clockevents_register_device(&hpet_clockevent);
 	global_clock_event = &hpet_clockevent;
 	printk(KERN_DEBUG "hpet clockevent registered\n");
@@ -303,7 +303,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
 			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 			hpet_setup_msi_irq(hdev->irq);
 			disable_irq(hdev->irq);
-			irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
+			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
 			enable_irq(hdev->irq);
 		}
 		break;
@@ -451,7 +451,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 		return -1;
 
 	disable_irq(dev->irq);
-	irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
+	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
 	enable_irq(dev->irq);
 
 	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
@@ -502,7 +502,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
 	/* 5 usec minimum reprogramming delta. */
 	evt->min_delta_ns = 5000;
 
-	evt->cpumask = cpumask_of_cpu(hdev->cpu);
+	evt->cpumask = cpumask_of(hdev->cpu);
 	clockevents_register_device(evt);
 }
 
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c1b5e3ece1f2..10f92fb532f3 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -114,7 +114,7 @@ void __init setup_pit_timer(void)
 	 * Start pit with the boot cpu mask and make it global after the
 	 * IO_APIC has been initialized.
 	 */
-	pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+	pit_clockevent.cpumask = cpumask_of(smp_processor_id());
 	pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
 				     pit_clockevent.shift);
 	pit_clockevent.max_delta_ns =
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index f6ea94b74da1..e7745961ed31 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -550,13 +550,14 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ioapic_affinity_irq(unsigned int irq,
+				    const struct cpumask *mask)
 {
 	struct irq_desc *desc;
 
 	desc = irq_to_desc(irq);
 
-	set_ioapic_affinity_irq_desc(desc, mask);
+	set_ioapic_affinity_irq_desc(desc, *mask);
 }
 #endif /* CONFIG_SMP */
 
@@ -2392,7 +2393,7 @@ static void ir_irq_migration(struct work_struct *work)
 			continue;
 		}
 
-		desc->chip->set_affinity(irq, desc->pending_mask);
+		desc->chip->set_affinity(irq, &desc->pending_mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -2412,11 +2413,12 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mas
 
 	migrate_ioapic_irq_desc(desc, mask);
 }
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+				       const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	set_ir_ioapic_affinity_irq_desc(desc, mask);
+	set_ir_ioapic_affinity_irq_desc(desc, *mask);
 }
 #endif
 
@@ -3285,7 +3287,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 }
 
 #ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3293,17 +3295,16 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	unsigned int dest;
 	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, *mask))
 		return;
 
-	set_extra_move_desc(desc, mask);
+	set_extra_move_desc(desc, *mask);
 
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	read_msi_msg_desc(desc, &msg);
@@ -3314,14 +3315,15 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	write_msi_msg_desc(desc, &msg);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void ir_set_msi_irq_affinity(unsigned int irq,
+				    const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3329,20 +3331,19 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	if (get_irte(irq, &irte))
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, *mask))
 		return;
 
-	set_extra_move_desc(desc, mask);
+	set_extra_move_desc(desc, *mask);
 
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	irte.vector = cfg->vector;
@@ -3365,7 +3366,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 		cfg->move_in_progress = 0;
 	}
 
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif
@@ -3556,7 +3557,7 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3564,17 +3565,16 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	unsigned int dest;
 	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, *mask))
 		return;
 
-	set_extra_move_desc(desc, mask);
+	set_extra_move_desc(desc, *mask);
 
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	dmar_msi_read(irq, &msg);
@@ -3585,7 +3585,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -3619,7 +3619,7 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3627,17 +3627,16 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	unsigned int dest;
 	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, *mask))
 		return;
 
-	set_extra_move_desc(desc, mask);
+	set_extra_move_desc(desc, *mask);
 
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	hpet_msi_read(irq, &msg);
@@ -3648,7 +3647,7 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	hpet_msi_write(irq, &msg);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -3703,28 +3702,27 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 	write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
 	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, *mask))
 		return;
 
-	set_extra_move_desc(desc, mask);
+	set_extra_move_desc(desc, *mask);
 
-	cpus_and(tmp, cfg->domain, mask);
+	cpumask_and(&tmp, &cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif
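
Every ->set_affinity hook in io_apic.c follows the same mechanical rewrite: the requested mask arrives as const struct cpumask *, the cpus_and-into-a-temporary/cpus_empty test collapses into a single cpumask_intersects() call, whole-struct assignment to desc->affinity becomes cpumask_copy(), and the pointer-based operators take explicit addresses. A hedged distilled sketch of the recurring pattern:

    static void sketch_set_affinity(unsigned int irq,
                                    const struct cpumask *mask)
    {
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg = desc->chip_data;
        cpumask_t tmp;

        /* was: cpus_and(tmp, mask, cpu_online_map); if (cpus_empty(tmp)) */
        if (!cpumask_intersects(mask, cpu_online_mask))
            return;

        /* was: cpus_and(tmp, cfg->domain, mask); operands are pointers now */
        cpumask_and(&tmp, &cfg->domain, mask);

        /* was: desc->affinity = mask; struct assignment always copied a
         * full NR_CPUS-bit cpumask_t, cpumask_copy() can stop at nr_cpu_ids */
        cpumask_copy(&desc->affinity, mask);
    }
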
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 119fc9c8ff7f..9cf9cbbf7a02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -253,7 +253,7 @@ void fixup_irqs(cpumask_t map)
 			mask = map;
 		}
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, &mask);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index a174a217eb1a..54c69d47a771 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -115,7 +115,7 @@ void fixup_irqs(cpumask_t map)
 			desc->chip->mask(irq);
 
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, &mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 3b599518c322..c12314c9e86f 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = {
 	.set_mode = mfgpt_set_mode,
 	.set_next_event = mfgpt_next_event,
 	.rating = 250,
-	.cpumask = CPU_MASK_ALL,
+	.cpumask = cpu_all_mask,
 	.shift = 32
 };
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ae0c0d3bb770..8e8b1193add5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -282,7 +282,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 	else
 		cpu_clear(cpu, *mask);
 
-	cpulist_scnprintf(buf, sizeof(buf), *mask);
+	cpulist_scnprintf(buf, sizeof(buf), mask);
 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
 		enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
 }
@@ -334,25 +334,25 @@ static const cpumask_t cpu_mask_none;
 /*
  * Returns a pointer to the bitmask of CPUs on Node 'node'.
  */
-const cpumask_t *_node_to_cpumask_ptr(int node)
+const cpumask_t *cpumask_of_node(int node)
 {
 	if (node_to_cpumask_map == NULL) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
 			node);
 		dump_stack();
 		return (const cpumask_t *)&cpu_online_map;
 	}
 	if (node >= nr_node_ids) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
+			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
 			node, nr_node_ids);
 		dump_stack();
 		return &cpu_mask_none;
 	}
 	return &node_to_cpumask_map[node];
 }
-EXPORT_SYMBOL(_node_to_cpumask_ptr);
+EXPORT_SYMBOL(cpumask_of_node);
 
 /*
  * Returns a bitmask of CPUs on Node 'node'.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f8500c969442..c5392058cd07 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,14 +102,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-/* bitmap of online cpus */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -502,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 }
 
 /* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
@@ -510,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
+		return &per_cpu(cpu_core_map, cpu);
 	else
-		return c->llc_shared_map;
+		return &c->llc_shared_map;
+}
+
+cpumask_t cpu_coregroup_map(int cpu)
+{
+	return *cpu_coregroup_mask(cpu);
 }
 
 static void impress_friends(void)
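
cpu_coregroup_mask() is the pointer-returning replacement; the old by-value cpu_coregroup_map() survives only as a compatibility wrapper that dereferences it. Callers that just need one CPU of the group can now avoid the copy entirely, as the block/blk.h hunk below does; a hedged sketch:

    /* sketch: pick the first CPU sharing a core/cache group with 'cpu' */
    static int sketch_group_leader(int cpu)
    {
        /* old: cpumask_t mask = cpu_coregroup_map(cpu); first_cpu(mask);
         * which copied up to NR_CPUS bits onto the stack */
        return cpumask_first(cpu_coregroup_mask(cpu));
    }
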
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 254ee07f8635..c4c1f9e09402 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void)
 	/* Upper bound is clockevent's use of ulong for cycle deltas. */
 	evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt);
 	evt->min_delta_ns = clockevent_delta2ns(1, evt);
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 
 	printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n",
 	       evt->name, evt->mult, evt->shift);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 50a779264bb1..a7ed208f81e3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -738,7 +738,7 @@ static void lguest_time_init(void)
 
 	/* We can't set cpumask in the initializer: damn C limitations!  Set it
 	 * here and register our timer device. */
-	lguest_clockevent.cpumask = cpumask_of_cpu(0);
+	lguest_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&lguest_clockevent);
 
 	/* Finally, we unblock the timer interrupt. */
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 52145007bd7e..9c990185e9f2 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -63,11 +63,6 @@ static int voyager_extended_cpus = 1;
 /* Used for the invalidate map that's also checked in the spinlock */
 static volatile unsigned long smp_invalidate_needed;
 
-/* Bitmask of currently online CPUs - used by setup.c for
-   /proc/cpuinfo, visible externally but still physical */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
@@ -218,8 +213,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c9f7cda48ed7..65d75a6be0ba 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -437,7 +437,7 @@ void xen_setup_timer(int cpu)
 	evt = &per_cpu(xen_clock_events, cpu);
 	memcpy(evt, xen_clockevent, sizeof(*evt));
 
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 	evt->irq = irq;
 
 	setup_runstate_info(cpu);
diff --git a/block/blk.h b/block/blk.h
index d2e49af90db5..6e1ed40534e9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -99,8 +99,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 static inline int blk_cpu_to_group(int cpu)
 {
 #ifdef CONFIG_SCHED_MC
-	cpumask_t mask = cpu_coregroup_map(cpu);
-	return first_cpu(mask);
+	const struct cpumask *mask = cpu_coregroup_mask(cpu);
+	return cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
 	return first_cpu(per_cpu(cpu_sibling_map, cpu));
 #else
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 64f5d54f7edc..2aef96f20b30 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -109,7 +109,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
  */
 static ssize_t print_cpus_map(char *buf, cpumask_t *map)
 {
-	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *map);
+	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
 
 	buf[n++] = '\n';
 	buf[n] = '\0';
@@ -128,10 +128,54 @@ print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
+/*
+ * Print values for NR_CPUS and offlined cpus
+ */
+static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf)
+{
+	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", CONFIG_NR_CPUS - 1);
+	return n;
+}
+static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+
+/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
+unsigned int total_cpus;
+
+static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
+{
+	int n = 0, len = PAGE_SIZE-2;
+	cpumask_var_t offline;
+
+	/* display offline cpus < nr_cpu_ids */
+	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_complement(offline, cpu_online_mask);
+	n = cpulist_scnprintf(buf, len, offline);
+	free_cpumask_var(offline);
+
+	/* display offline cpus >= nr_cpu_ids */
+	if (total_cpus && nr_cpu_ids < total_cpus) {
+		if (n && n < len)
+			buf[n++] = ',';
+
+		if (nr_cpu_ids == total_cpus-1)
+			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
+		else
+			n += snprintf(&buf[n], len - n, "%d-%d",
+				      nr_cpu_ids, total_cpus-1);
+	}
+
+	n += snprintf(&buf[n], len - n, "\n");
+	return n;
+}
+static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
+
 static struct sysdev_class_attribute *cpu_state_attr[] = {
 	&attr_online_map,
 	&attr_possible_map,
 	&attr_present_map,
+	&attr_kernel_max,
+	&attr_offline,
 };
 
 static int cpu_states_init(void)
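
print_cpus_offline() above demonstrates the cpumask_var_t idiom this series is building toward: with CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated rather than a huge on-stack array, and the alloc/use/free discipline is mandatory. A hedged minimal sketch of the pattern:

    /* sketch of the cpumask_var_t discipline used in print_cpus_offline() */
    static int sketch_count_offline(void)
    {
        cpumask_var_t offline;
        int n;

        /* with CONFIG_CPUMASK_OFFSTACK this kmallocs the bitmap;
         * otherwise it is a plain array and the call cannot fail */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
            return -ENOMEM;

        cpumask_complement(offline, cpu_online_mask);
        n = cpumask_weight(offline);    /* set bits = offline CPUs */

        free_cpumask_var(offline);      /* always pair with alloc */
        return n;
    }
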
diff --git a/drivers/base/node.c b/drivers/base/node.c
index f5207090885a..91636cd8b6c9 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -30,8 +30,8 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
 	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
 
 	len = type?
-		cpulist_scnprintf(buf, PAGE_SIZE-2, *mask):
-		cpumask_scnprintf(buf, PAGE_SIZE-2, *mask);
+		cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
+		cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
 	return len;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 199cd97e32e6..a8bc1cbcfa7c 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -49,8 +49,8 @@ static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
 
 	if (len > 1) {
 		n = type?
-			cpulist_scnprintf(buf, len-2, *mask):
-			cpumask_scnprintf(buf, len-2, *mask);
+			cpulist_scnprintf(buf, len-2, mask) :
+			cpumask_scnprintf(buf, len-2, mask);
 		buf[n++] = '\n';
 		buf[n] = '\0';
 	}
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index f450588e5858..254f1064d973 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -154,7 +154,6 @@ static struct tc_clkevt_device clkevt = {
 		.shift = 32,
 		/* Should be lower than at91rm9200's system timer */
 		.rating = 125,
-		.cpumask = CPU_MASK_CPU0,
 		.set_next_event = tc_next_event,
 		.set_mode = tc_mode,
 	},
@@ -195,6 +194,7 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 	clkevt.clkevt.max_delta_ns
 		= clockevent_delta2ns(0xffff, &clkevt.clkevt);
 	clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
+	clkevt.clkevt.cpumask = cpumask_of(0);
 
 	setup_irq(irq, &tc_irqaction);
 
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 757035ea246f..3128a5090dbd 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -659,12 +659,12 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 
 	WARN_ON_ONCE(!in_interrupt());
 	if (ehca_debug_level >= 3)
-		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+		ehca_dmp(cpu_online_mask, cpumask_size(), "");
 
 	spin_lock_irqsave(&pool->last_cpu_lock, flags);
-	cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+	cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
 	if (cpu >= nr_cpu_ids)
-		cpu = first_cpu(cpu_online_map);
+		cpu = cpumask_first(cpu_online_mask);
 	pool->last_cpu = cpu;
 	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
 
@@ -855,7 +855,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
 	case CPU_UP_CANCELED_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+		kthread_bind(cct->task, cpumask_any(cpu_online_mask));
 		destroy_comp_task(pool, cpu);
 		break;
 	case CPU_ONLINE:
@@ -902,7 +902,7 @@ int ehca_create_comp_pool(void)
 		return -ENOMEM;
 
 	spin_lock_init(&pool->last_cpu_lock);
-	pool->last_cpu = any_online_cpu(cpu_online_map);
+	pool->last_cpu = cpumask_any(cpu_online_mask);
 
 	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
 	if (pool->cpu_comp_tasks == NULL) {
@@ -934,10 +934,9 @@ void ehca_destroy_comp_pool(void)
 
 	unregister_hotcpu_notifier(&comp_pool_callback_nb);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			destroy_comp_task(pool, i);
-	}
+	for_each_online_cpu(i)
+		destroy_comp_task(pool, i);
+
 	free_percpu(pool->cpu_comp_tasks);
 	kfree(pool);
 }
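
The ehca hunks collect several iteration conversions in one place: cpumask_next()/cpumask_first() replace the value-based next_cpu_nr()/first_cpu() for round-robin selection, cpumask_any() replaces any_online_cpu(), and an open-coded 0..NR_CPUS loop guarded by cpu_online() becomes for_each_online_cpu(), which only walks bits up to nr_cpu_ids. A hedged sketch of the round-robin idiom:

    /* sketch: pick the next online CPU after 'last', wrapping around */
    static int sketch_next_online_cpu(int last)
    {
        int cpu = cpumask_next(last, cpu_online_mask);

        if (cpu >= nr_cpu_ids)          /* ran off the end: wrap */
            cpu = cpumask_first(cpu_online_mask);
        return cpu;
    }
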
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 239d4e8068ac..23173982b32c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1679,7 +1679,7 @@ static int find_best_unit(struct file *fp,
 	 * InfiniPath chip to that processor (we assume reasonable connectivity,
 	 * for now).  This code assumes that if affinity has been set
 	 * before this point, that at most one cpu is set; for now this
-	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
+	 * is reasonable.  I check for both cpumask_empty() and cpumask_full(),
 	 * in case some kernel variant sets none of the bits when no
 	 * affinity is set.  2.6.11 and 12 kernels have all present
 	 * cpus set.  Some day we'll have to fix it up further to handle
@@ -1688,11 +1688,11 @@ static int find_best_unit(struct file *fp,
 	 * information.  There may be some issues with dual core numbering
 	 * as well.  This needs more work prior to release.
 	 */
-	if (!cpus_empty(current->cpus_allowed) &&
-	    !cpus_full(current->cpus_allowed)) {
+	if (!cpumask_empty(&current->cpus_allowed) &&
+	    !cpumask_full(&current->cpus_allowed)) {
 		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
 		for (i = 0; i < ncpus; i++)
-			if (cpu_isset(i, current->cpus_allowed)) {
+			if (cpumask_test_cpu(i, &current->cpus_allowed)) {
 				ipath_cdbg(PROC, "%s[%u] affinity set for "
 					   "cpu %d/%d\n", current->comm,
 					   current->pid, i, ncpus);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 7beffcab2745..9dedbbd218c3 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -704,16 +704,17 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static void iosapic_set_affinity_irq(unsigned int irq,
+				     const struct cpumask *dest)
 {
 	struct vector_info *vi = iosapic_get_vector(irq);
 	u32 d0, d1, dummy_d0;
 	unsigned long flags;
 
-	if (cpu_check_affinity(irq, &dest))
+	if (cpu_check_affinity(irq, dest))
 		return;
 
-	vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest));
+	vi->txn_addr = txn_affinity_addr(irq, cpumask_first(dest));
 
 	spin_lock_irqsave(&iosapic_lock, flags);
 	/* d1 contains the destination CPU, so only want to set that
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 5d72866897a8..c88485860a0a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -74,7 +74,7 @@ static ssize_t local_cpus_show(struct device *dev,
 	int len;
 
 	mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
-	len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
+	len = cpumask_scnprintf(buf, PAGE_SIZE-2, &mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
 	return len;
@@ -88,7 +88,7 @@ static ssize_t local_cpulist_show(struct device *dev,
 	int len;
 
 	mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
-	len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
+	len = cpulist_scnprintf(buf, PAGE_SIZE-2, &mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
 	return len;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 003a9b3c293f..5b3f5937ecf5 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -55,8 +55,8 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
 
 	cpumask = pcibus_to_cpumask(to_pci_bus(dev));
 	ret = type?
-		cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask):
-		cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
+		cpulist_scnprintf(buf, PAGE_SIZE-2, &cpumask) :
+		cpumask_scnprintf(buf, PAGE_SIZE-2, &cpumask);
 	buf[ret++] = '\n';
 	buf[ret] = '\0';
 	return ret;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 46625cd38743..add640ff5c6c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -588,7 +588,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	spin_unlock(&irq_mapping_update_lock);
 
 	/* new event channels are always bound to cpu 0 */
-	irq_set_affinity(irq, cpumask_of_cpu(0));
+	irq_set_affinity(irq, cpumask_of(0));
 
 	/* Unmask the event channel. */
 	enable_irq(irq);
@@ -617,9 +617,9 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 }
 
 
-static void set_affinity_irq(unsigned irq, cpumask_t dest)
+static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
 {
-	unsigned tcpu = first_cpu(dest);
+	unsigned tcpu = cpumask_first(dest);
 	rebind_irq_to_cpu(irq, tcpu);
 }
 
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 16c211558c22..c99358a52176 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -462,7 +462,8 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
 	return -1;
 }
 
-int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits)
+int seq_bitmap(struct seq_file *m, const unsigned long *bits,
+	       unsigned int nr_bits)
 {
 	if (m->count < m->size) {
 		int len = bitmap_scnprintf(m->buf + m->count,
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 54bbf6e04ee8..0e9e2bc0ee96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -40,6 +40,9 @@
 #ifndef node_to_cpumask
 #define node_to_cpumask(node) ((void)node, cpu_online_map)
 #endif
+#ifndef cpumask_of_node
+#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+#endif
 #ifndef node_to_first_cpu
 #define node_to_first_cpu(node) ((void)(node),0)
 #endif
@@ -54,9 +57,18 @@
 				)
 #endif
 
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ?		\
+				cpu_all_mask :				\
+				cpumask_of_node(pcibus_to_node(bus)))
+#endif
+
 #endif /* CONFIG_NUMA */
 
-/* returns pointer to cpumask for specified node */
+/*
+ * returns pointer to cpumask for specified node
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #ifndef node_to_cpumask_ptr
 
 #define node_to_cpumask_ptr(v, node) \
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index c5dd66916692..b96a6d2ffbc3 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -63,8 +63,6 @@ extern volatile int cpu_2_physid[NR_CPUS];
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_present_map;
 
 static __inline__ int hard_smp_processor_id(void)
 {
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index a08c33a26ca9..2878811c6134 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -137,9 +137,12 @@ extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
 	(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL		\
 )
 
+#define small_const_nbits(nbits) \
+	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+
 static inline void bitmap_zero(unsigned long *dst, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = 0UL;
 	else {
 		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
@@ -150,7 +153,7 @@ static inline void bitmap_zero(unsigned long *dst, int nbits)
 static inline void bitmap_fill(unsigned long *dst, int nbits)
 {
 	size_t nlongs = BITS_TO_LONGS(nbits);
-	if (nlongs > 1) {
+	if (!small_const_nbits(nbits)) {
 		int len = (nlongs - 1) * sizeof(unsigned long);
 		memset(dst, 0xff, len);
 	}
@@ -160,7 +163,7 @@ static inline void bitmap_fill(unsigned long *dst, int nbits)
 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 			int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src;
 	else {
 		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
@@ -171,7 +174,7 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 & *src2;
 	else
 		__bitmap_and(dst, src1, src2, nbits);
@@ -180,7 +183,7 @@ static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 | *src2;
 	else
 		__bitmap_or(dst, src1, src2, nbits);
@@ -189,7 +192,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 ^ *src2;
 	else
 		__bitmap_xor(dst, src1, src2, nbits);
@@ -198,7 +201,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 & ~(*src2);
 	else
 		__bitmap_andnot(dst, src1, src2, nbits);
@@ -207,7 +210,7 @@ static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
 			int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
 	else
 		__bitmap_complement(dst, src, nbits);
@@ -216,7 +219,7 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
 static inline int bitmap_equal(const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_equal(src1, src2, nbits);
@@ -225,7 +228,7 @@ static inline int bitmap_equal(const unsigned long *src1,
 static inline int bitmap_intersects(const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
 	else
 		return __bitmap_intersects(src1, src2, nbits);
@@ -234,7 +237,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
 static inline int bitmap_subset(const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_subset(src1, src2, nbits);
@@ -242,7 +245,7 @@ static inline int bitmap_subset(const unsigned long *src1,
 
 static inline int bitmap_empty(const unsigned long *src, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_empty(src, nbits);
@@ -250,7 +253,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
 
 static inline int bitmap_full(const unsigned long *src, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_full(src, nbits);
@@ -258,7 +261,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
 
 static inline int bitmap_weight(const unsigned long *src, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
 	return __bitmap_weight(src, nbits);
 }
@@ -266,7 +269,7 @@ static inline int bitmap_weight(const unsigned long *src, int nbits)
 static inline void bitmap_shift_right(unsigned long *dst,
 			const unsigned long *src, int n, int nbits)
268{ 271{
269 if (nbits <= BITS_PER_LONG) 272 if (small_const_nbits(nbits))
270 *dst = *src >> n; 273 *dst = *src >> n;
271 else 274 else
272 __bitmap_shift_right(dst, src, n, nbits); 275 __bitmap_shift_right(dst, src, n, nbits);
@@ -275,7 +278,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
275static inline void bitmap_shift_left(unsigned long *dst, 278static inline void bitmap_shift_left(unsigned long *dst,
276 const unsigned long *src, int n, int nbits) 279 const unsigned long *src, int n, int nbits)
277{ 280{
278 if (nbits <= BITS_PER_LONG) 281 if (small_const_nbits(nbits))
279 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); 282 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
280 else 283 else
281 __bitmap_shift_left(dst, src, n, nbits); 284 __bitmap_shift_left(dst, src, n, nbits);
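The bitmap.h hunks above all make the same substitution: the inline single-word fast path is taken only when the compiler can prove it applies. small_const_nbits() itself sits outside this hunk's context; a sketch of the presumed definition:

    #define small_const_nbits(nbits) \
        (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)

With the old "nbits <= BITS_PER_LONG" test, a non-constant nbits forced both branches to be expanded at every inline call site; requiring a compile-time constant lets the fast path fold to a single word operation and everything else become a plain call to the out-of-line __bitmap_*() versions. This matters once nr_cpumask_bits becomes a runtime value (see the CONFIG_CPUMASK_OFFSTACK hunk below).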
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index ed3a5d473e52..cea153697ec7 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -82,13 +82,13 @@ struct clock_event_device {
82 int shift; 82 int shift;
83 int rating; 83 int rating;
84 int irq; 84 int irq;
85 cpumask_t cpumask; 85 const struct cpumask *cpumask;
86 int (*set_next_event)(unsigned long evt, 86 int (*set_next_event)(unsigned long evt,
87 struct clock_event_device *); 87 struct clock_event_device *);
88 void (*set_mode)(enum clock_event_mode mode, 88 void (*set_mode)(enum clock_event_mode mode,
89 struct clock_event_device *); 89 struct clock_event_device *);
90 void (*event_handler)(struct clock_event_device *); 90 void (*event_handler)(struct clock_event_device *);
91 void (*broadcast)(cpumask_t mask); 91 void (*broadcast)(const struct cpumask *mask);
92 struct list_head list; 92 struct list_head list;
93 enum clock_event_mode mode; 93 enum clock_event_mode mode;
94 ktime_t next_event; 94 ktime_t next_event;
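With cpumask now a pointer field, a clockevent driver no longer copies an NR_CPUS-bit mask into each device; it points at a shared constant mask instead. A sketch of the registration pattern this implies (local_timer_setup() is illustrative, not part of this hunk):

    static void local_timer_setup(struct clock_event_device *evt)
    {
        evt->cpumask = cpumask_of(smp_processor_id()); /* shared, const */
        clockevents_register_device(evt);
    }

The BUG_ON(!dev->cpumask) added to clockevents_register_device() further down catches drivers that miss this assignment.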
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 21e1dd43e52a..7c178a6baae3 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -268,6 +268,25 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
268 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 268 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
269} 269}
270 270
271/**
272 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
273 * @bitmap: the bitmap
274 *
275 * There are a few places where cpumask_var_t isn't appropriate and
276 * static cpumasks must be used (eg. very early boot), yet we don't
277 * expose the definition of 'struct cpumask'.
278 *
279 * This does the conversion, and can be used as a constant initializer.
280 */
281#define to_cpumask(bitmap) \
282 ((struct cpumask *)(1 ? (bitmap) \
283 : (void *)sizeof(__check_is_bitmap(bitmap))))
284
285static inline int __check_is_bitmap(const unsigned long *bitmap)
286{
287 return 1;
288}
289
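The "1 ? ... : ..." form in to_cpumask() deserves a note: the second arm is never evaluated, but sizeof(__check_is_bitmap(bitmap)) forces the compiler to type-check the call, so the macro accepts only something convertible to const unsigned long * while remaining a constant expression usable in initializers. A minimal sketch (my_bits is an illustrative name):

    static DECLARE_BITMAP(my_bits, NR_CPUS);

    struct cpumask *ok = to_cpumask(my_bits);  /* compiles */
    /* struct cpumask *bad = to_cpumask(&ok);     would fail to compile */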
271/* 290/*
272 * Special-case data structure for "single bit set only" constant CPU masks. 291 * Special-case data structure for "single bit set only" constant CPU masks.
273 * 292 *
@@ -278,11 +297,11 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
278extern const unsigned long 297extern const unsigned long
279 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 298 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
280 299
281static inline const cpumask_t *get_cpu_mask(unsigned int cpu) 300static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
282{ 301{
283 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 302 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
284 p -= cpu / BITS_PER_LONG; 303 p -= cpu / BITS_PER_LONG;
285 return (const cpumask_t *)p; 304 return to_cpumask(p);
286} 305}
287 306
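get_cpu_mask() leans on the layout of cpu_bit_bitmap: row k+1 of the array has exactly bit k of its first word set, and the neighbouring rows supply the zero words around it. Stepping the pointer back by cpu / BITS_PER_LONG words therefore yields a read-only mask whose only set bit is 'cpu', without storing NR_CPUS separate masks. A worked example, assuming 64-bit longs and NR_CPUS = 128:

    /* cpu = 70: row = 1 + 70 % 64 = 7, whose word 0 is 1UL << 6.
     * p -= 70 / 64 steps back one word, so word 1 of the result is
     * 1UL << 6 -- bit 70 -- and word 0 (the tail of row 6) is 0. */
    const struct cpumask *m = get_cpu_mask(70);
    /* cpumask_test_cpu(70, m) == 1, all other bits clear */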
288/* 307/*
@@ -339,36 +358,6 @@ extern cpumask_t cpu_mask_all;
339#endif 358#endif
340#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) 359#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
341 360
342#define cpumask_scnprintf(buf, len, src) \
343 __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
344static inline int __cpumask_scnprintf(char *buf, int len,
345 const cpumask_t *srcp, int nbits)
346{
347 return bitmap_scnprintf(buf, len, srcp->bits, nbits);
348}
349
350#define cpumask_parse_user(ubuf, ulen, dst) \
351 __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
352static inline int __cpumask_parse_user(const char __user *buf, int len,
353 cpumask_t *dstp, int nbits)
354{
355 return bitmap_parse_user(buf, len, dstp->bits, nbits);
356}
357
358#define cpulist_scnprintf(buf, len, src) \
359 __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
360static inline int __cpulist_scnprintf(char *buf, int len,
361 const cpumask_t *srcp, int nbits)
362{
363 return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
364}
365
366#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
367static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
368{
369 return bitmap_parselist(buf, dstp->bits, nbits);
370}
371
372#define cpu_remap(oldbit, old, new) \ 361#define cpu_remap(oldbit, old, new) \
373 __cpu_remap((oldbit), &(old), &(new), NR_CPUS) 362 __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
374static inline int __cpu_remap(int oldbit, 363static inline int __cpu_remap(int oldbit,
@@ -446,74 +435,63 @@ int __next_cpu_nr(int n, const cpumask_t *srcp);
446 435
447/* 436/*
448 * The following particular system cpumasks and operations manage 437 * The following particular system cpumasks and operations manage
449 * possible, present, active and online cpus. Each of them is a fixed size 438 * possible, present, active and online cpus.
450 * bitmap of size NR_CPUS. 439 *
440 * cpu_possible_mask - has bit 'cpu' set iff cpu is populatable
441 * cpu_present_mask - has bit 'cpu' set iff cpu is populated
442 * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
443 * cpu_active_mask - has bit 'cpu' set iff cpu available to migration
451 * 444 *
452 * #ifdef CONFIG_HOTPLUG_CPU 445 * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
453 * cpu_possible_map - has bit 'cpu' set iff cpu is populatable
454 * cpu_present_map - has bit 'cpu' set iff cpu is populated
455 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
456 * cpu_active_map - has bit 'cpu' set iff cpu available to migration
457 * #else
458 * cpu_possible_map - has bit 'cpu' set iff cpu is populated
459 * cpu_present_map - copy of cpu_possible_map
460 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
461 * #endif
462 * 446 *
463 * In either case, NR_CPUS is fixed at compile time, as the static 447 * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
464 * size of these bitmaps. The cpu_possible_map is fixed at boot 448 * that it is possible might ever be plugged in at any time during the
465 * time, as the set of CPU id's that it is possible might ever 449 * life of that system boot. The cpu_present_mask is dynamic(*),
466 * be plugged in at any time during the life of that system boot. 450 * representing which CPUs are currently plugged in. And
467 * The cpu_present_map is dynamic(*), representing which CPUs 451 * cpu_online_mask is the dynamic subset of cpu_present_mask,
468 * are currently plugged in. And cpu_online_map is the dynamic 452 * indicating those CPUs available for scheduling.
469 * subset of cpu_present_map, indicating those CPUs available
470 * for scheduling.
471 * 453 *
472 * If HOTPLUG is enabled, then cpu_possible_map is forced to have 454 * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
473 * all NR_CPUS bits set, otherwise it is just the set of CPUs that 455 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
474 * ACPI reports present at boot. 456 * ACPI reports present at boot.
475 * 457 *
476 * If HOTPLUG is enabled, then cpu_present_map varies dynamically, 458 * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
477 * depending on what ACPI reports as currently plugged in, otherwise 459 * depending on what ACPI reports as currently plugged in, otherwise
478 * cpu_present_map is just a copy of cpu_possible_map. 460 * cpu_present_mask is just a copy of cpu_possible_mask.
479 * 461 *
480 * (*) Well, cpu_present_map is dynamic in the hotplug case. If not 462 * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
481 * hotplug, it's a copy of cpu_possible_map, hence fixed at boot. 463 * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
482 * 464 *
483 * Subtleties: 465 * Subtleties:
484 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode 466 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
485 * assumption that their single CPU is online. The UP 467 * assumption that their single CPU is online. The UP
486 * cpu_{online,possible,present}_maps are placebos. Changing them 468 * cpu_{online,possible,present}_masks are placebos. Changing them
487 * will have no useful effect on the following num_*_cpus() 469 * will have no useful effect on the following num_*_cpus()
488 * and cpu_*() macros in the UP case. This ugliness is a UP 470 * and cpu_*() macros in the UP case. This ugliness is a UP
489 * optimization - don't waste any instructions or memory references 471 * optimization - don't waste any instructions or memory references
490 * asking if you're online or how many CPUs there are if there is 472 * asking if you're online or how many CPUs there are if there is
491 * only one CPU. 473 * only one CPU.
492 * 2) Most SMP arch's #define some of these maps to be some
493 * other map specific to that arch. Therefore, the following
494 * must be #define macros, not inlines. To see why, examine
495 * the assembly code produced by the following. Note that
496 * set1() writes phys_x_map, but set2() writes x_map:
497 * int x_map, phys_x_map;
498 * #define set1(a) x_map = a
499 * inline void set2(int a) { x_map = a; }
500 * #define x_map phys_x_map
501 * main(){ set1(3); set2(5); }
502 */ 474 */
503 475
504extern cpumask_t cpu_possible_map; 476extern const struct cpumask *const cpu_possible_mask;
505extern cpumask_t cpu_online_map; 477extern const struct cpumask *const cpu_online_mask;
506extern cpumask_t cpu_present_map; 478extern const struct cpumask *const cpu_present_mask;
507extern cpumask_t cpu_active_map; 479extern const struct cpumask *const cpu_active_mask;
480
481/* These strip const, as traditionally they weren't const. */
482#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
483#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
484#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
485#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
508 486
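The four maps thus become plain compatibility views of the new const pointers; both spellings address the same storage, now defined in kernel/cpu.c later in this patch. During the transition the two styles coexist, roughly:

    bool a = cpumask_test_cpu(cpu, cpu_online_mask); /* new: const pointer */
    bool b = cpu_isset(cpu, cpu_online_map);         /* legacy: same bits,
                                                        const cast away */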
509#if NR_CPUS > 1 487#if NR_CPUS > 1
510#define num_online_cpus() cpus_weight_nr(cpu_online_map) 488#define num_online_cpus() cpumask_weight(cpu_online_mask)
511#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) 489#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
512#define num_present_cpus() cpus_weight_nr(cpu_present_map) 490#define num_present_cpus() cpumask_weight(cpu_present_mask)
513#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) 491#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
514#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) 492#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
515#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) 493#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
516#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) 494#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
517#else 495#else
518#define num_online_cpus() 1 496#define num_online_cpus() 1
519#define num_possible_cpus() 1 497#define num_possible_cpus() 1
@@ -526,10 +504,6 @@ extern cpumask_t cpu_active_map;
526 504
527#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 505#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
528 506
529#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
530#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
531#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
532
533/* These are the new versions of the cpumask operators: passed by pointer. 507/* These are the new versions of the cpumask operators: passed by pointer.
534 * The older versions will be implemented in terms of these, then deleted. */ 508 * The older versions will be implemented in terms of these, then deleted. */
535#define cpumask_bits(maskp) ((maskp)->bits) 509#define cpumask_bits(maskp) ((maskp)->bits)
@@ -540,9 +514,6 @@ extern cpumask_t cpu_active_map;
540 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 514 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
541} 515}
542 516
543/* This produces more efficient code. */
544#define nr_cpumask_bits NR_CPUS
545
546#else /* NR_CPUS > BITS_PER_LONG */ 517#else /* NR_CPUS > BITS_PER_LONG */
547 518
548#define CPU_BITS_ALL \ 519#define CPU_BITS_ALL \
@@ -550,9 +521,15 @@ extern cpumask_t cpu_active_map;
550 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 521 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
551 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 522 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
552} 523}
524#endif /* NR_CPUS > BITS_PER_LONG */
553 525
526#ifdef CONFIG_CPUMASK_OFFSTACK
527/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
528 * not all bits may be allocated. */
554#define nr_cpumask_bits nr_cpu_ids 529#define nr_cpumask_bits nr_cpu_ids
555#endif /* NR_CPUS > BITS_PER_LONG */ 530#else
531#define nr_cpumask_bits NR_CPUS
532#endif
556 533
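The practical effect of keying nr_cpumask_bits off CONFIG_CPUMASK_OFFSTACK rather than NR_CPUS > BITS_PER_LONG: a distro kernel built with a huge NR_CPUS but booted on a small machine only walks nr_cpu_ids bits. Illustratively:

    static int count_online(void)
    {
        /* NR_CPUS = 4096 kernel on an 8-CPU box, CPUMASK_OFFSTACK=y:
         * this scans nr_cpu_ids (8) bits' worth of words, not 4096. */
        return cpumask_weight(cpu_online_mask);
    }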
557/* verify cpu argument to cpumask_* operators */ 534/* verify cpu argument to cpumask_* operators */
558static inline unsigned int cpumask_check(unsigned int cpu) 535static inline unsigned int cpumask_check(unsigned int cpu)
@@ -714,7 +691,7 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
714 * No static inline type checking - see Subtlety (1) above. 691 * No static inline type checking - see Subtlety (1) above.
715 */ 692 */
716#define cpumask_test_cpu(cpu, cpumask) \ 693#define cpumask_test_cpu(cpu, cpumask) \
717 test_bit(cpumask_check(cpu), (cpumask)->bits) 694 test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
718 695
719/** 696/**
720 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask 697 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -946,22 +923,61 @@ static inline void cpumask_copy(struct cpumask *dstp,
946#define cpumask_of(cpu) (get_cpu_mask(cpu)) 923#define cpumask_of(cpu) (get_cpu_mask(cpu))
947 924
948/** 925/**
949 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 926 * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
950 * @bitmap: the bitmap 927 * @buf: the buffer to sprintf into
928 * @len: the length of the buffer
929 * @srcp: the cpumask to print
951 * 930 *
952 * There are a few places where cpumask_var_t isn't appropriate and 931 * If len is zero, returns zero. Otherwise returns the length of the
953 * static cpumasks must be used (eg. very early boot), yet we don't 932 * (nul-terminated) @buf string.
954 * expose the definition of 'struct cpumask'. 933 */
934static inline int cpumask_scnprintf(char *buf, int len,
935 const struct cpumask *srcp)
936{
937 return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
938}
939
940/**
941 * cpumask_parse_user - extract a cpumask from a user string
942 * @buf: the buffer to extract from
943 * @len: the length of the buffer
944 * @dstp: the cpumask to set.
955 * 945 *
956 * This does the conversion, and can be used as a constant initializer. 946 * Returns -errno, or 0 for success.
957 */ 947 */
958#define to_cpumask(bitmap) \ 948static inline int cpumask_parse_user(const char __user *buf, int len,
959 ((struct cpumask *)(1 ? (bitmap) \ 949 struct cpumask *dstp)
960 : (void *)sizeof(__check_is_bitmap(bitmap)))) 950{
951 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
952}
961 953
962static inline int __check_is_bitmap(const unsigned long *bitmap) 954/**
955 * cpulist_scnprintf - print a cpumask into a string as comma-separated list
956 * @buf: the buffer to sprintf into
957 * @len: the length of the buffer
958 * @srcp: the cpumask to print
959 *
960 * If len is zero, returns zero. Otherwise returns the length of the
961 * (nul-terminated) @buf string.
962 */
963static inline int cpulist_scnprintf(char *buf, int len,
964 const struct cpumask *srcp)
963{ 965{
964 return 1; 966 return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
967 nr_cpumask_bits);
968}
969
970/**
971 * cpulist_parse_user - extract a cpumask from a user string of ranges
972 * @buf: the buffer to extract from
973 * @len: the length of the buffer
974 * @dstp: the cpumask to set.
975 *
976 * Returns -errno, or 0 for success.
977 */
978static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
979{
980 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
965} 981}
966 982
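Taken together, the relocated helpers give the usual parse/print round trip on struct cpumask pointers rather than by-value cpumask_t. A sketch of typical usage (the buffer contents are illustrative):

    char buf[128];
    cpumask_var_t mask;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;
    if (cpulist_parse("0-3,8", mask) == 0)          /* sets CPUs 0,1,2,3,8 */
        cpulist_scnprintf(buf, sizeof(buf), mask);  /* yields "0-3,8" again */
    free_cpumask_var(mask);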
967/** 983/**
@@ -995,6 +1011,7 @@ static inline size_t cpumask_size(void)
995#ifdef CONFIG_CPUMASK_OFFSTACK 1011#ifdef CONFIG_CPUMASK_OFFSTACK
996typedef struct cpumask *cpumask_var_t; 1012typedef struct cpumask *cpumask_var_t;
997 1013
1014bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
998bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 1015bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
999void alloc_bootmem_cpumask_var(cpumask_var_t *mask); 1016void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
1000void free_cpumask_var(cpumask_var_t mask); 1017void free_cpumask_var(cpumask_var_t mask);
@@ -1008,6 +1025,12 @@ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
1008 return true; 1025 return true;
1009} 1026}
1010 1027
1028static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
1029 int node)
1030{
1031 return true;
1032}
1033
1011static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) 1034static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
1012{ 1035{
1013} 1036}
@@ -1021,12 +1044,6 @@ static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
1021} 1044}
1022#endif /* CONFIG_CPUMASK_OFFSTACK */ 1045#endif /* CONFIG_CPUMASK_OFFSTACK */
1023 1046
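The stubs above are the point of cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK=n it is a one-element struct cpumask array, so allocation is a no-op and the mask lives wherever the variable does; with OFFSTACK=y the same source becomes a real kmalloc/kfree pair, keeping huge-NR_CPUS masks off the stack. Sketch:

    cpumask_var_t tmp;                        /* array[1] or pointer, per config */

    if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) /* 'return true' stub if !OFFSTACK */
        return -ENOMEM;
    cpumask_copy(tmp, cpu_online_mask);
    free_cpumask_var(tmp);                    /* empty stub if !OFFSTACK */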
1024/* The pointer versions of the maps, these will become the primary versions. */
1025#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
1026#define cpu_online_mask ((const struct cpumask *)&cpu_online_map)
1027#define cpu_present_mask ((const struct cpumask *)&cpu_present_map)
1028#define cpu_active_mask ((const struct cpumask *)&cpu_active_map)
1029
1030/* It's common to want to use cpu_all_mask in struct member initializers, 1047/* It's common to want to use cpu_all_mask in struct member initializers,
1031 * so it has to refer to an address rather than a pointer. */ 1048 * so it has to refer to an address rather than a pointer. */
1032extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); 1049extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
@@ -1035,51 +1052,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
1035/* First bits of cpu_bit_bitmap are in fact unset. */ 1052/* First bits of cpu_bit_bitmap are in fact unset. */
1036#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) 1053#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
1037 1054
1038/* Wrappers for arch boot code to manipulate normally-constant masks */ 1055#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
1039static inline void set_cpu_possible(unsigned int cpu, bool possible) 1056#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
1040{ 1057#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
1041 if (possible)
1042 cpumask_set_cpu(cpu, &cpu_possible_map);
1043 else
1044 cpumask_clear_cpu(cpu, &cpu_possible_map);
1045}
1046
1047static inline void set_cpu_present(unsigned int cpu, bool present)
1048{
1049 if (present)
1050 cpumask_set_cpu(cpu, &cpu_present_map);
1051 else
1052 cpumask_clear_cpu(cpu, &cpu_present_map);
1053}
1054
1055static inline void set_cpu_online(unsigned int cpu, bool online)
1056{
1057 if (online)
1058 cpumask_set_cpu(cpu, &cpu_online_map);
1059 else
1060 cpumask_clear_cpu(cpu, &cpu_online_map);
1061}
1062
1063static inline void set_cpu_active(unsigned int cpu, bool active)
1064{
1065 if (active)
1066 cpumask_set_cpu(cpu, &cpu_active_map);
1067 else
1068 cpumask_clear_cpu(cpu, &cpu_active_map);
1069}
1070
1071static inline void init_cpu_present(const struct cpumask *src)
1072{
1073 cpumask_copy(&cpu_present_map, src);
1074}
1075
1076static inline void init_cpu_possible(const struct cpumask *src)
1077{
1078 cpumask_copy(&cpu_possible_map, src);
1079}
1080 1058
1081static inline void init_cpu_online(const struct cpumask *src) 1059/* Wrappers for arch boot code to manipulate normally-constant masks */
1082{ 1060void set_cpu_possible(unsigned int cpu, bool possible);
1083 cpumask_copy(&cpu_online_map, src); 1061void set_cpu_present(unsigned int cpu, bool present);
1084} 1062void set_cpu_online(unsigned int cpu, bool online);
1063void set_cpu_active(unsigned int cpu, bool active);
1064void init_cpu_present(const struct cpumask *src);
1065void init_cpu_possible(const struct cpumask *src);
1066void init_cpu_online(const struct cpumask *src);
1085#endif /* __LINUX_CPUMASK_H */ 1067#endif /* __LINUX_CPUMASK_H */
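For arch code the visible change is that the masks can no longer be assigned wholesale; boot paths go through the accessors, which this patch moves out of line into kernel/cpu.c. A sketch of the converted idiom (the function body and 'ncpus' are illustrative):

    void __init smp_prepare_cpus(unsigned int ncpus)
    {
        unsigned int cpu;

        for (cpu = 0; cpu < ncpus; cpu++)
            set_cpu_possible(cpu, true); /* was: cpu_set(cpu, cpu_possible_map) */
    }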
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index be3c484b5242..dfaee6bd265b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -111,13 +111,13 @@ extern void enable_irq(unsigned int irq);
111 111
112extern cpumask_t irq_default_affinity; 112extern cpumask_t irq_default_affinity;
113 113
114extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); 114extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
115extern int irq_can_set_affinity(unsigned int irq); 115extern int irq_can_set_affinity(unsigned int irq);
116extern int irq_select_affinity(unsigned int irq); 116extern int irq_select_affinity(unsigned int irq);
117 117
118#else /* CONFIG_SMP */ 118#else /* CONFIG_SMP */
119 119
120static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) 120static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
121{ 121{
122 return -EINVAL; 122 return -EINVAL;
123} 123}
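Callers of irq_set_affinity() change accordingly; pinning an interrupt to one CPU now passes the shared single-bit mask from cpumask_of() instead of copying an NR_CPUS-bit cpumask_t by value:

    /* was: irq_set_affinity(irq, cpumask_of_cpu(cpu)) */
    err = irq_set_affinity(irq, cpumask_of(cpu));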
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 98564dc64476..5845bdc1ac09 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -113,7 +113,8 @@ struct irq_chip {
113 void (*eoi)(unsigned int irq); 113 void (*eoi)(unsigned int irq);
114 114
115 void (*end)(unsigned int irq); 115 void (*end)(unsigned int irq);
116 void (*set_affinity)(unsigned int irq, cpumask_t dest); 116 void (*set_affinity)(unsigned int irq,
117 const struct cpumask *dest);
117 int (*retrigger)(unsigned int irq); 118 int (*retrigger)(unsigned int irq);
118 int (*set_type)(unsigned int irq, unsigned int flow_type); 119 int (*set_type)(unsigned int irq, unsigned int flow_type);
119 int (*set_wake)(unsigned int irq, unsigned int on); 120 int (*set_wake)(unsigned int irq, unsigned int on);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index b3dfa72f13b9..40ea5058c2ec 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -50,10 +50,11 @@ int seq_path(struct seq_file *, struct path *, char *);
50int seq_dentry(struct seq_file *, struct dentry *, char *); 50int seq_dentry(struct seq_file *, struct dentry *, char *);
51int seq_path_root(struct seq_file *m, struct path *path, struct path *root, 51int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
52 char *esc); 52 char *esc);
53int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); 53int seq_bitmap(struct seq_file *m, const unsigned long *bits,
54static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) 54 unsigned int nr_bits);
55static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
55{ 56{
56 return seq_bitmap(m, mask->bits, NR_CPUS); 57 return seq_bitmap(m, mask->bits, nr_cpu_ids);
57} 58}
58 59
59static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) 60static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6e7ba16ff454..b82466968101 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -21,6 +21,9 @@ struct call_single_data {
21 u16 priv; 21 u16 priv;
22}; 22};
23 23
24/* total number of cpus in this system (may exceed NR_CPUS) */
25extern unsigned int total_cpus;
26
24#ifdef CONFIG_SMP 27#ifdef CONFIG_SMP
25 28
26#include <linux/preempt.h> 29#include <linux/preempt.h>
@@ -64,15 +67,16 @@ extern void smp_cpus_done(unsigned int max_cpus);
64 * Call a function on all other processors 67 * Call a function on all other processors
65 */ 68 */
66int smp_call_function(void(*func)(void *info), void *info, int wait); 69int smp_call_function(void(*func)(void *info), void *info, int wait);
67/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */ 70void smp_call_function_many(const struct cpumask *mask,
68int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 71 void (*func)(void *info), void *info, bool wait);
69 int wait);
70 72
71static inline void smp_call_function_many(const struct cpumask *mask, 73/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
72 void (*func)(void *info), void *info, 74static inline int
73 int wait) 75smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
76 int wait)
74{ 77{
75 smp_call_function_mask(*mask, func, info, wait); 78 smp_call_function_many(&mask, func, info, wait);
79 return 0;
76} 80}
77 81
78int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, 82int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
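A sketch of a caller migrated to the new entry point (do_flush() and flush_others() are illustrative names):

    static void do_flush(void *info)
    {
        /* runs on each selected CPU, in interrupt context */
    }

    static void flush_others(const struct cpumask *mask)
    {
        preempt_disable();      /* required by the API */
        smp_call_function_many(mask, do_flush, NULL, true);
        preempt_enable();
    }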
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 38d1a5d6568e..052b12bec8bd 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -8,17 +8,17 @@
8 */ 8 */
9 9
10/* 10/*
11 * Maximum supported processors that can run under SMP. This value is 11 * Maximum supported processors. Setting this smaller saves quite a
12 * set via configure setting. The maximum is equal to the size of the 12 * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps.
13 * bitmasks used on that platform, i.e. 32 or 64. Setting this smaller
14 * saves quite a bit of memory.
15 */ 13 */
16#ifdef CONFIG_SMP 14#ifndef CONFIG_NR_CPUS
17#define NR_CPUS CONFIG_NR_CPUS 15/* FIXME: This should be fixed in the arch's Kconfig */
18#else 16#define CONFIG_NR_CPUS 1
19#define NR_CPUS 1
20#endif 17#endif
21 18
19/* Places which use this should consider cpumask_var_t. */
20#define NR_CPUS CONFIG_NR_CPUS
21
22#define MIN_THREADS_LEFT_FOR_ROOT 4 22#define MIN_THREADS_LEFT_FOR_ROOT 4
23 23
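The rule the new comment states, in code form: size static storage by NR_CPUS, bound loops by the runtime nr_cpu_ids (seen_cpus and mark_online_cpus() are illustrative):

    static DECLARE_BITMAP(seen_cpus, NR_CPUS);    /* static bitmap: NR_CPUS */

    static void mark_online_cpus(void)
    {
        unsigned int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)    /* iteration: nr_cpu_ids */
            if (cpu_online(cpu))
                set_bit(cpu, seen_cpus);
    }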
24/* 24/*
diff --git a/init/Kconfig b/init/Kconfig
index 13627191a60d..f6281711166d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -924,6 +924,15 @@ config KMOD
924 924
925endif # MODULES 925endif # MODULES
926 926
927config INIT_ALL_POSSIBLE
928 bool
929 help
930 Back when each arch used to define their own cpu_online_map and
931 cpu_possible_map, some of them chose to initialize cpu_possible_map
932 with all 1s, and others with all 0s. When they were centralised,
933 it was better to provide this option than to break all the archs
934 and have several arch maintainers pursuing me down dark alleys.
935
927config STOP_MACHINE 936config STOP_MACHINE
928 bool 937 bool
929 default y 938 default y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8ea32e8d68b0..2c9f78f3a2fc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,29 +15,8 @@
15#include <linux/stop_machine.h> 15#include <linux/stop_machine.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17 17
18/* 18#ifdef CONFIG_SMP
19 * Represents all cpu's present in the system 19/* Serializes the updates to cpu_online_mask, cpu_present_mask */
20 * In systems capable of hotplug, this map could dynamically grow
21 * as new cpu's are detected in the system via any platform specific
22 * method, such as ACPI for e.g.
23 */
24cpumask_t cpu_present_map __read_mostly;
25EXPORT_SYMBOL(cpu_present_map);
26
27#ifndef CONFIG_SMP
28
29/*
30 * Represents all cpu's that are currently online.
31 */
32cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
33EXPORT_SYMBOL(cpu_online_map);
34
35cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
36EXPORT_SYMBOL(cpu_possible_map);
37
38#else /* CONFIG_SMP */
39
40/* Serializes the updates to cpu_online_map, cpu_present_map */
41static DEFINE_MUTEX(cpu_add_remove_lock); 20static DEFINE_MUTEX(cpu_add_remove_lock);
42 21
43static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); 22static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
@@ -64,8 +43,6 @@ void __init cpu_hotplug_init(void)
64 cpu_hotplug.refcount = 0; 43 cpu_hotplug.refcount = 0;
65} 44}
66 45
67cpumask_t cpu_active_map;
68
69#ifdef CONFIG_HOTPLUG_CPU 46#ifdef CONFIG_HOTPLUG_CPU
70 47
71void get_online_cpus(void) 48void get_online_cpus(void)
@@ -96,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
96 73
97/* 74/*
98 * The following two APIs must be used when attempting 75 * The following two APIs must be used when attempting
99 * to serialize the updates to cpu_online_map, cpu_present_map. 76 * to serialize the updates to cpu_online_mask, cpu_present_mask.
100 */ 77 */
101void cpu_maps_update_begin(void) 78void cpu_maps_update_begin(void)
102{ 79{
@@ -502,3 +479,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
502 479
503const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 480const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
504EXPORT_SYMBOL(cpu_all_bits); 481EXPORT_SYMBOL(cpu_all_bits);
482
483#ifdef CONFIG_INIT_ALL_POSSIBLE
484static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
485 = CPU_BITS_ALL;
486#else
487static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
488#endif
489const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
490EXPORT_SYMBOL(cpu_possible_mask);
491
492static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
493const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
494EXPORT_SYMBOL(cpu_online_mask);
495
496static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
497const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
498EXPORT_SYMBOL(cpu_present_mask);
499
500static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
501const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
502EXPORT_SYMBOL(cpu_active_mask);
503
504void set_cpu_possible(unsigned int cpu, bool possible)
505{
506 if (possible)
507 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
508 else
509 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
510}
511
512void set_cpu_present(unsigned int cpu, bool present)
513{
514 if (present)
515 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
516 else
517 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
518}
519
520void set_cpu_online(unsigned int cpu, bool online)
521{
522 if (online)
523 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
524 else
525 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
526}
527
528void set_cpu_active(unsigned int cpu, bool active)
529{
530 if (active)
531 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
532 else
533 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
534}
535
536void init_cpu_present(const struct cpumask *src)
537{
538 cpumask_copy(to_cpumask(cpu_present_bits), src);
539}
540
541void init_cpu_possible(const struct cpumask *src)
542{
543 cpumask_copy(to_cpumask(cpu_possible_bits), src);
544}
545
546void init_cpu_online(const struct cpumask *src)
547{
548 cpumask_copy(to_cpumask(cpu_online_bits), src);
549}
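With the bitmaps now static to kernel/cpu.c, to_cpumask() is how even this file reaches them, and everyone else sees only the const pointers. Arch code that previously built cpu_possible_map by hand would instead hand its bitmap over (sketch; 'detected' is an illustrative name):

    static DECLARE_BITMAP(detected, CONFIG_NR_CPUS);

    /* ... set bits for CPUs the firmware reported ... */
    init_cpu_possible(to_cpumask(detected));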
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 96c0ba13b8cd..39c1a4c1c5a9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -896,7 +896,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
896 if (!*buf) { 896 if (!*buf) {
897 cpus_clear(trialcs.cpus_allowed); 897 cpus_clear(trialcs.cpus_allowed);
898 } else { 898 } else {
899 retval = cpulist_parse(buf, trialcs.cpus_allowed); 899 retval = cpulist_parse(buf, &trialcs.cpus_allowed);
900 if (retval < 0) 900 if (retval < 0)
901 return retval; 901 return retval;
902 902
@@ -1482,7 +1482,7 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1482 mask = cs->cpus_allowed; 1482 mask = cs->cpus_allowed;
1483 mutex_unlock(&callback_mutex); 1483 mutex_unlock(&callback_mutex);
1484 1484
1485 return cpulist_scnprintf(page, PAGE_SIZE, mask); 1485 return cpulist_scnprintf(page, PAGE_SIZE, &mask);
1486} 1486}
1487 1487
1488static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) 1488static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6eb3c7952b64..f63c706d25e1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,7 @@ void dynamic_irq_init(unsigned int irq)
46 desc->irq_count = 0; 46 desc->irq_count = 0;
47 desc->irqs_unhandled = 0; 47 desc->irqs_unhandled = 0;
48#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
49 cpus_setall(desc->affinity); 49 cpumask_setall(&desc->affinity);
50#endif 50#endif
51 spin_unlock_irqrestore(&desc->lock, flags); 51 spin_unlock_irqrestore(&desc->lock, flags);
52} 52}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 540f6c49f3fa..61c4a9b62165 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -79,7 +79,7 @@ int irq_can_set_affinity(unsigned int irq)
79 * @cpumask: cpumask 79 * @cpumask: cpumask
80 * 80 *
81 */ 81 */
82int irq_set_affinity(unsigned int irq, cpumask_t cpumask) 82int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
83{ 83{
84 struct irq_desc *desc = irq_to_desc(irq); 84 struct irq_desc *desc = irq_to_desc(irq);
85 unsigned long flags; 85 unsigned long flags;
@@ -91,14 +91,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
91 91
92#ifdef CONFIG_GENERIC_PENDING_IRQ 92#ifdef CONFIG_GENERIC_PENDING_IRQ
93 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { 93 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
94 desc->affinity = cpumask; 94 cpumask_copy(&desc->affinity, cpumask);
95 desc->chip->set_affinity(irq, cpumask); 95 desc->chip->set_affinity(irq, cpumask);
96 } else { 96 } else {
97 desc->status |= IRQ_MOVE_PENDING; 97 desc->status |= IRQ_MOVE_PENDING;
98 desc->pending_mask = cpumask; 98 cpumask_copy(&desc->pending_mask, cpumask);
99 } 99 }
100#else 100#else
101 desc->affinity = cpumask; 101 cpumask_copy(&desc->affinity, cpumask);
102 desc->chip->set_affinity(irq, cpumask); 102 desc->chip->set_affinity(irq, cpumask);
103#endif 103#endif
104 desc->status |= IRQ_AFFINITY_SET; 104 desc->status |= IRQ_AFFINITY_SET;
@@ -112,26 +112,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
112 */ 112 */
113int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) 113int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
114{ 114{
115 cpumask_t mask;
116
117 if (!irq_can_set_affinity(irq)) 115 if (!irq_can_set_affinity(irq))
118 return 0; 116 return 0;
119 117
120 cpus_and(mask, cpu_online_map, irq_default_affinity);
121
122 /* 118 /*
123 * Preserve a userspace affinity setup, but make sure that 119 * Preserve a userspace affinity setup, but make sure that
124 * one of the targets is online. 120 * one of the targets is online.
125 */ 121 */
126 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 122 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
127 if (cpus_intersects(desc->affinity, cpu_online_map)) 123 if (cpumask_any_and(&desc->affinity, cpu_online_mask)
128 mask = desc->affinity; 124 < nr_cpu_ids)
125 goto set_affinity;
129 else 126 else
130 desc->status &= ~IRQ_AFFINITY_SET; 127 desc->status &= ~IRQ_AFFINITY_SET;
131 } 128 }
132 129
133 desc->affinity = mask; 130 cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity);
134 desc->chip->set_affinity(irq, mask); 131set_affinity:
132 desc->chip->set_affinity(irq, &desc->affinity);
135 133
136 return 0; 134 return 0;
137} 135}
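The rewrite above also shows the pattern that replaces cpus_intersects() plus a temporary: cpumask_any_and() returns an arbitrary CPU in the intersection, or nr_cpu_ids when it is empty, so it doubles as an intersection test with no cpumask_t on the stack (helper name illustrative):

    static bool affinity_has_online_target(struct irq_desc *desc)
    {
        return cpumask_any_and(&desc->affinity, cpu_online_mask) < nr_cpu_ids;
    }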
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 9db681d95814..bd72329e630c 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,7 +4,6 @@
4void move_masked_irq(int irq) 4void move_masked_irq(int irq)
5{ 5{
6 struct irq_desc *desc = irq_to_desc(irq); 6 struct irq_desc *desc = irq_to_desc(irq);
7 cpumask_t tmp;
8 7
9 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 8 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
10 return; 9 return;
@@ -19,7 +18,7 @@ void move_masked_irq(int irq)
19 18
20 desc->status &= ~IRQ_MOVE_PENDING; 19 desc->status &= ~IRQ_MOVE_PENDING;
21 20
22 if (unlikely(cpus_empty(desc->pending_mask))) 21 if (unlikely(cpumask_empty(&desc->pending_mask)))
23 return; 22 return;
24 23
25 if (!desc->chip->set_affinity) 24 if (!desc->chip->set_affinity)
@@ -27,8 +26,6 @@ void move_masked_irq(int irq)
27 26
28 assert_spin_locked(&desc->lock); 27 assert_spin_locked(&desc->lock);
29 28
30 cpus_and(tmp, desc->pending_mask, cpu_online_map);
31
32 /* 29 /*
33 * If there was a valid mask to work with, please 30 * If there was a valid mask to work with, please
34 * do the disable, re-program, enable sequence. 31 * do the disable, re-program, enable sequence.
@@ -41,10 +38,13 @@ void move_masked_irq(int irq)
41 * For correct operation this depends on the caller 38 * For correct operation this depends on the caller
42 * masking the irqs. 39 * masking the irqs.
43 */ 40 */
44 if (likely(!cpus_empty(tmp))) { 41 if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
45 desc->chip->set_affinity(irq,tmp); 42 < nr_cpu_ids)) {
43 cpumask_and(&desc->affinity,
44 &desc->pending_mask, cpu_online_mask);
45 desc->chip->set_affinity(irq, &desc->affinity);
46 } 46 }
47 cpus_clear(desc->pending_mask); 47 cpumask_clear(&desc->pending_mask);
48} 48}
49 49
50void move_native_irq(int irq) 50void move_native_irq(int irq)
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index f6b3440f05bc..d2c0e5ee53c5 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -40,33 +40,42 @@ static ssize_t irq_affinity_proc_write(struct file *file,
40 const char __user *buffer, size_t count, loff_t *pos) 40 const char __user *buffer, size_t count, loff_t *pos)
41{ 41{
42 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; 42 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
43 cpumask_t new_value; 43 cpumask_var_t new_value;
44 int err; 44 int err;
45 45
46 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || 46 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
47 irq_balancing_disabled(irq)) 47 irq_balancing_disabled(irq))
48 return -EIO; 48 return -EIO;
49 49
50 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
51 return -ENOMEM;
52
50 err = cpumask_parse_user(buffer, count, new_value); 53 err = cpumask_parse_user(buffer, count, new_value);
51 if (err) 54 if (err)
52 return err; 55 goto free_cpumask;
53 56
54 if (!is_affinity_mask_valid(new_value)) 57 if (!is_affinity_mask_valid(*new_value)) {
55 return -EINVAL; 58 err = -EINVAL;
59 goto free_cpumask;
60 }
56 61
57 /* 62 /*
58 * Do not allow disabling IRQs completely - it's too easy a 63 * Do not allow disabling IRQs completely - it's too easy a
59 * way to make the system unusable accidentally :-) At least 64 * way to make the system unusable accidentally :-) At least
60 * one online CPU still has to be targeted. 65 * one online CPU still has to be targeted.
61 */ 66 */
62 if (!cpus_intersects(new_value, cpu_online_map)) 67 if (!cpumask_intersects(new_value, cpu_online_mask)) {
63 /* Special case for empty set - allow the architecture 68 /* Special case for empty set - allow the architecture
64 code to set default SMP affinity. */ 69 code to set default SMP affinity. */
65 return irq_select_affinity_usr(irq) ? -EINVAL : count; 70 err = irq_select_affinity_usr(irq) ? -EINVAL : count;
66 71 } else {
67 irq_set_affinity(irq, new_value); 72 irq_set_affinity(irq, new_value);
73 err = count;
74 }
68 75
69 return count; 76free_cpumask:
77 free_cpumask_var(new_value);
78 return err;
70} 79}
71 80
72static int irq_affinity_proc_open(struct inode *inode, struct file *file) 81static int irq_affinity_proc_open(struct inode *inode, struct file *file)
@@ -95,7 +104,7 @@ static ssize_t default_affinity_write(struct file *file,
95 cpumask_t new_value; 104 cpumask_t new_value;
96 int err; 105 int err;
97 106
98 err = cpumask_parse_user(buffer, count, new_value); 107 err = cpumask_parse_user(buffer, count, &new_value);
99 if (err) 108 if (err)
100 return err; 109 return err;
101 110
diff --git a/kernel/profile.c b/kernel/profile.c
index 60adefb59b5e..4cb7d68fed82 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -442,7 +442,7 @@ void profile_tick(int type)
442static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, 442static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
443 int count, int *eof, void *data) 443 int count, int *eof, void *data)
444{ 444{
445 int len = cpumask_scnprintf(page, count, *(cpumask_t *)data); 445 int len = cpumask_scnprintf(page, count, (cpumask_t *)data);
446 if (count - len < 2) 446 if (count - len < 2)
447 return -EINVAL; 447 return -EINVAL;
448 len += sprintf(page + len, "\n"); 448 len += sprintf(page + len, "\n");
@@ -456,7 +456,7 @@ static int prof_cpu_mask_write_proc(struct file *file,
456 unsigned long full_count = count, err; 456 unsigned long full_count = count, err;
457 cpumask_t new_value; 457 cpumask_t new_value;
458 458
459 err = cpumask_parse_user(buffer, count, new_value); 459 err = cpumask_parse_user(buffer, count, &new_value);
460 if (err) 460 if (err)
461 return err; 461 return err;
462 462
diff --git a/kernel/sched.c b/kernel/sched.c
index fff1c4a20b65..f2095660efec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6644,7 +6644,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6644 struct sched_group *group = sd->groups; 6644 struct sched_group *group = sd->groups;
6645 char str[256]; 6645 char str[256];
6646 6646
6647 cpulist_scnprintf(str, sizeof(str), sd->span); 6647 cpulist_scnprintf(str, sizeof(str), &sd->span);
6648 cpus_clear(*groupmask); 6648 cpus_clear(*groupmask);
6649 6649
6650 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 6650 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -6697,7 +6697,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6697 6697
6698 cpus_or(*groupmask, *groupmask, group->cpumask); 6698 cpus_or(*groupmask, *groupmask, group->cpumask);
6699 6699
6700 cpulist_scnprintf(str, sizeof(str), group->cpumask); 6700 cpulist_scnprintf(str, sizeof(str), &group->cpumask);
6701 printk(KERN_CONT " %s", str); 6701 printk(KERN_CONT " %s", str);
6702 6702
6703 group = group->next; 6703 group = group->next;
@@ -7098,7 +7098,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
7098{ 7098{
7099 int group; 7099 int group;
7100#ifdef CONFIG_SCHED_MC 7100#ifdef CONFIG_SCHED_MC
7101 *mask = cpu_coregroup_map(cpu); 7101 *mask = *cpu_coregroup_mask(cpu);
7102 cpus_and(*mask, *mask, *cpu_map); 7102 cpus_and(*mask, *mask, *cpu_map);
7103 group = first_cpu(*mask); 7103 group = first_cpu(*mask);
7104#elif defined(CONFIG_SCHED_SMT) 7104#elif defined(CONFIG_SCHED_SMT)
@@ -7471,7 +7471,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7471 sd = &per_cpu(core_domains, i); 7471 sd = &per_cpu(core_domains, i);
7472 SD_INIT(sd, MC); 7472 SD_INIT(sd, MC);
7473 set_domain_attribute(sd, attr); 7473 set_domain_attribute(sd, attr);
7474 sd->span = cpu_coregroup_map(i); 7474 sd->span = *cpu_coregroup_mask(i);
7475 cpus_and(sd->span, sd->span, *cpu_map); 7475 cpus_and(sd->span, sd->span, *cpu_map);
7476 sd->parent = p; 7476 sd->parent = p;
7477 p->child = sd; 7477 p->child = sd;
@@ -7514,7 +7514,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7514 SCHED_CPUMASK_VAR(this_core_map, allmasks); 7514 SCHED_CPUMASK_VAR(this_core_map, allmasks);
7515 SCHED_CPUMASK_VAR(send_covered, allmasks); 7515 SCHED_CPUMASK_VAR(send_covered, allmasks);
7516 7516
7517 *this_core_map = cpu_coregroup_map(i); 7517 *this_core_map = *cpu_coregroup_mask(i);
7518 cpus_and(*this_core_map, *this_core_map, *cpu_map); 7518 cpus_and(*this_core_map, *this_core_map, *cpu_map);
7519 if (i != first_cpu(*this_core_map)) 7519 if (i != first_cpu(*this_core_map))
7520 continue; 7520 continue;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 3b01098164c8..b59fd9cdc1b6 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -42,7 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
42 for_each_domain(cpu, sd) { 42 for_each_domain(cpu, sd) {
43 enum cpu_idle_type itype; 43 enum cpu_idle_type itype;
44 44
45 cpumask_scnprintf(mask_str, mask_len, sd->span); 45 cpumask_scnprintf(mask_str, mask_len, &sd->span);
46 seq_printf(seq, "domain%d %s", dcount++, mask_str); 46 seq_printf(seq, "domain%d %s", dcount++, mask_str);
47 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; 47 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
48 itype++) { 48 itype++) {
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c8dde58c55..172b18268909 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -24,8 +24,8 @@ struct call_function_data {
24 struct call_single_data csd; 24 struct call_single_data csd;
25 spinlock_t lock; 25 spinlock_t lock;
26 unsigned int refs; 26 unsigned int refs;
27 cpumask_t cpumask;
28 struct rcu_head rcu_head; 27 struct rcu_head rcu_head;
28 unsigned long cpumask_bits[];
29}; 29};
30 30
31struct call_single_queue { 31struct call_single_queue {
@@ -110,13 +110,13 @@ void generic_smp_call_function_interrupt(void)
110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) { 110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
111 int refs; 111 int refs;
112 112
113 if (!cpu_isset(cpu, data->cpumask)) 113 if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
114 continue; 114 continue;
115 115
116 data->csd.func(data->csd.info); 116 data->csd.func(data->csd.info);
117 117
118 spin_lock(&data->lock); 118 spin_lock(&data->lock);
119 cpu_clear(cpu, data->cpumask); 119 cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
120 WARN_ON(data->refs == 0); 120 WARN_ON(data->refs == 0);
121 data->refs--; 121 data->refs--;
122 refs = data->refs; 122 refs = data->refs;
@@ -266,51 +266,19 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
266 generic_exec_single(cpu, data); 266 generic_exec_single(cpu, data);
267} 267}
268 268
269/* Dummy function */ 269/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
270static void quiesce_dummy(void *unused) 270#ifndef arch_send_call_function_ipi_mask
271{ 271#define arch_send_call_function_ipi_mask(maskp) \
272} 272 arch_send_call_function_ipi(*(maskp))
273 273#endif
274/*
275 * Ensure stack based data used in call function mask is safe to free.
276 *
277 * This is needed by smp_call_function_mask when using on-stack data, because
278 * a single call function queue is shared by all CPUs, and any CPU may pick up
279 * the data item on the queue at any time before it is deleted. So we need to
280 * ensure that all CPUs have transitioned through a quiescent state after
281 * this call.
282 *
283 * This is a very slow function, implemented by sending synchronous IPIs to
284 * all possible CPUs. For this reason, we have to alloc data rather than use
285 * stack based data even in the case of synchronous calls. The stack based
286 * data is then just used for deadlock/oom fallback which will be very rare.
287 *
288 * If a faster scheme can be made, we could go back to preferring stack based
289 * data -- the data allocation/free is non-zero cost.
290 */
291static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
292{
293 struct call_single_data data;
294 int cpu;
295
296 data.func = quiesce_dummy;
297 data.info = NULL;
298
299 for_each_cpu_mask(cpu, mask) {
300 data.flags = CSD_FLAG_WAIT;
301 generic_exec_single(cpu, &data);
302 }
303}
304 274
305/** 275/**
306 * smp_call_function_mask(): Run a function on a set of other CPUs. 276 * smp_call_function_many(): Run a function on a set of other CPUs.
307 * @mask: The set of cpus to run on. 277 * @mask: The set of cpus to run on (only runs on online subset).
308 * @func: The function to run. This must be fast and non-blocking. 278 * @func: The function to run. This must be fast and non-blocking.
309 * @info: An arbitrary pointer to pass to the function. 279 * @info: An arbitrary pointer to pass to the function.
310 * @wait: If true, wait (atomically) until function has completed on other CPUs. 280 * @wait: If true, wait (atomically) until function has completed on other CPUs.
311 * 281 *
312 * Returns 0 on success, else a negative status code.
313 *
314 * If @wait is true, then returns once @func has returned. Note that @wait 282 * If @wait is true, then returns once @func has returned. Note that @wait
315 * will be implicitly turned on in case of allocation failures, since 283 * will be implicitly turned on in case of allocation failures, since
316 * we fall back to on-stack allocation. 284 * we fall back to on-stack allocation.
@@ -319,53 +287,57 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
319 * hardware interrupt handler or from a bottom half handler. Preemption 287 * hardware interrupt handler or from a bottom half handler. Preemption
320 * must be disabled when calling this function. 288 * must be disabled when calling this function.
321 */ 289 */
322int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, 290void smp_call_function_many(const struct cpumask *mask,
323 int wait) 291 void (*func)(void *), void *info,
292 bool wait)
324{ 293{
325 struct call_function_data d; 294 struct call_function_data *data;
326 struct call_function_data *data = NULL;
327 cpumask_t allbutself;
328 unsigned long flags; 295 unsigned long flags;
329 int cpu, num_cpus; 296 int cpu, next_cpu;
330 int slowpath = 0;
331 297
332 /* Can deadlock when called with interrupts disabled */ 298 /* Can deadlock when called with interrupts disabled */
333 WARN_ON(irqs_disabled()); 299 WARN_ON(irqs_disabled());
334 300
335 cpu = smp_processor_id(); 301 /* So, what's a CPU they want? Ignoring this one. */
336 allbutself = cpu_online_map; 302 cpu = cpumask_first_and(mask, cpu_online_mask);
337 cpu_clear(cpu, allbutself); 303 if (cpu == smp_processor_id())
338 cpus_and(mask, mask, allbutself); 304 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
339 num_cpus = cpus_weight(mask); 305 /* No online cpus? We're done. */
340 306 if (cpu >= nr_cpu_ids)
341 /* 307 return;
342 * If zero CPUs, return. If just a single CPU, turn this request 308
343 * into a targeted single call instead since it's faster. 309 /* Do we have another CPU which isn't us? */
344 */ 310 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
345 if (!num_cpus) 311 if (next_cpu == smp_processor_id())
346 return 0; 312 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
347 else if (num_cpus == 1) { 313
348 cpu = first_cpu(mask); 314 /* Fastpath: do that cpu by itself. */
349 return smp_call_function_single(cpu, func, info, wait); 315 if (next_cpu >= nr_cpu_ids) {
316 smp_call_function_single(cpu, func, info, wait);
317 return;
350 } 318 }
351 319
352 data = kmalloc(sizeof(*data), GFP_ATOMIC); 320 data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
353 if (data) { 321 if (unlikely(!data)) {
354 data->csd.flags = CSD_FLAG_ALLOC; 322 /* Slow path. */
355 if (wait) 323 for_each_online_cpu(cpu) {
356 data->csd.flags |= CSD_FLAG_WAIT; 324 if (cpu == smp_processor_id())
357 } else { 325 continue;
358 data = &d; 326 if (cpumask_test_cpu(cpu, mask))
359 data->csd.flags = CSD_FLAG_WAIT; 327 smp_call_function_single(cpu, func, info, wait);
360 wait = 1; 328 }
361 slowpath = 1; 329 return;
362 } 330 }
363 331
364 spin_lock_init(&data->lock); 332 spin_lock_init(&data->lock);
333 data->csd.flags = CSD_FLAG_ALLOC;
334 if (wait)
335 data->csd.flags |= CSD_FLAG_WAIT;
365 data->csd.func = func; 336 data->csd.func = func;
366 data->csd.info = info; 337 data->csd.info = info;
367 data->refs = num_cpus; 338 cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
368 data->cpumask = mask; 339 cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
340 data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));
369 341
370 spin_lock_irqsave(&call_function_lock, flags); 342 spin_lock_irqsave(&call_function_lock, flags);
371 list_add_tail_rcu(&data->csd.list, &call_function_queue); 343 list_add_tail_rcu(&data->csd.list, &call_function_queue);
@@ -377,18 +349,13 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	smp_mb();
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi(mask);
+	arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));
 
 	/* optionally wait for the CPUs to complete */
-	if (wait) {
+	if (wait)
 		csd_flag_wait(&data->csd);
-		if (unlikely(slowpath))
-			smp_call_function_mask_quiesce_stack(mask);
-	}
-
-	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL(smp_call_function_many);
 
 /**
  * smp_call_function(): Run a function on all other CPUs.
@@ -396,7 +363,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
  * @info: An arbitrary pointer to pass to the function.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
- * Returns 0 on success, else a negative status code.
+ * Returns 0.
  *
  * If @wait is true, then returns once @func has returned; otherwise
  * it returns just before the target cpu calls @func. In case of allocation
@@ -407,12 +374,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
  */
 int smp_call_function(void (*func)(void *), void *info, int wait)
 {
-	int ret;
-
 	preempt_disable();
-	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+	smp_call_function_many(cpu_online_mask, func, info, wait);
 	preempt_enable();
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
 
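The hunk above replaces the int-returning smp_call_function_mask() with a void smp_call_function_many() that takes a const struct cpumask * and a bool wait, and handles the allocation-failure slow path internally. A minimal caller sketch follows; my_ipi_func() and run_on_others() are hypothetical names, only the smp_call_function_many() and cpumask_var_t calls come from this patch:

/* Hedged sketch: a hypothetical caller of the new API. */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

static void my_ipi_func(void *info)
{
	/* runs on every CPU selected in the mask */
	pr_info("cpu %d: called\n", smp_processor_id());
}

static int run_on_others(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(mask, cpu_online_mask);

	preempt_disable();
	/* the current CPU is skipped by smp_call_function_many() itself;
	 * the function returns void now, so there is no error code */
	smp_call_function_many(mask, my_ipi_func, NULL, true);
	preempt_enable();

	free_cpumask_var(mask);
	return 0;
}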
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index bd6be76303cf..6d7dc4ec4aa5 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -352,7 +352,7 @@ static int parse(struct nlattr *na, cpumask_t *mask)
 	if (!data)
 		return -ENOMEM;
 	nla_strlcpy(data, na, len);
-	ret = cpulist_parse(data, *mask);
+	ret = cpulist_parse(data, mask);
 	kfree(data);
 	return ret;
 }
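cpulist_parse() now takes the destination as a struct cpumask * instead of an lvalue that the old macro dereferenced, hence the *mask -> mask change above. A hedged sketch of a caller under the new convention (the list string is illustrative):

/* Sketch: parsing a CPU list string with the pointer-taking helper. */
cpumask_var_t new_mask;
int ret;

if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
	return -ENOMEM;
/* old macro form:  cpulist_parse(buf, *new_mask);
 * new form:        pass the mask pointer directly */
ret = cpulist_parse("0-3,8", new_mask);
if (ret < 0) {
	free_cpumask_var(new_mask);
	return ret;
}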
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index f8d968063cea..ea2f48af83cf 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -166,6 +166,8 @@ static void clockevents_notify_released(void)
 void clockevents_register_device(struct clock_event_device *dev)
 {
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+	BUG_ON(!dev->cpumask);
+
 	/*
 	 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
 	 * on it, so fix it up and emit a warning:
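Because registration now BUG()s on a NULL dev->cpumask, every clock event driver must assign the mask before registering; this is why so many arch timer files in this patch gain a cpumask assignment. A hedged sketch of a conforming driver init (names and elided fields are illustrative):

/* Sketch: satisfying the new BUG_ON(!dev->cpumask). */
static struct clock_event_device my_clkevt = {
	.name		= "my-timer",
	/* ... .features, .set_mode, .set_next_event elided ... */
};

static void __init my_timer_init(void)
{
	/* must point at a valid mask *before* registering */
	my_clkevt.cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(&my_clkevt);
}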
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f98a1b7b16e9..9590af2327be 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -150,7 +150,7 @@ static void tick_do_broadcast(cpumask_t mask)
 		 */
 		cpu = first_cpu(mask);
 		td = &per_cpu(tick_cpu_device, cpu);
-		td->evtdev->broadcast(mask);
+		td->evtdev->broadcast(&mask);
 	}
 }
 
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index df12434b43ca..f8372be74122 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -136,7 +136,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
  */
 static void tick_setup_device(struct tick_device *td,
 			      struct clock_event_device *newdev, int cpu,
-			      const cpumask_t *cpumask)
+			      const struct cpumask *cpumask)
 {
 	ktime_t next_event;
 	void (*handler)(struct clock_event_device *) = NULL;
@@ -171,8 +171,8 @@ static void tick_setup_device(struct tick_device *td,
 	 * When the device is not per cpu, pin the interrupt to the
 	 * current cpu:
 	 */
-	if (!cpus_equal(newdev->cpumask, *cpumask))
-		irq_set_affinity(newdev->irq, *cpumask);
+	if (!cpumask_equal(newdev->cpumask, cpumask))
+		irq_set_affinity(newdev->irq, cpumask);
 
 	/*
 	 * When global broadcasting is active, check if the current
178 * When global broadcasting is active, check if the current 178 * When global broadcasting is active, check if the current
@@ -202,14 +202,14 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	spin_lock_irqsave(&tick_device_lock, flags);
 
 	cpu = smp_processor_id();
-	if (!cpu_isset(cpu, newdev->cpumask))
+	if (!cpumask_test_cpu(cpu, newdev->cpumask))
 		goto out_bc;
 
 	td = &per_cpu(tick_cpu_device, cpu);
 	curdev = td->evtdev;
 
 	/* cpu local device ? */
-	if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
+	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
 
 		/*
 		 * If the cpu affinity of the device interrupt can not
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		 * If we have a cpu local device already, do not replace it
 		 * by a non cpu local device
 		 */
-		if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
+		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
 			goto out_bc;
 	}
 
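The tick-common.c hunks show the recurring translation in this patch: cpus_equal()/cpu_isset() on cpumask_t values become cpumask_equal()/cpumask_test_cpu() on pointers, and cpumask_of_cpu(cpu), which produced a full NR_CPUS-bit struct by value, becomes cpumask_of(cpu), a const pointer into a shared static map. A hedged before/after sketch (device_is_local() is a hypothetical helper, not from this patch):

/* Sketch of the idiom. */
static bool device_is_local(struct clock_event_device *dev, int cpu)
{
	/* old: cpus_equal(dev->cpumask, cpumask_of_cpu(cpu))
	 *      -- copies two NR_CPUS-bit masks onto the stack.
	 * new: compares through const pointers, no copies: */
	return cpumask_equal(dev->cpumask, cpumask_of(cpu));
}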
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4185d5221633..0e91f43b6baf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2674,7 +2674,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -2695,7 +2695,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	int err, cpu;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+	err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
 	if (err)
 		goto err_unlock;
 
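The same pointer convention applies to the string helpers: cpumask_scnprintf() and cpumask_parse_user() now take a struct cpumask * destination/source, hence the added & on the file-scope cpumask_t variables above. A short hedged sketch (buffer size illustrative):

/* Sketch: formatting a mask with the pointer-taking helper. */
char buf[128];
cpumask_var_t m;

if (!alloc_cpumask_var(&m, GFP_KERNEL))
	return -ENOMEM;
cpumask_copy(m, cpu_online_mask);

/* cpumask_var_t already is (or decays to) struct cpumask * */
cpumask_scnprintf(buf, sizeof(buf), m);
pr_info("online: %s\n", buf);

free_cpumask_var(m);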
diff --git a/lib/Kconfig b/lib/Kconfig
index fd4118e097f0..2ba43c4a5b07 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -159,4 +159,11 @@ config CHECK_SIGNATURE
 config HAVE_LMB
 	boolean
 
+config CPUMASK_OFFSTACK
+	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+	help
+	  Use dynamic allocation for cpumask_var_t, instead of putting
+	  them on the stack.  This is a bit more expensive, but avoids
+	  stack overflow.
+
 endmenu
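What the new option actually switches, as a hedged sketch (simplified from <linux/cpumask.h>; exact definitions may differ):

/* Sketch of the two cpumask_var_t representations:
 *
 * CONFIG_CPUMASK_OFFSTACK=n -- the variable itself is the storage,
 * so alloc_cpumask_var() can be a nop returning true:
 *
 *	typedef struct cpumask cpumask_var_t[1];
 *
 * CONFIG_CPUMASK_OFFSTACK=y -- the variable is only a pointer and
 * alloc_cpumask_var() must kmalloc() the storage:
 *
 *	typedef struct cpumask *cpumask_var_t;
 */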
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 8d03f22c6ced..a24edf137f41 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -76,15 +76,28 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+/**
+ * alloc_cpumask_var_node - allocate a struct cpumask on a given node
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>)
+ * Returns TRUE if memory allocation succeeded, FALSE otherwise.
+ *
+ * In addition, mask will be NULL if this fails.  Note that gcc is
+ * usually smart enough to know that mask can never be NULL if
+ * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
+ * too.
+ */
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
 	if (likely(slab_is_available()))
-		*mask = kmalloc(cpumask_size(), flags);
+		*mask = kmalloc_node(cpumask_size(), flags, node);
 	else {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 		printk(KERN_ERR
 			"=> alloc_cpumask_var: kmalloc not available!\n");
-		dump_stack();
 #endif
 		*mask = NULL;
 	}
@@ -96,19 +109,54 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 #endif
 	return *mask != NULL;
 }
+EXPORT_SYMBOL(alloc_cpumask_var_node);
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node.
+ */
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	return alloc_cpumask_var_node(mask, flags, numa_node_id());
+}
 EXPORT_SYMBOL(alloc_cpumask_var);
 
+/**
+ * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>)
+ * Either returns an allocated (zero-filled) cpumask, or causes the
+ * system to panic.
+ */
 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
 	*mask = alloc_bootmem(cpumask_size());
 }
 
+/**
+ * free_cpumask_var - frees memory allocated for a struct cpumask.
+ * @mask: cpumask to free
+ *
+ * This is safe on a NULL mask.
+ */
 void free_cpumask_var(cpumask_var_t mask)
 {
 	kfree(mask);
 }
 EXPORT_SYMBOL(free_cpumask_var);
 
+/**
+ * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
+ * @mask: cpumask to free
+ */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
 	free_bootmem((unsigned long)mask, cpumask_size());
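Taken together, the new allocators give the lifetime pattern sketched below; walk_mask() is a hypothetical caller, and for_each_cpu() here is the pointer-taking iterator from this same conversion series. With CONFIG_CPUMASK_OFFSTACK=n the alloc/free pair compiles away and the mask lives in the caller's frame; with =y it is a real kmalloc()/kfree():

/* Sketch: canonical cpumask_var_t usage. */
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

static int walk_mask(void)
{
	cpumask_var_t tmp;
	int cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, cpu_online_mask, cpu_possible_mask);
	for_each_cpu(cpu, tmp)
		pr_info("cpu %d set\n", cpu);

	free_cpumask_var(tmp);	/* safe on NULL, like kfree() */
	return 0;
}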
diff --git a/mm/slub.c b/mm/slub.c
index 6cb7ad107852..0d861c3154b6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3642,7 +3642,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			 len < PAGE_SIZE - 60) {
 		len += sprintf(buf + len, " cpus=");
 		len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-				l->cpus);
+				&l->cpus);
 	}
 
 	if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&