 Documentation/cputopology.txt                | 48
 arch/alpha/include/asm/topology.h            | 17
 arch/alpha/kernel/irq.c                      | 3
 arch/alpha/kernel/setup.c                    | 5
 arch/avr32/include/asm/bitops.h              | 5
 arch/blackfin/include/asm/bitops.h           | 1
 arch/cris/include/asm/bitops.h               | 1
 arch/h8300/include/asm/bitops.h              | 1
 arch/ia64/include/asm/irq.h                  | 2
 arch/ia64/include/asm/topology.h             | 9
 arch/ia64/kernel/acpi.c                      | 3
 arch/ia64/kernel/iosapic.c                   | 23
 arch/ia64/kernel/irq.c                       | 4
 arch/ia64/sn/kernel/sn2/sn_hwperf.c          | 27
 arch/m32r/kernel/smpboot.c                   | 2
 arch/m68knommu/include/asm/bitops.h          | 1
 arch/mips/include/asm/mach-ip27/topology.h   | 4
 arch/parisc/include/asm/smp.h                | 2
 arch/powerpc/include/asm/topology.h          | 12
 arch/powerpc/platforms/cell/spu_priv1_mmio.c | 6
 arch/powerpc/platforms/cell/spufs/sched.c    | 4
 arch/s390/include/asm/topology.h             | 2
 arch/s390/kernel/topology.c                  | 5
 arch/sh/include/asm/topology.h               | 1
 arch/sparc/include/asm/topology_64.h         | 13
 arch/sparc/kernel/of_device_64.c             | 2
 arch/sparc/kernel/pci_msi.c                  | 2
 arch/x86/include/asm/es7000/apic.h           | 32
 arch/x86/include/asm/lguest.h                | 2
 arch/x86/include/asm/numaq/apic.h            | 4
 arch/x86/include/asm/pci.h                   | 10
 arch/x86/include/asm/summit/apic.h           | 42
 arch/x86/include/asm/topology.h              | 36
 arch/x86/kernel/acpi/boot.c                  | 31
 arch/x86/kernel/apic.c                       | 4
 arch/x86/kernel/cpu/common.c                 | 2
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c   | 28
 arch/x86/kernel/cpu/cpufreq/powernow-k7.c    | 9
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c    | 24
 arch/x86/kernel/cpu/intel_cacheinfo.c        | 2
 arch/x86/kernel/cpuid.c                      | 2
 arch/x86/kernel/io_apic.c                    | 6
 arch/x86/kernel/msr.c                        | 2
 arch/x86/kernel/reboot.c                     | 4
 arch/x86/kernel/setup_percpu.c               | 33
 arch/x86/kernel/smpboot.c                    | 15
 arch/x86/mach-voyager/voyager_smp.c          | 7
 block/blk.h                                  | 4
 drivers/acpi/processor_core.c                | 14
 drivers/acpi/processor_perflib.c             | 28
 drivers/acpi/processor_throttling.c          | 80
 drivers/base/cpu.c                           | 44
 drivers/infiniband/hw/ehca/ehca_irq.c        | 17
 drivers/infiniband/hw/ipath/ipath_file_ops.c | 8
 drivers/pnp/pnpbios/bioscalls.c              | 2
 fs/seq_file.c                                | 3
 include/acpi/processor.h                     | 4
 include/asm-frv/bitops.h                     | 13
 include/asm-m32r/bitops.h                    | 1
 include/asm-m68k/bitops.h                    | 5
 include/asm-mn10300/bitops.h                 | 11
 include/asm-xtensa/bitops.h                  | 11
 include/linux/bitmap.h                       | 35
 include/linux/bitops.h                       | 13
 include/linux/cpumask.h                      | 221
 include/linux/interrupt.h                    | 2
 include/linux/rcuclassic.h                   | 4
 include/linux/seq_file.h                     | 7
 include/linux/smp.h                          | 18
 include/linux/stop_machine.h                 | 6
 include/linux/threads.h                      | 16
 include/linux/tick.h                         | 4
 init/main.c                                  | 13
 kernel/compat.c                              | 49
 kernel/cpu.c                                 | 144
 kernel/irq/manage.c                          | 11
 kernel/irq/proc.c                            | 34
 kernel/kexec.c                               | 2
 kernel/power/poweroff.c                      | 2
 kernel/profile.c                             | 38
 kernel/rcuclassic.c                          | 32
 kernel/rcupreempt.c                          | 19
 kernel/rcutorture.c                          | 27
 kernel/sched.c                               | 53
 kernel/sched_rt.c                            | 3
 kernel/smp.c                                 | 145
 kernel/softirq.c                             | 2
 kernel/softlockup.c                          | 10
 kernel/stop_machine.c                        | 8
 kernel/taskstats.c                           | 39
 kernel/time/clocksource.c                    | 9
 kernel/time/tick-broadcast.c                 | 115
 kernel/time/tick-common.c                    | 6
 kernel/trace/ring_buffer.c                   | 42
 kernel/trace/trace.c                         | 72
 kernel/trace/trace.h                         | 2
 kernel/trace/trace_boot.c                    | 2
 kernel/trace/trace_functions_graph.c         | 2
 kernel/trace/trace_hw_branches.c             | 6
 kernel/trace/trace_power.c                   | 2
 kernel/trace/trace_sysprof.c                 | 13
 kernel/workqueue.c                           | 26
 lib/Kconfig                                  | 8
 lib/Makefile                                 | 1
 lib/cpumask.c                                | 62
 lib/find_last_bit.c                          | 45
 mm/pdflush.c                                 | 16
 mm/slab.c                                    | 2
 mm/slub.c                                    | 20
 mm/vmscan.c                                  | 4
 mm/vmstat.c                                  | 4
 security/selinux/selinuxfs.c                 | 2
 112 files changed, 1285 insertions(+), 878 deletions(-)
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index bd699da24666..45932ec21cee 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -31,3 +31,51 @@ not defined by include/asm-XXX/topology.h:
 2) core_id: 0
 3) thread_siblings: just the given CPU
 4) core_siblings: just the given CPU
+
+Additionally, cpu topology information is provided under
+/sys/devices/system/cpu and includes these files.  The internal
+source for the output is in brackets ("[]").
+
+    kernel_max: the maximum cpu index allowed by the kernel configuration.
+                [NR_CPUS-1]
+
+    offline:    cpus that are not online because they have been
+                HOTPLUGGED off (see cpu-hotplug.txt) or exceed the limit
+                of cpus allowed by the kernel configuration (kernel_max
+                above). [~cpu_online_mask + cpus >= NR_CPUS]
+
+    online:     cpus that are online and being scheduled [cpu_online_mask]
+
+    possible:   cpus that have been allocated resources and can be
+                brought online if they are present. [cpu_possible_mask]
+
+    present:    cpus that have been identified as being present in the
+                system. [cpu_present_mask]
+
+The format for the above output is compatible with cpulist_parse()
+[see <linux/cpumask.h>].  Some examples follow.
+
+In this example, there are 64 cpus in the system but cpus 32-63 exceed
+the kernel max which is limited to 0..31 by the NR_CPUS config option
+being 32.  Note also that cpus 2 and 4-31 are not online but could be
+brought online as they are both present and possible.
+
+     kernel_max: 31
+        offline: 2,4-31,32-63
+         online: 0-1,3
+       possible: 0-31
+        present: 0-31
+
+In this example, the NR_CPUS config option is 128, but the kernel was
+started with possible_cpus=144.  There are 4 cpus in the system and cpu2
+was manually taken offline (and is the only cpu that can be brought
+online.)
+
+     kernel_max: 127
+        offline: 2,4-127,128-143
+         online: 0-1,3
+       possible: 0-127
+        present: 0-3
+
+See cpu-hotplug.txt for the possible_cpus=NUM kernel start parameter
+as well as more information on the various cpumask's.
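
The cpulist strings above ("0-1,3", "2,4-31,32-63", ...) are plain
comma-separated decimal ranges.  As an illustration only, and not part of
this patch, here is a minimal userspace C sketch that expands such a
string; parse_cpulist() is a hypothetical helper, the kernel's own parser
being cpulist_parse() from <linux/cpumask.h>:

#include <stdio.h>
#include <stdlib.h>

/* Expand a cpulist string like "0-1,3" and print each cpu number.
 * Hypothetical illustration; this is not a kernel API. */
static int parse_cpulist(const char *s)
{
        while (*s) {
                char *end;
                long first = strtol(s, &end, 10);
                long last = first;

                if (end == s)
                        return -1;              /* not a number */
                if (*end == '-')                /* a "lo-hi" range */
                        last = strtol(end + 1, &end, 10);
                if (last < first)
                        return -1;
                for (long cpu = first; cpu <= last; cpu++)
                        printf("cpu %ld\n", cpu);
                if (*end != ',')                /* end of list */
                        break;
                s = end + 1;
        }
        return 0;
}

int main(void)
{
        return parse_cpulist("0-1,3") ? 1 : 0;
}
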
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 149532e162c4..b4f284c72ff3 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -39,7 +39,24 @@ static inline cpumask_t node_to_cpumask(int node)
 	return node_cpu_mask;
 }
 
+extern struct cpumask node_to_cpumask_map[];
+/* FIXME: This is dumb, recalculating every time.  But simple. */
+static const struct cpumask *cpumask_of_node(int node)
+{
+	int cpu;
+
+	cpumask_clear(&node_to_cpumask_map[node]);
+
+	for_each_online_cpu(cpu) {
+		if (cpu_to_node(cpu) == node)
+			cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+	}
+
+	return &node_to_cpumask_map[node];
+}
+
 #define pcibus_to_cpumask(bus)	(cpu_online_map)
+#define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 #endif /* !CONFIG_NUMA */
 # include <asm-generic/topology.h>
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index d0f1620007f7..703731accda6 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -50,7 +50,8 @@ int irq_select_affinity(unsigned int irq)
 	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
 		return 1;
 
-	while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
+	while (!cpu_possible(cpu) ||
+	       !cpumask_test_cpu(cpu, irq_default_affinity))
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index a449e999027c..02bee6983ce2 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -79,6 +79,11 @@ int alpha_l3_cacheshape;
 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
 #endif
 
+#ifdef CONFIG_NUMA
+struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_to_cpumask_map);
+#endif
+
 /* Which processor we booted from.  */
 int boot_cpuid;
 
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
index 1a50b69b1a19..f7dd5f71edf7 100644
--- a/arch/avr32/include/asm/bitops.h
+++ b/arch/avr32/include/asm/bitops.h
@@ -263,6 +263,11 @@ static inline int fls(unsigned long word)
 	return 32 - result;
 }
 
+static inline int __fls(unsigned long word)
+{
+	return fls(word) - 1;
+}
+
 unsigned long find_first_zero_bit(const unsigned long *addr,
 				  unsigned long size);
 unsigned long find_next_zero_bit(const unsigned long *addr,
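
For the architectures picking up __fls() here and below, the contract is
simply fls(word) - 1: the zero-indexed position of the most-significant
set bit, undefined for word == 0.  A self-contained reference sketch of
the semantics (illustrative only, not kernel code):

#include <assert.h>

/* fls(x): one-indexed position of the highest set bit, 0 when x == 0.
 * __fls(x): fls(x) - 1; the caller must guarantee x != 0. */
static int ref_fls(unsigned long x)
{
        int bit = 0;

        while (x) {
                x >>= 1;
                bit++;
        }
        return bit;
}

static int ref___fls(unsigned long x)
{
        return ref_fls(x) - 1;
}

int main(void)
{
        assert(ref_fls(0) == 0);
        assert(ref_fls(1) == 1);
        assert(ref___fls(0x80UL) == 7);
        return 0;
}
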
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index b39a175c79c1..c428e4106f89 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -213,6 +213,7 @@ static __inline__ int __test_bit(int nr, const void *addr)
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* _BLACKFIN_BITOPS_H */
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index c0e62f811e09..9e69cfb7f134 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -148,6 +148,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 #define ffs kernel_ffs
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/find.h>
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
index cb18e3b0aa94..cb9ddf5fc54f 100644
--- a/arch/h8300/include/asm/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
@@ -207,6 +207,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* _H8300_BITOPS_H */
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 3627116fb0e2..36429a532630 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -27,7 +27,7 @@ irq_canonicalize (int irq)
 }
 
 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
-bool is_affinity_mask_valid(cpumask_t cpumask);
+bool is_affinity_mask_valid(cpumask_var_t cpumask);
 
 #define is_affinity_mask_valid is_affinity_mask_valid
 
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index a3cc9f65f954..76a33a91ca69 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -34,6 +34,7 @@
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define node_to_cpumask(node) (node_to_cpu_mask[node])
+#define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
@@ -45,7 +46,7 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
  */
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
 
 /*
  * Determines the node for a given pci bus
@@ -109,6 +110,8 @@ void build_cpu_to_node_map(void);
 #define topology_core_id(cpu) (cpu_data(cpu)->core_id)
 #define topology_core_siblings(cpu) (cpu_core_map[cpu])
 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
 #define smt_capable() (smp_num_siblings > 1)
 #endif
 
@@ -119,6 +122,10 @@ extern void arch_fix_phys_package_id(int num, u32 slot);
 		node_to_cpumask(pcibus_to_node(bus)) \
 	)
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?	\
+				 cpu_all_mask :			\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bd7acc71e8a9..0553648b7595 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -202,7 +202,6 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
 	Boot-time Table Parsing
 	-------------------------------------------------------------------------- */
 
-static int total_cpus __initdata;
 static int available_cpus __initdata;
 struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
@@ -1001,7 +1000,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c8adecd5b416..5cfd3d91001a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 
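
The iosapic conversion above is the recurring pattern of this series:
instead of copying a node's cpumask_t onto the stack and masking the copy
in place, iterate the intersection of two masks directly.  A kernel-style
sketch of the two shapes (illustrative only; do_something() is a
hypothetical callee):

/* Old shape: NR_CPUS-sized stack copy, then mutate the copy. */
static void old_shape(int node, cpumask_t domain)
{
        int cpu;
        cpumask_t mask = node_to_cpumask(node);

        cpus_and(mask, mask, domain);
        for_each_cpu_mask(cpu, mask)
                do_something(cpu);
}

/* New shape: no copy, walk the intersection of the two masks. */
static void new_shape(int node, const cpumask_t *domain)
{
        int cpu;

        for_each_cpu_and(cpu, cpumask_of_node(node), domain)
                do_something(cpu);
}
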
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 0b6db53fedcf..95ff16cb05d8 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -112,11 +112,11 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 	}
 }
 
-bool is_affinity_mask_valid(cpumask_t cpumask)
+bool is_affinity_mask_valid(cpumask_var_t cpumask)
 {
 	if (ia64_platform_is("sn2")) {
 		/* Only allow one CPU to be specified in the smp_affinity mask */
-		if (cpus_weight(cpumask) != 1)
+		if (cpumask_weight(cpumask) != 1)
 			return false;
 	}
 	return true;
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 636588e7e068..be339477f906 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -385,7 +385,6 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	int j;
 	const char *slabname;
 	int ordinal;
-	cpumask_t cpumask;
 	char slice;
 	struct cpuinfo_ia64 *c;
 	struct sn_hwperf_port_info *ptdata;
@@ -473,23 +472,21 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	 * CPUs on this node, if any
 	 */
 	if (!SN_HWPERF_IS_IONODE(obj)) {
-		cpumask = node_to_cpumask(ordinal);
-		for_each_online_cpu(i) {
-			if (cpu_isset(i, cpumask)) {
-				slice = 'a' + cpuid_to_slice(i);
-				c = cpu_data(i);
-				seq_printf(s, "cpu %d %s%c local"
-					" freq %luMHz, arch ia64",
-					i, obj->location, slice,
-					c->proc_freq / 1000000);
-				for_each_online_cpu(j) {
-					seq_printf(s, j ? ":%d" : ", dist %d",
-						node_distance(
-							cpu_to_node(i),
-							cpu_to_node(j)));
-				}
-				seq_putc(s, '\n');
-			}
+		for_each_cpu_and(i, cpu_online_mask,
+				 cpumask_of_node(ordinal)) {
+			slice = 'a' + cpuid_to_slice(i);
+			c = cpu_data(i);
+			seq_printf(s, "cpu %d %s%c local"
+				   " freq %luMHz, arch ia64",
+				   i, obj->location, slice,
+				   c->proc_freq / 1000000);
+			for_each_online_cpu(j) {
+				seq_printf(s, j ? ":%d" : ", dist %d",
+					   node_distance(
+						cpu_to_node(i),
+						cpu_to_node(j)));
+			}
+			seq_putc(s, '\n');
 		}
 	}
 }
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 0f06b3722e96..2547d6c4a827 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -592,7 +592,7 @@ int setup_profiling_timer(unsigned int multiplier)
 	 * accounting. At that time they also adjust their APIC timers
 	 * accordingly.
 	 */
-	for (i = 0; i < NR_CPUS; ++i)
+	for_each_possible_cpu(i)
 		per_cpu(prof_multiplier, i) = multiplier;
 
 	return 0;
diff --git a/arch/m68knommu/include/asm/bitops.h b/arch/m68knommu/include/asm/bitops.h
index 6f3685eab44c..9d3cbe5fad1e 100644
--- a/arch/m68knommu/include/asm/bitops.h
+++ b/arch/m68knommu/include/asm/bitops.h
@@ -331,6 +331,7 @@ found_middle:
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* _M68KNOMMU_BITOPS_H */
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 1fb959f98982..55d481569a1f 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -25,11 +25,13 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
 #define cpu_to_node(cpu)	(sn_cpu_info[(cpu)].p_nodeid)
 #define parent_node(node)	(node)
 #define node_to_cpumask(node)	(hub_data(node)->h_cpus)
-#define node_to_first_cpu(node)	(first_cpu(node_to_cpumask(node)))
+#define cpumask_of_node(node)	(&hub_data(node)->h_cpus)
+#define node_to_first_cpu(node)	(cpumask_first(cpumask_of_node(node)))
 struct pci_bus;
 extern int pcibus_to_node(struct pci_bus *);
 
 #define pcibus_to_cpumask(bus)	(cpu_online_map)
+#define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 409e698f4361..6ef4b7867b1b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -16,8 +16,6 @@
 #include <linux/cpumask.h>
 typedef unsigned long address_t;
 
-extern cpumask_t cpu_online_map;
-
 
 /*
  * Private routines/data
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 373fca394a54..375258559ae6 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
 	return numa_cpumask_lookup_table[node];
 }
 
+#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
+
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(cpumask_of_node(node));
 }
 
 int of_node_to_nid(struct device_node *device);
@@ -46,6 +46,10 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 		node_to_cpumask(pcibus_to_node(bus)) \
 	)
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?	\
+				 cpu_all_mask :			\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
 /* sched_domains SD_NODE_INIT for PPC64 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.parent			= NULL,			\
@@ -108,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 
 #define topology_thread_siblings(cpu)	(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_siblings(cpu)	(per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
 #endif
 #endif
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 906a0a2a9fe1..1410443731eb 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -80,10 +80,10 @@ static void cpu_affinity_set(struct spu *spu, int cpu)
 	u64 route;
 
 	if (nr_cpus_node(spu->node)) {
-		cpumask_t spumask = node_to_cpumask(spu->node);
-		cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+		const struct cpumask *spumask = cpumask_of_node(spu->node),
+			*cpumask = cpumask_of_node(cpu_to_node(cpu));
 
-		if (!cpus_intersects(spumask, cpumask))
+		if (!cpumask_intersects(spumask, cpumask))
 			return;
 	}
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2ad914c47493..6a0ad196aeb3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -166,9 +166,9 @@ void spu_update_sched_info(struct spu_context *ctx)
 static int __node_allowed(struct spu_context *ctx, int node)
 {
 	if (nr_cpus_node(node)) {
-		cpumask_t mask = node_to_cpumask(node);
+		const struct cpumask *mask = cpumask_of_node(node);
 
-		if (cpus_intersects(mask, ctx->cpus_allowed))
+		if (cpumask_intersects(mask, &ctx->cpus_allowed))
 			return 1;
 	}
 
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index d96c91643458..c93eb50e1d09 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -6,10 +6,12 @@
 #define mc_capable() (1)
 
 cpumask_t cpu_coregroup_map(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
 #define topology_core_siblings(cpu) (cpu_core_map[cpu])
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
 
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 90e9ba11eba1..cc362c9ea8f1 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -97,6 +97,11 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
 	return mask;
 }
 
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
 static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 {
 	unsigned int cpu;
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 279d9cc4a007..066f0fba590e 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -32,6 +32,7 @@
 #define parent_node(node)	((void)(node),0)
 
 #define node_to_cpumask(node)	((void)node, cpu_online_map)
+#define cpumask_of_node(node)	((void)node, cpu_online_mask)
 #define node_to_first_cpu(node)	((void)(node),0)
 
 #define pcibus_to_node(bus)	((void)(bus), -1)
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 001c04027c82..b8a65b64e1df 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -16,8 +16,12 @@ static inline cpumask_t node_to_cpumask(int node)
 {
 	return numa_cpumask_lookup_table[node];
 }
+#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+/*
+ * Returns a pointer to the cpumask of CPUs on Node 'node'.
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
 		cpumask_t *v = &(numa_cpumask_lookup_table[node])
 
@@ -26,9 +30,7 @@ static inline cpumask_t node_to_cpumask(int node)
 
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(cpumask_of_node(node));
 }
 
 struct pci_bus;
@@ -77,10 +79,13 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
 #define topology_core_siblings(cpu)		(cpu_core_map[cpu])
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define mc_capable()				(sparc64_multi_core)
 #define smt_capable()				(sparc64_multi_core)
 #endif /* CONFIG_SMP */
 
 #define cpu_coregroup_map(cpu)			(cpu_core_map[cpu])
+#define cpu_coregroup_mask(cpu)			(&cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 322046cdf85f..4873f28905b0 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -778,7 +778,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
 out:
 	nid = of_node_to_nid(dp);
 	if (nid != -1) {
-		cpumask_t numa_mask = node_to_cpumask(nid);
+		cpumask_t numa_mask = *cpumask_of_node(nid);
 
 		irq_set_affinity(irq, &numa_mask);
 	}
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 0d0cd815e83e..4ef282e81912 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -286,7 +286,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
 
 	nid = pbm->numa_node;
 	if (nid != -1) {
-		cpumask_t numa_mask = node_to_cpumask(nid);
+		cpumask_t numa_mask = *cpumask_of_node(nid);
 
 		irq_set_affinity(irq, &numa_mask);
 	}
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index 51ac1230294e..bc53d5ef1386 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -157,7 +157,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 
 	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -190,7 +190,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return cpu_to_logical_apicid(0);
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -218,9 +218,6 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
 	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
@@ -229,31 +226,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return cpu_to_logical_apicid(0);
-			}
-			apicid = new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
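
Net effect of this cleanup (and of the matching summit variant further
down): cpu_mask_to_apicid_and() no longer duplicates the cluster
validation loop; it intersects the inputs on a temporary cpumask_var_t
and delegates to cpu_mask_to_apicid().  A hedged sketch of the resulting
shape, not verbatim from either header:

static unsigned int mask_and_to_apicid(const struct cpumask *inmask,
                                       const struct cpumask *andmask)
{
        int apicid = cpu_to_logical_apicid(0);  /* safe fallback */
        cpumask_var_t tmp;

        if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
                return apicid;

        cpumask_and(tmp, inmask, andmask);
        cpumask_and(tmp, tmp, cpu_online_mask);
        apicid = cpu_mask_to_apicid(tmp);       /* shared validation */

        free_cpumask_var(tmp);
        return apicid;
}
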
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index d28a507cef39..1caf57628b9c 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -15,7 +15,7 @@
 #define SHARED_SWITCHER_PAGES \
 	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
 /* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
 
 /* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index c80f00d29965..bf37bc49bd8e 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 }
 
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 66834c41c049..a977de23cb4d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -102,9 +102,9 @@ extern void pci_iommu_alloc(void);
 
 #ifdef CONFIG_NUMA
 /* Returns the node based on pci bus */
-static inline int __pcibus_to_node(struct pci_bus *bus)
+static inline int __pcibus_to_node(const struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
+	const struct pci_sysdata *sd = bus->sysdata;
 
 	return sd->node;
 }
@@ -113,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
 {
 	return node_to_cpumask(__pcibus_to_node(bus));
 }
+
+static inline const struct cpumask *
+cpumask_of_pcibus(const struct pci_bus *bus)
+{
+	return cpumask_of_node(__pcibus_to_node(bus));
+}
 #endif
 
 #endif /* _ASM_X86_PCI_H */
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 99327d1be49f..4bb5fb34f030 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -52,7 +52,7 @@ static inline void init_apic_ldr(void)
 	int i;
 
 	/* Create logical APIC IDs by counting CPUs already in cluster. */
-	for (count = 0, i = NR_CPUS; --i >= 0; ) {
+	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
 		lid = cpu_2_logical_apicid[i];
 		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
 			++count;
@@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid)
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
@@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -146,7 +146,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set >= nr_cpu_ids)
 		return (int) 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -173,42 +173,16 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
-	int apicid = 0xFF;
+	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return (int) 0xFF;
+		return apicid;
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == nr_cpu_ids)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return 0xFF;
-			}
-			apicid = apicid | new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 79e31e9dcdda..4e2f2e0aab27 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu)
  *
  * Side note: this function creates the returned cpumask on the stack
  * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
+ * cpumask_of_node function should be used whenever possible.
  */
 static inline cpumask_t node_to_cpumask(int node)
 {
 	return node_to_cpumask_map[node];
 }
 
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return &node_to_cpumask_map[node];
+}
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *_node_to_cpumask_ptr(int node);
+extern const cpumask_t *cpumask_of_node(int node);
 extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu)
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &node_to_cpumask_map[node];
 }
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node)	\
-			v = _node_to_cpumask_ptr(node)
+			v = cpumask_of_node(node)
 
 #endif /* CONFIG_X86_64 */
 
@@ -187,7 +196,7 @@ extern int __node_distance(int, int);
 #define cpu_to_node(cpu)	0
 #define early_cpu_to_node(cpu)	0
 
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &cpu_online_map;
 }
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node)	\
-			v = _node_to_cpumask_ptr(node)
+			v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
@@ -214,12 +226,12 @@ static inline int node_to_first_cpu(int node)
 /* Returns the number of the first CPU on Node 'node'. */
 static inline int node_to_first_cpu(int node)
 {
-	node_to_cpumask_ptr(mask, node);
-	return first_cpu(*mask);
+	return cpumask_first(cpumask_of_node(node));
 }
 #endif
 
 extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
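
The motivation for cpumask_of_node() over node_to_cpumask() is stack
usage: with NR_CPUS=4096 a cpumask_t is 512 bytes, so returning it by
value copies half a kilobyte per call, while the pointer form hands back
the address of the existing node_to_cpumask_map[] entry.  A small sketch
of the preferred call style (illustrative; count_node_cpus() is a
hypothetical caller):

static int count_node_cpus(int node)
{
        /* old: cpumask_t mask = node_to_cpumask(node);  full copy */
        const struct cpumask *mask = cpumask_of_node(node);  /* pointer only */

        return cpumask_weight(mask);
}
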
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 65d0b72777ea..29dc0c89d4af 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -538,9 +538,10 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_madt_local_apic *lapic;
-	cpumask_t tmp_map, new_map;
+	cpumask_var_t tmp_map, new_map;
 	u8 physid;
 	int cpu;
+	int retval = -ENOMEM;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
@@ -569,23 +570,37 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
 
-	tmp_map = cpu_present_map;
+	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
+		goto free_tmp_map;
+
+	cpumask_copy(tmp_map, cpu_present_mask);
 	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
 	 * number, then the following will get us exactly what was mapped
 	 */
-	cpus_andnot(new_map, cpu_present_map, tmp_map);
-	if (cpus_empty(new_map)) {
+	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
+	if (cpumask_empty(new_map)) {
 		printk ("Unable to map lapic to logical cpu number\n");
-		return -EINVAL;
+		retval = -EINVAL;
+		goto free_new_map;
 	}
 
-	cpu = first_cpu(new_map);
+	cpu = cpumask_first(new_map);
 
 	*pcpu = cpu;
-	return 0;
+	retval = 0;
+
+free_new_map:
+	free_cpumask_var(new_map);
+free_tmp_map:
+	free_cpumask_var(tmp_map);
+out:
+	return retval;
 }
 
 /* wrapper to silence section mismatch warning */
@@ -598,7 +613,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 	num_processors--;
 
 	return (0);
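
_acpi_map_lsapic() now takes two heap-allocated masks and unwinds them
with a goto ladder, the standard kernel idiom once cpumask_var_t replaces
on-stack cpumask_t.  The generic shape, as a sketch only
(snapshot_new_cpu() and the registration step are hypothetical):

static int snapshot_new_cpu(int *pcpu)
{
        cpumask_var_t before, added;
        int ret = -ENOMEM;

        if (!alloc_cpumask_var(&before, GFP_KERNEL))
                goto out;
        if (!alloc_cpumask_var(&added, GFP_KERNEL))
                goto free_before;

        cpumask_copy(before, cpu_present_mask);
        /* ...register the new cpu here; it appears in cpu_present_mask... */
        cpumask_andnot(added, cpu_present_mask, before);

        ret = cpumask_empty(added) ? -EINVAL : 0;
        if (!ret)
                *pcpu = cpumask_first(added);

        free_cpumask_var(added);
free_before:
        free_cpumask_var(before);
out:
        return ret;
}
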
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index d652515e2855..b13d3c4dbd42 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 		       struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const cpumask_t *mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const cpumask_t *mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 42e0853030cb..3f95a40f718a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
 	} else if (smp_num_siblings > 1) {
 
-		if (smp_num_siblings > NR_CPUS) {
+		if (smp_num_siblings > nr_cpu_ids) {
 			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
 			       smp_num_siblings);
 			smp_num_siblings = 1;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 88ea02dcb622..28102ad1a363 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 	}
 }
 
+static void free_acpi_perf_data(void)
+{
+	unsigned int i;
+
+	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+	for_each_possible_cpu(i)
+		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+				 ->shared_cpu_map);
+	free_percpu(acpi_perf_data);
+}
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
  */
 static int __init acpi_cpufreq_early_init(void)
 {
+	unsigned int i;
 	dprintk("acpi_cpufreq_early_init\n");
 
 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
@@ -534,6 +546,16 @@ static int __init acpi_cpufreq_early_init(void)
 		dprintk("Memory allocation error for acpi_perf_data.\n");
 		return -ENOMEM;
 	}
+	for_each_possible_cpu(i) {
+		if (!alloc_cpumask_var_node(
+			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
+			GFP_KERNEL, cpu_to_node(i))) {
+
+			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+			free_acpi_perf_data();
+			return -ENOMEM;
+		}
+	}
 
 	/* Do initialization in ACPI core */
 	acpi_processor_preregister_performance(acpi_perf_data);
@@ -604,9 +626,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		policy->cpus = perf->shared_cpu_map;
+		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
 	}
-	policy->related_cpus = perf->shared_cpu_map;
+	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
@@ -795,7 +817,7 @@ static int __init acpi_cpufreq_init(void)
 
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
-		free_percpu(acpi_perf_data);
+		free_acpi_perf_data();
 
 	return ret;
 }
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 7c7d56b43136..1b446d79a8fd 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
 		goto err0;
 	}
 
+	if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+			       GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto err05;
+	}
+
 	if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
 		retval = -EIO;
 		goto err1;
@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
 err2:
 	acpi_processor_unregister_performance(acpi_processor_perf, 0);
 err1:
+	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+err05:
 	kfree(acpi_processor_perf);
 err0:
 	printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
 		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
 #endif
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 7f05f44b97e9..c3c9adbaa26f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
766static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 766static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
767{ 767{
768 struct cpufreq_frequency_table *powernow_table; 768 struct cpufreq_frequency_table *powernow_table;
769 int ret_val; 769 int ret_val = -ENODEV;
770 770
771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
772 dprintk("register performance failed: bad ACPI data\n"); 772 dprintk("register performance failed: bad ACPI data\n");
@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
815 /* notify BIOS that we exist */ 815 /* notify BIOS that we exist */
816 acpi_processor_notify_smm(THIS_MODULE); 816 acpi_processor_notify_smm(THIS_MODULE);
817 817
818 if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
819 printk(KERN_ERR PFX
820 "unable to alloc powernow_k8_data cpumask\n");
821 ret_val = -ENOMEM;
822 goto err_out_mem;
823 }
824
818 return 0; 825 return 0;
819 826
820err_out_mem: 827err_out_mem:
@@ -826,7 +833,7 @@ err_out:
826 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 833 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
827 data->acpi_data.state_count = 0; 834 data->acpi_data.state_count = 0;
828 835
829 return -ENODEV; 836 return ret_val;
830} 837}
831 838
832static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) 839static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
929{ 936{
930 if (data->acpi_data.state_count) 937 if (data->acpi_data.state_count)
931 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 938 acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
939 free_cpumask_var(data->acpi_data.shared_cpu_map);
932} 940}
933 941
934#else 942#else
@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1134 data->cpu = pol->cpu; 1142 data->cpu = pol->cpu;
1135 data->currpstate = HW_PSTATE_INVALID; 1143 data->currpstate = HW_PSTATE_INVALID;
1136 1144
1137 if (powernow_k8_cpu_init_acpi(data)) { 1145 rc = powernow_k8_cpu_init_acpi(data);
1146 if (rc) {
1138 /* 1147 /*
1139 * Use the PSB BIOS structure. This is only available on 1148 * Use the PSB BIOS structure. This is only available on
1140 * an UP version, and is deprecated by AMD. 1149 * an UP version, and is deprecated by AMD.
@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1152 "ACPI maintainers and complain to your BIOS " 1161 "ACPI maintainers and complain to your BIOS "
1153 "vendor.\n"); 1162 "vendor.\n");
1154#endif 1163#endif
1155 kfree(data); 1164 goto err_out;
1156 return -ENODEV;
1157 } 1165 }
1158 if (pol->cpu != 0) { 1166 if (pol->cpu != 0) {
1159 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1167 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
1160 "CPU other than CPU0. Complain to your BIOS " 1168 "CPU other than CPU0. Complain to your BIOS "
1161 "vendor.\n"); 1169 "vendor.\n");
1162 kfree(data); 1170 goto err_out;
1163 return -ENODEV;
1164 } 1171 }
1165 rc = find_psb_table(data); 1172 rc = find_psb_table(data);
1166 if (rc) { 1173 if (rc) {
1167 kfree(data); 1174 goto err_out;
1168 return -ENODEV;
1169 } 1175 }
1170 } 1176 }
1171 1177
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c6ecda64f5f1..48533d77be78 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -534,7 +534,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
534 per_cpu(cpuid4_info, cpu) = NULL; 534 per_cpu(cpuid4_info, cpu) = NULL;
535} 535}
536 536
537static void get_cpu_leaves(void *_retval) 537static void __cpuinit get_cpu_leaves(void *_retval)
538{ 538{
539 int j, *retval = _retval, cpu = smp_processor_id(); 539 int j, *retval = _retval, cpu = smp_processor_id();
540 540
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 85d28d53f5d3..2ac1f0c2beb3 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -121,7 +121,7 @@ static int cpuid_open(struct inode *inode, struct file *file)
121 lock_kernel(); 121 lock_kernel();
122 122
123 cpu = iminor(file->f_path.dentry->d_inode); 123 cpu = iminor(file->f_path.dentry->d_inode);
124 if (cpu >= NR_CPUS || !cpu_online(cpu)) { 124 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
125 ret = -ENXIO; /* No such CPU */ 125 ret = -ENXIO; /* No such CPU */
126 goto out; 126 goto out;
127 } 127 }
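
NR_CPUS is the compile-time ceiling, while nr_cpu_ids is the number of CPU ids the booted system can actually use, so checking a device minor against nr_cpu_ids rejects ids for which no runtime per-CPU state exists. A sketch of the converted bounds check for a per-CPU char device:

	static int percpu_dev_open_check(struct inode *inode)
	{
		unsigned int cpu = iminor(inode);

		if (cpu >= nr_cpu_ids || !cpu_online(cpu))
			return -ENXIO;		/* no such CPU */
		return 0;
	}
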
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 69911722b9d3..3639442aa7a4 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -214,11 +214,11 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
214 214
215 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 215 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
216 if (cfg) { 216 if (cfg) {
217 /* FIXME: needs alloc_cpumask_var_node() */ 217 if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
218 if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
219 kfree(cfg); 218 kfree(cfg);
220 cfg = NULL; 219 cfg = NULL;
221 } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { 220 } else if (!alloc_cpumask_var_node(&cfg->old_domain,
221 GFP_ATOMIC, node)) {
222 free_cpumask_var(cfg->domain); 222 free_cpumask_var(cfg->domain);
223 kfree(cfg); 223 kfree(cfg);
224 cfg = NULL; 224 cfg = NULL;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 82a7c7ed6d45..726266695b2c 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file)
136 lock_kernel(); 136 lock_kernel();
137 cpu = iminor(file->f_path.dentry->d_inode); 137 cpu = iminor(file->f_path.dentry->d_inode);
138 138
139 if (cpu >= NR_CPUS || !cpu_online(cpu)) { 139 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
140 ret = -ENXIO; /* No such CPU */ 140 ret = -ENXIO; /* No such CPU */
141 goto out; 141 goto out;
142 } 142 }
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index bf088c61fa40..2b46eb41643b 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -501,7 +501,7 @@ void native_machine_shutdown(void)
501 501
502#ifdef CONFIG_X86_32 502#ifdef CONFIG_X86_32
503 /* See if a command line override has been given */ 503 /* See if a command line override has been given */
504 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && 504 if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
505 cpu_online(reboot_cpu)) 505 cpu_online(reboot_cpu))
506 reboot_cpu_id = reboot_cpu; 506 reboot_cpu_id = reboot_cpu;
507#endif 507#endif
@@ -511,7 +511,7 @@ void native_machine_shutdown(void)
511 reboot_cpu_id = smp_processor_id(); 511 reboot_cpu_id = smp_processor_id();
512 512
513 /* Make certain I only run on the appropriate processor */ 513 /* Make certain I only run on the appropriate processor */
514 set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); 514 set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
515 515
516 /* O.K Now that I'm on the appropriate processor, 516 /* O.K Now that I'm on the appropriate processor,
517 * stop all of the others. 517 * stop all of the others.
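
cpumask_of(cpu) yields a const struct cpumask * that points into the shared cpu_bit_bitmap table, so unlike the old cpumask_of_cpu() it never materialises an NR_CPUS-bit mask on the stack. Pinning a task then becomes, in sketch form:

	/* Pin current to one CPU; no temporary mask is built. */
	static int pin_to_cpu(int cpu)
	{
		return set_cpus_allowed_ptr(current, cpumask_of(cpu));
	}
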
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0b63b08e7530..a4b619c33106 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -153,12 +153,10 @@ void __init setup_per_cpu_areas(void)
153 align = max_t(unsigned long, PAGE_SIZE, align); 153 align = max_t(unsigned long, PAGE_SIZE, align);
154 size = roundup(old_size, align); 154 size = roundup(old_size, align);
155 155
156 printk(KERN_INFO 156 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
157 "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
158 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 157 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
159 158
160 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 159 pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
161 size);
162 160
163 for_each_possible_cpu(cpu) { 161 for_each_possible_cpu(cpu) {
164#ifndef CONFIG_NEED_MULTIPLE_NODES 162#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -169,22 +167,15 @@ void __init setup_per_cpu_areas(void)
169 if (!node_online(node) || !NODE_DATA(node)) { 167 if (!node_online(node) || !NODE_DATA(node)) {
170 ptr = __alloc_bootmem(size, align, 168 ptr = __alloc_bootmem(size, align,
171 __pa(MAX_DMA_ADDRESS)); 169 __pa(MAX_DMA_ADDRESS));
172 printk(KERN_INFO 170 pr_info("cpu %d has no node %d or node-local memory\n",
173 "cpu %d has no node %d or node-local memory\n",
174 cpu, node); 171 cpu, node);
175 if (ptr) 172 pr_debug("per cpu data for cpu%d at %016lx\n",
176 printk(KERN_DEBUG 173 cpu, __pa(ptr));
177 "per cpu data for cpu%d at %016lx\n", 174 } else {
178 cpu, __pa(ptr));
179 }
180 else {
181 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 175 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
182 __pa(MAX_DMA_ADDRESS)); 176 __pa(MAX_DMA_ADDRESS));
183 if (ptr) 177 pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
184 printk(KERN_DEBUG 178 cpu, node, __pa(ptr));
185 "per cpu data for cpu%d on node%d "
186 "at %016lx\n",
187 cpu, node, __pa(ptr));
188 } 179 }
189#endif 180#endif
190 per_cpu_offset(cpu) = ptr - __per_cpu_start; 181 per_cpu_offset(cpu) = ptr - __per_cpu_start;
@@ -339,25 +330,25 @@ static const cpumask_t cpu_mask_none;
339/* 330/*
340 * Returns a pointer to the bitmask of CPUs on Node 'node'. 331 * Returns a pointer to the bitmask of CPUs on Node 'node'.
341 */ 332 */
342const cpumask_t *_node_to_cpumask_ptr(int node) 333const cpumask_t *cpumask_of_node(int node)
343{ 334{
344 if (node_to_cpumask_map == NULL) { 335 if (node_to_cpumask_map == NULL) {
345 printk(KERN_WARNING 336 printk(KERN_WARNING
346 "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", 337 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
347 node); 338 node);
348 dump_stack(); 339 dump_stack();
349 return (const cpumask_t *)&cpu_online_map; 340 return (const cpumask_t *)&cpu_online_map;
350 } 341 }
351 if (node >= nr_node_ids) { 342 if (node >= nr_node_ids) {
352 printk(KERN_WARNING 343 printk(KERN_WARNING
353 "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", 344 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
354 node, nr_node_ids); 345 node, nr_node_ids);
355 dump_stack(); 346 dump_stack();
356 return &cpu_mask_none; 347 return &cpu_mask_none;
357 } 348 }
358 return &node_to_cpumask_map[node]; 349 return &node_to_cpumask_map[node];
359} 350}
360EXPORT_SYMBOL(_node_to_cpumask_ptr); 351EXPORT_SYMBOL(cpumask_of_node);
361 352
362/* 353/*
363 * Returns a bitmask of CPUs on Node 'node'. 354 * Returns a bitmask of CPUs on Node 'node'.
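
The setup_percpu.c logging moves to pr_info()/pr_debug(), which are just printk(KERN_INFO ...) and printk(KERN_DEBUG ...) spelled compactly; the dropped "if (ptr)" guards are safe here because plain __alloc_bootmem() panics rather than returning NULL. A tiny sketch of the equivalence:

	#include <linux/kernel.h>

	static void report_percpu(int cpu, size_t size, unsigned long pa)
	{
		/* Same as printk(KERN_INFO ...) / printk(KERN_DEBUG ...). */
		pr_info("PERCPU: allocating %zd bytes of per cpu data\n", size);
		pr_debug("per cpu data for cpu%d at %016lx\n", cpu, pa);
	}
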
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 31869bf5fabd..6bd4d9b73870 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -496,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
496} 496}
497 497
498/* maps the cpu to the sched domain representing multi-core */ 498/* maps the cpu to the sched domain representing multi-core */
499cpumask_t cpu_coregroup_map(int cpu) 499const struct cpumask *cpu_coregroup_mask(int cpu)
500{ 500{
501 struct cpuinfo_x86 *c = &cpu_data(cpu); 501 struct cpuinfo_x86 *c = &cpu_data(cpu);
502 /* 502 /*
@@ -504,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu)
504 * And for power savings, we return cpu_core_map 504 * And for power savings, we return cpu_core_map
505 */ 505 */
506 if (sched_mc_power_savings || sched_smt_power_savings) 506 if (sched_mc_power_savings || sched_smt_power_savings)
507 return per_cpu(cpu_core_map, cpu); 507 return &per_cpu(cpu_core_map, cpu);
508 else 508 else
509 return c->llc_shared_map; 509 return &c->llc_shared_map;
510}
511
512cpumask_t cpu_coregroup_map(int cpu)
513{
514 return *cpu_coregroup_mask(cpu);
510} 515}
511 516
512static void impress_friends(void) 517static void impress_friends(void)
@@ -1149,7 +1154,7 @@ static void __init smp_cpu_index_default(void)
1149 for_each_possible_cpu(i) { 1154 for_each_possible_cpu(i) {
1150 c = &cpu_data(i); 1155 c = &cpu_data(i);
1151 /* mark all to hotplug */ 1156 /* mark all to hotplug */
1152 c->cpu_index = NR_CPUS; 1157 c->cpu_index = nr_cpu_ids;
1153 } 1158 }
1154} 1159}
1155 1160
@@ -1293,6 +1298,8 @@ __init void prefill_possible_map(void)
1293 else 1298 else
1294 possible = setup_possible_cpus; 1299 possible = setup_possible_cpus;
1295 1300
1301 total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1302
1296 if (possible > CONFIG_NR_CPUS) { 1303 if (possible > CONFIG_NR_CPUS) {
1297 printk(KERN_WARNING 1304 printk(KERN_WARNING
1298 "%d Processors exceeds NR_CPUS limit of %d\n", 1305 "%d Processors exceeds NR_CPUS limit of %d\n",
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index a5bc05492b1e..9840b7ec749a 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -357,9 +357,8 @@ void __init find_smp_config(void)
357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); 357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
358 358
359 /* initialize the CPU structures (moved from smp_boot_cpus) */ 359 /* initialize the CPU structures (moved from smp_boot_cpus) */
360 for (i = 0; i < NR_CPUS; i++) { 360 for (i = 0; i < nr_cpu_ids; i++)
361 cpu_irq_affinity[i] = ~0; 361 cpu_irq_affinity[i] = ~0;
362 }
363 cpu_online_map = cpumask_of_cpu(boot_cpu_id); 362 cpu_online_map = cpumask_of_cpu(boot_cpu_id);
364 363
365 /* The boot CPU must be extended */ 364 /* The boot CPU must be extended */
@@ -1227,7 +1226,7 @@ int setup_profiling_timer(unsigned int multiplier)
1227 * new values until the next timer interrupt in which they do process 1226 * new values until the next timer interrupt in which they do process
1228 * accounting. 1227 * accounting.
1229 */ 1228 */
1230 for (i = 0; i < NR_CPUS; ++i) 1229 for (i = 0; i < nr_cpu_ids; ++i)
1231 per_cpu(prof_multiplier, i) = multiplier; 1230 per_cpu(prof_multiplier, i) = multiplier;
1232 1231
1233 return 0; 1232 return 0;
@@ -1257,7 +1256,7 @@ void __init voyager_smp_intr_init(void)
1257 int i; 1256 int i;
1258 1257
1259 /* initialize the per cpu irq mask to all disabled */ 1258 /* initialize the per cpu irq mask to all disabled */
1260 for (i = 0; i < NR_CPUS; i++) 1259 for (i = 0; i < nr_cpu_ids; i++)
1261 vic_irq_mask[i] = 0xFFFF; 1260 vic_irq_mask[i] = 0xFFFF;
1262 1261
1263 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); 1262 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
diff --git a/block/blk.h b/block/blk.h
index d2e49af90db5..6e1ed40534e9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -99,8 +99,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
99static inline int blk_cpu_to_group(int cpu) 99static inline int blk_cpu_to_group(int cpu)
100{ 100{
101#ifdef CONFIG_SCHED_MC 101#ifdef CONFIG_SCHED_MC
102 cpumask_t mask = cpu_coregroup_map(cpu); 102 const struct cpumask *mask = cpu_coregroup_mask(cpu);
103 return first_cpu(mask); 103 return cpumask_first(mask);
104#elif defined(CONFIG_SCHED_SMT) 104#elif defined(CONFIG_SCHED_SMT)
105 return first_cpu(per_cpu(cpu_sibling_map, cpu)); 105 return first_cpu(per_cpu(cpu_sibling_map, cpu));
106#else 106#else
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 34948362f41d..0cc2fd31e376 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device)
826 if (!pr) 826 if (!pr)
827 return -ENOMEM; 827 return -ENOMEM;
828 828
829 if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
830 kfree(pr);
831 return -ENOMEM;
832 }
833
829 pr->handle = device->handle; 834 pr->handle = device->handle;
830 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 835 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
831 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); 836 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
@@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
845 850
846 pr = acpi_driver_data(device); 851 pr = acpi_driver_data(device);
847 852
848 if (pr->id >= nr_cpu_ids) { 853 if (pr->id >= nr_cpu_ids)
849 kfree(pr); 854 goto free;
850 return 0;
851 }
852 855
853 if (type == ACPI_BUS_REMOVAL_EJECT) { 856 if (type == ACPI_BUS_REMOVAL_EJECT) {
854 if (acpi_processor_handle_eject(pr)) 857 if (acpi_processor_handle_eject(pr))
@@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
873 876
874 per_cpu(processors, pr->id) = NULL; 877 per_cpu(processors, pr->id) = NULL;
875 per_cpu(processor_device_array, pr->id) = NULL; 878 per_cpu(processor_device_array, pr->id) = NULL;
879
880free:
881 free_cpumask_var(pr->throttling.shared_cpu_map);
876 kfree(pr); 882 kfree(pr);
877 883
878 return 0; 884 return 0;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0d7b772bef50..846e227592d4 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -588,12 +588,15 @@ int acpi_processor_preregister_performance(
588 int count, count_target; 588 int count, count_target;
589 int retval = 0; 589 int retval = 0;
590 unsigned int i, j; 590 unsigned int i, j;
591 cpumask_t covered_cpus; 591 cpumask_var_t covered_cpus;
592 struct acpi_processor *pr; 592 struct acpi_processor *pr;
593 struct acpi_psd_package *pdomain; 593 struct acpi_psd_package *pdomain;
594 struct acpi_processor *match_pr; 594 struct acpi_processor *match_pr;
595 struct acpi_psd_package *match_pdomain; 595 struct acpi_psd_package *match_pdomain;
596 596
597 if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
598 return -ENOMEM;
599
597 mutex_lock(&performance_mutex); 600 mutex_lock(&performance_mutex);
598 601
599 retval = 0; 602 retval = 0;
@@ -617,7 +620,7 @@ int acpi_processor_preregister_performance(
617 } 620 }
618 621
619 pr->performance = percpu_ptr(performance, i); 622 pr->performance = percpu_ptr(performance, i);
620 cpu_set(i, pr->performance->shared_cpu_map); 623 cpumask_set_cpu(i, pr->performance->shared_cpu_map);
621 if (acpi_processor_get_psd(pr)) { 624 if (acpi_processor_get_psd(pr)) {
622 retval = -EINVAL; 625 retval = -EINVAL;
623 continue; 626 continue;
@@ -650,18 +653,18 @@ int acpi_processor_preregister_performance(
650 } 653 }
651 } 654 }
652 655
653 cpus_clear(covered_cpus); 656 cpumask_clear(covered_cpus);
654 for_each_possible_cpu(i) { 657 for_each_possible_cpu(i) {
655 pr = per_cpu(processors, i); 658 pr = per_cpu(processors, i);
656 if (!pr) 659 if (!pr)
657 continue; 660 continue;
658 661
659 if (cpu_isset(i, covered_cpus)) 662 if (cpumask_test_cpu(i, covered_cpus))
660 continue; 663 continue;
661 664
662 pdomain = &(pr->performance->domain_info); 665 pdomain = &(pr->performance->domain_info);
663 cpu_set(i, pr->performance->shared_cpu_map); 666 cpumask_set_cpu(i, pr->performance->shared_cpu_map);
664 cpu_set(i, covered_cpus); 667 cpumask_set_cpu(i, covered_cpus);
665 if (pdomain->num_processors <= 1) 668 if (pdomain->num_processors <= 1)
666 continue; 669 continue;
667 670
@@ -699,8 +702,8 @@ int acpi_processor_preregister_performance(
699 goto err_ret; 702 goto err_ret;
700 } 703 }
701 704
702 cpu_set(j, covered_cpus); 705 cpumask_set_cpu(j, covered_cpus);
703 cpu_set(j, pr->performance->shared_cpu_map); 706 cpumask_set_cpu(j, pr->performance->shared_cpu_map);
704 count++; 707 count++;
705 } 708 }
706 709
@@ -718,8 +721,8 @@ int acpi_processor_preregister_performance(
718 721
719 match_pr->performance->shared_type = 722 match_pr->performance->shared_type =
720 pr->performance->shared_type; 723 pr->performance->shared_type;
721 match_pr->performance->shared_cpu_map = 724 cpumask_copy(match_pr->performance->shared_cpu_map,
722 pr->performance->shared_cpu_map; 725 pr->performance->shared_cpu_map);
723 } 726 }
724 } 727 }
725 728
@@ -731,14 +734,15 @@ err_ret:
731 734
732 /* Assume no coordination on any error parsing domain info */ 735 /* Assume no coordination on any error parsing domain info */
733 if (retval) { 736 if (retval) {
734 cpus_clear(pr->performance->shared_cpu_map); 737 cpumask_clear(pr->performance->shared_cpu_map);
735 cpu_set(i, pr->performance->shared_cpu_map); 738 cpumask_set_cpu(i, pr->performance->shared_cpu_map);
736 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 739 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
737 } 740 }
738 pr->performance = NULL; /* Will be set for real in register */ 741 pr->performance = NULL; /* Will be set for real in register */
739 } 742 }
740 743
741 mutex_unlock(&performance_mutex); 744 mutex_unlock(&performance_mutex);
745 free_cpumask_var(covered_cpus);
742 return retval; 746 return retval;
743} 747}
744EXPORT_SYMBOL(acpi_processor_preregister_performance); 748EXPORT_SYMBOL(acpi_processor_preregister_performance);
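
covered_cpus was a cpumask_t on the stack; as a cpumask_var_t it may live on the heap when CONFIG_CPUMASK_OFFSTACK=y, so the function now allocates it up front, can fail with -ENOMEM, and must free it on the way out. The skeleton of that conversion:

	static int coordinate_example(void)
	{
		cpumask_var_t covered_cpus;
		int retval = 0;
		unsigned int i;

		if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
			return -ENOMEM;

		cpumask_clear(covered_cpus);
		for_each_possible_cpu(i) {
			if (cpumask_test_cpu(i, covered_cpus))
				continue;
			cpumask_set_cpu(i, covered_cpus);
			/* per-domain coordination work would go here */
		}

		free_cpumask_var(covered_cpus);
		return retval;
	}
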
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a0c38c94a8a0..d27838171f4a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
61 int count, count_target; 61 int count, count_target;
62 int retval = 0; 62 int retval = 0;
63 unsigned int i, j; 63 unsigned int i, j;
64 cpumask_t covered_cpus; 64 cpumask_var_t covered_cpus;
65 struct acpi_processor *pr, *match_pr; 65 struct acpi_processor *pr, *match_pr;
66 struct acpi_tsd_package *pdomain, *match_pdomain; 66 struct acpi_tsd_package *pdomain, *match_pdomain;
67 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 67 struct acpi_processor_throttling *pthrottling, *match_pthrottling;
68 68
69 if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
70 return -ENOMEM;
71
69 /* 72 /*
70 * Now that we have _TSD data from all CPUs, lets setup T-state 73 * Now that we have _TSD data from all CPUs, lets setup T-state
71 * coordination between all CPUs. 74 * coordination between all CPUs.
@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
91 if (retval) 94 if (retval)
92 goto err_ret; 95 goto err_ret;
93 96
94 cpus_clear(covered_cpus); 97 cpumask_clear(covered_cpus);
95 for_each_possible_cpu(i) { 98 for_each_possible_cpu(i) {
96 pr = per_cpu(processors, i); 99 pr = per_cpu(processors, i);
97 if (!pr) 100 if (!pr)
98 continue; 101 continue;
99 102
100 if (cpu_isset(i, covered_cpus)) 103 if (cpumask_test_cpu(i, covered_cpus))
101 continue; 104 continue;
102 pthrottling = &pr->throttling; 105 pthrottling = &pr->throttling;
103 106
104 pdomain = &(pthrottling->domain_info); 107 pdomain = &(pthrottling->domain_info);
105 cpu_set(i, pthrottling->shared_cpu_map); 108 cpumask_set_cpu(i, pthrottling->shared_cpu_map);
106 cpu_set(i, covered_cpus); 109 cpumask_set_cpu(i, covered_cpus);
107 /* 110 /*
108 * If the number of processors in the TSD domain is 1, it is 111 * If the number of processors in the TSD domain is 1, it is
109 * unnecessary to parse the coordination for this CPU. 112 * unnecessary to parse the coordination for this CPU.
@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
144 goto err_ret; 147 goto err_ret;
145 } 148 }
146 149
147 cpu_set(j, covered_cpus); 150 cpumask_set_cpu(j, covered_cpus);
148 cpu_set(j, pthrottling->shared_cpu_map); 151 cpumask_set_cpu(j, pthrottling->shared_cpu_map);
149 count++; 152 count++;
150 } 153 }
151 for_each_possible_cpu(j) { 154 for_each_possible_cpu(j) {
@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
165 * If some CPUs have the same domain, they 168 * If some CPUs have the same domain, they
166 * will have the same shared_cpu_map. 169 * will have the same shared_cpu_map.
167 */ 170 */
168 match_pthrottling->shared_cpu_map = 171 cpumask_copy(match_pthrottling->shared_cpu_map,
169 pthrottling->shared_cpu_map; 172 pthrottling->shared_cpu_map);
170 } 173 }
171 } 174 }
172 175
173err_ret: 176err_ret:
177 free_cpumask_var(covered_cpus);
178
174 for_each_possible_cpu(i) { 179 for_each_possible_cpu(i) {
175 pr = per_cpu(processors, i); 180 pr = per_cpu(processors, i);
176 if (!pr) 181 if (!pr)
@@ -182,8 +187,8 @@ err_ret:
182 */ 187 */
183 if (retval) { 188 if (retval) {
184 pthrottling = &(pr->throttling); 189 pthrottling = &(pr->throttling);
185 cpus_clear(pthrottling->shared_cpu_map); 190 cpumask_clear(pthrottling->shared_cpu_map);
186 cpu_set(i, pthrottling->shared_cpu_map); 191 cpumask_set_cpu(i, pthrottling->shared_cpu_map);
187 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 192 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
188 } 193 }
189 } 194 }
@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
567 pthrottling = &pr->throttling; 572 pthrottling = &pr->throttling;
568 pthrottling->tsd_valid_flag = 1; 573 pthrottling->tsd_valid_flag = 1;
569 pthrottling->shared_type = pdomain->coord_type; 574 pthrottling->shared_type = pdomain->coord_type;
570 cpu_set(pr->id, pthrottling->shared_cpu_map); 575 cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
571 /* 576 /*
572 * If the coordination type is not defined in ACPI spec, 577 * If the coordination type is not defined in ACPI spec,
573 * the tsd_valid_flag will be clear and coordination type 578 * the tsd_valid_flag will be clear and coordination type
@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
826 831
827static int acpi_processor_get_throttling(struct acpi_processor *pr) 832static int acpi_processor_get_throttling(struct acpi_processor *pr)
828{ 833{
829 cpumask_t saved_mask; 834 cpumask_var_t saved_mask;
830 int ret; 835 int ret;
831 836
832 if (!pr) 837 if (!pr)
@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
834 839
835 if (!pr->flags.throttling) 840 if (!pr->flags.throttling)
836 return -ENODEV; 841 return -ENODEV;
842
843 if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
844 return -ENOMEM;
845
837 /* 846 /*
838 * Migrate task to the cpu pointed by pr. 847 * Migrate task to the cpu pointed by pr.
839 */ 848 */
840 saved_mask = current->cpus_allowed; 849 cpumask_copy(saved_mask, &current->cpus_allowed);
841 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 850 /* FIXME: use work_on_cpu() */
851 set_cpus_allowed_ptr(current, cpumask_of(pr->id));
842 ret = pr->throttling.acpi_processor_get_throttling(pr); 852 ret = pr->throttling.acpi_processor_get_throttling(pr);
843 /* restore the previous state */ 853 /* restore the previous state */
844 set_cpus_allowed_ptr(current, &saved_mask); 854 set_cpus_allowed_ptr(current, saved_mask);
855 free_cpumask_var(saved_mask);
845 856
846 return ret; 857 return ret;
847} 858}
@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
986 997
987int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 998int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
988{ 999{
989 cpumask_t saved_mask; 1000 cpumask_var_t saved_mask;
990 int ret = 0; 1001 int ret = 0;
991 unsigned int i; 1002 unsigned int i;
992 struct acpi_processor *match_pr; 1003 struct acpi_processor *match_pr;
993 struct acpi_processor_throttling *p_throttling; 1004 struct acpi_processor_throttling *p_throttling;
994 struct throttling_tstate t_state; 1005 struct throttling_tstate t_state;
995 cpumask_t online_throttling_cpus; 1006 cpumask_var_t online_throttling_cpus;
996 1007
997 if (!pr) 1008 if (!pr)
998 return -EINVAL; 1009 return -EINVAL;
@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1003 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1014 if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1004 return -EINVAL; 1015 return -EINVAL;
1005 1016
1006 saved_mask = current->cpus_allowed; 1017 if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
1018 return -ENOMEM;
1019
1020 if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
1021 free_cpumask_var(saved_mask);
1022 return -ENOMEM;
1023 }
1024
1025 cpumask_copy(saved_mask, &current->cpus_allowed);
1007 t_state.target_state = state; 1026 t_state.target_state = state;
1008 p_throttling = &(pr->throttling); 1027 p_throttling = &(pr->throttling);
1009 cpus_and(online_throttling_cpus, cpu_online_map, 1028 cpumask_and(online_throttling_cpus, cpu_online_mask,
1010 p_throttling->shared_cpu_map); 1029 p_throttling->shared_cpu_map);
1011 /* 1030 /*
1012 * The throttling notifier will be called for every 1031 * The throttling notifier will be called for every
1013 * affected cpu in order to get one proper T-state. 1032 * affected cpu in order to get one proper T-state.
1014 * The notifier event is THROTTLING_PRECHANGE. 1033 * The notifier event is THROTTLING_PRECHANGE.
1015 */ 1034 */
1016 for_each_cpu_mask_nr(i, online_throttling_cpus) { 1035 for_each_cpu(i, online_throttling_cpus) {
1017 t_state.cpu = i; 1036 t_state.cpu = i;
1018 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1037 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1019 &t_state); 1038 &t_state);
@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1025 * it can be called only for the cpu pointed by pr. 1044 * it can be called only for the cpu pointed by pr.
1026 */ 1045 */
1027 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1046 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1028 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 1047 /* FIXME: use work_on_cpu() */
1048 set_cpus_allowed_ptr(current, cpumask_of(pr->id));
1029 ret = p_throttling->acpi_processor_set_throttling(pr, 1049 ret = p_throttling->acpi_processor_set_throttling(pr,
1030 t_state.target_state); 1050 t_state.target_state);
1031 } else { 1051 } else {
@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1034 * it is necessary to set T-state for every affected 1054 * it is necessary to set T-state for every affected
1035 * cpus. 1055 * cpus.
1036 */ 1056 */
1037 for_each_cpu_mask_nr(i, online_throttling_cpus) { 1057 for_each_cpu(i, online_throttling_cpus) {
1038 match_pr = per_cpu(processors, i); 1058 match_pr = per_cpu(processors, i);
1039 /* 1059 /*
1040 * If the pointer is invalid, we will report the 1060 * If the pointer is invalid, we will report the
@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1056 continue; 1076 continue;
1057 } 1077 }
1058 t_state.cpu = i; 1078 t_state.cpu = i;
1059 set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); 1079 /* FIXME: use work_on_cpu() */
1080 set_cpus_allowed_ptr(current, cpumask_of(i));
1060 ret = match_pr->throttling. 1081 ret = match_pr->throttling.
1061 acpi_processor_set_throttling( 1082 acpi_processor_set_throttling(
1062 match_pr, t_state.target_state); 1083 match_pr, t_state.target_state);
@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1068 * affected cpu to update the T-states. 1089 * affected cpu to update the T-states.
1069 * The notifier event is THROTTLING_POSTCHANGE 1090 * The notifier event is THROTTLING_POSTCHANGE
1070 */ 1091 */
1071 for_each_cpu_mask_nr(i, online_throttling_cpus) { 1092 for_each_cpu(i, online_throttling_cpus) {
1072 t_state.cpu = i; 1093 t_state.cpu = i;
1073 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1094 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1074 &t_state); 1095 &t_state);
1075 } 1096 }
1076 /* restore the previous state */ 1097 /* restore the previous state */
1077 set_cpus_allowed_ptr(current, &saved_mask); 1098 /* FIXME: use work_on_cpu() */
1099 set_cpus_allowed_ptr(current, saved_mask);
1100 free_cpumask_var(online_throttling_cpus);
1101 free_cpumask_var(saved_mask);
1078 return ret; 1102 return ret;
1079} 1103}
1080 1104
@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1120 if (acpi_processor_get_tsd(pr)) { 1144 if (acpi_processor_get_tsd(pr)) {
1121 pthrottling = &pr->throttling; 1145 pthrottling = &pr->throttling;
1122 pthrottling->tsd_valid_flag = 0; 1146 pthrottling->tsd_valid_flag = 0;
1123 cpu_set(pr->id, pthrottling->shared_cpu_map); 1147 cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
1124 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 1148 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
1125 } 1149 }
1126 1150
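
The throttling paths keep the save/migrate/restore-affinity idiom but move the temporary masks off the stack as well; each site is tagged with a work_on_cpu() FIXME because rebinding current is only a stopgap. The converted idiom, sketched with a hypothetical do_on_this_cpu():

	static int do_on_this_cpu(void) { return 0; }	/* stand-in */

	static int run_on_cpu_example(int cpu)
	{
		cpumask_var_t saved_mask;
		int ret;

		if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(saved_mask, &current->cpus_allowed);
		set_cpus_allowed_ptr(current, cpumask_of(cpu));
		ret = do_on_this_cpu();
		set_cpus_allowed_ptr(current, saved_mask);	/* restore */

		free_cpumask_var(saved_mask);
		return ret;
	}
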
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4259072f5bd0..719ee5c1c8d9 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -128,10 +128,54 @@ print_cpus_func(online);
128print_cpus_func(possible); 128print_cpus_func(possible);
129print_cpus_func(present); 129print_cpus_func(present);
130 130
131/*
132 * Print values for NR_CPUS and offlined cpus
133 */
134static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf)
135{
136 int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
137 return n;
138}
139static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
140
141/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
142unsigned int total_cpus;
143
144static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
145{
146 int n = 0, len = PAGE_SIZE-2;
147 cpumask_var_t offline;
148
149 /* display offline cpus < nr_cpu_ids */
150 if (!alloc_cpumask_var(&offline, GFP_KERNEL))
151 return -ENOMEM;
152 cpumask_complement(offline, cpu_online_mask);
153 n = cpulist_scnprintf(buf, len, offline);
154 free_cpumask_var(offline);
155
156 /* display offline cpus >= nr_cpu_ids */
157 if (total_cpus && nr_cpu_ids < total_cpus) {
158 if (n && n < len)
159 buf[n++] = ',';
160
161 if (nr_cpu_ids == total_cpus-1)
162 n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
163 else
164 n += snprintf(&buf[n], len - n, "%d-%d",
165 nr_cpu_ids, total_cpus-1);
166 }
167
168 n += snprintf(&buf[n], len - n, "\n");
169 return n;
170}
171static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
172
131static struct sysdev_class_attribute *cpu_state_attr[] = { 173static struct sysdev_class_attribute *cpu_state_attr[] = {
132 &attr_online_map, 174 &attr_online_map,
133 &attr_possible_map, 175 &attr_possible_map,
134 &attr_present_map, 176 &attr_present_map,
177 &attr_kernel_max,
178 &attr_offline,
135}; 179};
136 180
137static int cpu_states_init(void) 181static int cpu_states_init(void)
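
The new "offline" attribute prints the complement of the online mask in cpulist form, then appends a synthetic range for ids from nr_cpu_ids up to total_cpus-1 that the kernel never instantiated. The mask half of that reduces to roughly:

	static ssize_t show_offline_example(char *buf)
	{
		cpumask_var_t offline;
		ssize_t n;

		if (!alloc_cpumask_var(&offline, GFP_KERNEL))
			return -ENOMEM;

		cpumask_complement(offline, cpu_online_mask);
		n = cpulist_scnprintf(buf, PAGE_SIZE - 2, offline); /* e.g. "1,3-5" */
		free_cpumask_var(offline);

		buf[n++] = '\n';
		return n;
	}
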
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 757035ea246f..3128a5090dbd 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -659,12 +659,12 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
659 659
660 WARN_ON_ONCE(!in_interrupt()); 660 WARN_ON_ONCE(!in_interrupt());
661 if (ehca_debug_level >= 3) 661 if (ehca_debug_level >= 3)
662 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 662 ehca_dmp(cpu_online_mask, cpumask_size(), "");
663 663
664 spin_lock_irqsave(&pool->last_cpu_lock, flags); 664 spin_lock_irqsave(&pool->last_cpu_lock, flags);
665 cpu = next_cpu_nr(pool->last_cpu, cpu_online_map); 665 cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
666 if (cpu >= nr_cpu_ids) 666 if (cpu >= nr_cpu_ids)
667 cpu = first_cpu(cpu_online_map); 667 cpu = cpumask_first(cpu_online_mask);
668 pool->last_cpu = cpu; 668 pool->last_cpu = cpu;
669 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 669 spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
670 670
@@ -855,7 +855,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
855 case CPU_UP_CANCELED_FROZEN: 855 case CPU_UP_CANCELED_FROZEN:
856 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); 856 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
857 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 857 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
858 kthread_bind(cct->task, any_online_cpu(cpu_online_map)); 858 kthread_bind(cct->task, cpumask_any(cpu_online_mask));
859 destroy_comp_task(pool, cpu); 859 destroy_comp_task(pool, cpu);
860 break; 860 break;
861 case CPU_ONLINE: 861 case CPU_ONLINE:
@@ -902,7 +902,7 @@ int ehca_create_comp_pool(void)
902 return -ENOMEM; 902 return -ENOMEM;
903 903
904 spin_lock_init(&pool->last_cpu_lock); 904 spin_lock_init(&pool->last_cpu_lock);
905 pool->last_cpu = any_online_cpu(cpu_online_map); 905 pool->last_cpu = cpumask_any(cpu_online_mask);
906 906
907 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task); 907 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
908 if (pool->cpu_comp_tasks == NULL) { 908 if (pool->cpu_comp_tasks == NULL) {
@@ -934,10 +934,9 @@ void ehca_destroy_comp_pool(void)
934 934
935 unregister_hotcpu_notifier(&comp_pool_callback_nb); 935 unregister_hotcpu_notifier(&comp_pool_callback_nb);
936 936
937 for (i = 0; i < NR_CPUS; i++) { 937 for_each_online_cpu(i)
938 if (cpu_online(i)) 938 destroy_comp_task(pool, i);
939 destroy_comp_task(pool, i); 939
940 }
941 free_percpu(pool->cpu_comp_tasks); 940 free_percpu(pool->cpu_comp_tasks);
942 kfree(pool); 941 kfree(pool);
943} 942}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 239d4e8068ac..23173982b32c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1679,7 +1679,7 @@ static int find_best_unit(struct file *fp,
1679 * InfiniPath chip to that processor (we assume reasonable connectivity, 1679 * InfiniPath chip to that processor (we assume reasonable connectivity,
1680 * for now). This code assumes that if affinity has been set 1680 * for now). This code assumes that if affinity has been set
1681 * before this point, that at most one cpu is set; for now this 1681 * before this point, that at most one cpu is set; for now this
1682 * is reasonable. I check for both cpus_empty() and cpus_full(), 1682 * is reasonable. I check for both cpumask_empty() and cpumask_full(),
1683 * in case some kernel variant sets none of the bits when no 1683 * in case some kernel variant sets none of the bits when no
1684 * affinity is set. 2.6.11 and 12 kernels have all present 1684 * affinity is set. 2.6.11 and 12 kernels have all present
1685 * cpus set. Some day we'll have to fix it up further to handle 1685 * cpus set. Some day we'll have to fix it up further to handle
@@ -1688,11 +1688,11 @@ static int find_best_unit(struct file *fp,
1688 * information. There may be some issues with dual core numbering 1688 * information. There may be some issues with dual core numbering
1689 * as well. This needs more work prior to release. 1689 * as well. This needs more work prior to release.
1690 */ 1690 */
1691 if (!cpus_empty(current->cpus_allowed) && 1691 if (!cpumask_empty(&current->cpus_allowed) &&
1692 !cpus_full(current->cpus_allowed)) { 1692 !cpumask_full(&current->cpus_allowed)) {
1693 int ncpus = num_online_cpus(), curcpu = -1, nset = 0; 1693 int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
1694 for (i = 0; i < ncpus; i++) 1694 for (i = 0; i < ncpus; i++)
1695 if (cpu_isset(i, current->cpus_allowed)) { 1695 if (cpumask_test_cpu(i, &current->cpus_allowed)) {
1696 ipath_cdbg(PROC, "%s[%u] affinity set for " 1696 ipath_cdbg(PROC, "%s[%u] affinity set for "
1697 "cpu %d/%d\n", current->comm, 1697 "cpu %d/%d\n", current->comm,
1698 current->pid, i, ncpus); 1698 current->pid, i, ncpus);
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 7ff824496b39..7e6b5a3b3281 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -481,7 +481,7 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
481 481
482 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); 482 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
483 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); 483 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
484 for (i = 0; i < NR_CPUS; i++) { 484 for_each_possible_cpu(i) {
485 struct desc_struct *gdt = get_cpu_gdt_table(i); 485 struct desc_struct *gdt = get_cpu_gdt_table(i);
486 if (!gdt) 486 if (!gdt)
487 continue; 487 continue;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 99d8b8cfc9b7..b569ff1c4dc8 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -468,7 +468,8 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
468 return -1; 468 return -1;
469} 469}
470 470
471int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits) 471int seq_bitmap(struct seq_file *m, const unsigned long *bits,
472 unsigned int nr_bits)
472{ 473{
473 if (m->count < m->size) { 474 if (m->count < m->size) {
474 int len = bitmap_scnprintf(m->buf + m->count, 475 int len = bitmap_scnprintf(m->buf + m->count,
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3795590e152a..0574add2a1e3 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -127,7 +127,7 @@ struct acpi_processor_performance {
127 unsigned int state_count; 127 unsigned int state_count;
128 struct acpi_processor_px *states; 128 struct acpi_processor_px *states;
129 struct acpi_psd_package domain_info; 129 struct acpi_psd_package domain_info;
130 cpumask_t shared_cpu_map; 130 cpumask_var_t shared_cpu_map;
131 unsigned int shared_type; 131 unsigned int shared_type;
132}; 132};
133 133
@@ -172,7 +172,7 @@ struct acpi_processor_throttling {
172 unsigned int state_count; 172 unsigned int state_count;
173 struct acpi_processor_tx_tss *states_tss; 173 struct acpi_processor_tx_tss *states_tss;
174 struct acpi_tsd_package domain_info; 174 struct acpi_tsd_package domain_info;
175 cpumask_t shared_cpu_map; 175 cpumask_var_t shared_cpu_map;
176 int (*acpi_processor_get_throttling) (struct acpi_processor * pr); 176 int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
177 int (*acpi_processor_set_throttling) (struct acpi_processor * pr, 177 int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
178 int state); 178 int state);
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index 39456ba0ec17..287f6f697ce2 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -339,6 +339,19 @@ int __ffs(unsigned long x)
339 return 31 - bit; 339 return 31 - bit;
340} 340}
341 341
342/**
343 * __fls - find last (most-significant) set bit in a long word
344 * @word: the word to search
345 *
346 * Undefined if no set bit exists, so code should check against 0 first.
347 */
348static inline unsigned long __fls(unsigned long word)
349{
350 unsigned long bit;
351 asm("scan %1,gr0,%0" : "=r"(bit) : "r"(word));
352 return bit;
353}
354
342/* 355/*
343 * special slimline version of fls() for calculating ilog2_u32() 356 * special slimline version of fls() for calculating ilog2_u32()
344 * - note: no protection against n == 0 357 * - note: no protection against n == 0
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
index 6dc9b81bf9f3..aaddf0d57603 100644
--- a/include/asm-m32r/bitops.h
+++ b/include/asm-m32r/bitops.h
@@ -251,6 +251,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
251#include <asm-generic/bitops/ffz.h> 251#include <asm-generic/bitops/ffz.h>
252#include <asm-generic/bitops/__ffs.h> 252#include <asm-generic/bitops/__ffs.h>
253#include <asm-generic/bitops/fls.h> 253#include <asm-generic/bitops/fls.h>
254#include <asm-generic/bitops/__fls.h>
254#include <asm-generic/bitops/fls64.h> 255#include <asm-generic/bitops/fls64.h>
255 256
256#ifdef __KERNEL__ 257#ifdef __KERNEL__
diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h
index 3e8106442d5a..9bde784e7bad 100644
--- a/include/asm-m68k/bitops.h
+++ b/include/asm-m68k/bitops.h
@@ -315,6 +315,11 @@ static inline int fls(int x)
315 return 32 - cnt; 315 return 32 - cnt;
316} 316}
317 317
318static inline int __fls(int x)
319{
320 return fls(x) - 1;
321}
322
318#include <asm-generic/bitops/fls64.h> 323#include <asm-generic/bitops/fls64.h>
319#include <asm-generic/bitops/sched.h> 324#include <asm-generic/bitops/sched.h>
320#include <asm-generic/bitops/hweight.h> 325#include <asm-generic/bitops/hweight.h>
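
__fls() gives the bit index of the most-significant set bit and, like __ffs(), is undefined for zero input; architectures with a scan or count-leading-zeros instruction compute it directly, and m68k above simply derives it as fls(x) - 1. A portable reference version for comparison, with WORD_BITS standing in for BITS_PER_LONG:

	#define WORD_BITS (8 * sizeof(unsigned long))

	/* Index of the most-significant set bit; undefined for word == 0,
	 * matching the __fls() contract. __fls_ref(0x90UL) == 7. */
	static inline unsigned long __fls_ref(unsigned long word)
	{
		unsigned long bit = WORD_BITS - 1;

		while (!(word & (1UL << bit)))
			bit--;
		return bit;
	}
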
diff --git a/include/asm-mn10300/bitops.h b/include/asm-mn10300/bitops.h
index cc6d40c05cf3..0b610f482abb 100644
--- a/include/asm-mn10300/bitops.h
+++ b/include/asm-mn10300/bitops.h
@@ -196,6 +196,17 @@ int fls(int x)
196} 196}
197 197
198/** 198/**
199 * __fls - find last (most-significant) set bit in a long word
200 * @word: the word to search
201 *
202 * Undefined if no set bit exists, so code should check against 0 first.
203 */
204static inline unsigned long __fls(unsigned long word)
205{
206 return __ilog2_u32(word);
207}
208
209/**
199 * ffs - find first bit set 210 * ffs - find first bit set
200 * @x: the word to search 211 * @x: the word to search
201 * 212 *
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index 23261e8f2e5a..6c3930397bd3 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -82,6 +82,16 @@ static inline int fls (unsigned int x)
82 return 32 - __cntlz(x); 82 return 32 - __cntlz(x);
83} 83}
84 84
85/**
86 * __fls - find last (most-significant) set bit in a long word
87 * @word: the word to search
88 *
89 * Undefined if no set bit exists, so code should check against 0 first.
90 */
91static inline unsigned long __fls(unsigned long word)
92{
93 return 31 - __cntlz(word);
94}
85#else 95#else
86 96
87/* Use the generic implementation if we don't have the nsa/nsau instructions. */ 97/* Use the generic implementation if we don't have the nsa/nsau instructions. */
@@ -90,6 +100,7 @@ static inline int fls (unsigned int x)
90# include <asm-generic/bitops/__ffs.h> 100# include <asm-generic/bitops/__ffs.h>
91# include <asm-generic/bitops/ffz.h> 101# include <asm-generic/bitops/ffz.h>
92# include <asm-generic/bitops/fls.h> 102# include <asm-generic/bitops/fls.h>
103# include <asm-generic/bitops/__fls.h>
93 104
94#endif 105#endif
95 106
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index a08c33a26ca9..2878811c6134 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -137,9 +137,12 @@ extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
137 (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ 137 (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
138) 138)
139 139
140#define small_const_nbits(nbits) \
141 (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
142
140static inline void bitmap_zero(unsigned long *dst, int nbits) 143static inline void bitmap_zero(unsigned long *dst, int nbits)
141{ 144{
142 if (nbits <= BITS_PER_LONG) 145 if (small_const_nbits(nbits))
143 *dst = 0UL; 146 *dst = 0UL;
144 else { 147 else {
145 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 148 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
@@ -150,7 +153,7 @@ static inline void bitmap_zero(unsigned long *dst, int nbits)
150static inline void bitmap_fill(unsigned long *dst, int nbits) 153static inline void bitmap_fill(unsigned long *dst, int nbits)
151{ 154{
152 size_t nlongs = BITS_TO_LONGS(nbits); 155 size_t nlongs = BITS_TO_LONGS(nbits);
153 if (nlongs > 1) { 156 if (!small_const_nbits(nbits)) {
154 int len = (nlongs - 1) * sizeof(unsigned long); 157 int len = (nlongs - 1) * sizeof(unsigned long);
155 memset(dst, 0xff, len); 158 memset(dst, 0xff, len);
156 } 159 }
@@ -160,7 +163,7 @@ static inline void bitmap_fill(unsigned long *dst, int nbits)
160static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, 163static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
161 int nbits) 164 int nbits)
162{ 165{
163 if (nbits <= BITS_PER_LONG) 166 if (small_const_nbits(nbits))
164 *dst = *src; 167 *dst = *src;
165 else { 168 else {
166 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 169 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
@@ -171,7 +174,7 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
171static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, 174static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
172 const unsigned long *src2, int nbits) 175 const unsigned long *src2, int nbits)
173{ 176{
174 if (nbits <= BITS_PER_LONG) 177 if (small_const_nbits(nbits))
175 *dst = *src1 & *src2; 178 *dst = *src1 & *src2;
176 else 179 else
177 __bitmap_and(dst, src1, src2, nbits); 180 __bitmap_and(dst, src1, src2, nbits);
@@ -180,7 +183,7 @@ static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
180static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, 183static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
181 const unsigned long *src2, int nbits) 184 const unsigned long *src2, int nbits)
182{ 185{
183 if (nbits <= BITS_PER_LONG) 186 if (small_const_nbits(nbits))
184 *dst = *src1 | *src2; 187 *dst = *src1 | *src2;
185 else 188 else
186 __bitmap_or(dst, src1, src2, nbits); 189 __bitmap_or(dst, src1, src2, nbits);
@@ -189,7 +192,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
189static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, 192static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
190 const unsigned long *src2, int nbits) 193 const unsigned long *src2, int nbits)
191{ 194{
192 if (nbits <= BITS_PER_LONG) 195 if (small_const_nbits(nbits))
193 *dst = *src1 ^ *src2; 196 *dst = *src1 ^ *src2;
194 else 197 else
195 __bitmap_xor(dst, src1, src2, nbits); 198 __bitmap_xor(dst, src1, src2, nbits);
@@ -198,7 +201,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
198static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, 201static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
199 const unsigned long *src2, int nbits) 202 const unsigned long *src2, int nbits)
200{ 203{
201 if (nbits <= BITS_PER_LONG) 204 if (small_const_nbits(nbits))
202 *dst = *src1 & ~(*src2); 205 *dst = *src1 & ~(*src2);
203 else 206 else
204 __bitmap_andnot(dst, src1, src2, nbits); 207 __bitmap_andnot(dst, src1, src2, nbits);
@@ -207,7 +210,7 @@ static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
207static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, 210static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
208 int nbits) 211 int nbits)
209{ 212{
210 if (nbits <= BITS_PER_LONG) 213 if (small_const_nbits(nbits))
211 *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); 214 *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
212 else 215 else
213 __bitmap_complement(dst, src, nbits); 216 __bitmap_complement(dst, src, nbits);
@@ -216,7 +219,7 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
216static inline int bitmap_equal(const unsigned long *src1, 219static inline int bitmap_equal(const unsigned long *src1,
217 const unsigned long *src2, int nbits) 220 const unsigned long *src2, int nbits)
218{ 221{
219 if (nbits <= BITS_PER_LONG) 222 if (small_const_nbits(nbits))
220 return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); 223 return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
221 else 224 else
222 return __bitmap_equal(src1, src2, nbits); 225 return __bitmap_equal(src1, src2, nbits);
@@ -225,7 +228,7 @@ static inline int bitmap_equal(const unsigned long *src1,
225static inline int bitmap_intersects(const unsigned long *src1, 228static inline int bitmap_intersects(const unsigned long *src1,
226 const unsigned long *src2, int nbits) 229 const unsigned long *src2, int nbits)
227{ 230{
228 if (nbits <= BITS_PER_LONG) 231 if (small_const_nbits(nbits))
229 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; 232 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
230 else 233 else
231 return __bitmap_intersects(src1, src2, nbits); 234 return __bitmap_intersects(src1, src2, nbits);
@@ -234,7 +237,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
234static inline int bitmap_subset(const unsigned long *src1, 237static inline int bitmap_subset(const unsigned long *src1,
235 const unsigned long *src2, int nbits) 238 const unsigned long *src2, int nbits)
236{ 239{
237 if (nbits <= BITS_PER_LONG) 240 if (small_const_nbits(nbits))
238 return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); 241 return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
239 else 242 else
240 return __bitmap_subset(src1, src2, nbits); 243 return __bitmap_subset(src1, src2, nbits);
@@ -242,7 +245,7 @@ static inline int bitmap_subset(const unsigned long *src1,
242 245
243static inline int bitmap_empty(const unsigned long *src, int nbits) 246static inline int bitmap_empty(const unsigned long *src, int nbits)
244{ 247{
245 if (nbits <= BITS_PER_LONG) 248 if (small_const_nbits(nbits))
246 return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); 249 return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
247 else 250 else
248 return __bitmap_empty(src, nbits); 251 return __bitmap_empty(src, nbits);
@@ -250,7 +253,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
250 253
251static inline int bitmap_full(const unsigned long *src, int nbits) 254static inline int bitmap_full(const unsigned long *src, int nbits)
252{ 255{
253 if (nbits <= BITS_PER_LONG) 256 if (small_const_nbits(nbits))
254 return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); 257 return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
255 else 258 else
256 return __bitmap_full(src, nbits); 259 return __bitmap_full(src, nbits);
@@ -258,7 +261,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
258 261
259static inline int bitmap_weight(const unsigned long *src, int nbits) 262static inline int bitmap_weight(const unsigned long *src, int nbits)
260{ 263{
261 if (nbits <= BITS_PER_LONG) 264 if (small_const_nbits(nbits))
262 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); 265 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
263 return __bitmap_weight(src, nbits); 266 return __bitmap_weight(src, nbits);
264} 267}
@@ -266,7 +269,7 @@ static inline int bitmap_weight(const unsigned long *src, int nbits)
266static inline void bitmap_shift_right(unsigned long *dst, 269static inline void bitmap_shift_right(unsigned long *dst,
267 const unsigned long *src, int n, int nbits) 270 const unsigned long *src, int n, int nbits)
268{ 271{
269 if (nbits <= BITS_PER_LONG) 272 if (small_const_nbits(nbits))
270 *dst = *src >> n; 273 *dst = *src >> n;
271 else 274 else
272 __bitmap_shift_right(dst, src, n, nbits); 275 __bitmap_shift_right(dst, src, n, nbits);
@@ -275,7 +278,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
275static inline void bitmap_shift_left(unsigned long *dst, 278static inline void bitmap_shift_left(unsigned long *dst,
276 const unsigned long *src, int n, int nbits) 279 const unsigned long *src, int n, int nbits)
277{ 280{
278 if (nbits <= BITS_PER_LONG) 281 if (small_const_nbits(nbits))
279 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); 282 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
280 else 283 else
281 __bitmap_shift_left(dst, src, n, nbits); 284 __bitmap_shift_left(dst, src, n, nbits);
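
small_const_nbits() tightens the old "nbits <= BITS_PER_LONG" test: the single-word fast path is taken only when nbits is a compile-time constant, so masks whose size is the runtime variable nr_cpumask_bits (the CONFIG_CPUMASK_OFFSTACK case) always reach the full __bitmap_*() helpers. In miniature, assuming the usual definitions from <linux/bitops.h> and <linux/string.h>:

	#define small_const_nbits(nbits) \
		(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)

	static inline void bitmap_zero_example(unsigned long *dst, int nbits)
	{
		if (small_const_nbits(nbits))
			*dst = 0UL;	/* whole test folds away at compile time */
		else
			memset(dst, 0,
			       BITS_TO_LONGS(nbits) * sizeof(unsigned long));
	}
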
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 024f2b027244..61829139795a 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -134,9 +134,20 @@ extern unsigned long find_first_bit(const unsigned long *addr,
134 */ 134 */
135extern unsigned long find_first_zero_bit(const unsigned long *addr, 135extern unsigned long find_first_zero_bit(const unsigned long *addr,
136 unsigned long size); 136 unsigned long size);
137
138#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ 137#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
139 138
139#ifdef CONFIG_GENERIC_FIND_LAST_BIT
140/**
141 * find_last_bit - find the last set bit in a memory region
142 * @addr: The address to start the search at
143 * @size: The maximum size to search
144 *
145 * Returns the bit number of the last set bit, or size.
146 */
147extern unsigned long find_last_bit(const unsigned long *addr,
148 unsigned long size);
149#endif /* CONFIG_GENERIC_FIND_LAST_BIT */
150
140#ifdef CONFIG_GENERIC_FIND_NEXT_BIT 151#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
141 152
142/** 153/**
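
find_last_bit() complements find_first_bit(): it scans a multi-word bitmap from the top and returns size when no bit is set. A small usage sketch:

	static unsigned long highest_example(void)
	{
		DECLARE_BITMAP(map, 256);	/* four words on 64-bit */

		bitmap_zero(map, 256);
		__set_bit(42, map);
		__set_bit(170, map);

		/* Returns 170; an all-zero bitmap would return 256 (the size). */
		return find_last_bit(map, 256);
	}
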
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d4bf52603e6b..9f315382610b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -144,6 +144,7 @@
144typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 144typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
145extern cpumask_t _unused_cpumask_arg_; 145extern cpumask_t _unused_cpumask_arg_;
146 146
147#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
147#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 148#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
148static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 149static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
149{ 150{
@@ -267,6 +268,26 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
267{ 268{
268 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 269 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
269} 270}
271#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
272
273/**
274 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
275 * @bitmap: the bitmap
276 *
277 * There are a few places where cpumask_var_t isn't appropriate and
278 * static cpumasks must be used (eg. very early boot), yet we don't
279 * expose the definition of 'struct cpumask'.
280 *
281 * This does the conversion, and can be used as a constant initializer.
282 */
283#define to_cpumask(bitmap) \
284 ((struct cpumask *)(1 ? (bitmap) \
285 : (void *)sizeof(__check_is_bitmap(bitmap))))
286
287static inline int __check_is_bitmap(const unsigned long *bitmap)
288{
289 return 1;
290}
270 291
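The odd `1 ? ... : (void *)sizeof(...)` in to_cpumask() is a compile-time type check: the conditional always evaluates to `(bitmap)`, but the never-taken arm still passes the argument through __check_is_bitmap(), so anything that is not an `unsigned long *` fails to build, at zero run-time cost. A usage sketch with hypothetical names:

    /* A static mask usable before cpumask_var_t allocation is possible. */
    static DECLARE_BITMAP(early_bits, NR_CPUS);
    #define early_mask to_cpumask(early_bits)

    cpumask_set_cpu(0, early_mask);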
271/* 292/*
272 * Special-case data structure for "single bit set only" constant CPU masks. 293 * Special-case data structure for "single bit set only" constant CPU masks.
@@ -278,13 +299,14 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
278extern const unsigned long 299extern const unsigned long
279 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 300 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
280 301
281static inline const cpumask_t *get_cpu_mask(unsigned int cpu) 302static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
282{ 303{
283 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 304 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
284 p -= cpu / BITS_PER_LONG; 305 p -= cpu / BITS_PER_LONG;
285 return (const cpumask_t *)p; 306 return to_cpumask(p);
286} 307}
287 308
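The pointer arithmetic in get_cpu_mask() rewards a worked example. Assuming BITS_PER_LONG == 64 and NR_CPUS > 64:

    /* cpu == 70:
     *   p = cpu_bit_bitmap[1 + 70 % 64];  row whose first word is 1UL << 6
     *   p -= 70 / 64;                     back up one (zero) word
     * Read as NR_CPUS bits, *p is a zero word followed by the 1UL << 6
     * word, i.e. a mask with only bit 70 set - no per-cpu storage needed. */
    const struct cpumask *m = get_cpu_mask(70);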
309#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
288/* 310/*
289 * In cases where we take the address of the cpumask immediately, 311 * In cases where we take the address of the cpumask immediately,
290 * gcc optimizes it out (it's a constant) and there's no huge stack 312 * gcc optimizes it out (it's a constant) and there's no huge stack
@@ -370,19 +392,22 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
370{ 392{
371 bitmap_fold(dstp->bits, origp->bits, sz, nbits); 393 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
372} 394}
395#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
373 396
374#if NR_CPUS == 1 397#if NR_CPUS == 1
375 398
376#define nr_cpu_ids 1 399#define nr_cpu_ids 1
400#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
377#define first_cpu(src) ({ (void)(src); 0; }) 401#define first_cpu(src) ({ (void)(src); 0; })
378#define next_cpu(n, src) ({ (void)(src); 1; }) 402#define next_cpu(n, src) ({ (void)(src); 1; })
379#define any_online_cpu(mask) 0 403#define any_online_cpu(mask) 0
380#define for_each_cpu_mask(cpu, mask) \ 404#define for_each_cpu_mask(cpu, mask) \
381 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 405 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
382 406#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
383#else /* NR_CPUS > 1 */ 407#else /* NR_CPUS > 1 */
384 408
385extern int nr_cpu_ids; 409extern int nr_cpu_ids;
410#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
386int __first_cpu(const cpumask_t *srcp); 411int __first_cpu(const cpumask_t *srcp);
387int __next_cpu(int n, const cpumask_t *srcp); 412int __next_cpu(int n, const cpumask_t *srcp);
388int __any_online_cpu(const cpumask_t *mask); 413int __any_online_cpu(const cpumask_t *mask);
@@ -394,8 +419,10 @@ int __any_online_cpu(const cpumask_t *mask);
394 for ((cpu) = -1; \ 419 for ((cpu) = -1; \
395 (cpu) = next_cpu((cpu), (mask)), \ 420 (cpu) = next_cpu((cpu), (mask)), \
396 (cpu) < NR_CPUS; ) 421 (cpu) < NR_CPUS; )
422#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
397#endif 423#endif
398 424
425#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
399#if NR_CPUS <= 64 426#if NR_CPUS <= 64
400 427
401#define next_cpu_nr(n, src) next_cpu(n, src) 428#define next_cpu_nr(n, src) next_cpu(n, src)
@@ -413,77 +440,67 @@ int __next_cpu_nr(int n, const cpumask_t *srcp);
413 (cpu) < nr_cpu_ids; ) 440 (cpu) < nr_cpu_ids; )
414 441
415#endif /* NR_CPUS > 64 */ 442#endif /* NR_CPUS > 64 */
443#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
416 444
417/* 445/*
418 * The following particular system cpumasks and operations manage 446 * The following particular system cpumasks and operations manage
419 * possible, present, active and online cpus. Each of them is a fixed size 447 * possible, present, active and online cpus.
420 * bitmap of size NR_CPUS.
421 * 448 *
422 * #ifdef CONFIG_HOTPLUG_CPU 449 * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
423 * cpu_possible_map - has bit 'cpu' set iff cpu is populatable 450 * cpu_present_mask - has bit 'cpu' set iff cpu is populated
424 * cpu_present_map - has bit 'cpu' set iff cpu is populated 451 * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
425 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler 452 * cpu_active_mask - has bit 'cpu' set iff cpu available to migration
426 * cpu_active_map - has bit 'cpu' set iff cpu available to migration
427 * #else
428 * cpu_possible_map - has bit 'cpu' set iff cpu is populated
429 * cpu_present_map - copy of cpu_possible_map
430 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
431 * #endif
432 * 453 *
433 * In either case, NR_CPUS is fixed at compile time, as the static 454 * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
434 * size of these bitmaps. The cpu_possible_map is fixed at boot
435 * time, as the set of CPU id's that it is possible might ever
436 * be plugged in at anytime during the life of that system boot.
437 * The cpu_present_map is dynamic(*), representing which CPUs
438 * are currently plugged in. And cpu_online_map is the dynamic
439 * subset of cpu_present_map, indicating those CPUs available
440 * for scheduling.
441 * 455 *
442 * If HOTPLUG is enabled, then cpu_possible_map is forced to have 456 * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
457 * that it is possible might ever be plugged in at anytime during the
458 * life of that system boot. The cpu_present_mask is dynamic(*),
459 * representing which CPUs are currently plugged in. And
460 * cpu_online_mask is the dynamic subset of cpu_present_mask,
461 * indicating those CPUs available for scheduling.
462 *
463 * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
443 * all NR_CPUS bits set, otherwise it is just the set of CPUs that 464 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
444 * ACPI reports present at boot. 465 * ACPI reports present at boot.
445 * 466 *
446 * If HOTPLUG is enabled, then cpu_present_map varies dynamically, 467 * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
447 * depending on what ACPI reports as currently plugged in, otherwise 468 * depending on what ACPI reports as currently plugged in, otherwise
448 * cpu_present_map is just a copy of cpu_possible_map. 469 * cpu_present_mask is just a copy of cpu_possible_mask.
449 * 470 *
450 * (*) Well, cpu_present_map is dynamic in the hotplug case. If not 471 * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
451 * hotplug, it's a copy of cpu_possible_map, hence fixed at boot. 472 * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
452 * 473 *
453 * Subtleties: 474 * Subtleties:
454 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode 475 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
455 * assumption that their single CPU is online. The UP 476 * assumption that their single CPU is online. The UP
456 * cpu_{online,possible,present}_maps are placebos. Changing them 477 * cpu_{online,possible,present}_masks are placebos. Changing them
457 * will have no useful effect on the following num_*_cpus() 478 * will have no useful effect on the following num_*_cpus()
458 * and cpu_*() macros in the UP case. This ugliness is a UP 479 * and cpu_*() macros in the UP case. This ugliness is a UP
459 * optimization - don't waste any instructions or memory references 480 * optimization - don't waste any instructions or memory references
460 * asking if you're online or how many CPUs there are if there is 481 * asking if you're online or how many CPUs there are if there is
461 * only one CPU. 482 * only one CPU.
462 * 2) Most SMP arch's #define some of these maps to be some
463 * other map specific to that arch. Therefore, the following
464 * must be #define macros, not inlines. To see why, examine
465 * the assembly code produced by the following. Note that
466 * set1() writes phys_x_map, but set2() writes x_map:
467 * int x_map, phys_x_map;
468 * #define set1(a) x_map = a
469 * inline void set2(int a) { x_map = a; }
470 * #define x_map phys_x_map
471 * main(){ set1(3); set2(5); }
472 */ 483 */
473 484
474extern cpumask_t cpu_possible_map; 485extern const struct cpumask *const cpu_possible_mask;
475extern cpumask_t cpu_online_map; 486extern const struct cpumask *const cpu_online_mask;
476extern cpumask_t cpu_present_map; 487extern const struct cpumask *const cpu_present_mask;
477extern cpumask_t cpu_active_map; 488extern const struct cpumask *const cpu_active_mask;
489
490/* These strip const, as traditionally they weren't const. */
491#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
492#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
493#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
494#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
478 495
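With the externs now const pointers, the old map names survive only as these const-stripping defines, so unconverted callers keep building while new code takes the read-only pointer. A sketch of the two styles (do_something() is a stand-in):

    cpumask_t snapshot = cpu_online_map;          /* old: NR_CPUS-bit copy */

    if (cpumask_test_cpu(cpu, cpu_online_mask))   /* new: no copy at all */
        do_something(cpu);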
479#if NR_CPUS > 1 496#if NR_CPUS > 1
480#define num_online_cpus() cpus_weight_nr(cpu_online_map) 497#define num_online_cpus() cpumask_weight(cpu_online_mask)
481#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) 498#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
482#define num_present_cpus() cpus_weight_nr(cpu_present_map) 499#define num_present_cpus() cpumask_weight(cpu_present_mask)
483#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) 500#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
484#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) 501#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
485#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) 502#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
486#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) 503#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
487#else 504#else
488#define num_online_cpus() 1 505#define num_online_cpus() 1
489#define num_possible_cpus() 1 506#define num_possible_cpus() 1
@@ -496,10 +513,6 @@ extern cpumask_t cpu_active_map;
496 513
497#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 514#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
498 515
499#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
500#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
501#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
502
503/* These are the new versions of the cpumask operators: passed by pointer. 516/* These are the new versions of the cpumask operators: passed by pointer.
504 * The older versions will be implemented in terms of these, then deleted. */ 517 * The older versions will be implemented in terms of these, then deleted. */
505#define cpumask_bits(maskp) ((maskp)->bits) 518#define cpumask_bits(maskp) ((maskp)->bits)
@@ -687,7 +700,7 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
687 * No static inline type checking - see Subtlety (1) above. 700 * No static inline type checking - see Subtlety (1) above.
688 */ 701 */
689#define cpumask_test_cpu(cpu, cpumask) \ 702#define cpumask_test_cpu(cpu, cpumask) \
690 test_bit(cpumask_check(cpu), (cpumask)->bits) 703 test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
691 704
692/** 705/**
693 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask 706 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -930,7 +943,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
930static inline int cpumask_scnprintf(char *buf, int len, 943static inline int cpumask_scnprintf(char *buf, int len,
931 const struct cpumask *srcp) 944 const struct cpumask *srcp)
932{ 945{
933 return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits); 946 return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
934} 947}
935 948
936/** 949/**
@@ -944,7 +957,7 @@ static inline int cpumask_scnprintf(char *buf, int len,
944static inline int cpumask_parse_user(const char __user *buf, int len, 957static inline int cpumask_parse_user(const char __user *buf, int len,
945 struct cpumask *dstp) 958 struct cpumask *dstp)
946{ 959{
947 return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits); 960 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
948} 961}
949 962
950/** 963/**
@@ -959,7 +972,8 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
959static inline int cpulist_scnprintf(char *buf, int len, 972static inline int cpulist_scnprintf(char *buf, int len,
960 const struct cpumask *srcp) 973 const struct cpumask *srcp)
961{ 974{
962 return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits); 975 return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
976 nr_cpumask_bits);
963} 977}
964 978
965/** 979/**
@@ -972,26 +986,7 @@ static inline int cpulist_scnprintf(char *buf, int len,
972 */ 986 */
973static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 987static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
974{ 988{
975 return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits); 989 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
976}
977
978/**
979 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
980 * @bitmap: the bitmap
981 *
982 * There are a few places where cpumask_var_t isn't appropriate and
983 * static cpumasks must be used (eg. very early boot), yet we don't
984 * expose the definition of 'struct cpumask'.
985 *
986 * This does the conversion, and can be used as a constant initializer.
987 */
988#define to_cpumask(bitmap) \
989 ((struct cpumask *)(1 ? (bitmap) \
990 : (void *)sizeof(__check_is_bitmap(bitmap))))
991
992static inline int __check_is_bitmap(const unsigned long *bitmap)
993{
994 return 1;
995} 990}
996 991
997/** 992/**
@@ -1025,6 +1020,7 @@ static inline size_t cpumask_size(void)
1025#ifdef CONFIG_CPUMASK_OFFSTACK 1020#ifdef CONFIG_CPUMASK_OFFSTACK
1026typedef struct cpumask *cpumask_var_t; 1021typedef struct cpumask *cpumask_var_t;
1027 1022
1023bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
1028bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 1024bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
1029void alloc_bootmem_cpumask_var(cpumask_var_t *mask); 1025void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
1030void free_cpumask_var(cpumask_var_t mask); 1026void free_cpumask_var(cpumask_var_t mask);
@@ -1038,6 +1034,12 @@ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
1038 return true; 1034 return true;
1039} 1035}
1040 1036
1037static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
1038 int node)
1039{
1040 return true;
1041}
1042
1041static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) 1043static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
1042{ 1044{
1043} 1045}
@@ -1051,12 +1053,6 @@ static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
1051} 1053}
1052#endif /* CONFIG_CPUMASK_OFFSTACK */ 1054#endif /* CONFIG_CPUMASK_OFFSTACK */
1053 1055
1054/* The pointer versions of the maps, these will become the primary versions. */
1055#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
1056#define cpu_online_mask ((const struct cpumask *)&cpu_online_map)
1057#define cpu_present_mask ((const struct cpumask *)&cpu_present_map)
1058#define cpu_active_mask ((const struct cpumask *)&cpu_active_map)
1059
1060/* It's common to want to use cpu_all_mask in struct member initializers, 1056/* It's common to want to use cpu_all_mask in struct member initializers,
1061 * so it has to refer to an address rather than a pointer. */ 1057 * so it has to refer to an address rather than a pointer. */
1062extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); 1058extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
@@ -1065,51 +1061,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
1065/* First bits of cpu_bit_bitmap are in fact unset. */ 1061/* First bits of cpu_bit_bitmap are in fact unset. */
1066#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) 1062#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
1067 1063
1068/* Wrappers for arch boot code to manipulate normally-constant masks */ 1064#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
1069static inline void set_cpu_possible(unsigned int cpu, bool possible) 1065#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
1070{ 1066#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
1071 if (possible)
1072 cpumask_set_cpu(cpu, &cpu_possible_map);
1073 else
1074 cpumask_clear_cpu(cpu, &cpu_possible_map);
1075}
1076
1077static inline void set_cpu_present(unsigned int cpu, bool present)
1078{
1079 if (present)
1080 cpumask_set_cpu(cpu, &cpu_present_map);
1081 else
1082 cpumask_clear_cpu(cpu, &cpu_present_map);
1083}
1084
1085static inline void set_cpu_online(unsigned int cpu, bool online)
1086{
1087 if (online)
1088 cpumask_set_cpu(cpu, &cpu_online_map);
1089 else
1090 cpumask_clear_cpu(cpu, &cpu_online_map);
1091}
1092 1067
1093static inline void set_cpu_active(unsigned int cpu, bool active) 1068/* Wrappers for arch boot code to manipulate normally-constant masks */
1094{ 1069void set_cpu_possible(unsigned int cpu, bool possible);
1095 if (active) 1070void set_cpu_present(unsigned int cpu, bool present);
1096 cpumask_set_cpu(cpu, &cpu_active_map); 1071void set_cpu_online(unsigned int cpu, bool online);
1097 else 1072void set_cpu_active(unsigned int cpu, bool active);
1098 cpumask_clear_cpu(cpu, &cpu_active_map); 1073void init_cpu_present(const struct cpumask *src);
1099} 1074void init_cpu_possible(const struct cpumask *src);
1100 1075void init_cpu_online(const struct cpumask *src);
1101static inline void init_cpu_present(const struct cpumask *src)
1102{
1103 cpumask_copy(&cpu_present_map, src);
1104}
1105
1106static inline void init_cpu_possible(const struct cpumask *src)
1107{
1108 cpumask_copy(&cpu_possible_map, src);
1109}
1110
1111static inline void init_cpu_online(const struct cpumask *src)
1112{
1113 cpumask_copy(&cpu_online_map, src);
1114}
1115#endif /* __LINUX_CPUMASK_H */ 1076#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 990355fbc54e..0702c4d7bdf0 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -109,7 +109,7 @@ extern void enable_irq(unsigned int irq);
109 109
110#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 110#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
111 111
112extern cpumask_t irq_default_affinity; 112extern cpumask_var_t irq_default_affinity;
113 113
114extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); 114extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
115extern int irq_can_set_affinity(unsigned int irq); 115extern int irq_can_set_affinity(unsigned int irq);
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 301dda829e37..f3f697df1d71 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -59,8 +59,8 @@ struct rcu_ctrlblk {
59 int signaled; 59 int signaled;
60 60
61 spinlock_t lock ____cacheline_internodealigned_in_smp; 61 spinlock_t lock ____cacheline_internodealigned_in_smp;
62 cpumask_t cpumask; /* CPUs that need to switch in order */ 62 DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
63 /* for current batch to proceed. */ 63 /* current batch to proceed. */
64} ____cacheline_internodealigned_in_smp; 64} ____cacheline_internodealigned_in_smp;
65 65
66/* Is batch a before batch b ? */ 66/* Is batch a before batch b ? */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index b3dfa72f13b9..40ea5058c2ec 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -50,10 +50,11 @@ int seq_path(struct seq_file *, struct path *, char *);
50int seq_dentry(struct seq_file *, struct dentry *, char *); 50int seq_dentry(struct seq_file *, struct dentry *, char *);
51int seq_path_root(struct seq_file *m, struct path *path, struct path *root, 51int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
52 char *esc); 52 char *esc);
53int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); 53int seq_bitmap(struct seq_file *m, const unsigned long *bits,
54static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) 54 unsigned int nr_bits);
55static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
55{ 56{
56 return seq_bitmap(m, mask->bits, NR_CPUS); 57 return seq_bitmap(m, mask->bits, nr_cpu_ids);
57} 58}
58 59
59static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) 60static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6e7ba16ff454..b82466968101 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -21,6 +21,9 @@ struct call_single_data {
21 u16 priv; 21 u16 priv;
22}; 22};
23 23
24/* total number of cpus in this system (may exceed NR_CPUS) */
25extern unsigned int total_cpus;
26
24#ifdef CONFIG_SMP 27#ifdef CONFIG_SMP
25 28
26#include <linux/preempt.h> 29#include <linux/preempt.h>
@@ -64,15 +67,16 @@ extern void smp_cpus_done(unsigned int max_cpus);
64 * Call a function on all other processors 67 * Call a function on all other processors
65 */ 68 */
66int smp_call_function(void(*func)(void *info), void *info, int wait); 69int smp_call_function(void(*func)(void *info), void *info, int wait);
67/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */ 70void smp_call_function_many(const struct cpumask *mask,
68int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 71 void (*func)(void *info), void *info, bool wait);
69 int wait);
70 72
71static inline void smp_call_function_many(const struct cpumask *mask, 73/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
72 void (*func)(void *info), void *info, 74static inline int
73 int wait) 75smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
76 int wait)
74{ 77{
75 smp_call_function_mask(*mask, func, info, wait); 78 smp_call_function_many(&mask, func, info, wait);
79 return 0;
76} 80}
77 81
78int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, 82int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
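The inversion here is the point: smp_call_function_many() becomes the real entry point and the by-value smp_call_function_mask() is demoted to a shim (note it also gains the int return its remaining callers expect). A calling sketch, with the callback name invented for illustration:

    static void drain_local(void *info)
    {
        /* runs on every other online cpu in the mask */
    }

    smp_call_function_many(cpu_online_mask, drain_local, NULL, true);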
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index faf1519b5adc..74d59a641362 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -23,7 +23,7 @@
23 * 23 *
24 * This can be thought of as a very heavy write lock, equivalent to 24 * This can be thought of as a very heavy write lock, equivalent to
25 * grabbing every spinlock in the kernel. */ 25 * grabbing every spinlock in the kernel. */
26int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); 26int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
27 27
28/** 28/**
29 * __stop_machine: freeze the machine on all CPUs and run this function 29 * __stop_machine: freeze the machine on all CPUs and run this function
@@ -34,11 +34,11 @@ int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
34 * Description: This is a special version of the above, which assumes cpus 34 * Description: This is a special version of the above, which assumes cpus
35 * won't come or go while it's being called. Used by hotplug cpu. 35 * won't come or go while it's being called. Used by hotplug cpu.
36 */ 36 */
37int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); 37int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
38#else 38#else
39 39
40static inline int stop_machine(int (*fn)(void *), void *data, 40static inline int stop_machine(int (*fn)(void *), void *data,
41 const cpumask_t *cpus) 41 const struct cpumask *cpus)
42{ 42{
43 int ret; 43 int ret;
44 local_irq_disable(); 44 local_irq_disable();
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 38d1a5d6568e..052b12bec8bd 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -8,17 +8,17 @@
8 */ 8 */
9 9
10/* 10/*
11 * Maximum supported processors that can run under SMP. This value is 11 * Maximum supported processors. Setting this smaller saves quite a
12 * set via configure setting. The maximum is equal to the size of the 12 * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps.
13 * bitmasks used on that platform, i.e. 32 or 64. Setting this smaller
14 * saves quite a bit of memory.
15 */ 13 */
16#ifdef CONFIG_SMP 14#ifndef CONFIG_NR_CPUS
17#define NR_CPUS CONFIG_NR_CPUS 15/* FIXME: This should be fixed in the arch's Kconfig */
18#else 16#define CONFIG_NR_CPUS 1
19#define NR_CPUS 1
20#endif 17#endif
21 18
19/* Places which use this should consider cpumask_var_t. */
20#define NR_CPUS CONFIG_NR_CPUS
21
22#define MIN_THREADS_LEFT_FOR_ROOT 4 22#define MIN_THREADS_LEFT_FOR_ROOT 4
23 23
24/* 24/*
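The "use nr_cpu_ids instead" advice matters for loops: NR_CPUS is now unconditionally CONFIG_NR_CPUS, which a distro kernel may set to hundreds or thousands of slots regardless of the hardware it boots on. A sketch of the preferred bound (inspect() is a placeholder):

    int cpu;

    /* nr_cpu_ids stops after the highest possible cpu id; a NR_CPUS bound
     * would walk every configured slot even on a 4-cpu box. */
    for (cpu = 0; cpu < nr_cpu_ids; cpu++)
        inspect(cpu);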
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b6ec8189ac0c..469b82d88b3b 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -84,10 +84,10 @@ static inline void tick_cancel_sched_timer(int cpu) { }
84 84
85# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 85# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
86extern struct tick_device *tick_get_broadcast_device(void); 86extern struct tick_device *tick_get_broadcast_device(void);
87extern cpumask_t *tick_get_broadcast_mask(void); 87extern struct cpumask *tick_get_broadcast_mask(void);
88 88
89# ifdef CONFIG_TICK_ONESHOT 89# ifdef CONFIG_TICK_ONESHOT
90extern cpumask_t *tick_get_broadcast_oneshot_mask(void); 90extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
91# endif 91# endif
92 92
93# endif /* BROADCAST */ 93# endif /* BROADCAST */
diff --git a/init/main.c b/init/main.c
index ad8f9f53f8d1..cd168ebc5924 100644
--- a/init/main.c
+++ b/init/main.c
@@ -371,12 +371,7 @@ EXPORT_SYMBOL(nr_cpu_ids);
371/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ 371/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
372static void __init setup_nr_cpu_ids(void) 372static void __init setup_nr_cpu_ids(void)
373{ 373{
374 int cpu, highest_cpu = 0; 374 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
375
376 for_each_possible_cpu(cpu)
377 highest_cpu = cpu;
378
379 nr_cpu_ids = highest_cpu + 1;
380} 375}
381 376
382#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 377#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
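The one-liner and the deleted loop agree on sparse masks too, since both compute "highest possible cpu id, plus one":

    /* cpu_possible_mask = {0, 1, 2, 5}:
     *   old loop:  highest_cpu ends at 5          -> nr_cpu_ids = 6
     *   new code:  find_last_bit(...) returns 5   -> nr_cpu_ids = 6
     * Holes in the mask still count toward nr_cpu_ids. */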
@@ -518,9 +513,9 @@ static void __init boot_cpu_init(void)
518{ 513{
519 int cpu = smp_processor_id(); 514 int cpu = smp_processor_id();
520 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 515 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
521 cpu_set(cpu, cpu_online_map); 516 set_cpu_online(cpu, true);
522 cpu_set(cpu, cpu_present_map); 517 set_cpu_present(cpu, true);
523 cpu_set(cpu, cpu_possible_map); 518 set_cpu_possible(cpu, true);
524} 519}
525 520
526void __init __weak smp_setup_processor_id(void) 521void __init __weak smp_setup_processor_id(void)
diff --git a/kernel/compat.c b/kernel/compat.c
index 8eafe3eb50d9..d52e2ec1deb5 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -454,16 +454,16 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
454} 454}
455 455
456static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, 456static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
457 unsigned len, cpumask_t *new_mask) 457 unsigned len, struct cpumask *new_mask)
458{ 458{
459 unsigned long *k; 459 unsigned long *k;
460 460
461 if (len < sizeof(cpumask_t)) 461 if (len < cpumask_size())
462 memset(new_mask, 0, sizeof(cpumask_t)); 462 memset(new_mask, 0, cpumask_size());
463 else if (len > sizeof(cpumask_t)) 463 else if (len > cpumask_size())
464 len = sizeof(cpumask_t); 464 len = cpumask_size();
465 465
466 k = cpus_addr(*new_mask); 466 k = cpumask_bits(new_mask);
467 return compat_get_bitmap(k, user_mask_ptr, len * 8); 467 return compat_get_bitmap(k, user_mask_ptr, len * 8);
468} 468}
469 469
@@ -471,40 +471,51 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
471 unsigned int len, 471 unsigned int len,
472 compat_ulong_t __user *user_mask_ptr) 472 compat_ulong_t __user *user_mask_ptr)
473{ 473{
474 cpumask_t new_mask; 474 cpumask_var_t new_mask;
475 int retval; 475 int retval;
476 476
477 retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask); 477 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
478 return -ENOMEM;
479
480 retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
478 if (retval) 481 if (retval)
479 return retval; 482 goto out;
480 483
481 return sched_setaffinity(pid, &new_mask); 484 retval = sched_setaffinity(pid, new_mask);
485out:
486 free_cpumask_var(new_mask);
487 return retval;
482} 488}
483 489
484asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, 490asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
485 compat_ulong_t __user *user_mask_ptr) 491 compat_ulong_t __user *user_mask_ptr)
486{ 492{
487 int ret; 493 int ret;
488 cpumask_t mask; 494 cpumask_var_t mask;
489 unsigned long *k; 495 unsigned long *k;
490 unsigned int min_length = sizeof(cpumask_t); 496 unsigned int min_length = cpumask_size();
491 497
492 if (NR_CPUS <= BITS_PER_COMPAT_LONG) 498 if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
493 min_length = sizeof(compat_ulong_t); 499 min_length = sizeof(compat_ulong_t);
494 500
495 if (len < min_length) 501 if (len < min_length)
496 return -EINVAL; 502 return -EINVAL;
497 503
498 ret = sched_getaffinity(pid, &mask); 504 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
505 return -ENOMEM;
506
507 ret = sched_getaffinity(pid, mask);
499 if (ret < 0) 508 if (ret < 0)
500 return ret; 509 goto out;
501 510
502 k = cpus_addr(mask); 511 k = cpumask_bits(mask);
503 ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); 512 ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
504 if (ret) 513 if (ret == 0)
505 return ret; 514 ret = min_length;
506 515
507 return min_length; 516out:
517 free_cpumask_var(mask);
518 return ret;
508} 519}
509 520
510int get_compat_itimerspec(struct itimerspec *dst, 521int get_compat_itimerspec(struct itimerspec *dst,
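Both compat syscalls now follow the same shape, which is the canonical cpumask_var_t pattern used throughout this series; distilled (do_work() is a placeholder):

    cpumask_var_t mask;
    int ret;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;
    ret = do_work(mask);
    free_cpumask_var(mask);
    return ret;

When CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a one-element array on the stack, alloc_cpumask_var() is a constant-true inline and free_cpumask_var() a no-op, so small configurations pay nothing for the indirection.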
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bae131a1211b..47fff3b63cbf 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,30 +15,8 @@
15#include <linux/stop_machine.h> 15#include <linux/stop_machine.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17 17
18/*
19 * Represents all cpu's present in the system
20 * In systems capable of hotplug, this map could dynamically grow
21 * as new cpu's are detected in the system via any platform specific
22 * method, such as ACPI for e.g.
23 */
24cpumask_t cpu_present_map __read_mostly;
25EXPORT_SYMBOL(cpu_present_map);
26
27/*
28 * Represents all cpu's that are currently online.
29 */
30cpumask_t cpu_online_map __read_mostly;
31EXPORT_SYMBOL(cpu_online_map);
32
33#ifdef CONFIG_INIT_ALL_POSSIBLE
34cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
35#else
36cpumask_t cpu_possible_map __read_mostly;
37#endif
38EXPORT_SYMBOL(cpu_possible_map);
39
40#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
41/* Serializes the updates to cpu_online_map, cpu_present_map */ 19/* Serializes the updates to cpu_online_mask, cpu_present_mask */
42static DEFINE_MUTEX(cpu_add_remove_lock); 20static DEFINE_MUTEX(cpu_add_remove_lock);
43 21
44static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); 22static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
@@ -65,8 +43,6 @@ void __init cpu_hotplug_init(void)
65 cpu_hotplug.refcount = 0; 43 cpu_hotplug.refcount = 0;
66} 44}
67 45
68cpumask_t cpu_active_map;
69
70#ifdef CONFIG_HOTPLUG_CPU 46#ifdef CONFIG_HOTPLUG_CPU
71 47
72void get_online_cpus(void) 48void get_online_cpus(void)
@@ -97,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
97 73
98/* 74/*
99 * The following two API's must be used when attempting 75 * The following two API's must be used when attempting
100 * to serialize the updates to cpu_online_map, cpu_present_map. 76 * to serialize the updates to cpu_online_mask, cpu_present_mask.
101 */ 77 */
102void cpu_maps_update_begin(void) 78void cpu_maps_update_begin(void)
103{ 79{
@@ -218,7 +194,7 @@ static int __ref take_cpu_down(void *_param)
218static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 194static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
219{ 195{
220 int err, nr_calls = 0; 196 int err, nr_calls = 0;
221 cpumask_t old_allowed, tmp; 197 cpumask_var_t old_allowed;
222 void *hcpu = (void *)(long)cpu; 198 void *hcpu = (void *)(long)cpu;
223 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 199 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
224 struct take_cpu_down_param tcd_param = { 200 struct take_cpu_down_param tcd_param = {
@@ -232,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
232 if (!cpu_online(cpu)) 208 if (!cpu_online(cpu))
233 return -EINVAL; 209 return -EINVAL;
234 210
211 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
212 return -ENOMEM;
213
235 cpu_hotplug_begin(); 214 cpu_hotplug_begin();
236 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 215 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
237 hcpu, -1, &nr_calls); 216 hcpu, -1, &nr_calls);
@@ -246,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
246 } 225 }
247 226
248 /* Ensure that we are not runnable on dying cpu */ 227 /* Ensure that we are not runnable on dying cpu */
249 old_allowed = current->cpus_allowed; 228 cpumask_copy(old_allowed, &current->cpus_allowed);
250 cpus_setall(tmp); 229 set_cpus_allowed_ptr(current,
251 cpu_clear(cpu, tmp); 230 cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
252 set_cpus_allowed_ptr(current, &tmp);
253 tmp = cpumask_of_cpu(cpu);
254 231
255 err = __stop_machine(take_cpu_down, &tcd_param, &tmp); 232 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
256 if (err) { 233 if (err) {
257 /* CPU didn't die: tell everyone. Can't complain. */ 234 /* CPU didn't die: tell everyone. Can't complain. */
258 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 235 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
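This rewrite drops two on-stack cpumask_t temporaries at once. It also slightly narrows the affinity: instead of "everywhere but the dying cpu", the task is moved to one surviving cpu, which is all that is needed here:

    /* before: cpus_setall(tmp); cpu_clear(cpu, tmp);   (stack temporary)
     * after:  cpumask_any_but(cpu_online_mask, cpu)    picks some other
     *         online cpu (>= nr_cpu_ids only if none exists), and
     *         cpumask_of() returns a constant single-bit mask, so no
     *         cpumask is built on the stack at all. */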
@@ -278,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
278 check_for_tasks(cpu); 255 check_for_tasks(cpu);
279 256
280out_allowed: 257out_allowed:
281 set_cpus_allowed_ptr(current, &old_allowed); 258 set_cpus_allowed_ptr(current, old_allowed);
282out_release: 259out_release:
283 cpu_hotplug_done(); 260 cpu_hotplug_done();
284 if (!err) { 261 if (!err) {
@@ -286,6 +263,7 @@ out_release:
286 hcpu) == NOTIFY_BAD) 263 hcpu) == NOTIFY_BAD)
287 BUG(); 264 BUG();
288 } 265 }
266 free_cpumask_var(old_allowed);
289 return err; 267 return err;
290} 268}
291 269
@@ -304,7 +282,7 @@ int __ref cpu_down(unsigned int cpu)
304 282
305 /* 283 /*
306 * Make sure all cpus did the reschedule and are not 284 * Make sure all cpus did the reschedule and are not
307 * using stale version of the cpu_active_map. 285 * using stale version of the cpu_active_mask.
308 * This is not strictly necessary because stop_machine() 286 * This is not strictly necessary because stop_machine()
309 * that we run down the line already provides the required 287 * that we run down the line already provides the required
310 * synchronization. But it's really a side effect and we do not 288 * synchronization. But it's really a side effect and we do not
@@ -368,7 +346,7 @@ out_notify:
368int __cpuinit cpu_up(unsigned int cpu) 346int __cpuinit cpu_up(unsigned int cpu)
369{ 347{
370 int err = 0; 348 int err = 0;
371 if (!cpu_isset(cpu, cpu_possible_map)) { 349 if (!cpu_possible(cpu)) {
372 printk(KERN_ERR "can't online cpu %d because it is not " 350 printk(KERN_ERR "can't online cpu %d because it is not "
373 "configured as may-hotadd at boot time\n", cpu); 351 "configured as may-hotadd at boot time\n", cpu);
374#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) 352#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
@@ -393,25 +371,25 @@ out:
393} 371}
394 372
395#ifdef CONFIG_PM_SLEEP_SMP 373#ifdef CONFIG_PM_SLEEP_SMP
396static cpumask_t frozen_cpus; 374static cpumask_var_t frozen_cpus;
397 375
398int disable_nonboot_cpus(void) 376int disable_nonboot_cpus(void)
399{ 377{
400 int cpu, first_cpu, error = 0; 378 int cpu, first_cpu, error = 0;
401 379
402 cpu_maps_update_begin(); 380 cpu_maps_update_begin();
403 first_cpu = first_cpu(cpu_online_map); 381 first_cpu = cpumask_first(cpu_online_mask);
404 /* We take down all of the non-boot CPUs in one shot to avoid races 382 /* We take down all of the non-boot CPUs in one shot to avoid races
405 * with the userspace trying to use the CPU hotplug at the same time 383 * with the userspace trying to use the CPU hotplug at the same time
406 */ 384 */
407 cpus_clear(frozen_cpus); 385 cpumask_clear(frozen_cpus);
408 printk("Disabling non-boot CPUs ...\n"); 386 printk("Disabling non-boot CPUs ...\n");
409 for_each_online_cpu(cpu) { 387 for_each_online_cpu(cpu) {
410 if (cpu == first_cpu) 388 if (cpu == first_cpu)
411 continue; 389 continue;
412 error = _cpu_down(cpu, 1); 390 error = _cpu_down(cpu, 1);
413 if (!error) { 391 if (!error) {
414 cpu_set(cpu, frozen_cpus); 392 cpumask_set_cpu(cpu, frozen_cpus);
415 printk("CPU%d is down\n", cpu); 393 printk("CPU%d is down\n", cpu);
416 } else { 394 } else {
417 printk(KERN_ERR "Error taking CPU%d down: %d\n", 395 printk(KERN_ERR "Error taking CPU%d down: %d\n",
@@ -437,11 +415,11 @@ void __ref enable_nonboot_cpus(void)
437 /* Allow everyone to use the CPU hotplug again */ 415 /* Allow everyone to use the CPU hotplug again */
438 cpu_maps_update_begin(); 416 cpu_maps_update_begin();
439 cpu_hotplug_disabled = 0; 417 cpu_hotplug_disabled = 0;
440 if (cpus_empty(frozen_cpus)) 418 if (cpumask_empty(frozen_cpus))
441 goto out; 419 goto out;
442 420
443 printk("Enabling non-boot CPUs ...\n"); 421 printk("Enabling non-boot CPUs ...\n");
444 for_each_cpu_mask_nr(cpu, frozen_cpus) { 422 for_each_cpu(cpu, frozen_cpus) {
445 error = _cpu_up(cpu, 1); 423 error = _cpu_up(cpu, 1);
446 if (!error) { 424 if (!error) {
447 printk("CPU%d is up\n", cpu); 425 printk("CPU%d is up\n", cpu);
@@ -449,10 +427,18 @@ void __ref enable_nonboot_cpus(void)
449 } 427 }
450 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 428 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
451 } 429 }
452 cpus_clear(frozen_cpus); 430 cpumask_clear(frozen_cpus);
453out: 431out:
454 cpu_maps_update_done(); 432 cpu_maps_update_done();
455} 433}
434
435static int alloc_frozen_cpus(void)
436{
437 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
438 return -ENOMEM;
439 return 0;
440}
441core_initcall(alloc_frozen_cpus);
456#endif /* CONFIG_PM_SLEEP_SMP */ 442#endif /* CONFIG_PM_SLEEP_SMP */
457 443
458/** 444/**
@@ -468,7 +454,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
468 unsigned long val = CPU_STARTING; 454 unsigned long val = CPU_STARTING;
469 455
470#ifdef CONFIG_PM_SLEEP_SMP 456#ifdef CONFIG_PM_SLEEP_SMP
471 if (cpu_isset(cpu, frozen_cpus)) 457 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
472 val = CPU_STARTING_FROZEN; 458 val = CPU_STARTING_FROZEN;
473#endif /* CONFIG_PM_SLEEP_SMP */ 459#endif /* CONFIG_PM_SLEEP_SMP */
474 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); 460 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
@@ -480,7 +466,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
480 * cpu_bit_bitmap[] is a special, "compressed" data structure that 466 * cpu_bit_bitmap[] is a special, "compressed" data structure that
481 * represents all NR_CPUS bits binary values of 1<<nr. 467 * represents all NR_CPUS bits binary values of 1<<nr.
482 * 468 *
483 * It is used by cpumask_of_cpu() to get a constant address to a CPU 469 * It is used by cpumask_of() to get a constant address to a CPU
484 * mask value that has a single bit set only. 470 * mask value that has a single bit set only.
485 */ 471 */
486 472
@@ -503,3 +489,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
503 489
504const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 490const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
505EXPORT_SYMBOL(cpu_all_bits); 491EXPORT_SYMBOL(cpu_all_bits);
492
493#ifdef CONFIG_INIT_ALL_POSSIBLE
494static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
495 = CPU_BITS_ALL;
496#else
497static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
498#endif
499const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
500EXPORT_SYMBOL(cpu_possible_mask);
501
502static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
503const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
504EXPORT_SYMBOL(cpu_online_mask);
505
506static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
507const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
508EXPORT_SYMBOL(cpu_present_mask);
509
510static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
511const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
512EXPORT_SYMBOL(cpu_active_mask);
513
514void set_cpu_possible(unsigned int cpu, bool possible)
515{
516 if (possible)
517 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
518 else
519 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
520}
521
522void set_cpu_present(unsigned int cpu, bool present)
523{
524 if (present)
525 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
526 else
527 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
528}
529
530void set_cpu_online(unsigned int cpu, bool online)
531{
532 if (online)
533 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
534 else
535 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
536}
537
538void set_cpu_active(unsigned int cpu, bool active)
539{
540 if (active)
541 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
542 else
543 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
544}
545
546void init_cpu_present(const struct cpumask *src)
547{
548 cpumask_copy(to_cpumask(cpu_present_bits), src);
549}
550
551void init_cpu_possible(const struct cpumask *src)
552{
553 cpumask_copy(to_cpumask(cpu_possible_bits), src);
554}
555
556void init_cpu_online(const struct cpumask *src)
557{
558 cpumask_copy(to_cpumask(cpu_online_bits), src);
559}
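The split here is deliberate: the writable DECLARE_BITMAP()s stay file-static and only `const struct cpumask *const` pointers are exported, so every writer is funneled through the accessors above. Arch boot code seeds the masks roughly like this (illustrative only):

    init_cpu_possible(cpumask_of(0));   /* start from just the boot cpu */
    set_cpu_present(0, true);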
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c4a9b62165..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
16#include "internals.h" 16#include "internals.h"
17 17
18#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
19cpumask_var_t irq_default_affinity;
19 20
20cpumask_t irq_default_affinity = CPU_MASK_ALL; 21static int init_irq_default_affinity(void)
22{
23 alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
24 cpumask_setall(irq_default_affinity);
25 return 0;
26}
27core_initcall(init_irq_default_affinity);
21 28
22/** 29/**
23 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 30 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -127,7 +134,7 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
127 desc->status &= ~IRQ_AFFINITY_SET; 134 desc->status &= ~IRQ_AFFINITY_SET;
128 } 135 }
129 136
130 cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); 137 cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
131set_affinity: 138set_affinity:
132 desc->chip->set_affinity(irq, &desc->affinity); 139 desc->chip->set_affinity(irq, &desc->affinity);
133 140
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c5..aae3f742bcec 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
20static int irq_affinity_proc_show(struct seq_file *m, void *v) 20static int irq_affinity_proc_show(struct seq_file *m, void *v)
21{ 21{
22 struct irq_desc *desc = irq_to_desc((long)m->private); 22 struct irq_desc *desc = irq_to_desc((long)m->private);
23 cpumask_t *mask = &desc->affinity; 23 const struct cpumask *mask = &desc->affinity;
24 24
25#ifdef CONFIG_GENERIC_PENDING_IRQ 25#ifdef CONFIG_GENERIC_PENDING_IRQ
26 if (desc->status & IRQ_MOVE_PENDING) 26 if (desc->status & IRQ_MOVE_PENDING)
@@ -54,7 +54,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
54 if (err) 54 if (err)
55 goto free_cpumask; 55 goto free_cpumask;
56 56
57 if (!is_affinity_mask_valid(*new_value)) { 57 if (!is_affinity_mask_valid(new_value)) {
58 err = -EINVAL; 58 err = -EINVAL;
59 goto free_cpumask; 59 goto free_cpumask;
60 } 60 }
@@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
93 93
94static int default_affinity_show(struct seq_file *m, void *v) 94static int default_affinity_show(struct seq_file *m, void *v)
95{ 95{
96 seq_cpumask(m, &irq_default_affinity); 96 seq_cpumask(m, irq_default_affinity);
97 seq_putc(m, '\n'); 97 seq_putc(m, '\n');
98 return 0; 98 return 0;
99} 99}
@@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
101static ssize_t default_affinity_write(struct file *file, 101static ssize_t default_affinity_write(struct file *file,
102 const char __user *buffer, size_t count, loff_t *ppos) 102 const char __user *buffer, size_t count, loff_t *ppos)
103{ 103{
104 cpumask_t new_value; 104 cpumask_var_t new_value;
105 int err; 105 int err;
106 106
107 err = cpumask_parse_user(buffer, count, &new_value); 107 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
108 return -ENOMEM;
109
110 err = cpumask_parse_user(buffer, count, new_value);
108 if (err) 111 if (err)
109 return err; 112 goto out;
110 113
111 if (!is_affinity_mask_valid(new_value)) 114 if (!is_affinity_mask_valid(new_value)) {
112 return -EINVAL; 115 err = -EINVAL;
116 goto out;
117 }
113 118
114 /* 119 /*
115 * Do not allow disabling IRQs completely - it's too easy a 120 * Do not allow disabling IRQs completely - it's too easy a
116 * way to make the system unusable accidentally :-) At least 121 * way to make the system unusable accidentally :-) At least
117 * one online CPU still has to be targeted. 122 * one online CPU still has to be targeted.
118 */ 123 */
119 if (!cpus_intersects(new_value, cpu_online_map)) 124 if (!cpumask_intersects(new_value, cpu_online_mask)) {
120 return -EINVAL; 125 err = -EINVAL;
126 goto out;
127 }
121 128
122 irq_default_affinity = new_value; 129 cpumask_copy(irq_default_affinity, new_value);
130 err = count;
123 131
124 return count; 132out:
133 free_cpumask_var(new_value);
134 return err;
125} 135}
126 136
127static int default_affinity_open(struct inode *inode, struct file *file) 137static int default_affinity_open(struct inode *inode, struct file *file)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ac0fde7b54d0..3fb855ad6aa0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1116,7 +1116,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
1116 struct elf_prstatus prstatus; 1116 struct elf_prstatus prstatus;
1117 u32 *buf; 1117 u32 *buf;
1118 1118
1119 if ((cpu < 0) || (cpu >= NR_CPUS)) 1119 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1120 return; 1120 return;
1121 1121
1122 /* Using ELF notes here is opportunistic. 1122 /* Using ELF notes here is opportunistic.
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 72016f051477..97890831e1b5 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -27,7 +27,7 @@ static DECLARE_WORK(poweroff_work, do_poweroff);
27static void handle_poweroff(int key, struct tty_struct *tty) 27static void handle_poweroff(int key, struct tty_struct *tty)
28{ 28{
29 /* run sysrq poweroff on boot cpu */ 29 /* run sysrq poweroff on boot cpu */
30 schedule_work_on(first_cpu(cpu_online_map), &poweroff_work); 30 schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
31} 31}
32 32
33static struct sysrq_key_op sysrq_poweroff_op = { 33static struct sysrq_key_op sysrq_poweroff_op = {
diff --git a/kernel/profile.c b/kernel/profile.c
index 4cb7d68fed82..d18e2d2654f2 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -45,7 +45,7 @@ static unsigned long prof_len, prof_shift;
45int prof_on __read_mostly; 45int prof_on __read_mostly;
46EXPORT_SYMBOL_GPL(prof_on); 46EXPORT_SYMBOL_GPL(prof_on);
47 47
48static cpumask_t prof_cpu_mask = CPU_MASK_ALL; 48static cpumask_var_t prof_cpu_mask;
49#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
50static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 50static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
51static DEFINE_PER_CPU(int, cpu_profile_flip); 51static DEFINE_PER_CPU(int, cpu_profile_flip);
@@ -113,9 +113,13 @@ int __ref profile_init(void)
113 buffer_bytes = prof_len*sizeof(atomic_t); 113 buffer_bytes = prof_len*sizeof(atomic_t);
114 if (!slab_is_available()) { 114 if (!slab_is_available()) {
115 prof_buffer = alloc_bootmem(buffer_bytes); 115 prof_buffer = alloc_bootmem(buffer_bytes);
116 alloc_bootmem_cpumask_var(&prof_cpu_mask);
116 return 0; 117 return 0;
117 } 118 }
118 119
120 if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
121 return -ENOMEM;
122
119 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); 123 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
120 if (prof_buffer) 124 if (prof_buffer)
121 return 0; 125 return 0;
@@ -128,6 +132,7 @@ int __ref profile_init(void)
128 if (prof_buffer) 132 if (prof_buffer)
129 return 0; 133 return 0;
130 134
135 free_cpumask_var(prof_cpu_mask);
131 return -ENOMEM; 136 return -ENOMEM;
132} 137}
133 138
@@ -386,13 +391,15 @@ out_free:
386 return NOTIFY_BAD; 391 return NOTIFY_BAD;
387 case CPU_ONLINE: 392 case CPU_ONLINE:
388 case CPU_ONLINE_FROZEN: 393 case CPU_ONLINE_FROZEN:
389 cpu_set(cpu, prof_cpu_mask); 394 if (prof_cpu_mask != NULL)
395 cpumask_set_cpu(cpu, prof_cpu_mask);
390 break; 396 break;
391 case CPU_UP_CANCELED: 397 case CPU_UP_CANCELED:
392 case CPU_UP_CANCELED_FROZEN: 398 case CPU_UP_CANCELED_FROZEN:
393 case CPU_DEAD: 399 case CPU_DEAD:
394 case CPU_DEAD_FROZEN: 400 case CPU_DEAD_FROZEN:
395 cpu_clear(cpu, prof_cpu_mask); 401 if (prof_cpu_mask != NULL)
402 cpumask_clear_cpu(cpu, prof_cpu_mask);
396 if (per_cpu(cpu_profile_hits, cpu)[0]) { 403 if (per_cpu(cpu_profile_hits, cpu)[0]) {
397 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); 404 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
398 per_cpu(cpu_profile_hits, cpu)[0] = NULL; 405 per_cpu(cpu_profile_hits, cpu)[0] = NULL;
@@ -430,7 +437,8 @@ void profile_tick(int type)
430 437
431 if (type == CPU_PROFILING && timer_hook) 438 if (type == CPU_PROFILING && timer_hook)
432 timer_hook(regs); 439 timer_hook(regs);
433 if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) 440 if (!user_mode(regs) && prof_cpu_mask != NULL &&
441 cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
434 profile_hit(type, (void *)profile_pc(regs)); 442 profile_hit(type, (void *)profile_pc(regs));
435} 443}
436 444
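The new NULL guards are not decoration: profile_tick() and the hotplug notifier can run before profile_init() has allocated the mask, and an allocation failure leaves it unset for good. The test is also free on small configs:

    /* CONFIG_CPUMASK_OFFSTACK=y: cpumask_var_t is a pointer, NULL until
     *   alloc_cpumask_var() succeeds - the guard is load-bearing.
     * CONFIG_CPUMASK_OFFSTACK=n: cpumask_var_t is struct cpumask[1], so
     *   `prof_cpu_mask != NULL` is constant-true and folds away. */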
@@ -442,7 +450,7 @@ void profile_tick(int type)
442static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, 450static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
443 int count, int *eof, void *data) 451 int count, int *eof, void *data)
444{ 452{
445 int len = cpumask_scnprintf(page, count, (cpumask_t *)data); 453 int len = cpumask_scnprintf(page, count, data);
446 if (count - len < 2) 454 if (count - len < 2)
447 return -EINVAL; 455 return -EINVAL;
448 len += sprintf(page + len, "\n"); 456 len += sprintf(page + len, "\n");
@@ -452,16 +460,20 @@ static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
452static int prof_cpu_mask_write_proc(struct file *file, 460static int prof_cpu_mask_write_proc(struct file *file,
453 const char __user *buffer, unsigned long count, void *data) 461 const char __user *buffer, unsigned long count, void *data)
454{ 462{
455 cpumask_t *mask = (cpumask_t *)data; 463 struct cpumask *mask = data;
456 unsigned long full_count = count, err; 464 unsigned long full_count = count, err;
457 cpumask_t new_value; 465 cpumask_var_t new_value;
458 466
459 err = cpumask_parse_user(buffer, count, &new_value); 467 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
460 if (err) 468 return -ENOMEM;
461 return err;
462 469
463 *mask = new_value; 470 err = cpumask_parse_user(buffer, count, new_value);
464 return full_count; 471 if (!err) {
472 cpumask_copy(mask, new_value);
473 err = full_count;
474 }
475 free_cpumask_var(new_value);
476 return err;
465} 477}
466 478
467void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) 479void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
@@ -472,7 +484,7 @@ void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
472 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); 484 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
473 if (!entry) 485 if (!entry)
474 return; 486 return;
475 entry->data = (void *)&prof_cpu_mask; 487 entry->data = prof_cpu_mask;
476 entry->read_proc = prof_cpu_mask_read_proc; 488 entry->read_proc = prof_cpu_mask_read_proc;
477 entry->write_proc = prof_cpu_mask_write_proc; 489 entry->write_proc = prof_cpu_mask_write_proc;
478} 490}
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index c03ca3e61919..490934fc7ac3 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -63,14 +63,14 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
63 .completed = -300, 63 .completed = -300,
64 .pending = -300, 64 .pending = -300,
65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
66 .cpumask = CPU_MASK_NONE, 66 .cpumask = CPU_BITS_NONE,
67}; 67};
68static struct rcu_ctrlblk rcu_bh_ctrlblk = { 68static struct rcu_ctrlblk rcu_bh_ctrlblk = {
69 .cur = -300, 69 .cur = -300,
70 .completed = -300, 70 .completed = -300,
71 .pending = -300, 71 .pending = -300,
72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
73 .cpumask = CPU_MASK_NONE, 73 .cpumask = CPU_BITS_NONE,
74}; 74};
75 75
76DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; 76DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
@@ -85,7 +85,6 @@ static void force_quiescent_state(struct rcu_data *rdp,
85 struct rcu_ctrlblk *rcp) 85 struct rcu_ctrlblk *rcp)
86{ 86{
87 int cpu; 87 int cpu;
88 cpumask_t cpumask;
89 unsigned long flags; 88 unsigned long flags;
90 89
91 set_need_resched(); 90 set_need_resched();
@@ -96,10 +95,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
96 * Don't send IPI to itself. With irqs disabled, 95 * Don't send IPI to itself. With irqs disabled,
97 * rdp->cpu is the current cpu. 96 * rdp->cpu is the current cpu.
98 * 97 *
99 * cpu_online_map is updated by the _cpu_down() 98 * cpu_online_mask is updated by the _cpu_down()
100 * using __stop_machine(). Since we're in irqs disabled 99 * using __stop_machine(). Since we're in irqs disabled
101 * section, __stop_machine() is not executing, hence 100 * section, __stop_machine() is not executing, hence
102 * the cpu_online_map is stable. 101 * the cpu_online_mask is stable.
103 * 102 *
104 * However, a cpu might have been offlined _just_ before 103 * However, a cpu might have been offlined _just_ before
105 * we disabled irqs while entering here. 104 * we disabled irqs while entering here.
@@ -107,13 +106,14 @@ static void force_quiescent_state(struct rcu_data *rdp,
107 * notification, leading to the offlined cpu's bit 106 * notification, leading to the offlined cpu's bit
108 * being set in the rcp->cpumask. 107 * being set in the rcp->cpumask.
109 * 108 *
110 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent 109 * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
111 * sending smp_send_reschedule() to an offlined CPU. 110 * sending smp_send_reschedule() to an offlined CPU.
112 */ 111 */
113 cpus_and(cpumask, rcp->cpumask, cpu_online_map); 112 for_each_cpu_and(cpu,
114 cpu_clear(rdp->cpu, cpumask); 113 to_cpumask(rcp->cpumask), cpu_online_mask) {
115 for_each_cpu_mask_nr(cpu, cpumask) 114 if (cpu != rdp->cpu)
116 smp_send_reschedule(cpu); 115 smp_send_reschedule(cpu);
116 }
117 } 117 }
118 spin_unlock_irqrestore(&rcp->lock, flags); 118 spin_unlock_irqrestore(&rcp->lock, flags);
119} 119}
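
The point of this hunk is that for_each_cpu_and() iterates the intersection of two masks directly, so the NR_CPUS-bit temporary that used to sit on the stack disappears. A before/after sketch of the idiom (mask_a and do_something() are placeholders):

    int cpu;

    /* Old idiom: materialise the intersection, then walk it. */
    cpumask_t tmp;                          /* NR_CPUS bits on the stack */
    cpus_and(tmp, mask_a, cpu_online_map);
    for_each_cpu_mask_nr(cpu, tmp)
            do_something(cpu);

    /* New idiom: walk the intersection without a temporary. */
    for_each_cpu_and(cpu, &mask_a, cpu_online_mask)
            do_something(cpu);
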
@@ -193,7 +193,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
193 193
194 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 194 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
195 for_each_possible_cpu(cpu) { 195 for_each_possible_cpu(cpu) {
196 if (cpu_isset(cpu, rcp->cpumask)) 196 if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
197 printk(" %d", cpu); 197 printk(" %d", cpu);
198 } 198 }
199 printk(" (detected by %d, t=%ld jiffies)\n", 199 printk(" (detected by %d, t=%ld jiffies)\n",
@@ -221,7 +221,8 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
221 long delta; 221 long delta;
222 222
223 delta = jiffies - rcp->jiffies_stall; 223 delta = jiffies - rcp->jiffies_stall;
224 if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) { 224 if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
225 delta >= 0) {
225 226
226 /* We haven't checked in, so go dump stack. */ 227 /* We haven't checked in, so go dump stack. */
227 print_cpu_stall(rcp); 228 print_cpu_stall(rcp);
@@ -393,7 +394,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
393 * unnecessarily. 394 * unnecessarily.
394 */ 395 */
395 smp_mb(); 396 smp_mb();
396 cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask); 397 cpumask_andnot(to_cpumask(rcp->cpumask),
398 cpu_online_mask, nohz_cpu_mask);
397 399
398 rcp->signaled = 0; 400 rcp->signaled = 0;
399 } 401 }
@@ -406,8 +408,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
406 */ 408 */
407static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) 409static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
408{ 410{
409 cpu_clear(cpu, rcp->cpumask); 411 cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
410 if (cpus_empty(rcp->cpumask)) { 412 if (cpumask_empty(to_cpumask(rcp->cpumask))) {
411 /* batch completed ! */ 413 /* batch completed ! */
412 rcp->completed = rcp->cur; 414 rcp->completed = rcp->cur;
413 rcu_start_batch(rcp); 415 rcu_start_batch(rcp);
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 04982659875a..f9dc8f3720f6 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] =
164 { "idle", "waitack", "waitzero", "waitmb" }; 164 { "idle", "waitack", "waitzero", "waitmb" };
165#endif /* #ifdef CONFIG_RCU_TRACE */ 165#endif /* #ifdef CONFIG_RCU_TRACE */
166 166
167static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE; 167static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
168 = CPU_BITS_NONE;
168 169
169/* 170/*
170 * Enum and per-CPU flag to determine when each CPU has seen 171 * Enum and per-CPU flag to determine when each CPU has seen
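
Masks that stay statically sized are converted to a raw bitmap wrapped by to_cpumask(), which keeps every access going through the typed struct cpumask API while avoiding a full cpumask_t object. The idiom in isolation (the mask name is a placeholder):

    static DECLARE_BITMAP(my_cpu_mask, NR_CPUS) __read_mostly = CPU_BITS_NONE;

    static void mark_cpu_seen(int cpu)
    {
            /* to_cpumask() reinterprets the bitmap as a struct cpumask *. */
            cpumask_set_cpu(cpu, to_cpumask(my_cpu_mask));
    }
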
@@ -758,7 +759,7 @@ rcu_try_flip_idle(void)
758 759
759 /* Now ask each CPU for acknowledgement of the flip. */ 760 /* Now ask each CPU for acknowledgement of the flip. */
760 761
761 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 762 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
762 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 763 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
763 dyntick_save_progress_counter(cpu); 764 dyntick_save_progress_counter(cpu);
764 } 765 }
@@ -776,7 +777,7 @@ rcu_try_flip_waitack(void)
776 int cpu; 777 int cpu;
777 778
778 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 779 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
779 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 780 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
780 if (rcu_try_flip_waitack_needed(cpu) && 781 if (rcu_try_flip_waitack_needed(cpu) &&
781 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 782 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
782 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); 783 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -808,7 +809,7 @@ rcu_try_flip_waitzero(void)
808 /* Check to see if the sum of the "last" counters is zero. */ 809 /* Check to see if the sum of the "last" counters is zero. */
809 810
810 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 811 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
811 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 812 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
812 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 813 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
813 if (sum != 0) { 814 if (sum != 0) {
814 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); 815 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -823,7 +824,7 @@ rcu_try_flip_waitzero(void)
823 smp_mb(); /* ^^^^^^^^^^^^ */ 824 smp_mb(); /* ^^^^^^^^^^^^ */
824 825
825 /* Call for a memory barrier from each CPU. */ 826 /* Call for a memory barrier from each CPU. */
826 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 827 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
827 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 828 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
828 dyntick_save_progress_counter(cpu); 829 dyntick_save_progress_counter(cpu);
829 } 830 }
@@ -843,7 +844,7 @@ rcu_try_flip_waitmb(void)
843 int cpu; 844 int cpu;
844 845
845 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 846 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
846 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 847 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
847 if (rcu_try_flip_waitmb_needed(cpu) && 848 if (rcu_try_flip_waitmb_needed(cpu) &&
848 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 849 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
849 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); 850 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
@@ -1032,7 +1033,7 @@ void rcu_offline_cpu(int cpu)
1032 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; 1033 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
1033 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; 1034 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
1034 1035
1035 cpu_clear(cpu, rcu_cpu_online_map); 1036 cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
1036 1037
1037 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1038 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
1038 1039
@@ -1072,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu)
1072 struct rcu_data *rdp; 1073 struct rcu_data *rdp;
1073 1074
1074 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); 1075 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
1075 cpu_set(cpu, rcu_cpu_online_map); 1076 cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
1076 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1077 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
1077 1078
1078 /* 1079 /*
@@ -1430,7 +1431,7 @@ void __init __rcu_init(void)
1430 * We don't need protection against CPU-Hotplug here 1431 * We don't need protection against CPU-Hotplug here
1431 * since 1432 * since
1432 * a) If a CPU comes online while we are iterating over the 1433 * a) If a CPU comes online while we are iterating over the
1433 * cpu_online_map below, we would only end up making a 1434 * cpu_online_mask below, we would only end up making a
1434 * duplicate call to rcu_online_cpu() which sets the corresponding 1435 * duplicate call to rcu_online_cpu() which sets the corresponding
1435 * CPU's mask in the rcu_cpu_online_map. 1436 * CPU's mask in the rcu_cpu_online_map.
1436 * 1437 *
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b31065522104..3245b40952c6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -868,49 +868,52 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
868 */ 868 */
869static void rcu_torture_shuffle_tasks(void) 869static void rcu_torture_shuffle_tasks(void)
870{ 870{
871 cpumask_t tmp_mask; 871 cpumask_var_t tmp_mask;
872 int i; 872 int i;
873 873
874 cpus_setall(tmp_mask); 874 if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
875 BUG();
876
877 cpumask_setall(tmp_mask);
875 get_online_cpus(); 878 get_online_cpus();
876 879
877 /* No point in shuffling if there is only one online CPU (ex: UP) */ 880 /* No point in shuffling if there is only one online CPU (ex: UP) */
878 if (num_online_cpus() == 1) { 881 if (num_online_cpus() == 1)
879 put_online_cpus(); 882 goto out;
880 return;
881 }
882 883
883 if (rcu_idle_cpu != -1) 884 if (rcu_idle_cpu != -1)
884 cpu_clear(rcu_idle_cpu, tmp_mask); 885 cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
885 886
886 set_cpus_allowed_ptr(current, &tmp_mask); 887 set_cpus_allowed_ptr(current, tmp_mask);
887 888
888 if (reader_tasks) { 889 if (reader_tasks) {
889 for (i = 0; i < nrealreaders; i++) 890 for (i = 0; i < nrealreaders; i++)
890 if (reader_tasks[i]) 891 if (reader_tasks[i])
891 set_cpus_allowed_ptr(reader_tasks[i], 892 set_cpus_allowed_ptr(reader_tasks[i],
892 &tmp_mask); 893 tmp_mask);
893 } 894 }
894 895
895 if (fakewriter_tasks) { 896 if (fakewriter_tasks) {
896 for (i = 0; i < nfakewriters; i++) 897 for (i = 0; i < nfakewriters; i++)
897 if (fakewriter_tasks[i]) 898 if (fakewriter_tasks[i])
898 set_cpus_allowed_ptr(fakewriter_tasks[i], 899 set_cpus_allowed_ptr(fakewriter_tasks[i],
899 &tmp_mask); 900 tmp_mask);
900 } 901 }
901 902
902 if (writer_task) 903 if (writer_task)
903 set_cpus_allowed_ptr(writer_task, &tmp_mask); 904 set_cpus_allowed_ptr(writer_task, tmp_mask);
904 905
905 if (stats_task) 906 if (stats_task)
906 set_cpus_allowed_ptr(stats_task, &tmp_mask); 907 set_cpus_allowed_ptr(stats_task, tmp_mask);
907 908
908 if (rcu_idle_cpu == -1) 909 if (rcu_idle_cpu == -1)
909 rcu_idle_cpu = num_online_cpus() - 1; 910 rcu_idle_cpu = num_online_cpus() - 1;
910 else 911 else
911 rcu_idle_cpu--; 912 rcu_idle_cpu--;
912 913
914out:
913 put_online_cpus(); 915 put_online_cpus();
916 free_cpumask_var(tmp_mask);
914} 917}
915 918
916/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the 919/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
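
Once the shuffle mask lives off the stack, each early return has to become a jump to a common exit that both drops the hotplug lock and frees the mask, which is what the new "out:" label provides. The shape of the function, reduced to its control flow:

    cpumask_var_t tmp;

    if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
            return;                 /* rcutorture chooses to BUG() instead */

    get_online_cpus();
    if (num_online_cpus() == 1)
            goto out;               /* nothing to shuffle on UP */

    /* ... rebind the torture tasks using tmp ... */
out:
    put_online_cpus();
    free_cpumask_var(tmp);
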
diff --git a/kernel/sched.c b/kernel/sched.c
index 930bf2e6d714..545c6fccd1dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3715,7 +3715,7 @@ redo:
3715 * don't kick the migration_thread, if the curr 3715 * don't kick the migration_thread, if the curr
3716 * task on busiest cpu can't be moved to this_cpu 3716 * task on busiest cpu can't be moved to this_cpu
3717 */ 3717 */
3718 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { 3718 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
3719 double_unlock_balance(this_rq, busiest); 3719 double_unlock_balance(this_rq, busiest);
3720 all_pinned = 1; 3720 all_pinned = 1;
3721 return ld_moved; 3721 return ld_moved;
@@ -6257,9 +6257,7 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
6257static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 6257static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
6258{ 6258{
6259 int dest_cpu; 6259 int dest_cpu;
6260 /* FIXME: Use cpumask_of_node here. */ 6260 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
6261 cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
6262 const struct cpumask *nodemask = &_nodemask;
6263 6261
6264again: 6262again:
6265 /* Look for allowed, online CPU in same node. */ 6263 /* Look for allowed, online CPU in same node. */
@@ -7170,21 +7168,18 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
7170static void sched_domain_node_span(int node, struct cpumask *span) 7168static void sched_domain_node_span(int node, struct cpumask *span)
7171{ 7169{
7172 nodemask_t used_nodes; 7170 nodemask_t used_nodes;
7173 /* FIXME: use cpumask_of_node() */
7174 node_to_cpumask_ptr(nodemask, node);
7175 int i; 7171 int i;
7176 7172
7177 cpus_clear(*span); 7173 cpumask_clear(span);
7178 nodes_clear(used_nodes); 7174 nodes_clear(used_nodes);
7179 7175
7180 cpus_or(*span, *span, *nodemask); 7176 cpumask_or(span, span, cpumask_of_node(node));
7181 node_set(node, used_nodes); 7177 node_set(node, used_nodes);
7182 7178
7183 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { 7179 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
7184 int next_node = find_next_best_node(node, &used_nodes); 7180 int next_node = find_next_best_node(node, &used_nodes);
7185 7181
7186 node_to_cpumask_ptr_next(nodemask, next_node); 7182 cpumask_or(span, span, cpumask_of_node(next_node));
7187 cpus_or(*span, *span, *nodemask);
7188 } 7183 }
7189} 7184}
7190#endif /* CONFIG_NUMA */ 7185#endif /* CONFIG_NUMA */
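
cpumask_of_node() resolves the FIXMEs left around the interim node_to_cpumask() helpers: instead of returning an NR_CPUS-bit mask by value, it returns a const pointer into per-node data that can feed cpumask_or() directly. A before/after sketch (span and node are placeholders):

    /* Old: every call copies a full cpumask onto the stack. */
    cpumask_t tmp = node_to_cpumask(node);
    cpus_or(*span, *span, tmp);

    /* New: OR straight from the shared per-node mask, no copy. */
    cpumask_or(span, span, cpumask_of_node(node));
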
@@ -7264,9 +7259,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
7264{ 7259{
7265 int group; 7260 int group;
7266#ifdef CONFIG_SCHED_MC 7261#ifdef CONFIG_SCHED_MC
7267 /* FIXME: Use cpu_coregroup_mask. */ 7262 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
7268 *mask = cpu_coregroup_map(cpu);
7269 cpus_and(*mask, *mask, *cpu_map);
7270 group = cpumask_first(mask); 7263 group = cpumask_first(mask);
7271#elif defined(CONFIG_SCHED_SMT) 7264#elif defined(CONFIG_SCHED_SMT)
7272 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); 7265 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
@@ -7296,10 +7289,8 @@ static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
7296 struct cpumask *nodemask) 7289 struct cpumask *nodemask)
7297{ 7290{
7298 int group; 7291 int group;
7299 /* FIXME: use cpumask_of_node */
7300 node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
7301 7292
7302 cpumask_and(nodemask, pnodemask, cpu_map); 7293 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
7303 group = cpumask_first(nodemask); 7294 group = cpumask_first(nodemask);
7304 7295
7305 if (sg) 7296 if (sg)
@@ -7350,10 +7341,8 @@ static void free_sched_groups(const struct cpumask *cpu_map,
7350 7341
7351 for (i = 0; i < nr_node_ids; i++) { 7342 for (i = 0; i < nr_node_ids; i++) {
7352 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7343 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7353 /* FIXME: Use cpumask_of_node */
7354 node_to_cpumask_ptr(pnodemask, i);
7355 7344
7356 cpus_and(*nodemask, *pnodemask, *cpu_map); 7345 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7357 if (cpumask_empty(nodemask)) 7346 if (cpumask_empty(nodemask))
7358 continue; 7347 continue;
7359 7348
@@ -7562,9 +7551,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
7562 for_each_cpu(i, cpu_map) { 7551 for_each_cpu(i, cpu_map) {
7563 struct sched_domain *sd = NULL, *p; 7552 struct sched_domain *sd = NULL, *p;
7564 7553
7565 /* FIXME: use cpumask_of_node */ 7554 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
7566 *nodemask = node_to_cpumask(cpu_to_node(i));
7567 cpus_and(*nodemask, *nodemask, *cpu_map);
7568 7555
7569#ifdef CONFIG_NUMA 7556#ifdef CONFIG_NUMA
7570 if (cpumask_weight(cpu_map) > 7557 if (cpumask_weight(cpu_map) >
@@ -7605,9 +7592,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
7605 sd = &per_cpu(core_domains, i).sd; 7592 sd = &per_cpu(core_domains, i).sd;
7606 SD_INIT(sd, MC); 7593 SD_INIT(sd, MC);
7607 set_domain_attribute(sd, attr); 7594 set_domain_attribute(sd, attr);
7608 *sched_domain_span(sd) = cpu_coregroup_map(i); 7595 cpumask_and(sched_domain_span(sd), cpu_map,
7609 cpumask_and(sched_domain_span(sd), 7596 cpu_coregroup_mask(i));
7610 sched_domain_span(sd), cpu_map);
7611 sd->parent = p; 7597 sd->parent = p;
7612 p->child = sd; 7598 p->child = sd;
7613 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); 7599 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7643,9 +7629,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
7643#ifdef CONFIG_SCHED_MC 7629#ifdef CONFIG_SCHED_MC
7644 /* Set up multi-core groups */ 7630 /* Set up multi-core groups */
7645 for_each_cpu(i, cpu_map) { 7631 for_each_cpu(i, cpu_map) {
7646 /* FIXME: Use cpu_coregroup_mask */ 7632 cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
7647 *this_core_map = cpu_coregroup_map(i);
7648 cpus_and(*this_core_map, *this_core_map, *cpu_map);
7649 if (i != cpumask_first(this_core_map)) 7633 if (i != cpumask_first(this_core_map))
7650 continue; 7634 continue;
7651 7635
@@ -7657,9 +7641,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
7657 7641
7658 /* Set up physical groups */ 7642 /* Set up physical groups */
7659 for (i = 0; i < nr_node_ids; i++) { 7643 for (i = 0; i < nr_node_ids; i++) {
7660 /* FIXME: Use cpumask_of_node */ 7644 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7661 *nodemask = node_to_cpumask(i);
7662 cpus_and(*nodemask, *nodemask, *cpu_map);
7663 if (cpumask_empty(nodemask)) 7645 if (cpumask_empty(nodemask))
7664 continue; 7646 continue;
7665 7647
@@ -7681,11 +7663,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
7681 struct sched_group *sg, *prev; 7663 struct sched_group *sg, *prev;
7682 int j; 7664 int j;
7683 7665
7684 /* FIXME: Use cpumask_of_node */
7685 *nodemask = node_to_cpumask(i);
7686 cpumask_clear(covered); 7666 cpumask_clear(covered);
7687 7667 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7688 cpus_and(*nodemask, *nodemask, *cpu_map);
7689 if (cpumask_empty(nodemask)) { 7668 if (cpumask_empty(nodemask)) {
7690 sched_group_nodes[i] = NULL; 7669 sched_group_nodes[i] = NULL;
7691 continue; 7670 continue;
@@ -7716,8 +7695,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
7716 7695
7717 for (j = 0; j < nr_node_ids; j++) { 7696 for (j = 0; j < nr_node_ids; j++) {
7718 int n = (i + j) % nr_node_ids; 7697 int n = (i + j) % nr_node_ids;
7719 /* FIXME: Use cpumask_of_node */
7720 node_to_cpumask_ptr(pnodemask, n);
7721 7698
7722 cpumask_complement(notcovered, covered); 7699 cpumask_complement(notcovered, covered);
7723 cpumask_and(tmpmask, notcovered, cpu_map); 7700 cpumask_and(tmpmask, notcovered, cpu_map);
@@ -7725,7 +7702,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
7725 if (cpumask_empty(tmpmask)) 7702 if (cpumask_empty(tmpmask))
7726 break; 7703 break;
7727 7704
7728 cpumask_and(tmpmask, tmpmask, pnodemask); 7705 cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
7729 if (cpumask_empty(tmpmask)) 7706 if (cpumask_empty(tmpmask))
7730 continue; 7707 continue;
7731 7708
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 833b6d44483c..954e1a81b796 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1383,7 +1383,8 @@ static inline void init_sched_rt_class(void)
1383 unsigned int i; 1383 unsigned int i;
1384 1384
1385 for_each_possible_cpu(i) 1385 for_each_possible_cpu(i)
1386 alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL); 1386 alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1387 GFP_KERNEL, cpu_to_node(i));
1387} 1388}
1388#endif /* CONFIG_SMP */ 1389#endif /* CONFIG_SMP */
1389 1390
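
alloc_cpumask_var_node() is the NUMA-aware allocator: each per-CPU scratch mask is placed on the owning CPU's home node, so hot-path accesses from that CPU stay node-local. A sketch of the same init loop (the per-CPU variable name is a placeholder):

    static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);

    static void __init init_scratch_masks(void)
    {
            int i;

            for_each_possible_cpu(i)
                    /* Allocate CPU i's mask from CPU i's own node. */
                    alloc_cpumask_var_node(&per_cpu(scratch_mask, i),
                                           GFP_KERNEL, cpu_to_node(i));
    }
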
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c8dde58c55..5cfa0e5e3e88 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -24,8 +24,8 @@ struct call_function_data {
24 struct call_single_data csd; 24 struct call_single_data csd;
25 spinlock_t lock; 25 spinlock_t lock;
26 unsigned int refs; 26 unsigned int refs;
27 cpumask_t cpumask;
28 struct rcu_head rcu_head; 27 struct rcu_head rcu_head;
28 unsigned long cpumask_bits[];
29}; 29};
30 30
31struct call_single_queue { 31struct call_single_queue {
@@ -110,13 +110,13 @@ void generic_smp_call_function_interrupt(void)
110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) { 110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
111 int refs; 111 int refs;
112 112
113 if (!cpu_isset(cpu, data->cpumask)) 113 if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
114 continue; 114 continue;
115 115
116 data->csd.func(data->csd.info); 116 data->csd.func(data->csd.info);
117 117
118 spin_lock(&data->lock); 118 spin_lock(&data->lock);
119 cpu_clear(cpu, data->cpumask); 119 cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
120 WARN_ON(data->refs == 0); 120 WARN_ON(data->refs == 0);
121 data->refs--; 121 data->refs--;
122 refs = data->refs; 122 refs = data->refs;
@@ -223,7 +223,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
223 local_irq_save(flags); 223 local_irq_save(flags);
224 func(info); 224 func(info);
225 local_irq_restore(flags); 225 local_irq_restore(flags);
226 } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { 226 } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
227 struct call_single_data *data = NULL; 227 struct call_single_data *data = NULL;
228 228
229 if (!wait) { 229 if (!wait) {
@@ -266,51 +266,19 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
266 generic_exec_single(cpu, data); 266 generic_exec_single(cpu, data);
267} 267}
268 268
269/* Dummy function */ 269/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
270static void quiesce_dummy(void *unused) 270#ifndef arch_send_call_function_ipi_mask
271{ 271#define arch_send_call_function_ipi_mask(maskp) \
272} 272 arch_send_call_function_ipi(*(maskp))
273 273#endif
274/*
275 * Ensure stack based data used in call function mask is safe to free.
276 *
277 * This is needed by smp_call_function_mask when using on-stack data, because
278 * a single call function queue is shared by all CPUs, and any CPU may pick up
279 * the data item on the queue at any time before it is deleted. So we need to
280 * ensure that all CPUs have transitioned through a quiescent state after
281 * this call.
282 *
283 * This is a very slow function, implemented by sending synchronous IPIs to
284 * all possible CPUs. For this reason, we have to alloc data rather than use
285 * stack based data even in the case of synchronous calls. The stack based
286 * data is then just used for deadlock/oom fallback which will be very rare.
287 *
288 * If a faster scheme can be made, we could go back to preferring stack based
289 * data -- the data allocation/free is non-zero cost.
290 */
291static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
292{
293 struct call_single_data data;
294 int cpu;
295
296 data.func = quiesce_dummy;
297 data.info = NULL;
298
299 for_each_cpu_mask(cpu, mask) {
300 data.flags = CSD_FLAG_WAIT;
301 generic_exec_single(cpu, &data);
302 }
303}
304 274
305/** 275/**
306 * smp_call_function_mask(): Run a function on a set of other CPUs. 276 * smp_call_function_many(): Run a function on a set of other CPUs.
307 * @mask: The set of cpus to run on. 277 * @mask: The set of cpus to run on (only runs on online subset).
308 * @func: The function to run. This must be fast and non-blocking. 278 * @func: The function to run. This must be fast and non-blocking.
309 * @info: An arbitrary pointer to pass to the function. 279 * @info: An arbitrary pointer to pass to the function.
310 * @wait: If true, wait (atomically) until function has completed on other CPUs. 280 * @wait: If true, wait (atomically) until function has completed on other CPUs.
311 * 281 *
312 * Returns 0 on success, else a negative status code.
313 *
314 * If @wait is true, then returns once @func has returned. Note that @wait 282 * If @wait is true, then returns once @func has returned. Note that @wait
315 * will be implicitly turned on in case of allocation failures, since 283 * will be implicitly turned on in case of allocation failures, since
316 * we fall back to on-stack allocation. 284 * we fall back to on-stack allocation.
@@ -319,53 +287,57 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
319 * hardware interrupt handler or from a bottom half handler. Preemption 287 * hardware interrupt handler or from a bottom half handler. Preemption
320 * must be disabled when calling this function. 288 * must be disabled when calling this function.
321 */ 289 */
322int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, 290void smp_call_function_many(const struct cpumask *mask,
323 int wait) 291 void (*func)(void *), void *info,
292 bool wait)
324{ 293{
325 struct call_function_data d; 294 struct call_function_data *data;
326 struct call_function_data *data = NULL;
327 cpumask_t allbutself;
328 unsigned long flags; 295 unsigned long flags;
329 int cpu, num_cpus; 296 int cpu, next_cpu;
330 int slowpath = 0;
331 297
332 /* Can deadlock when called with interrupts disabled */ 298 /* Can deadlock when called with interrupts disabled */
333 WARN_ON(irqs_disabled()); 299 WARN_ON(irqs_disabled());
334 300
335 cpu = smp_processor_id(); 301 /* So, what's a CPU they want? Ignoring this one. */
336 allbutself = cpu_online_map; 302 cpu = cpumask_first_and(mask, cpu_online_mask);
337 cpu_clear(cpu, allbutself); 303 if (cpu == smp_processor_id())
338 cpus_and(mask, mask, allbutself); 304 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
339 num_cpus = cpus_weight(mask); 305 /* No online cpus? We're done. */
340 306 if (cpu >= nr_cpu_ids)
341 /* 307 return;
342 * If zero CPUs, return. If just a single CPU, turn this request 308
343 * into a targeted single call instead since it's faster. 309 /* Do we have another CPU which isn't us? */
344 */ 310 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
345 if (!num_cpus) 311 if (next_cpu == smp_processor_id())
346 return 0; 312 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
347 else if (num_cpus == 1) { 313
348 cpu = first_cpu(mask); 314 /* Fastpath: do that cpu by itself. */
349 return smp_call_function_single(cpu, func, info, wait); 315 if (next_cpu >= nr_cpu_ids) {
316 smp_call_function_single(cpu, func, info, wait);
317 return;
350 } 318 }
351 319
352 data = kmalloc(sizeof(*data), GFP_ATOMIC); 320 data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
353 if (data) { 321 if (unlikely(!data)) {
354 data->csd.flags = CSD_FLAG_ALLOC; 322 /* Slow path. */
355 if (wait) 323 for_each_online_cpu(cpu) {
356 data->csd.flags |= CSD_FLAG_WAIT; 324 if (cpu == smp_processor_id())
357 } else { 325 continue;
358 data = &d; 326 if (cpumask_test_cpu(cpu, mask))
359 data->csd.flags = CSD_FLAG_WAIT; 327 smp_call_function_single(cpu, func, info, wait);
360 wait = 1; 328 }
361 slowpath = 1; 329 return;
362 } 330 }
363 331
364 spin_lock_init(&data->lock); 332 spin_lock_init(&data->lock);
333 data->csd.flags = CSD_FLAG_ALLOC;
334 if (wait)
335 data->csd.flags |= CSD_FLAG_WAIT;
365 data->csd.func = func; 336 data->csd.func = func;
366 data->csd.info = info; 337 data->csd.info = info;
367 data->refs = num_cpus; 338 cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
368 data->cpumask = mask; 339 cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
340 data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));
369 341
370 spin_lock_irqsave(&call_function_lock, flags); 342 spin_lock_irqsave(&call_function_lock, flags);
371 list_add_tail_rcu(&data->csd.list, &call_function_queue); 343 list_add_tail_rcu(&data->csd.list, &call_function_queue);
@@ -377,18 +349,13 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
377 smp_mb(); 349 smp_mb();
378 350
379 /* Send a message to all CPUs in the map */ 351 /* Send a message to all CPUs in the map */
380 arch_send_call_function_ipi(mask); 352 arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));
381 353
382 /* optionally wait for the CPUs to complete */ 354 /* optionally wait for the CPUs to complete */
383 if (wait) { 355 if (wait)
384 csd_flag_wait(&data->csd); 356 csd_flag_wait(&data->csd);
385 if (unlikely(slowpath))
386 smp_call_function_mask_quiesce_stack(mask);
387 }
388
389 return 0;
390} 357}
391EXPORT_SYMBOL(smp_call_function_mask); 358EXPORT_SYMBOL(smp_call_function_many);
392 359
393/** 360/**
394 * smp_call_function(): Run a function on all other CPUs. 361 * smp_call_function(): Run a function on all other CPUs.
@@ -396,7 +363,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
396 * @info: An arbitrary pointer to pass to the function. 363 * @info: An arbitrary pointer to pass to the function.
397 * @wait: If true, wait (atomically) until function has completed on other CPUs. 364 * @wait: If true, wait (atomically) until function has completed on other CPUs.
398 * 365 *
399 * Returns 0 on success, else a negative status code. 366 * Returns 0.
400 * 367 *
401 * If @wait is true, then returns once @func has returned; otherwise 368 * If @wait is true, then returns once @func has returned; otherwise
402 * it returns just before the target cpu calls @func. In case of allocation 369 * it returns just before the target cpu calls @func. In case of allocation
@@ -407,12 +374,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
407 */ 374 */
408int smp_call_function(void (*func)(void *), void *info, int wait) 375int smp_call_function(void (*func)(void *), void *info, int wait)
409{ 376{
410 int ret;
411
412 preempt_disable(); 377 preempt_disable();
413 ret = smp_call_function_mask(cpu_online_map, func, info, wait); 378 smp_call_function_many(cpu_online_mask, func, info, wait);
414 preempt_enable(); 379 preempt_enable();
415 return ret; 380 return 0;
416} 381}
417EXPORT_SYMBOL(smp_call_function); 382EXPORT_SYMBOL(smp_call_function);
418 383
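
call_function_data now ends in a flexible array member sized at runtime with cpumask_size(), so each allocation carries only nr_cpu_ids bits of mask instead of a fixed NR_CPUS-bit field. The allocation idiom, abbreviated from the code above (fixed fields elided):

    struct cfd {
            /* ... csd, lock, refs ... */
            struct rcu_head rcu_head;
            unsigned long cpumask_bits[];   /* flexible array member */
    };

    struct cfd *data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);

    if (data) {
            /* Restrict to online CPUs and drop the sending CPU. */
            cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
            cpumask_clear_cpu(smp_processor_id(),
                              to_cpumask(data->cpumask_bits));
    }
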
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 670c1eca47ec..bdbe9de9cd8d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -733,7 +733,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
733 break; 733 break;
734 /* Unbind so it can run. Fall thru. */ 734 /* Unbind so it can run. Fall thru. */
735 kthread_bind(per_cpu(ksoftirqd, hotcpu), 735 kthread_bind(per_cpu(ksoftirqd, hotcpu),
736 any_online_cpu(cpu_online_map)); 736 cpumask_any(cpu_online_mask));
737 case CPU_DEAD: 737 case CPU_DEAD:
738 case CPU_DEAD_FROZEN: { 738 case CPU_DEAD_FROZEN: {
739 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 739 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 1ab790c67b17..d9188c66278a 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -303,17 +303,15 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
303 break; 303 break;
304 case CPU_ONLINE: 304 case CPU_ONLINE:
305 case CPU_ONLINE_FROZEN: 305 case CPU_ONLINE_FROZEN:
306 check_cpu = any_online_cpu(cpu_online_map); 306 check_cpu = cpumask_any(cpu_online_mask);
307 wake_up_process(per_cpu(watchdog_task, hotcpu)); 307 wake_up_process(per_cpu(watchdog_task, hotcpu));
308 break; 308 break;
309#ifdef CONFIG_HOTPLUG_CPU 309#ifdef CONFIG_HOTPLUG_CPU
310 case CPU_DOWN_PREPARE: 310 case CPU_DOWN_PREPARE:
311 case CPU_DOWN_PREPARE_FROZEN: 311 case CPU_DOWN_PREPARE_FROZEN:
312 if (hotcpu == check_cpu) { 312 if (hotcpu == check_cpu) {
313 cpumask_t temp_cpu_online_map = cpu_online_map; 313 /* Pick any other online cpu. */
314 314 check_cpu = cpumask_any_but(cpu_online_mask, hotcpu);
315 cpu_clear(hotcpu, temp_cpu_online_map);
316 check_cpu = any_online_cpu(temp_cpu_online_map);
317 } 315 }
318 break; 316 break;
319 317
@@ -323,7 +321,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
323 break; 321 break;
324 /* Unbind so it can run. Fall thru. */ 322 /* Unbind so it can run. Fall thru. */
325 kthread_bind(per_cpu(watchdog_task, hotcpu), 323 kthread_bind(per_cpu(watchdog_task, hotcpu),
326 any_online_cpu(cpu_online_map)); 324 cpumask_any(cpu_online_mask));
327 case CPU_DEAD: 325 case CPU_DEAD:
328 case CPU_DEAD_FROZEN: 326 case CPU_DEAD_FROZEN:
329 p = per_cpu(watchdog_task, hotcpu); 327 p = per_cpu(watchdog_task, hotcpu);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 24e8ceacc388..286c41722e8c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -69,10 +69,10 @@ static void stop_cpu(struct work_struct *unused)
69 int err; 69 int err;
70 70
71 if (!active_cpus) { 71 if (!active_cpus) {
72 if (cpu == first_cpu(cpu_online_map)) 72 if (cpu == cpumask_first(cpu_online_mask))
73 smdata = &active; 73 smdata = &active;
74 } else { 74 } else {
75 if (cpu_isset(cpu, *active_cpus)) 75 if (cpumask_test_cpu(cpu, active_cpus))
76 smdata = &active; 76 smdata = &active;
77 } 77 }
78 /* Simple state machine */ 78 /* Simple state machine */
@@ -109,7 +109,7 @@ static int chill(void *unused)
109 return 0; 109 return 0;
110} 110}
111 111
112int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 112int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
113{ 113{
114 struct work_struct *sm_work; 114 struct work_struct *sm_work;
115 int i, ret; 115 int i, ret;
@@ -142,7 +142,7 @@ int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
142 return ret; 142 return ret;
143} 143}
144 144
145int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 145int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
146{ 146{
147 int ret; 147 int ret;
148 148
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 6d7dc4ec4aa5..888adbcca30c 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -290,18 +290,17 @@ ret:
290 return; 290 return;
291} 291}
292 292
293static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) 293static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
294{ 294{
295 struct listener_list *listeners; 295 struct listener_list *listeners;
296 struct listener *s, *tmp; 296 struct listener *s, *tmp;
297 unsigned int cpu; 297 unsigned int cpu;
298 cpumask_t mask = *maskp;
299 298
300 if (!cpus_subset(mask, cpu_possible_map)) 299 if (!cpumask_subset(mask, cpu_possible_mask))
301 return -EINVAL; 300 return -EINVAL;
302 301
303 if (isadd == REGISTER) { 302 if (isadd == REGISTER) {
304 for_each_cpu_mask_nr(cpu, mask) { 303 for_each_cpu(cpu, mask) {
305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 304 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
306 cpu_to_node(cpu)); 305 cpu_to_node(cpu));
307 if (!s) 306 if (!s)
@@ -320,7 +319,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
320 319
321 /* Deregister or cleanup */ 320 /* Deregister or cleanup */
322cleanup: 321cleanup:
323 for_each_cpu_mask_nr(cpu, mask) { 322 for_each_cpu(cpu, mask) {
324 listeners = &per_cpu(listener_array, cpu); 323 listeners = &per_cpu(listener_array, cpu);
325 down_write(&listeners->sem); 324 down_write(&listeners->sem);
326 list_for_each_entry_safe(s, tmp, &listeners->list, list) { 325 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
@@ -335,7 +334,7 @@ cleanup:
335 return 0; 334 return 0;
336} 335}
337 336
338static int parse(struct nlattr *na, cpumask_t *mask) 337static int parse(struct nlattr *na, struct cpumask *mask)
339{ 338{
340 char *data; 339 char *data;
341 int len; 340 int len;
@@ -428,23 +427,33 @@ err:
428 427
429static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) 428static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
430{ 429{
431 int rc = 0; 430 int rc;
432 struct sk_buff *rep_skb; 431 struct sk_buff *rep_skb;
433 struct taskstats *stats; 432 struct taskstats *stats;
434 size_t size; 433 size_t size;
435 cpumask_t mask; 434 cpumask_var_t mask;
435
436 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
437 return -ENOMEM;
436 438
437 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); 439 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
438 if (rc < 0) 440 if (rc < 0)
439 return rc; 441 goto free_return_rc;
440 if (rc == 0) 442 if (rc == 0) {
441 return add_del_listener(info->snd_pid, &mask, REGISTER); 443 rc = add_del_listener(info->snd_pid, mask, REGISTER);
444 goto free_return_rc;
445 }
442 446
443 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask); 447 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
444 if (rc < 0) 448 if (rc < 0)
449 goto free_return_rc;
450 if (rc == 0) {
451 rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
452free_return_rc:
453 free_cpumask_var(mask);
445 return rc; 454 return rc;
446 if (rc == 0) 455 }
447 return add_del_listener(info->snd_pid, &mask, DEREGISTER); 456 free_cpumask_var(mask);
448 457
449 /* 458 /*
450 * Size includes space for nested attributes 459 * Size includes space for nested attributes
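
The taskstats handler follows the same allocate/parse/free discipline, with both listener branches funnelled through one free. One way to read the control flow, simplified (parse() returns a negative errno on failure, 0 when the attribute was present, and a positive value when it was absent; register_attr, deregister_attr and reply_with_stats() are placeholders for the real attribute lookups and reply path):

    cpumask_var_t mask;
    int rc;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

    rc = parse(register_attr, mask);
    if (rc == 0)
            rc = add_del_listener(pid, mask, REGISTER);
    if (rc <= 0)
            goto out_free;          /* handled, or a parse error */

    rc = parse(deregister_attr, mask);
    if (rc == 0)
            rc = add_del_listener(pid, mask, DEREGISTER);
    if (rc <= 0)
            goto out_free;

    /* Neither attribute present: the mask is no longer needed. */
    free_cpumask_var(mask);
    return reply_with_stats(info);

out_free:
    free_cpumask_var(mask);
    return rc;
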
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 9ed2eec97526..ca89e1593f08 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,10 +145,11 @@ static void clocksource_watchdog(unsigned long data)
145 * Cycle through CPUs to check if the CPUs stay 145 * Cycle through CPUs to check if the CPUs stay
146 * synchronized to each other. 146 * synchronized to each other.
147 */ 147 */
148 int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); 148 int next_cpu = cpumask_next(raw_smp_processor_id(),
149 cpu_online_mask);
149 150
150 if (next_cpu >= nr_cpu_ids) 151 if (next_cpu >= nr_cpu_ids)
151 next_cpu = first_cpu(cpu_online_map); 152 next_cpu = cpumask_first(cpu_online_mask);
152 watchdog_timer.expires += WATCHDOG_INTERVAL; 153 watchdog_timer.expires += WATCHDOG_INTERVAL;
153 add_timer_on(&watchdog_timer, next_cpu); 154 add_timer_on(&watchdog_timer, next_cpu);
154 } 155 }
@@ -173,7 +174,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
173 watchdog_last = watchdog->read(); 174 watchdog_last = watchdog->read();
174 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 175 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
175 add_timer_on(&watchdog_timer, 176 add_timer_on(&watchdog_timer,
176 first_cpu(cpu_online_map)); 177 cpumask_first(cpu_online_mask));
177 } 178 }
178 } else { 179 } else {
179 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) 180 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -195,7 +196,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
195 watchdog_timer.expires = 196 watchdog_timer.expires =
196 jiffies + WATCHDOG_INTERVAL; 197 jiffies + WATCHDOG_INTERVAL;
197 add_timer_on(&watchdog_timer, 198 add_timer_on(&watchdog_timer,
198 first_cpu(cpu_online_map)); 199 cpumask_first(cpu_online_mask));
199 } 200 }
200 } 201 }
201 } 202 }
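
cpumask_next() reports exhaustion by returning nr_cpu_ids or more, so the watchdog's round-robin walk over the online CPUs becomes an explicit wrap test instead of a comparison against the old NR_CPUS sentinel. The idiom in isolation:

    /* Advance to the next online CPU, wrapping back to the first. */
    int next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);

    if (next_cpu >= nr_cpu_ids)
            next_cpu = cpumask_first(cpu_online_mask);
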
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9590af2327be..118a3b3b3f9a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -28,7 +28,9 @@
28 */ 28 */
29 29
30struct tick_device tick_broadcast_device; 30struct tick_device tick_broadcast_device;
31static cpumask_t tick_broadcast_mask; 31/* FIXME: Use cpumask_var_t. */
32static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
33static DECLARE_BITMAP(tmpmask, NR_CPUS);
32static DEFINE_SPINLOCK(tick_broadcast_lock); 34static DEFINE_SPINLOCK(tick_broadcast_lock);
33static int tick_broadcast_force; 35static int tick_broadcast_force;
34 36
@@ -46,9 +48,9 @@ struct tick_device *tick_get_broadcast_device(void)
46 return &tick_broadcast_device; 48 return &tick_broadcast_device;
47} 49}
48 50
49cpumask_t *tick_get_broadcast_mask(void) 51struct cpumask *tick_get_broadcast_mask(void)
50{ 52{
51 return &tick_broadcast_mask; 53 return to_cpumask(tick_broadcast_mask);
52} 54}
53 55
54/* 56/*
@@ -72,7 +74,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
72 74
73 clockevents_exchange_device(NULL, dev); 75 clockevents_exchange_device(NULL, dev);
74 tick_broadcast_device.evtdev = dev; 76 tick_broadcast_device.evtdev = dev;
75 if (!cpus_empty(tick_broadcast_mask)) 77 if (!cpumask_empty(tick_get_broadcast_mask()))
76 tick_broadcast_start_periodic(dev); 78 tick_broadcast_start_periodic(dev);
77 return 1; 79 return 1;
78} 80}
@@ -104,7 +106,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
104 */ 106 */
105 if (!tick_device_is_functional(dev)) { 107 if (!tick_device_is_functional(dev)) {
106 dev->event_handler = tick_handle_periodic; 108 dev->event_handler = tick_handle_periodic;
107 cpu_set(cpu, tick_broadcast_mask); 109 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
108 tick_broadcast_start_periodic(tick_broadcast_device.evtdev); 110 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
109 ret = 1; 111 ret = 1;
110 } else { 112 } else {
@@ -116,7 +118,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
116 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { 118 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
117 int cpu = smp_processor_id(); 119 int cpu = smp_processor_id();
118 120
119 cpu_clear(cpu, tick_broadcast_mask); 121 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
120 tick_broadcast_clear_oneshot(cpu); 122 tick_broadcast_clear_oneshot(cpu);
121 } 123 }
122 } 124 }
@@ -125,9 +127,9 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
125} 127}
126 128
127/* 129/*
128 * Broadcast the event to the cpus, which are set in the mask 130 * Broadcast the event to the cpus, which are set in the mask (mangled).
129 */ 131 */
130static void tick_do_broadcast(cpumask_t mask) 132static void tick_do_broadcast(struct cpumask *mask)
131{ 133{
132 int cpu = smp_processor_id(); 134 int cpu = smp_processor_id();
133 struct tick_device *td; 135 struct tick_device *td;
@@ -135,22 +137,21 @@ static void tick_do_broadcast(cpumask_t mask)
135 /* 137 /*
136 * Check, if the current cpu is in the mask 138 * Check, if the current cpu is in the mask
137 */ 139 */
138 if (cpu_isset(cpu, mask)) { 140 if (cpumask_test_cpu(cpu, mask)) {
139 cpu_clear(cpu, mask); 141 cpumask_clear_cpu(cpu, mask);
140 td = &per_cpu(tick_cpu_device, cpu); 142 td = &per_cpu(tick_cpu_device, cpu);
141 td->evtdev->event_handler(td->evtdev); 143 td->evtdev->event_handler(td->evtdev);
142 } 144 }
143 145
144 if (!cpus_empty(mask)) { 146 if (!cpumask_empty(mask)) {
145 /* 147 /*
146 * It might be necessary to actually check whether the devices 148 * It might be necessary to actually check whether the devices
147 * have different broadcast functions. For now, just use the 149 * have different broadcast functions. For now, just use the
148 * one of the first device. This works as long as we have this 150 * one of the first device. This works as long as we have this
149 * misfeature only on x86 (lapic) 151 * misfeature only on x86 (lapic)
150 */ 152 */
151 cpu = first_cpu(mask); 153 td = &per_cpu(tick_cpu_device, cpumask_first(mask));
152 td = &per_cpu(tick_cpu_device, cpu); 154 td->evtdev->broadcast(mask);
153 td->evtdev->broadcast(&mask);
154 } 155 }
155} 156}
156 157
@@ -160,12 +161,11 @@ static void tick_do_broadcast(cpumask_t mask)
160 */ 161 */
161static void tick_do_periodic_broadcast(void) 162static void tick_do_periodic_broadcast(void)
162{ 163{
163 cpumask_t mask;
164
165 spin_lock(&tick_broadcast_lock); 164 spin_lock(&tick_broadcast_lock);
166 165
167 cpus_and(mask, cpu_online_map, tick_broadcast_mask); 166 cpumask_and(to_cpumask(tmpmask),
168 tick_do_broadcast(mask); 167 cpu_online_mask, tick_get_broadcast_mask());
168 tick_do_broadcast(to_cpumask(tmpmask));
169 169
170 spin_unlock(&tick_broadcast_lock); 170 spin_unlock(&tick_broadcast_lock);
171} 171}
@@ -228,13 +228,13 @@ static void tick_do_broadcast_on_off(void *why)
228 if (!tick_device_is_functional(dev)) 228 if (!tick_device_is_functional(dev))
229 goto out; 229 goto out;
230 230
231 bc_stopped = cpus_empty(tick_broadcast_mask); 231 bc_stopped = cpumask_empty(tick_get_broadcast_mask());
232 232
233 switch (*reason) { 233 switch (*reason) {
234 case CLOCK_EVT_NOTIFY_BROADCAST_ON: 234 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: 235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
236 if (!cpu_isset(cpu, tick_broadcast_mask)) { 236 if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
237 cpu_set(cpu, tick_broadcast_mask); 237 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
238 if (tick_broadcast_device.mode == 238 if (tick_broadcast_device.mode ==
239 TICKDEV_MODE_PERIODIC) 239 TICKDEV_MODE_PERIODIC)
240 clockevents_shutdown(dev); 240 clockevents_shutdown(dev);
@@ -244,8 +244,8 @@ static void tick_do_broadcast_on_off(void *why)
244 break; 244 break;
245 case CLOCK_EVT_NOTIFY_BROADCAST_OFF: 245 case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
246 if (!tick_broadcast_force && 246 if (!tick_broadcast_force &&
247 cpu_isset(cpu, tick_broadcast_mask)) { 247 cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
248 cpu_clear(cpu, tick_broadcast_mask); 248 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
249 if (tick_broadcast_device.mode == 249 if (tick_broadcast_device.mode ==
250 TICKDEV_MODE_PERIODIC) 250 TICKDEV_MODE_PERIODIC)
251 tick_setup_periodic(dev, 0); 251 tick_setup_periodic(dev, 0);
@@ -253,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why)
253 break; 253 break;
254 } 254 }
255 255
256 if (cpus_empty(tick_broadcast_mask)) { 256 if (cpumask_empty(tick_get_broadcast_mask())) {
257 if (!bc_stopped) 257 if (!bc_stopped)
258 clockevents_shutdown(bc); 258 clockevents_shutdown(bc);
259 } else if (bc_stopped) { 259 } else if (bc_stopped) {
@@ -272,7 +272,7 @@ out:
272 */ 272 */
273void tick_broadcast_on_off(unsigned long reason, int *oncpu) 273void tick_broadcast_on_off(unsigned long reason, int *oncpu)
274{ 274{
275 if (!cpu_isset(*oncpu, cpu_online_map)) 275 if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
276 printk(KERN_ERR "tick-broadcast: ignoring broadcast for " 276 printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
277 "offline CPU #%d\n", *oncpu); 277 "offline CPU #%d\n", *oncpu);
278 else 278 else
@@ -303,10 +303,10 @@ void tick_shutdown_broadcast(unsigned int *cpup)
303 spin_lock_irqsave(&tick_broadcast_lock, flags); 303 spin_lock_irqsave(&tick_broadcast_lock, flags);
304 304
305 bc = tick_broadcast_device.evtdev; 305 bc = tick_broadcast_device.evtdev;
306 cpu_clear(cpu, tick_broadcast_mask); 306 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
307 307
308 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { 308 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
309 if (bc && cpus_empty(tick_broadcast_mask)) 309 if (bc && cpumask_empty(tick_get_broadcast_mask()))
310 clockevents_shutdown(bc); 310 clockevents_shutdown(bc);
311 } 311 }
312 312
@@ -342,10 +342,10 @@ int tick_resume_broadcast(void)
342 342
343 switch (tick_broadcast_device.mode) { 343 switch (tick_broadcast_device.mode) {
344 case TICKDEV_MODE_PERIODIC: 344 case TICKDEV_MODE_PERIODIC:
345 if(!cpus_empty(tick_broadcast_mask)) 345 if (!cpumask_empty(tick_get_broadcast_mask()))
346 tick_broadcast_start_periodic(bc); 346 tick_broadcast_start_periodic(bc);
347 broadcast = cpu_isset(smp_processor_id(), 347 broadcast = cpumask_test_cpu(smp_processor_id(),
348 tick_broadcast_mask); 348 tick_get_broadcast_mask());
349 break; 349 break;
350 case TICKDEV_MODE_ONESHOT: 350 case TICKDEV_MODE_ONESHOT:
351 broadcast = tick_resume_broadcast_oneshot(bc); 351 broadcast = tick_resume_broadcast_oneshot(bc);
@@ -360,14 +360,15 @@ int tick_resume_broadcast(void)
360 360
361#ifdef CONFIG_TICK_ONESHOT 361#ifdef CONFIG_TICK_ONESHOT
362 362
363static cpumask_t tick_broadcast_oneshot_mask; 363/* FIXME: use cpumask_var_t. */
364static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
364 365
365/* 366/*
366 * Debugging: see timer_list.c 367 * Exposed for debugging: see timer_list.c
367 */ 368 */
368cpumask_t *tick_get_broadcast_oneshot_mask(void) 369struct cpumask *tick_get_broadcast_oneshot_mask(void)
369{ 370{
370 return &tick_broadcast_oneshot_mask; 371 return to_cpumask(tick_broadcast_oneshot_mask);
371} 372}
372 373
373static int tick_broadcast_set_event(ktime_t expires, int force) 374static int tick_broadcast_set_event(ktime_t expires, int force)
@@ -389,7 +390,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
389 */ 390 */
390void tick_check_oneshot_broadcast(int cpu) 391void tick_check_oneshot_broadcast(int cpu)
391{ 392{
392 if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 393 if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
393 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); 394 struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
394 395
395 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); 396 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
@@ -402,7 +403,6 @@ void tick_check_oneshot_broadcast(int cpu)
402static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) 403static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
403{ 404{
404 struct tick_device *td; 405 struct tick_device *td;
405 cpumask_t mask;
406 ktime_t now, next_event; 406 ktime_t now, next_event;
407 int cpu; 407 int cpu;
408 408
@@ -410,13 +410,13 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
410again: 410again:
411 dev->next_event.tv64 = KTIME_MAX; 411 dev->next_event.tv64 = KTIME_MAX;
412 next_event.tv64 = KTIME_MAX; 412 next_event.tv64 = KTIME_MAX;
413 mask = CPU_MASK_NONE; 413 cpumask_clear(to_cpumask(tmpmask));
414 now = ktime_get(); 414 now = ktime_get();
415 /* Find all expired events */ 415 /* Find all expired events */
416 for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { 416 for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
417 td = &per_cpu(tick_cpu_device, cpu); 417 td = &per_cpu(tick_cpu_device, cpu);
418 if (td->evtdev->next_event.tv64 <= now.tv64) 418 if (td->evtdev->next_event.tv64 <= now.tv64)
419 cpu_set(cpu, mask); 419 cpumask_set_cpu(cpu, to_cpumask(tmpmask));
420 else if (td->evtdev->next_event.tv64 < next_event.tv64) 420 else if (td->evtdev->next_event.tv64 < next_event.tv64)
421 next_event.tv64 = td->evtdev->next_event.tv64; 421 next_event.tv64 = td->evtdev->next_event.tv64;
422 } 422 }
@@ -424,7 +424,7 @@ again:
424 /* 424 /*
425 * Wakeup the cpus which have an expired event. 425 * Wakeup the cpus which have an expired event.
426 */ 426 */
427 tick_do_broadcast(mask); 427 tick_do_broadcast(to_cpumask(tmpmask));
428 428
429 /* 429 /*
430 * Two reasons for reprogram: 430 * Two reasons for reprogram:
@@ -476,15 +476,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
476 goto out; 476 goto out;
477 477
478 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { 478 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
479 if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 479 if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
480 cpu_set(cpu, tick_broadcast_oneshot_mask); 480 cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
481 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 481 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
482 if (dev->next_event.tv64 < bc->next_event.tv64) 482 if (dev->next_event.tv64 < bc->next_event.tv64)
483 tick_broadcast_set_event(dev->next_event, 1); 483 tick_broadcast_set_event(dev->next_event, 1);
484 } 484 }
485 } else { 485 } else {
486 if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 486 if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
487 cpu_clear(cpu, tick_broadcast_oneshot_mask); 487 cpumask_clear_cpu(cpu,
488 tick_get_broadcast_oneshot_mask());
488 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); 489 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
489 if (dev->next_event.tv64 != KTIME_MAX) 490 if (dev->next_event.tv64 != KTIME_MAX)
490 tick_program_event(dev->next_event, 1); 491 tick_program_event(dev->next_event, 1);
@@ -502,15 +503,16 @@ out:
502 */ 503 */
503static void tick_broadcast_clear_oneshot(int cpu) 504static void tick_broadcast_clear_oneshot(int cpu)
504{ 505{
505 cpu_clear(cpu, tick_broadcast_oneshot_mask); 506 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
506} 507}
507 508
508static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) 509static void tick_broadcast_init_next_event(struct cpumask *mask,
510 ktime_t expires)
509{ 511{
510 struct tick_device *td; 512 struct tick_device *td;
511 int cpu; 513 int cpu;
512 514
513 for_each_cpu_mask_nr(cpu, *mask) { 515 for_each_cpu(cpu, mask) {
514 td = &per_cpu(tick_cpu_device, cpu); 516 td = &per_cpu(tick_cpu_device, cpu);
515 if (td->evtdev) 517 if (td->evtdev)
516 td->evtdev->next_event = expires; 518 td->evtdev->next_event = expires;
@@ -526,7 +528,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
526 if (bc->event_handler != tick_handle_oneshot_broadcast) { 528 if (bc->event_handler != tick_handle_oneshot_broadcast) {
527 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 529 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
528 int cpu = smp_processor_id(); 530 int cpu = smp_processor_id();
529 cpumask_t mask;
530 531
531 bc->event_handler = tick_handle_oneshot_broadcast; 532 bc->event_handler = tick_handle_oneshot_broadcast;
532 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); 533 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
@@ -540,13 +541,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
540 * oneshot_mask bits for those and program the 541 * oneshot_mask bits for those and program the
541 * broadcast device to fire. 542 * broadcast device to fire.
542 */ 543 */
543 mask = tick_broadcast_mask; 544 cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
544 cpu_clear(cpu, mask); 545 cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
545 cpus_or(tick_broadcast_oneshot_mask, 546 cpumask_or(tick_get_broadcast_oneshot_mask(),
546 tick_broadcast_oneshot_mask, mask); 547 tick_get_broadcast_oneshot_mask(),
547 548 to_cpumask(tmpmask));
548 if (was_periodic && !cpus_empty(mask)) { 549
549 tick_broadcast_init_next_event(&mask, tick_next_period); 550 if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
551 tick_broadcast_init_next_event(to_cpumask(tmpmask),
552 tick_next_period);
550 tick_broadcast_set_event(tick_next_period, 1); 553 tick_broadcast_set_event(tick_next_period, 1);
551 } else 554 } else
552 bc->next_event.tv64 = KTIME_MAX; 555 bc->next_event.tv64 = KTIME_MAX;
@@ -585,7 +588,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
585 * Clear the broadcast mask flag for the dead cpu, but do not 588 * Clear the broadcast mask flag for the dead cpu, but do not
586 * stop the broadcast device! 589 * stop the broadcast device!
587 */ 590 */
588 cpu_clear(cpu, tick_broadcast_oneshot_mask); 591 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
589 592
590 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 593 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
591} 594}
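
Routing every touch of the static broadcast bitmaps through tick_get_broadcast_mask() and tick_get_broadcast_oneshot_mask() means the promised move to cpumask_var_t (the FIXMEs above) only has to change the definitions and the accessors, not the call sites. The accessor pattern in miniature (all names here are placeholders):

    static DECLARE_BITMAP(example_mask, NR_CPUS);

    static struct cpumask *example_get_mask(void)
    {
            return to_cpumask(example_mask);
    }

    static void example_use(int cpu)
    {
            /* Call sites never see the raw bitmap. */
            cpumask_set_cpu(cpu, example_get_mask());
            if (cpumask_empty(example_get_mask()))
                    return;
    }
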
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index f8372be74122..63e05d423a09 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -254,7 +254,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
254 curdev = NULL; 254 curdev = NULL;
255 } 255 }
256 clockevents_exchange_device(curdev, newdev); 256 clockevents_exchange_device(curdev, newdev);
257 tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); 257 tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
259 tick_oneshot_notify(); 259 tick_oneshot_notify();
260 260
@@ -299,9 +299,9 @@ static void tick_shutdown(unsigned int *cpup)
299 } 299 }
300 /* Transfer the do_timer job away from this cpu */ 300 /* Transfer the do_timer job away from this cpu */
301 if (*cpup == tick_do_timer_cpu) { 301 if (*cpup == tick_do_timer_cpu) {
302 int cpu = first_cpu(cpu_online_map); 302 int cpu = cpumask_first(cpu_online_mask);
303 303
304 tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : 304 tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
305 TICK_DO_TIMER_NONE; 305 TICK_DO_TIMER_NONE;
306 } 306 }
307 spin_unlock_irqrestore(&tick_device_lock, flags); 307 spin_unlock_irqrestore(&tick_device_lock, flags);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d601a7c4587..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -195,7 +195,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
195EXPORT_SYMBOL_GPL(ring_buffer_event_data); 195EXPORT_SYMBOL_GPL(ring_buffer_event_data);
196 196
197#define for_each_buffer_cpu(buffer, cpu) \ 197#define for_each_buffer_cpu(buffer, cpu) \
198 for_each_cpu_mask(cpu, buffer->cpumask) 198 for_each_cpu(cpu, buffer->cpumask)
199 199
200#define TS_SHIFT 27 200#define TS_SHIFT 27
201#define TS_MASK ((1ULL << TS_SHIFT) - 1) 201#define TS_MASK ((1ULL << TS_SHIFT) - 1)
@@ -267,7 +267,7 @@ struct ring_buffer {
267 unsigned pages; 267 unsigned pages;
268 unsigned flags; 268 unsigned flags;
269 int cpus; 269 int cpus;
270 cpumask_t cpumask; 270 cpumask_var_t cpumask;
271 atomic_t record_disabled; 271 atomic_t record_disabled;
272 272
273 struct mutex mutex; 273 struct mutex mutex;
@@ -458,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
458 if (!buffer) 458 if (!buffer)
459 return NULL; 459 return NULL;
460 460
461 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
462 goto fail_free_buffer;
463
461 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 464 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
462 buffer->flags = flags; 465 buffer->flags = flags;
463 466
@@ -465,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
465 if (buffer->pages == 1) 468 if (buffer->pages == 1)
466 buffer->pages++; 469 buffer->pages++;
467 470
468 buffer->cpumask = cpu_possible_map; 471 cpumask_copy(buffer->cpumask, cpu_possible_mask);
469 buffer->cpus = nr_cpu_ids; 472 buffer->cpus = nr_cpu_ids;
470 473
471 bsize = sizeof(void *) * nr_cpu_ids; 474 bsize = sizeof(void *) * nr_cpu_ids;
472 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 475 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
473 GFP_KERNEL); 476 GFP_KERNEL);
474 if (!buffer->buffers) 477 if (!buffer->buffers)
475 goto fail_free_buffer; 478 goto fail_free_cpumask;
476 479
477 for_each_buffer_cpu(buffer, cpu) { 480 for_each_buffer_cpu(buffer, cpu) {
478 buffer->buffers[cpu] = 481 buffer->buffers[cpu] =
@@ -492,6 +495,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
492 } 495 }
493 kfree(buffer->buffers); 496 kfree(buffer->buffers);
494 497
498 fail_free_cpumask:
499 free_cpumask_var(buffer->cpumask);
500
495 fail_free_buffer: 501 fail_free_buffer:
496 kfree(buffer); 502 kfree(buffer);
497 return NULL; 503 return NULL;
@@ -510,6 +516,8 @@ ring_buffer_free(struct ring_buffer *buffer)
510 for_each_buffer_cpu(buffer, cpu) 516 for_each_buffer_cpu(buffer, cpu)
511 rb_free_cpu_buffer(buffer->buffers[cpu]); 517 rb_free_cpu_buffer(buffer->buffers[cpu]);
512 518
519 free_cpumask_var(buffer->cpumask);
520
513 kfree(buffer); 521 kfree(buffer);
514} 522}
515EXPORT_SYMBOL_GPL(ring_buffer_free); 523EXPORT_SYMBOL_GPL(ring_buffer_free);
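With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a real pointer, so every alloc_cpumask_var() must be paired with a free_cpumask_var() on both the error and teardown paths, exactly as the two hunks above do for buffer->cpumask. A minimal sketch of the pattern (struct foo and its helpers are hypothetical):

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct foo {
            cpumask_var_t mask;
    };

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;
            if (!alloc_cpumask_var(&f->mask, GFP_KERNEL))
                    goto fail_free_foo;

            cpumask_copy(f->mask, cpu_possible_mask);
            return f;

    fail_free_foo:
            kfree(f);
            return NULL;
    }

    static void foo_free(struct foo *f)
    {
            free_cpumask_var(f->mask);      /* before freeing the container */
            kfree(f);
    }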
@@ -1283,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1283 1291
1284 cpu = raw_smp_processor_id(); 1292 cpu = raw_smp_processor_id();
1285 1293
1286 if (!cpu_isset(cpu, buffer->cpumask)) 1294 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1287 goto out; 1295 goto out;
1288 1296
1289 cpu_buffer = buffer->buffers[cpu]; 1297 cpu_buffer = buffer->buffers[cpu];
@@ -1396,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1396 1404
1397 cpu = raw_smp_processor_id(); 1405 cpu = raw_smp_processor_id();
1398 1406
1399 if (!cpu_isset(cpu, buffer->cpumask)) 1407 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1400 goto out; 1408 goto out;
1401 1409
1402 cpu_buffer = buffer->buffers[cpu]; 1410 cpu_buffer = buffer->buffers[cpu];
@@ -1478,7 +1486,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1478{ 1486{
1479 struct ring_buffer_per_cpu *cpu_buffer; 1487 struct ring_buffer_per_cpu *cpu_buffer;
1480 1488
1481 if (!cpu_isset(cpu, buffer->cpumask)) 1489 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1482 return; 1490 return;
1483 1491
1484 cpu_buffer = buffer->buffers[cpu]; 1492 cpu_buffer = buffer->buffers[cpu];
@@ -1498,7 +1506,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1498{ 1506{
1499 struct ring_buffer_per_cpu *cpu_buffer; 1507 struct ring_buffer_per_cpu *cpu_buffer;
1500 1508
1501 if (!cpu_isset(cpu, buffer->cpumask)) 1509 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1502 return; 1510 return;
1503 1511
1504 cpu_buffer = buffer->buffers[cpu]; 1512 cpu_buffer = buffer->buffers[cpu];
@@ -1515,7 +1523,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1515{ 1523{
1516 struct ring_buffer_per_cpu *cpu_buffer; 1524 struct ring_buffer_per_cpu *cpu_buffer;
1517 1525
1518 if (!cpu_isset(cpu, buffer->cpumask)) 1526 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1519 return 0; 1527 return 0;
1520 1528
1521 cpu_buffer = buffer->buffers[cpu]; 1529 cpu_buffer = buffer->buffers[cpu];
@@ -1532,7 +1540,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1532{ 1540{
1533 struct ring_buffer_per_cpu *cpu_buffer; 1541 struct ring_buffer_per_cpu *cpu_buffer;
1534 1542
1535 if (!cpu_isset(cpu, buffer->cpumask)) 1543 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1536 return 0; 1544 return 0;
1537 1545
1538 cpu_buffer = buffer->buffers[cpu]; 1546 cpu_buffer = buffer->buffers[cpu];
@@ -1850,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1850 struct buffer_page *reader; 1858 struct buffer_page *reader;
1851 int nr_loops = 0; 1859 int nr_loops = 0;
1852 1860
1853 if (!cpu_isset(cpu, buffer->cpumask)) 1861 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1854 return NULL; 1862 return NULL;
1855 1863
1856 cpu_buffer = buffer->buffers[cpu]; 1864 cpu_buffer = buffer->buffers[cpu];
@@ -2025,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2025 struct ring_buffer_event *event; 2033 struct ring_buffer_event *event;
2026 unsigned long flags; 2034 unsigned long flags;
2027 2035
2028 if (!cpu_isset(cpu, buffer->cpumask)) 2036 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2029 return NULL; 2037 return NULL;
2030 2038
2031 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2039 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2062,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2062 struct ring_buffer_iter *iter; 2070 struct ring_buffer_iter *iter;
2063 unsigned long flags; 2071 unsigned long flags;
2064 2072
2065 if (!cpu_isset(cpu, buffer->cpumask)) 2073 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2066 return NULL; 2074 return NULL;
2067 2075
2068 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 2076 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2172,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2172 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2180 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2173 unsigned long flags; 2181 unsigned long flags;
2174 2182
2175 if (!cpu_isset(cpu, buffer->cpumask)) 2183 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2176 return; 2184 return;
2177 2185
2178 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2186 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2228,7 +2236,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2228{ 2236{
2229 struct ring_buffer_per_cpu *cpu_buffer; 2237 struct ring_buffer_per_cpu *cpu_buffer;
2230 2238
2231 if (!cpu_isset(cpu, buffer->cpumask)) 2239 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2232 return 1; 2240 return 1;
2233 2241
2234 cpu_buffer = buffer->buffers[cpu]; 2242 cpu_buffer = buffer->buffers[cpu];
@@ -2252,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2252 struct ring_buffer_per_cpu *cpu_buffer_a; 2260 struct ring_buffer_per_cpu *cpu_buffer_a;
2253 struct ring_buffer_per_cpu *cpu_buffer_b; 2261 struct ring_buffer_per_cpu *cpu_buffer_b;
2254 2262
2255 if (!cpu_isset(cpu, buffer_a->cpumask) || 2263 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2256 !cpu_isset(cpu, buffer_b->cpumask)) 2264 !cpumask_test_cpu(cpu, buffer_b->cpumask))
2257 return -EINVAL; 2265 return -EINVAL;
2258 2266
2259 /* At least make sure the two buffers are somewhat the same */ 2267 /* At least make sure the two buffers are somewhat the same */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0e91f43b6baf..c580233add95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
89 preempt_enable(); 89 preempt_enable();
90} 90}
91 91
92static cpumask_t __read_mostly tracing_buffer_mask; 92static cpumask_var_t __read_mostly tracing_buffer_mask;
93 93
94#define for_each_tracing_cpu(cpu) \ 94#define for_each_tracing_cpu(cpu) \
95 for_each_cpu_mask(cpu, tracing_buffer_mask) 95 for_each_cpu(cpu, tracing_buffer_mask)
96 96
97/* 97/*
98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops 98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -1811,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1812 return; 1812 return;
1813 1813
1814 if (cpu_isset(iter->cpu, iter->started)) 1814 if (cpumask_test_cpu(iter->cpu, iter->started))
1815 return; 1815 return;
1816 1816
1817 cpu_set(iter->cpu, iter->started); 1817 cpumask_set_cpu(iter->cpu, iter->started);
1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); 1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1819} 1819}
1820 1820
@@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = {
2646/* 2646/*
2647 * Only trace on a CPU if the bitmask is set: 2647 * Only trace on a CPU if the bitmask is set:
2648 */ 2648 */
2649static cpumask_t tracing_cpumask = CPU_MASK_ALL; 2649static cpumask_var_t tracing_cpumask;
2650
2651/*
2652 * When tracing/tracing_cpu_mask is modified then this holds
2653 * the new bitmask we are about to install:
2654 */
2655static cpumask_t tracing_cpumask_new;
2656 2650
2657/* 2651/*
2658 * The tracer itself will not take this lock, but still we want 2652 * The tracer itself will not take this lock, but still we want
@@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
2674 2668
2675 mutex_lock(&tracing_cpumask_update_lock); 2669 mutex_lock(&tracing_cpumask_update_lock);
2676 2670
2677 len = cpumask_scnprintf(mask_str, count, &tracing_cpumask); 2671 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2678 if (count - len < 2) { 2672 if (count - len < 2) {
2679 count = -EINVAL; 2673 count = -EINVAL;
2680 goto out_err; 2674 goto out_err;
@@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2693 size_t count, loff_t *ppos) 2687 size_t count, loff_t *ppos)
2694{ 2688{
2695 int err, cpu; 2689 int err, cpu;
2690 cpumask_var_t tracing_cpumask_new;
2691
2692 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2693 return -ENOMEM;
2696 2694
2697 mutex_lock(&tracing_cpumask_update_lock); 2695 mutex_lock(&tracing_cpumask_update_lock);
2698 err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new); 2696 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2699 if (err) 2697 if (err)
2700 goto err_unlock; 2698 goto err_unlock;
2701 2699
@@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2706 * Increase/decrease the disabled counter if we are 2704 * Increase/decrease the disabled counter if we are
2707 * about to flip a bit in the cpumask: 2705 * about to flip a bit in the cpumask:
2708 */ 2706 */
2709 if (cpu_isset(cpu, tracing_cpumask) && 2707 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2710 !cpu_isset(cpu, tracing_cpumask_new)) { 2708 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2711 atomic_inc(&global_trace.data[cpu]->disabled); 2709 atomic_inc(&global_trace.data[cpu]->disabled);
2712 } 2710 }
2713 if (!cpu_isset(cpu, tracing_cpumask) && 2711 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2714 cpu_isset(cpu, tracing_cpumask_new)) { 2712 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2715 atomic_dec(&global_trace.data[cpu]->disabled); 2713 atomic_dec(&global_trace.data[cpu]->disabled);
2716 } 2714 }
2717 } 2715 }
2718 __raw_spin_unlock(&ftrace_max_lock); 2716 __raw_spin_unlock(&ftrace_max_lock);
2719 local_irq_enable(); 2717 local_irq_enable();
2720 2718
2721 tracing_cpumask = tracing_cpumask_new; 2719 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2722 2720
2723 mutex_unlock(&tracing_cpumask_update_lock); 2721 mutex_unlock(&tracing_cpumask_update_lock);
2722 free_cpumask_var(tracing_cpumask_new);
2724 2723
2725 return count; 2724 return count;
2726 2725
2727err_unlock: 2726err_unlock:
2728 mutex_unlock(&tracing_cpumask_update_lock); 2727 mutex_unlock(&tracing_cpumask_update_lock);
 2728 free_cpumask_var(tracing_cpumask_new);
2729 2729
2730 return err; 2730 return err;
2731} 2731}
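The write handler above now parses the user-supplied mask into a temporary cpumask_var_t, applies the per-cpu deltas under the lock, commits with cpumask_copy(), and frees only the temporary (never the live global); note also that cpumask_parse_user() now takes the struct cpumask * directly, without the old address-of. A condensed sketch of that shape (update_mask() and dest are hypothetical):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static int update_mask(struct cpumask *dest,    /* the long-lived mask */
                           const char __user *ubuf, size_t count)
    {
            cpumask_var_t new;
            int err;

            if (!alloc_cpumask_var(&new, GFP_KERNEL))
                    return -ENOMEM;

            err = cpumask_parse_user(ubuf, count, new);
            if (!err)
                    cpumask_copy(dest, new);

            free_cpumask_var(new);          /* the temporary, never 'dest' */
            return err;
    }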
@@ -3114,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3114 if (!iter) 3114 if (!iter)
3115 return -ENOMEM; 3115 return -ENOMEM;
3116 3116
3117 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3118 kfree(iter);
3119 return -ENOMEM;
3120 }
3121
3117 mutex_lock(&trace_types_lock); 3122 mutex_lock(&trace_types_lock);
3118 3123
3119 /* trace pipe does not show start of buffer */ 3124 /* trace pipe does not show start of buffer */
3120 cpus_setall(iter->started); 3125 cpumask_setall(iter->started);
3121 3126
3122 iter->tr = &global_trace; 3127 iter->tr = &global_trace;
3123 iter->trace = current_trace; 3128 iter->trace = current_trace;
@@ -3134,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
3134{ 3139{
3135 struct trace_iterator *iter = file->private_data; 3140 struct trace_iterator *iter = file->private_data;
3136 3141
3142 free_cpumask_var(iter->started);
3137 kfree(iter); 3143 kfree(iter);
3138 atomic_dec(&tracing_reader); 3144 atomic_dec(&tracing_reader);
3139 3145
@@ -3752,7 +3758,6 @@ void ftrace_dump(void)
3752 static DEFINE_SPINLOCK(ftrace_dump_lock); 3758 static DEFINE_SPINLOCK(ftrace_dump_lock);
3753 /* use static because iter can be a bit big for the stack */ 3759 /* use static because iter can be a bit big for the stack */
3754 static struct trace_iterator iter; 3760 static struct trace_iterator iter;
3755 static cpumask_t mask;
3756 static int dump_ran; 3761 static int dump_ran;
3757 unsigned long flags; 3762 unsigned long flags;
3758 int cnt = 0, cpu; 3763 int cnt = 0, cpu;
@@ -3786,8 +3791,6 @@ void ftrace_dump(void)
3786 * and then release the locks again. 3791 * and then release the locks again.
3787 */ 3792 */
3788 3793
3789 cpus_clear(mask);
3790
3791 while (!trace_empty(&iter)) { 3794 while (!trace_empty(&iter)) {
3792 3795
3793 if (!cnt) 3796 if (!cnt)
@@ -3823,19 +3826,28 @@ __init static int tracer_alloc_buffers(void)
3823{ 3826{
3824 struct trace_array_cpu *data; 3827 struct trace_array_cpu *data;
3825 int i; 3828 int i;
3829 int ret = -ENOMEM;
3826 3830
3827 /* TODO: make the number of buffers hot pluggable with CPUS */ 3831 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
3828 tracing_buffer_mask = cpu_possible_map; 3832 goto out;
3833
3834 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
3835 goto out_free_buffer_mask;
3836
3837 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
3838 cpumask_copy(tracing_cpumask, cpu_all_mask);
3829 3839
3840 /* TODO: make the number of buffers hot pluggable with CPUS */
3830 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 3841 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3831 TRACE_BUFFER_FLAGS); 3842 TRACE_BUFFER_FLAGS);
3832 if (!global_trace.buffer) { 3843 if (!global_trace.buffer) {
3833 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 3844 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3834 WARN_ON(1); 3845 WARN_ON(1);
3835 return 0; 3846 goto out_free_cpumask;
3836 } 3847 }
3837 global_trace.entries = ring_buffer_size(global_trace.buffer); 3848 global_trace.entries = ring_buffer_size(global_trace.buffer);
3838 3849
3850
3839#ifdef CONFIG_TRACER_MAX_TRACE 3851#ifdef CONFIG_TRACER_MAX_TRACE
3840 max_tr.buffer = ring_buffer_alloc(trace_buf_size, 3852 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3841 TRACE_BUFFER_FLAGS); 3853 TRACE_BUFFER_FLAGS);
@@ -3843,7 +3855,7 @@ __init static int tracer_alloc_buffers(void)
3843 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 3855 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3844 WARN_ON(1); 3856 WARN_ON(1);
3845 ring_buffer_free(global_trace.buffer); 3857 ring_buffer_free(global_trace.buffer);
3846 return 0; 3858 goto out_free_cpumask;
3847 } 3859 }
3848 max_tr.entries = ring_buffer_size(max_tr.buffer); 3860 max_tr.entries = ring_buffer_size(max_tr.buffer);
3849 WARN_ON(max_tr.entries != global_trace.entries); 3861 WARN_ON(max_tr.entries != global_trace.entries);
@@ -3873,8 +3885,14 @@ __init static int tracer_alloc_buffers(void)
3873 &trace_panic_notifier); 3885 &trace_panic_notifier);
3874 3886
3875 register_die_notifier(&trace_die_notifier); 3887 register_die_notifier(&trace_die_notifier);
3888 ret = 0;
3876 3889
3877 return 0; 3890out_free_cpumask:
3891 free_cpumask_var(tracing_cpumask);
3892out_free_buffer_mask:
3893 free_cpumask_var(tracing_buffer_mask);
3894out:
3895 return ret;
3878} 3896}
3879early_initcall(tracer_alloc_buffers); 3897early_initcall(tracer_alloc_buffers);
3880fs_initcall(tracer_init_debugfs); 3898fs_initcall(tracer_init_debugfs);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cc7a4f864036..4d3d381bfd95 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -368,7 +368,7 @@ struct trace_iterator {
368 loff_t pos; 368 loff_t pos;
369 long idx; 369 long idx;
370 370
371 cpumask_t started; 371 cpumask_var_t started;
372}; 372};
373 373
374int tracing_is_enabled(void); 374int tracing_is_enabled(void);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 3ccebde28482..366c8c333e13 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -42,7 +42,7 @@ static int boot_trace_init(struct trace_array *tr)
42 int cpu; 42 int cpu;
43 boot_trace = tr; 43 boot_trace = tr;
44 44
45 for_each_cpu_mask(cpu, cpu_possible_map) 45 for_each_cpu(cpu, cpu_possible_mask)
46 tracing_reset(tr, cpu); 46 tracing_reset(tr, cpu);
47 47
48 tracing_sched_switch_assign_trace(tr); 48 tracing_sched_switch_assign_trace(tr);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4bf39fcae97a..930c08e5b38e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -79,7 +79,7 @@ print_graph_cpu(struct trace_seq *s, int cpu)
79 int i; 79 int i;
80 int ret; 80 int ret;
81 int log10_this = log10_cpu(cpu); 81 int log10_this = log10_cpu(cpu);
82 int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); 82 int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
83 83
84 84
85 /* 85 /*
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index b6a3e20a49a9..649df22d435f 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -46,7 +46,7 @@ static void bts_trace_start(struct trace_array *tr)
46 46
47 tracing_reset_online_cpus(tr); 47 tracing_reset_online_cpus(tr);
48 48
49 for_each_cpu_mask(cpu, cpu_possible_map) 49 for_each_cpu(cpu, cpu_possible_mask)
50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); 50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
51} 51}
52 52
@@ -62,7 +62,7 @@ static void bts_trace_stop(struct trace_array *tr)
62{ 62{
63 int cpu; 63 int cpu;
64 64
65 for_each_cpu_mask(cpu, cpu_possible_map) 65 for_each_cpu(cpu, cpu_possible_mask)
66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); 66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
67} 67}
68 68
@@ -172,7 +172,7 @@ static void trace_bts_prepare(struct trace_iterator *iter)
172{ 172{
173 int cpu; 173 int cpu;
174 174
175 for_each_cpu_mask(cpu, cpu_possible_map) 175 for_each_cpu(cpu, cpu_possible_mask)
176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); 176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
177} 177}
178 178
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index a7172a352f62..7bda248daf55 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -39,7 +39,7 @@ static int power_trace_init(struct trace_array *tr)
39 39
40 trace_power_enabled = 1; 40 trace_power_enabled = 1;
41 41
42 for_each_cpu_mask(cpu, cpu_possible_map) 42 for_each_cpu(cpu, cpu_possible_mask)
43 tracing_reset(tr, cpu); 43 tracing_reset(tr, cpu);
44 return 0; 44 return 0;
45} 45}
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index a5779bd975db..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,9 +196,9 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
196 return HRTIMER_RESTART; 196 return HRTIMER_RESTART;
197} 197}
198 198
199static void start_stack_timer(int cpu) 199static void start_stack_timer(void *unused)
200{ 200{
201 struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); 201 struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
202 202
203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
204 hrtimer->function = stack_trace_timer_fn; 204 hrtimer->function = stack_trace_timer_fn;
@@ -208,14 +208,7 @@ static void start_stack_timer(int cpu)
208 208
209static void start_stack_timers(void) 209static void start_stack_timers(void)
210{ 210{
211 cpumask_t saved_mask = current->cpus_allowed; 211 on_each_cpu(start_stack_timer, NULL, 1);
212 int cpu;
213
214 for_each_online_cpu(cpu) {
215 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
216 start_stack_timer(cpu);
217 }
218 set_cpus_allowed_ptr(current, &saved_mask);
219} 212}
220 213
221static void stop_stack_timer(int cpu) 214static void stop_stack_timer(int cpu)
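Rather than rebinding the current task to each CPU in turn just to arm a per-cpu hrtimer, the patch calls on_each_cpu(), which runs the callback locally and via IPI on every other online CPU; the callback runs with interrupts disabled and must not sleep. A minimal sketch, not part of the patch:

    #include <linux/smp.h>

    static void arm_one(void *info)
    {
            /* Runs on every online CPU, with interrupts disabled. */
    }

    static void arm_all(void)
    {
            /* wait == 1: return only after every CPU has run arm_one(). */
            on_each_cpu(arm_one, NULL, 1);
    }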
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4952322cba45..2f445833ae37 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(workqueue_lock);
73static LIST_HEAD(workqueues); 73static LIST_HEAD(workqueues);
74 74
75static int singlethread_cpu __read_mostly; 75static int singlethread_cpu __read_mostly;
76static cpumask_t cpu_singlethread_map __read_mostly; 76static const struct cpumask *cpu_singlethread_map __read_mostly;
77/* 77/*
78 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD 78 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
79 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work 79 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
@@ -81,7 +81,7 @@ static cpumask_t cpu_singlethread_map __read_mostly;
 81 * use cpu_possible_map; the cpumask below is more documentation 81 * use cpu_possible_map; the cpumask below is more documentation
 82 * than an optimization. 82 * than an optimization.
83 */ 83 */
84static cpumask_t cpu_populated_map __read_mostly; 84static cpumask_var_t cpu_populated_map __read_mostly;
85 85
86/* If it's single threaded, it isn't in the list of workqueues. */ 86/* If it's single threaded, it isn't in the list of workqueues. */
87static inline int is_wq_single_threaded(struct workqueue_struct *wq) 87static inline int is_wq_single_threaded(struct workqueue_struct *wq)
@@ -89,10 +89,10 @@ static inline int is_wq_single_threaded(struct workqueue_struct *wq)
89 return wq->singlethread; 89 return wq->singlethread;
90} 90}
91 91
92static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) 92static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
93{ 93{
94 return is_wq_single_threaded(wq) 94 return is_wq_single_threaded(wq)
95 ? &cpu_singlethread_map : &cpu_populated_map; 95 ? cpu_singlethread_map : cpu_populated_map;
96} 96}
97 97
98static 98static
@@ -410,7 +410,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
410 */ 410 */
411void flush_workqueue(struct workqueue_struct *wq) 411void flush_workqueue(struct workqueue_struct *wq)
412{ 412{
413 const cpumask_t *cpu_map = wq_cpu_map(wq); 413 const struct cpumask *cpu_map = wq_cpu_map(wq);
414 int cpu; 414 int cpu;
415 415
416 might_sleep(); 416 might_sleep();
@@ -532,7 +532,7 @@ static void wait_on_work(struct work_struct *work)
532{ 532{
533 struct cpu_workqueue_struct *cwq; 533 struct cpu_workqueue_struct *cwq;
534 struct workqueue_struct *wq; 534 struct workqueue_struct *wq;
535 const cpumask_t *cpu_map; 535 const struct cpumask *cpu_map;
536 int cpu; 536 int cpu;
537 537
538 might_sleep(); 538 might_sleep();
@@ -903,7 +903,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
903 */ 903 */
904void destroy_workqueue(struct workqueue_struct *wq) 904void destroy_workqueue(struct workqueue_struct *wq)
905{ 905{
906 const cpumask_t *cpu_map = wq_cpu_map(wq); 906 const struct cpumask *cpu_map = wq_cpu_map(wq);
907 int cpu; 907 int cpu;
908 908
909 cpu_maps_update_begin(); 909 cpu_maps_update_begin();
@@ -933,7 +933,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
933 933
934 switch (action) { 934 switch (action) {
935 case CPU_UP_PREPARE: 935 case CPU_UP_PREPARE:
936 cpu_set(cpu, cpu_populated_map); 936 cpumask_set_cpu(cpu, cpu_populated_map);
937 } 937 }
938undo: 938undo:
939 list_for_each_entry(wq, &workqueues, list) { 939 list_for_each_entry(wq, &workqueues, list) {
@@ -964,7 +964,7 @@ undo:
964 switch (action) { 964 switch (action) {
965 case CPU_UP_CANCELED: 965 case CPU_UP_CANCELED:
966 case CPU_POST_DEAD: 966 case CPU_POST_DEAD:
967 cpu_clear(cpu, cpu_populated_map); 967 cpumask_clear_cpu(cpu, cpu_populated_map);
968 } 968 }
969 969
970 return ret; 970 return ret;
@@ -1017,9 +1017,11 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
1017 1017
1018void __init init_workqueues(void) 1018void __init init_workqueues(void)
1019{ 1019{
1020 cpu_populated_map = cpu_online_map; 1020 alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
1021 singlethread_cpu = first_cpu(cpu_possible_map); 1021
1022 cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu); 1022 cpumask_copy(cpu_populated_map, cpu_online_mask);
1023 singlethread_cpu = cpumask_first(cpu_possible_mask);
1024 cpu_singlethread_map = cpumask_of(singlethread_cpu);
1023 hotcpu_notifier(workqueue_cpu_callback, 0); 1025 hotcpu_notifier(workqueue_cpu_callback, 0);
1024 keventd_wq = create_workqueue("events"); 1026 keventd_wq = create_workqueue("events");
1025 BUG_ON(!keventd_wq); 1027 BUG_ON(!keventd_wq);
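cpumask_of(cpu), which replaces &cpumask_of_cpu(cpu) throughout this patch, returns a const struct cpumask * into a shared constant table; that is why cpu_singlethread_map above can become a plain const pointer with no storage of its own, and why it must only ever be read through. A minimal sketch:

    #include <linux/cpumask.h>
    #include <linux/kernel.h>
    #include <linux/smp.h>

    static void single_cpu_mask_example(void)
    {
            /* Points into a shared constant table; never write through it. */
            const struct cpumask *self = cpumask_of(raw_smp_processor_id());

            WARN_ON(cpumask_weight(self) != 1);
    }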
diff --git a/lib/Kconfig b/lib/Kconfig
index 2ba43c4a5b07..03c2c24b9083 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -13,6 +13,10 @@ config GENERIC_FIND_FIRST_BIT
13config GENERIC_FIND_NEXT_BIT 13config GENERIC_FIND_NEXT_BIT
14 bool 14 bool
15 15
16config GENERIC_FIND_LAST_BIT
17 bool
18 default y
19
16config CRC_CCITT 20config CRC_CCITT
17 tristate "CRC-CCITT functions" 21 tristate "CRC-CCITT functions"
18 help 22 help
@@ -166,4 +170,8 @@ config CPUMASK_OFFSTACK
166 them on the stack. This is a bit more expensive, but avoids 170 them on the stack. This is a bit more expensive, but avoids
167 stack overflow. 171 stack overflow.
168 172
173config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
174 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
175 depends on EXPERIMENTAL && BROKEN
176
169endmenu 177endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 80fe8a3ec12a..32b0e64ded27 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -37,6 +37,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
37lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 37lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
38lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o 38lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
39lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 39lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
40lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
40obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 41obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
41obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 42obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
42obj-$(CONFIG_PLIST) += plist.o 43obj-$(CONFIG_PLIST) += plist.o
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 8d03f22c6ced..3389e2440da0 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -76,15 +76,28 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
76 76
77/* These are not inline because of header tangles. */ 77/* These are not inline because of header tangles. */
78#ifdef CONFIG_CPUMASK_OFFSTACK 78#ifdef CONFIG_CPUMASK_OFFSTACK
79bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 79/**
80 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
81 * @mask: pointer to cpumask_var_t where the cpumask is returned
82 * @flags: GFP_ flags
83 *
 84 * Only defined when CONFIG_CPUMASK_OFFSTACK=y; otherwise it is
 85 * a nop returning a constant 1 (in <linux/cpumask.h>).
86 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
87 *
88 * In addition, mask will be NULL if this fails. Note that gcc is
89 * usually smart enough to know that mask can never be NULL if
 90 * CONFIG_CPUMASK_OFFSTACK=n, and eliminates the dead error-handling
 91 * code in that case too.
92 */
93bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
80{ 94{
81 if (likely(slab_is_available())) 95 if (likely(slab_is_available()))
82 *mask = kmalloc(cpumask_size(), flags); 96 *mask = kmalloc_node(cpumask_size(), flags, node);
83 else { 97 else {
84#ifdef CONFIG_DEBUG_PER_CPU_MAPS 98#ifdef CONFIG_DEBUG_PER_CPU_MAPS
85 printk(KERN_ERR 99 printk(KERN_ERR
86 "=> alloc_cpumask_var: kmalloc not available!\n"); 100 "=> alloc_cpumask_var: kmalloc not available!\n");
87 dump_stack();
88#endif 101#endif
89 *mask = NULL; 102 *mask = NULL;
90 } 103 }
@@ -94,21 +107,64 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
94 dump_stack(); 107 dump_stack();
95 } 108 }
96#endif 109#endif
110 /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
111 if (*mask) {
112 unsigned int tail;
113 tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
 114 memset((char *)cpumask_bits(*mask) + cpumask_size() - tail,
115 0, tail);
116 }
117
97 return *mask != NULL; 118 return *mask != NULL;
98} 119}
120EXPORT_SYMBOL(alloc_cpumask_var_node);
121
122/**
123 * alloc_cpumask_var - allocate a struct cpumask
124 * @mask: pointer to cpumask_var_t where the cpumask is returned
125 * @flags: GFP_ flags
126 *
 127 * Only defined when CONFIG_CPUMASK_OFFSTACK=y; otherwise it is
 128 * a nop returning a constant 1 (in <linux/cpumask.h>).
129 *
130 * See alloc_cpumask_var_node.
131 */
132bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
133{
134 return alloc_cpumask_var_node(mask, flags, numa_node_id());
135}
99EXPORT_SYMBOL(alloc_cpumask_var); 136EXPORT_SYMBOL(alloc_cpumask_var);
100 137
138/**
139 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
140 * @mask: pointer to cpumask_var_t where the cpumask is returned
141 *
 142 * Only defined when CONFIG_CPUMASK_OFFSTACK=y; otherwise it is
 143 * a nop (in <linux/cpumask.h>).
144 * Either returns an allocated (zero-filled) cpumask, or causes the
145 * system to panic.
146 */
101void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 147void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
102{ 148{
103 *mask = alloc_bootmem(cpumask_size()); 149 *mask = alloc_bootmem(cpumask_size());
104} 150}
105 151
152/**
153 * free_cpumask_var - frees memory allocated for a struct cpumask.
154 * @mask: cpumask to free
155 *
156 * This is safe on a NULL mask.
157 */
106void free_cpumask_var(cpumask_var_t mask) 158void free_cpumask_var(cpumask_var_t mask)
107{ 159{
108 kfree(mask); 160 kfree(mask);
109} 161}
110EXPORT_SYMBOL(free_cpumask_var); 162EXPORT_SYMBOL(free_cpumask_var);
111 163
164/**
165 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
166 * @mask: cpumask to free
167 */
112void __init free_bootmem_cpumask_var(cpumask_var_t mask) 168void __init free_bootmem_cpumask_var(cpumask_var_t mask)
113{ 169{
114 free_bootmem((unsigned long)mask, cpumask_size()); 170 free_bootmem((unsigned long)mask, cpumask_size());
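A minimal usage sketch for the allocators documented above (not part of the patch; the node choice, GFP flags, and the mask manipulation are illustrative only):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/topology.h>     /* numa_node_id() */

    static int masked_work(void)
    {
            cpumask_var_t mask;

            if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, numa_node_id()))
                    return -ENOMEM;

            cpumask_copy(mask, cpu_online_mask);
            cpumask_clear_cpu(0, mask);     /* e.g. exclude the boot CPU */
            /* ... use mask ... */

            free_cpumask_var(mask);
            return 0;
    }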
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
new file mode 100644
index 000000000000..5d202e36bdd8
--- /dev/null
+++ b/lib/find_last_bit.c
@@ -0,0 +1,45 @@
 1/* find_last_bit.c: fallback find last bit implementation
2 *
3 * Copyright (C) 2008 IBM Corporation
4 * Written by Rusty Russell <rusty@rustcorp.com.au>
5 * (Inspired by David Howell's find_next_bit implementation)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/bitops.h>
14#include <linux/module.h>
15#include <asm/types.h>
16#include <asm/byteorder.h>
17
18unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
19{
20 unsigned long words;
21 unsigned long tmp;
22
23 /* Start at final word. */
24 words = size / BITS_PER_LONG;
25
26 /* Partial final word? */
27 if (size & (BITS_PER_LONG-1)) {
28 tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
29 - (size & (BITS_PER_LONG-1)))));
30 if (tmp)
31 goto found;
32 }
33
34 while (words) {
35 tmp = addr[--words];
36 if (tmp) {
37found:
38 return words * BITS_PER_LONG + __fls(tmp);
39 }
40 }
41
42 /* Not found */
43 return size;
44}
45EXPORT_SYMBOL(find_last_bit);
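find_last_bit() masks off the bits above size in a partial final word, then walks whole words downward and uses __fls() on the first non-zero word; like the other find_*_bit() helpers it returns size when no bit is set. A minimal usage sketch:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    static void find_last_bit_example(void)
    {
            DECLARE_BITMAP(map, 128);
            unsigned long last;

            bitmap_zero(map, 128);
            __set_bit(3, map);
            __set_bit(90, map);

            last = find_last_bit(map, 128); /* 90: highest set bit */

            bitmap_zero(map, 128);
            last = find_last_bit(map, 128); /* 128 == size: no bit set */
            (void)last;
    }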
diff --git a/mm/pdflush.c b/mm/pdflush.c
index a0a14c4d5072..15de509b68fd 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work *my_work)
172static int pdflush(void *dummy) 172static int pdflush(void *dummy)
173{ 173{
174 struct pdflush_work my_work; 174 struct pdflush_work my_work;
175 cpumask_t cpus_allowed; 175 cpumask_var_t cpus_allowed;
176
177 /*
178 * Since the caller doesn't even check kthread_run() worked, let's not
179 * freak out too much if this fails.
180 */
181 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
182 printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
183 return 0;
184 }
176 185
177 /* 186 /*
178 * pdflush can spend a lot of time doing encryption via dm-crypt. We 187 * pdflush can spend a lot of time doing encryption via dm-crypt. We
@@ -187,8 +196,9 @@ static int pdflush(void *dummy)
187 * This is needed as pdflush's are dynamically created and destroyed. 196 * This is needed as pdflush's are dynamically created and destroyed.
188 * The boottime pdflush's are easily placed w/o these 2 lines. 197 * The boottime pdflush's are easily placed w/o these 2 lines.
189 */ 198 */
190 cpuset_cpus_allowed(current, &cpus_allowed); 199 cpuset_cpus_allowed(current, cpus_allowed);
191 set_cpus_allowed_ptr(current, &cpus_allowed); 200 set_cpus_allowed_ptr(current, cpus_allowed);
201 free_cpumask_var(cpus_allowed);
192 202
193 return __pdflush(&my_work); 203 return __pdflush(&my_work);
194} 204}
diff --git a/mm/slab.c b/mm/slab.c
index f97e564bdf11..ddc41f337d58 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2157,7 +2157,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2157 2157
2158 /* 2158 /*
2159 * We use cache_chain_mutex to ensure a consistent view of 2159 * We use cache_chain_mutex to ensure a consistent view of
2160 * cpu_online_map as well. Please see cpuup_callback 2160 * cpu_online_mask as well. Please see cpuup_callback
2161 */ 2161 */
2162 get_online_cpus(); 2162 get_online_cpus();
2163 mutex_lock(&cache_chain_mutex); 2163 mutex_lock(&cache_chain_mutex);
diff --git a/mm/slub.c b/mm/slub.c
index 0d861c3154b6..f0e2892fe403 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1970,7 +1970,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu,
1970 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 1970 kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1971 1971
1972static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 1972static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1973static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE; 1973static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
1974 1974
1975static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 1975static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1976 int cpu, gfp_t flags) 1976 int cpu, gfp_t flags)
@@ -2045,13 +2045,13 @@ static void init_alloc_cpu_cpu(int cpu)
2045{ 2045{
2046 int i; 2046 int i;
2047 2047
2048 if (cpu_isset(cpu, kmem_cach_cpu_free_init_once)) 2048 if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
2049 return; 2049 return;
2050 2050
2051 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2051 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2052 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2052 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2053 2053
2054 cpu_set(cpu, kmem_cach_cpu_free_init_once); 2054 cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
2055} 2055}
2056 2056
2057static void __init init_alloc_cpu(void) 2057static void __init init_alloc_cpu(void)
@@ -3451,7 +3451,7 @@ struct location {
3451 long max_time; 3451 long max_time;
3452 long min_pid; 3452 long min_pid;
3453 long max_pid; 3453 long max_pid;
3454 cpumask_t cpus; 3454 DECLARE_BITMAP(cpus, NR_CPUS);
3455 nodemask_t nodes; 3455 nodemask_t nodes;
3456}; 3456};
3457 3457
@@ -3526,7 +3526,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
3526 if (track->pid > l->max_pid) 3526 if (track->pid > l->max_pid)
3527 l->max_pid = track->pid; 3527 l->max_pid = track->pid;
3528 3528
3529 cpu_set(track->cpu, l->cpus); 3529 cpumask_set_cpu(track->cpu,
3530 to_cpumask(l->cpus));
3530 } 3531 }
3531 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3532 node_set(page_to_nid(virt_to_page(track)), l->nodes);
3532 return 1; 3533 return 1;
@@ -3556,8 +3557,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
3556 l->max_time = age; 3557 l->max_time = age;
3557 l->min_pid = track->pid; 3558 l->min_pid = track->pid;
3558 l->max_pid = track->pid; 3559 l->max_pid = track->pid;
3559 cpus_clear(l->cpus); 3560 cpumask_clear(to_cpumask(l->cpus));
3560 cpu_set(track->cpu, l->cpus); 3561 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
3561 nodes_clear(l->nodes); 3562 nodes_clear(l->nodes);
3562 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3563 node_set(page_to_nid(virt_to_page(track)), l->nodes);
3563 return 1; 3564 return 1;
@@ -3638,11 +3639,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
3638 len += sprintf(buf + len, " pid=%ld", 3639 len += sprintf(buf + len, " pid=%ld",
3639 l->min_pid); 3640 l->min_pid);
3640 3641
3641 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && 3642 if (num_online_cpus() > 1 &&
3643 !cpumask_empty(to_cpumask(l->cpus)) &&
3642 len < PAGE_SIZE - 60) { 3644 len < PAGE_SIZE - 60) {
3643 len += sprintf(buf + len, " cpus="); 3645 len += sprintf(buf + len, " cpus=");
3644 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3646 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3645 &l->cpus); 3647 to_cpumask(l->cpus));
3646 } 3648 }
3647 3649
3648 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && 3650 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62e7f62fb559..d196f46c8808 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1902,7 +1902,7 @@ static int kswapd(void *p)
1902 }; 1902 };
1903 node_to_cpumask_ptr(cpumask, pgdat->node_id); 1903 node_to_cpumask_ptr(cpumask, pgdat->node_id);
1904 1904
1905 if (!cpus_empty(*cpumask)) 1905 if (!cpumask_empty(cpumask))
1906 set_cpus_allowed_ptr(tsk, cpumask); 1906 set_cpus_allowed_ptr(tsk, cpumask);
1907 current->reclaim_state = &reclaim_state; 1907 current->reclaim_state = &reclaim_state;
1908 1908
@@ -2141,7 +2141,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
2141 pg_data_t *pgdat = NODE_DATA(nid); 2141 pg_data_t *pgdat = NODE_DATA(nid);
2142 node_to_cpumask_ptr(mask, pgdat->node_id); 2142 node_to_cpumask_ptr(mask, pgdat->node_id);
2143 2143
2144 if (any_online_cpu(*mask) < nr_cpu_ids) 2144 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2145 /* One of our CPUs online: restore mask */ 2145 /* One of our CPUs online: restore mask */
2146 set_cpus_allowed_ptr(pgdat->kswapd, mask); 2146 set_cpus_allowed_ptr(pgdat->kswapd, mask);
2147 } 2147 }
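cpumask_any_and() replaces any_online_cpu(*mask): it intersects two masks through pointers and returns some CPU in the intersection, or a value >= nr_cpu_ids when the intersection is empty. A minimal sketch of the check used above (task and allowed are hypothetical):

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    static void rebind_if_usable(struct task_struct *task,
                                 const struct cpumask *allowed)
    {
            /* Some CPU both online and in 'allowed'; >= nr_cpu_ids if none. */
            if (cpumask_any_and(cpu_online_mask, allowed) < nr_cpu_ids)
                    set_cpus_allowed_ptr(task, allowed);
    }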
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c3ccfda23adc..91149746bb8d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,7 +20,7 @@
20DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 20DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
21EXPORT_PER_CPU_SYMBOL(vm_event_states); 21EXPORT_PER_CPU_SYMBOL(vm_event_states);
22 22
23static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) 23static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
24{ 24{
25 int cpu; 25 int cpu;
26 int i; 26 int i;
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
43void all_vm_events(unsigned long *ret) 43void all_vm_events(unsigned long *ret)
44{ 44{
45 get_online_cpus(); 45 get_online_cpus();
46 sum_vm_events(ret, &cpu_online_map); 46 sum_vm_events(ret, cpu_online_mask);
47 put_online_cpus(); 47 put_online_cpus();
48} 48}
49EXPORT_SYMBOL_GPL(all_vm_events); 49EXPORT_SYMBOL_GPL(all_vm_events);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index c86303638235..e5520996a75b 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1211,7 +1211,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
1211{ 1211{
1212 int cpu; 1212 int cpu;
1213 1213
1214 for (cpu = *idx; cpu < NR_CPUS; ++cpu) { 1214 for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
1215 if (!cpu_possible(cpu)) 1215 if (!cpu_possible(cpu))
1216 continue; 1216 continue;
1217 *idx = cpu + 1; 1217 *idx = cpu + 1;