author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 21:37:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 21:37:44 -0400
commit	26dcce0fabbef75ae426461edf21b5030bad60f3 (patch)
tree	56c64fa47dc29f7ea5a8fd0cab0459fb0a05a2bc /kernel
parent	d7b6de14a0ef8a376f9d57b867545b47302b7bfb (diff)
parent	eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0 (diff)
Merge branch 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
  NR_CPUS: Replace NR_CPUS in speedstep-centrino.c
  cpumask: Provide a generic set of CPUMASK_ALLOC macros, FIXUP
  NR_CPUS: Replace NR_CPUS in cpufreq userspace routines
  NR_CPUS: Replace per_cpu(..., smp_processor_id()) with __get_cpu_var
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genapic_flat_64.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genx2apic_uv_x.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/proc.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/mcheck/mce_64.c
  cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c, fix
  cpumask: Use optimized CPUMASK_ALLOC macros in the centrino_target
  cpumask: Provide a generic set of CPUMASK_ALLOC macros
  cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c
  cpumask: Optimize cpumask_of_cpu in kernel/time/tick-common.c
  cpumask: Optimize cpumask_of_cpu in drivers/misc/sgi-xp/xpc_main.c
  cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/ldt.c
  cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/io_apic_64.c
  cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr
  Revert "cpumask: introduce new APIs"
  cpumask: make for_each_cpu_mask a bit smaller
  net: Pass reference to cpumask variable in net/sunrpc/svc.c
  ...

Fix up trivial conflicts in drivers/cpufreq/cpufreq.c manually
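The conversion repeated throughout the kernel/ hunks below follows one pattern: bounded mask iteration plus pointer-based per-CPU masks. A minimal sketch of that pattern, for orientation only; `example_mask` and `do_work()` are placeholder names, not code from this commit:

/* Illustrative sketch, not taken from this commit.
 * Before: for_each_cpu_mask() walks all NR_CPUS bits, and
 * cpumask_of_cpu() hands around a full cpumask_t value.
 */
for_each_cpu_mask(cpu, example_mask)
	do_work(cpu);
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));

/* After: the _nr iterator stops at nr_cpu_ids, and
 * cpumask_of_cpu_ptr() yields a pointer, avoiding large
 * on-stack cpumask_t copies when NR_CPUS is 4096.
 */
for_each_cpu_mask_nr(cpu, example_mask)
	do_work(cpu);
cpumask_of_cpu_ptr(new_mask, cpu);
set_cpus_allowed_ptr(current, new_mask);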
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c	2
-rw-r--r--	kernel/rcuclassic.c	2
-rw-r--r--	kernel/rcupreempt.c	10
-rw-r--r--	kernel/sched.c	36
-rw-r--r--	kernel/sched_fair.c	2
-rw-r--r--	kernel/sched_rt.c	4
-rw-r--r--	kernel/stop_machine.c	3
-rw-r--r--	kernel/taskstats.c	4
-rw-r--r--	kernel/time/clocksource.c	4
-rw-r--r--	kernel/time/tick-broadcast.c	3
-rw-r--r--	kernel/time/tick-common.c	14
-rw-r--r--	kernel/trace/trace_sysprof.c	4
-rw-r--r--	kernel/workqueue.c	6
13 files changed, 48 insertions, 46 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index cfb1d43ab80..d26d0b095b3 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -413,7 +413,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 16eeeaa9d61..6f8696c502f 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		 */
 		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
 		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask(cpu, cpumask)
+		for_each_cpu_mask_nr(cpu, cpumask)
 			smp_send_reschedule(cpu);
 	}
 }
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 6f62b77d93c..27827931ca0 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb(); /* ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff --git a/kernel/sched.c b/kernel/sched.c
index b1104ea5d25..df80bae6815 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2108,7 +2108,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2150,7 +2150,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3168,7 +3168,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3447,7 +3447,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3989,7 +3989,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6802,7 +6802,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6813,7 +6813,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -7013,7 +7013,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -7038,7 +7038,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7277,7 +7277,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7344,7 +7344,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7361,7 +7361,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7428,7 +7428,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7474,21 +7474,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7508,7 +7508,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7603,7 +7603,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2aa987027d..bb61fe26b62 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1031,7 +1031,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9e855..7c9614728c5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu_mask_nr(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -1107,7 +1107,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ba9b2054ecb..738b411ff2d 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
+	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
+	set_cpus_allowed_ptr(current, cpumask);
 
 	/* Ack: we are alive */
 	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4a23517169a..06b17547f4e 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 		return -EINVAL;
 
 	if (isadd == REGISTER) {
-		for_each_cpu_mask(cpu, mask) {
+		for_each_cpu_mask_nr(cpu, mask) {
 			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
 					 cpu_to_node(cpu));
 			if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 
 	/* Deregister or cleanup */
 cleanup:
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		listeners = &per_cpu(listener_array, cpu);
 		down_write(&listeners->sem);
 		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index b1c2da81b05..093d4acf993 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
 		 * Cycle through CPUs to check if the CPUs stay
 		 * synchronized to each other.
 		 */
-		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+		int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
 
-		if (next_cpu >= NR_CPUS)
+		if (next_cpu >= nr_cpu_ids)
 			next_cpu = first_cpu(cpu_online_map);
 		watchdog_timer.expires += WATCHDOG_INTERVAL;
 		add_timer_on(&watchdog_timer, next_cpu);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f48d0f09d32..31463d370b9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -399,8 +399,7 @@ again:
 	mask = CPU_MASK_NONE;
 	now = ktime_get();
 	/* Find all expired events */
-	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+	for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64)
 			cpu_set(cpu, mask);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4f3886562b8..bf43284d685 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
  */
 static void tick_setup_device(struct tick_device *td,
 			      struct clock_event_device *newdev, int cpu,
-			      cpumask_t cpumask)
+			      const cpumask_t *cpumask)
 {
 	ktime_t next_event;
 	void (*handler)(struct clock_event_device *) = NULL;
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td,
 	 * When the device is not per cpu, pin the interrupt to the
 	 * current cpu:
 	 */
-	if (!cpus_equal(newdev->cpumask, cpumask))
-		irq_set_affinity(newdev->irq, cpumask);
+	if (!cpus_equal(newdev->cpumask, *cpumask))
+		irq_set_affinity(newdev->irq, *cpumask);
 
 	/*
 	 * When global broadcasting is active, check if the current
@@ -196,20 +196,20 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	struct tick_device *td;
 	int cpu, ret = NOTIFY_OK;
 	unsigned long flags;
-	cpumask_t cpumask;
+	cpumask_of_cpu_ptr_declare(cpumask);
 
 	spin_lock_irqsave(&tick_device_lock, flags);
 
 	cpu = smp_processor_id();
+	cpumask_of_cpu_ptr_next(cpumask, cpu);
 	if (!cpu_isset(cpu, newdev->cpumask))
 		goto out_bc;
 
 	td = &per_cpu(tick_cpu_device, cpu);
 	curdev = td->evtdev;
-	cpumask = cpumask_of_cpu(cpu);
 
 	/* cpu local device ? */
-	if (!cpus_equal(newdev->cpumask, cpumask)) {
+	if (!cpus_equal(newdev->cpumask, *cpumask)) {
 
 		/*
 		 * If the cpu affinity of the device interrupt can not
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		 * If we have a cpu local device already, do not replace it
 		 * by a non cpu local device
 		 */
-		if (curdev && cpus_equal(curdev->cpumask, cpumask))
+		if (curdev && cpus_equal(curdev->cpumask, *cpumask))
 			goto out_bc;
 	}
 
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 2301e1e7c60..63528086337 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -213,7 +213,9 @@ static void start_stack_timers(void)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		cpumask_of_cpu_ptr(new_mask, cpu);
+
+		set_cpus_allowed_ptr(current, new_mask);
 		start_stack_timer(cpu);
 	}
 	set_cpus_allowed_ptr(current, &saved_mask);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c9..a6d36346d10 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	put_online_cpus();
 