Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                    2
-rw-r--r--  kernel/rcuclassic.c             2
-rw-r--r--  kernel/rcupreempt.c            10
-rw-r--r--  kernel/sched.c                 54
-rw-r--r--  kernel/sched_fair.c             2
-rw-r--r--  kernel/sched_rt.c               6
-rw-r--r--  kernel/taskstats.c              4
-rw-r--r--  kernel/time/clocksource.c       4
-rw-r--r--  kernel/time/tick-broadcast.c    3
-rw-r--r--  kernel/workqueue.c              6
10 files changed, 46 insertions(+), 47 deletions(-)
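
Every hunk below makes the same kind of substitution: the old cpumask/node iterators, which always scan up to the compile-time limits NR_CPUS and MAX_NUMNODES, are replaced by the _nr variants (for_each_cpu_mask_nr, next_cpu_nr) and runtime bounds (nr_cpu_ids, nr_node_ids), so loops can stop at the number of CPUs and nodes that can actually exist on the booted system. A minimal standalone sketch of the idea (hypothetical macro names and counts, not the real include/linux/cpumask.h definitions):

/*
 * Standalone sketch (hypothetical names and values, not the kernel's
 * cpumask.h macros): shows why a runtime bound such as nr_cpu_ids is
 * cheaper to iterate than the compile-time maximum NR_CPUS.
 */
#include <stdio.h>

#define COMPILE_TIME_MAX 4096          /* stand-in for NR_CPUS */
static int runtime_cpu_count = 8;      /* stand-in for nr_cpu_ids */

#define for_each_cpu_old(cpu) \
        for ((cpu) = 0; (cpu) < COMPILE_TIME_MAX; (cpu)++)
#define for_each_cpu_nr(cpu) \
        for ((cpu) = 0; (cpu) < runtime_cpu_count; (cpu)++)

int main(void)
{
        int cpu, old_iters = 0, nr_iters = 0;

        for_each_cpu_old(cpu)
                old_iters++;
        for_each_cpu_nr(cpu)
                nr_iters++;

        printf("compile-time bound: %d iterations, runtime bound: %d\n",
               old_iters, nr_iters);
        return 0;
}
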
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c77bc3a1c722..50ae922c6022 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -390,7 +390,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index a38895a5b8e2..adde10388d0c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		 */
 		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
 		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask(cpu, cpumask)
+		for_each_cpu_mask_nr(cpu, cpumask)
 			smp_send_reschedule(cpu);
 	}
 }
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5e02b7740702..5cbd69edf5d9 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -655,7 +655,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -673,7 +673,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -705,7 +705,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -720,7 +720,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -740,7 +740,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff --git a/kernel/sched.c b/kernel/sched.c
index 94ead43eda62..e6795e39c8ab 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1939,7 +1939,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -1981,7 +1981,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2964,7 +2964,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3228,7 +3228,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3759,7 +3759,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6491,7 +6491,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6502,7 +6502,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -6538,9 +6538,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6702,7 +6702,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -6727,14 +6727,14 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -6927,7 +6927,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -6966,7 +6966,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7033,7 +7033,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7050,7 +7050,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7066,7 +7066,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7090,7 +7090,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7117,7 +7117,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7129,9 +7129,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7163,28 +7163,28 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {
@@ -7197,7 +7197,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7292,7 +7292,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 08ae848b71d4..74774bde5264 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -961,7 +961,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0f3c19197fa4..e757f370eb1b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -231,7 +231,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu_mask_nr(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -273,7 +273,7 @@ static int balance_runtime(struct rt_rq *rt_rq)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu_mask_nr(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -1006,7 +1006,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4a23517169a6..06b17547f4e7 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 		return -EINVAL;
 
 	if (isadd == REGISTER) {
-		for_each_cpu_mask(cpu, mask) {
+		for_each_cpu_mask_nr(cpu, mask) {
 			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
 					 cpu_to_node(cpu));
 			if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 
 	/* Deregister or cleanup */
 cleanup:
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		listeners = &per_cpu(listener_array, cpu);
 		down_write(&listeners->sem);
 		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index dadde5361f32..60ceabd53f2e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
 		 * Cycle through CPUs to check if the CPUs stay
 		 * synchronized to each other.
 		 */
-		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+		int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
 
-		if (next_cpu >= NR_CPUS)
+		if (next_cpu >= nr_cpu_ids)
 			next_cpu = first_cpu(cpu_online_map);
 		watchdog_timer.expires += WATCHDOG_INTERVAL;
 		add_timer_on(&watchdog_timer, next_cpu);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 57a1f02e5ec0..2d0a96346259 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -397,8 +397,7 @@ again:
 	mask = CPU_MASK_NONE;
 	now = ktime_get();
 	/* Find all expired events */
-	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+	for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64)
 			cpu_set(cpu, mask);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c91..a6d36346d10a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	put_online_cpus();
 