 arch/i386/kernel/nmi.c |  2 +-
 block/ll_rw_blk.c      |  2 +-
 drivers/scsi/scsi.c    |  2 +-
 fs/file.c              |  3 +--
 kernel/sched.c         |  2 +-
 mm/page_alloc.c        | 10 ++++++----
 net/core/dev.c         |  2 +-
 net/core/utils.c       |  4 ++--
 net/ipv4/proc.c        |  2 +-
 net/ipv6/proc.c        |  2 +-
 net/socket.c           |  2 +-
 11 files changed, 17 insertions(+), 16 deletions(-)
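
This patch converts open-coded "for (i = 0; i < NR_CPUS; i++)" loops over per-cpu data to for_each_cpu(i), which visits only the CPUs set in cpu_possible_map rather than every slot up to the compile-time NR_CPUS limit. For background, a sketch of the iterator approximately as it stood in include/linux/cpumask.h at the time (it was later renamed for_each_possible_cpu); this is context, not part of the diff:

	/* Walk cpu over the bits set in mask; next_cpu() returns NR_CPUS
	 * when no set bits remain, which terminates the loop. */
	#define for_each_cpu_mask(cpu, mask)			\
		for ((cpu) = first_cpu(mask);			\
		     (cpu) < NR_CPUS;				\
		     (cpu) = next_cpu((cpu), (mask)))

	/* Every CPU that could ever be brought online on this boot. */
	#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)

On configurations where NR_CPUS is large but few CPUs are actually possible, the converted loops skip the dead slots entirely.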
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703ac1c..63f39a7e2c9 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f9fc07efd2d..e5aad831458 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3453,7 +3453,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 245ca99a641..c551bb84dbf 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
 	if (error)
 		goto cleanup_sysctl;
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
 	devfs_mk_dir("scsi");
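
The nmi, block and scsi hunks above are all the same idiom: boot-time initialization of a per-cpu object for every possible CPU, so that CPUs brought online later find their state ready. A minimal self-contained sketch of the pattern using the 2.6-era per-cpu API; my_done_q and my_subsys_init are hypothetical names, not taken from this patch:

	#include <linux/init.h>
	#include <linux/list.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(struct list_head, my_done_q);	/* hypothetical */

	static int __init my_subsys_init(void)
	{
		int i;

		/* Initialize the list head on every possible CPU, online
		 * or not, before the subsystem starts queueing work. */
		for_each_cpu(i)
			INIT_LIST_HEAD(&per_cpu(my_done_q, i));
		return 0;
	}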
diff --git a/fs/file.c b/fs/file.c
index fd066b261c7..cea7cbea11d 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
 	int i;
-	/* Really early - can't use for_each_cpu */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		fdtable_defer_list_init(i);
 }
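
The comment deleted here claimed files_defer_init() ran too early to use for_each_cpu. Since the iterator only reads cpu_possible_map, which the architecture code populates well before this __init function runs, that restriction evidently no longer held; presumably that is why the comment is dropped rather than kept.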
diff --git a/kernel/sched.c b/kernel/sched.c
index f77f23f8f47..839466fdfb4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6109,7 +6109,7 @@ void __init sched_init(void)
 	runqueue_t *rq;
 	int i, j, k;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		prio_array_t *array;
 
 		rq = cpu_rq(i);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44b4eb4202d..dde04ff4be3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	int cpu = 0;
 
-	memset(ret, 0, sizeof(*ret));
+	memset(ret, 0, nr * sizeof(unsigned long));
 	cpus_and(*cpumask, *cpumask, cpu_online_map);
 
 	cpu = first_cpu(*cpumask);
 	while (cpu < NR_CPUS) {
 		unsigned long *in, *out, off;
 
+		if (!cpu_isset(cpu, *cpumask))
+			continue;
+
 		in = (unsigned long *)&per_cpu(page_states, cpu);
 
 		cpu = next_cpu(cpu, *cpumask);
 
-		if (cpu < NR_CPUS)
+		if (likely(cpu < NR_CPUS))
 			prefetch(&per_cpu(page_states, cpu));
 
 		out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static struct per_cpu_pageset
-	boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
 
 /*
  * Dynamically allocate memory for the
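
Two changes in this file go beyond the loop conversion. First, the memset in __get_page_state() now clears nr * sizeof(unsigned long) instead of the whole struct: the accumulation loop that follows the hunk (roughly "for (off = 0; off < nr; off++) *out++ += *in++;") only writes the first nr counters the caller asked for, so clearing the rest was wasted work. Second, the prefetch test gains a likely() hint, since next_cpu() returns NR_CPUS only on the final iteration. The added cpu_isset() check is defensive; first_cpu()/next_cpu() should only ever return set bits. An illustrative caller, written as if it lived in mm/page_alloc.c; the nr value of 3 is a made-up example:

	static void example_stats(void)
	{
		struct page_state ps;
		cpumask_t mask = CPU_MASK_ALL;

		/* Sum only the first three unsigned-long counters of
		 * struct page_state across the online CPUs in mask. */
		__get_page_state(&ps, 3, &mask);

		/* Only the first 3 words of ps are now valid; the rest
		 * was neither cleared nor written. */
	}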
diff --git a/net/core/dev.c b/net/core/dev.c
index ffb82073056..2afb0de9532 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
 	 * Initialise the packet receive queues.
 	 */
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct softnet_data *queue;
 
 		queue = &per_cpu(softnet_data, i);
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fcf867..fdc4f38bc46 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, i+jiffies);
 	}
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
 	unsigned long seed[NR_CPUS];
 
 	get_random_bytes(seed, sizeof(seed));
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, seed[i]);
 	}
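
Note that net_random_reseed() still sizes seed[] by NR_CPUS and fills the whole array with get_random_bytes(); after this change only the possible CPUs consume their seed[i] slot, and the surplus random bytes are simply unused. Each possible CPU's generator keeps its own slot, so the per-cpu streams stay independently seeded.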
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 39d49dc333a..1b167c4bb3b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 50a13e75d70..4238b1ed886 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for (cpu=0; cpu<NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
diff --git a/net/socket.c b/net/socket.c
index b38a263853c..a00851f981d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
 	int cpu;
 	int counter = 0;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		counter += per_cpu(sockets_in_use, cpu);
 
 	/* It can be negative, by the way. 8) */
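
The "can be negative" comment survives the conversion for good reason: sockets_in_use is a per-cpu counter incremented on the CPU that allocates a socket and decremented on whichever CPU happens to release it, so an individual CPU's count, and transiently even the sum, can dip below zero; only the total across all CPUs is meaningful.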