commit 6f912042256c12b0927438122594f5379b364f5d (patch)
tree c11949814057b356d5896e7f025ec15132dbff78 /net
parent dd7ba3b8b15f9c65366986d723ae83254d8d78b7 (diff)
author: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> 2006-04-11 01:52:50 -0400
committer: Linus Torvalds <torvalds@g5.osdl.org> 2006-04-11 09:18:31 -0400
[PATCH] for_each_possible_cpu: network codes
for_each_cpu() actually iterates across all possible CPUs. We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs. This is inefficient and
possibly buggy.
We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.
This patch replaces for_each_cpu with for_each_possible_cpu under /net.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'net')
-rw-r--r-- | net/bridge/netfilter/ebtables.c | 12 | ||||
-rw-r--r-- | net/core/dev.c | 2 | ||||
-rw-r--r-- | net/core/flow.c | 4 | ||||
-rw-r--r-- | net/core/neighbour.c | 2 | ||||
-rw-r--r-- | net/core/utils.c | 4 | ||||
-rw-r--r-- | net/ipv4/icmp.c | 2 | ||||
-rw-r--r-- | net/ipv4/ipcomp.c | 8 | ||||
-rw-r--r-- | net/ipv4/netfilter/arp_tables.c | 4 | ||||
-rw-r--r-- | net/ipv4/netfilter/ip_conntrack_core.c | 2 | ||||
-rw-r--r-- | net/ipv4/netfilter/ip_tables.c | 4 | ||||
-rw-r--r-- | net/ipv4/proc.c | 4 | ||||
-rw-r--r-- | net/ipv4/route.c | 2 | ||||
-rw-r--r-- | net/ipv6/icmp.c | 4 | ||||
-rw-r--r-- | net/ipv6/ipcomp6.c | 8 | ||||
-rw-r--r-- | net/ipv6/netfilter/ip6_tables.c | 4 | ||||
-rw-r--r-- | net/ipv6/proc.c | 4 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_core.c | 2 | ||||
-rw-r--r-- | net/netfilter/x_tables.c | 4 | ||||
-rw-r--r-- | net/sctp/proc.c | 2 | ||||
-rw-r--r-- | net/socket.c | 2 |
20 files changed, 40 insertions, 40 deletions
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 01eae97c53d9..66bd93252c4e 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -829,7 +829,7 @@ static int translate_table(struct ebt_replace *repl, | |||
829 | * sizeof(struct ebt_chainstack)); | 829 | * sizeof(struct ebt_chainstack)); |
830 | if (!newinfo->chainstack) | 830 | if (!newinfo->chainstack) |
831 | return -ENOMEM; | 831 | return -ENOMEM; |
832 | for_each_cpu(i) { | 832 | for_each_possible_cpu(i) { |
833 | newinfo->chainstack[i] = | 833 | newinfo->chainstack[i] = |
834 | vmalloc(udc_cnt * sizeof(struct ebt_chainstack)); | 834 | vmalloc(udc_cnt * sizeof(struct ebt_chainstack)); |
835 | if (!newinfo->chainstack[i]) { | 835 | if (!newinfo->chainstack[i]) { |
@@ -901,7 +901,7 @@ static void get_counters(struct ebt_counter *oldcounters, | |||
901 | sizeof(struct ebt_counter) * nentries); | 901 | sizeof(struct ebt_counter) * nentries); |
902 | 902 | ||
903 | /* add other counters to those of cpu 0 */ | 903 | /* add other counters to those of cpu 0 */ |
904 | for_each_cpu(cpu) { | 904 | for_each_possible_cpu(cpu) { |
905 | if (cpu == 0) | 905 | if (cpu == 0) |
906 | continue; | 906 | continue; |
907 | counter_base = COUNTER_BASE(oldcounters, nentries, cpu); | 907 | counter_base = COUNTER_BASE(oldcounters, nentries, cpu); |
@@ -1036,7 +1036,7 @@ static int do_replace(void __user *user, unsigned int len) | |||
1036 | 1036 | ||
1037 | vfree(table->entries); | 1037 | vfree(table->entries); |
1038 | if (table->chainstack) { | 1038 | if (table->chainstack) { |
1039 | for_each_cpu(i) | 1039 | for_each_possible_cpu(i) |
1040 | vfree(table->chainstack[i]); | 1040 | vfree(table->chainstack[i]); |
1041 | vfree(table->chainstack); | 1041 | vfree(table->chainstack); |
1042 | } | 1042 | } |
@@ -1054,7 +1054,7 @@ free_counterstmp: | |||
1054 | vfree(counterstmp); | 1054 | vfree(counterstmp); |
1055 | /* can be initialized in translate_table() */ | 1055 | /* can be initialized in translate_table() */ |
1056 | if (newinfo->chainstack) { | 1056 | if (newinfo->chainstack) { |
1057 | for_each_cpu(i) | 1057 | for_each_possible_cpu(i) |
1058 | vfree(newinfo->chainstack[i]); | 1058 | vfree(newinfo->chainstack[i]); |
1059 | vfree(newinfo->chainstack); | 1059 | vfree(newinfo->chainstack); |
1060 | } | 1060 | } |
@@ -1201,7 +1201,7 @@ free_unlock: | |||
1201 | mutex_unlock(&ebt_mutex); | 1201 | mutex_unlock(&ebt_mutex); |
1202 | free_chainstack: | 1202 | free_chainstack: |
1203 | if (newinfo->chainstack) { | 1203 | if (newinfo->chainstack) { |
1204 | for_each_cpu(i) | 1204 | for_each_possible_cpu(i) |
1205 | vfree(newinfo->chainstack[i]); | 1205 | vfree(newinfo->chainstack[i]); |
1206 | vfree(newinfo->chainstack); | 1206 | vfree(newinfo->chainstack); |
1207 | } | 1207 | } |
@@ -1224,7 +1224,7 @@ void ebt_unregister_table(struct ebt_table *table) | |||
1224 | mutex_unlock(&ebt_mutex); | 1224 | mutex_unlock(&ebt_mutex); |
1225 | vfree(table->private->entries); | 1225 | vfree(table->private->entries); |
1226 | if (table->private->chainstack) { | 1226 | if (table->private->chainstack) { |
1227 | for_each_cpu(i) | 1227 | for_each_possible_cpu(i) |
1228 | vfree(table->private->chainstack[i]); | 1228 | vfree(table->private->chainstack[i]); |
1229 | vfree(table->private->chainstack); | 1229 | vfree(table->private->chainstack); |
1230 | } | 1230 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index 2731570eba5b..83231a27ae02 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3346,7 +3346,7 @@ static int __init net_dev_init(void) | |||
3346 | * Initialise the packet receive queues. | 3346 | * Initialise the packet receive queues. |
3347 | */ | 3347 | */ |
3348 | 3348 | ||
3349 | for_each_cpu(i) { | 3349 | for_each_possible_cpu(i) { |
3350 | struct softnet_data *queue; | 3350 | struct softnet_data *queue; |
3351 | 3351 | ||
3352 | queue = &per_cpu(softnet_data, i); | 3352 | queue = &per_cpu(softnet_data, i); |
diff --git a/net/core/flow.c b/net/core/flow.c index 885a2f655db0..2191af5f26ac 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -79,7 +79,7 @@ static void flow_cache_new_hashrnd(unsigned long arg) | |||
79 | { | 79 | { |
80 | int i; | 80 | int i; |
81 | 81 | ||
82 | for_each_cpu(i) | 82 | for_each_possible_cpu(i) |
83 | flow_hash_rnd_recalc(i) = 1; | 83 | flow_hash_rnd_recalc(i) = 1; |
84 | 84 | ||
85 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 85 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
@@ -361,7 +361,7 @@ static int __init flow_cache_init(void) | |||
361 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 361 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
362 | add_timer(&flow_hash_rnd_timer); | 362 | add_timer(&flow_hash_rnd_timer); |
363 | 363 | ||
364 | for_each_cpu(i) | 364 | for_each_possible_cpu(i) |
365 | flow_cache_cpu_prepare(i); | 365 | flow_cache_cpu_prepare(i); |
366 | 366 | ||
367 | hotcpu_notifier(flow_cache_cpu, 0); | 367 | hotcpu_notifier(flow_cache_cpu, 0); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 2ec8693fb778..4cf878efdb49 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1627,7 +1627,7 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb, | |||
1627 | 1627 | ||
1628 | memset(&ndst, 0, sizeof(ndst)); | 1628 | memset(&ndst, 0, sizeof(ndst)); |
1629 | 1629 | ||
1630 | for_each_cpu(cpu) { | 1630 | for_each_possible_cpu(cpu) { |
1631 | struct neigh_statistics *st; | 1631 | struct neigh_statistics *st; |
1632 | 1632 | ||
1633 | st = per_cpu_ptr(tbl->stats, cpu); | 1633 | st = per_cpu_ptr(tbl->stats, cpu); |
diff --git a/net/core/utils.c b/net/core/utils.c index fdc4f38bc46c..4f96f389243d 100644 --- a/net/core/utils.c +++ b/net/core/utils.c | |||
@@ -121,7 +121,7 @@ void __init net_random_init(void) | |||
121 | { | 121 | { |
122 | int i; | 122 | int i; |
123 | 123 | ||
124 | for_each_cpu(i) { | 124 | for_each_possible_cpu(i) { |
125 | struct nrnd_state *state = &per_cpu(net_rand_state,i); | 125 | struct nrnd_state *state = &per_cpu(net_rand_state,i); |
126 | __net_srandom(state, i+jiffies); | 126 | __net_srandom(state, i+jiffies); |
127 | } | 127 | } |
@@ -133,7 +133,7 @@ static int net_random_reseed(void) | |||
133 | unsigned long seed[NR_CPUS]; | 133 | unsigned long seed[NR_CPUS]; |
134 | 134 | ||
135 | get_random_bytes(seed, sizeof(seed)); | 135 | get_random_bytes(seed, sizeof(seed)); |
136 | for_each_cpu(i) { | 136 | for_each_possible_cpu(i) { |
137 | struct nrnd_state *state = &per_cpu(net_rand_state,i); | 137 | struct nrnd_state *state = &per_cpu(net_rand_state,i); |
138 | __net_srandom(state, seed[i]); | 138 | __net_srandom(state, seed[i]); |
139 | } | 139 | } |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 9831fd2c73a0..2a0455911ee0 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -1107,7 +1107,7 @@ void __init icmp_init(struct net_proto_family *ops) | |||
1107 | struct inet_sock *inet; | 1107 | struct inet_sock *inet; |
1108 | int i; | 1108 | int i; |
1109 | 1109 | ||
1110 | for_each_cpu(i) { | 1110 | for_each_possible_cpu(i) { |
1111 | int err; | 1111 | int err; |
1112 | 1112 | ||
1113 | err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, | 1113 | err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, |
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 0a1d86a0f632..04a429465665 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c | |||
@@ -290,7 +290,7 @@ static void ipcomp_free_scratches(void) | |||
290 | if (!scratches) | 290 | if (!scratches) |
291 | return; | 291 | return; |
292 | 292 | ||
293 | for_each_cpu(i) { | 293 | for_each_possible_cpu(i) { |
294 | void *scratch = *per_cpu_ptr(scratches, i); | 294 | void *scratch = *per_cpu_ptr(scratches, i); |
295 | if (scratch) | 295 | if (scratch) |
296 | vfree(scratch); | 296 | vfree(scratch); |
@@ -313,7 +313,7 @@ static void **ipcomp_alloc_scratches(void) | |||
313 | 313 | ||
314 | ipcomp_scratches = scratches; | 314 | ipcomp_scratches = scratches; |
315 | 315 | ||
316 | for_each_cpu(i) { | 316 | for_each_possible_cpu(i) { |
317 | void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); | 317 | void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); |
318 | if (!scratch) | 318 | if (!scratch) |
319 | return NULL; | 319 | return NULL; |
@@ -344,7 +344,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms) | |||
344 | if (!tfms) | 344 | if (!tfms) |
345 | return; | 345 | return; |
346 | 346 | ||
347 | for_each_cpu(cpu) { | 347 | for_each_possible_cpu(cpu) { |
348 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); | 348 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); |
349 | crypto_free_tfm(tfm); | 349 | crypto_free_tfm(tfm); |
350 | } | 350 | } |
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name) | |||
384 | if (!tfms) | 384 | if (!tfms) |
385 | goto error; | 385 | goto error; |
386 | 386 | ||
387 | for_each_cpu(cpu) { | 387 | for_each_possible_cpu(cpu) { |
388 | struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); | 388 | struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); |
389 | if (!tfm) | 389 | if (!tfm) |
390 | goto error; | 390 | goto error; |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index a44a5d73457d..c2d92f99a2b8 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -646,7 +646,7 @@ static int translate_table(const char *name, | |||
646 | } | 646 | } |
647 | 647 | ||
648 | /* And one copy for every other CPU */ | 648 | /* And one copy for every other CPU */ |
649 | for_each_cpu(i) { | 649 | for_each_possible_cpu(i) { |
650 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) | 650 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) |
651 | memcpy(newinfo->entries[i], entry0, newinfo->size); | 651 | memcpy(newinfo->entries[i], entry0, newinfo->size); |
652 | } | 652 | } |
@@ -696,7 +696,7 @@ static void get_counters(const struct xt_table_info *t, | |||
696 | counters, | 696 | counters, |
697 | &i); | 697 | &i); |
698 | 698 | ||
699 | for_each_cpu(cpu) { | 699 | for_each_possible_cpu(cpu) { |
700 | if (cpu == curcpu) | 700 | if (cpu == curcpu) |
701 | continue; | 701 | continue; |
702 | i = 0; | 702 | i = 0; |
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index ceaabc18202b..979a2eac6f00 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c | |||
@@ -133,7 +133,7 @@ static void ip_ct_event_cache_flush(void) | |||
133 | struct ip_conntrack_ecache *ecache; | 133 | struct ip_conntrack_ecache *ecache; |
134 | int cpu; | 134 | int cpu; |
135 | 135 | ||
136 | for_each_cpu(cpu) { | 136 | for_each_possible_cpu(cpu) { |
137 | ecache = &per_cpu(ip_conntrack_ecache, cpu); | 137 | ecache = &per_cpu(ip_conntrack_ecache, cpu); |
138 | if (ecache->ct) | 138 | if (ecache->ct) |
139 | ip_conntrack_put(ecache->ct); | 139 | ip_conntrack_put(ecache->ct); |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index d5b8cdd361ce..d25ac8ba6eba 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -735,7 +735,7 @@ translate_table(const char *name, | |||
735 | } | 735 | } |
736 | 736 | ||
737 | /* And one copy for every other CPU */ | 737 | /* And one copy for every other CPU */ |
738 | for_each_cpu(i) { | 738 | for_each_possible_cpu(i) { |
739 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) | 739 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) |
740 | memcpy(newinfo->entries[i], entry0, newinfo->size); | 740 | memcpy(newinfo->entries[i], entry0, newinfo->size); |
741 | } | 741 | } |
@@ -788,7 +788,7 @@ get_counters(const struct xt_table_info *t, | |||
788 | counters, | 788 | counters, |
789 | &i); | 789 | &i); |
790 | 790 | ||
791 | for_each_cpu(cpu) { | 791 | for_each_possible_cpu(cpu) { |
792 | if (cpu == curcpu) | 792 | if (cpu == curcpu) |
793 | continue; | 793 | continue; |
794 | i = 0; | 794 | i = 0; |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 1b167c4bb3be..d61e2a9d394d 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto) | |||
49 | int res = 0; | 49 | int res = 0; |
50 | int cpu; | 50 | int cpu; |
51 | 51 | ||
52 | for_each_cpu(cpu) | 52 | for_each_possible_cpu(cpu) |
53 | res += proto->stats[cpu].inuse; | 53 | res += proto->stats[cpu].inuse; |
54 | 54 | ||
55 | return res; | 55 | return res; |
@@ -91,7 +91,7 @@ fold_field(void *mib[], int offt) | |||
91 | unsigned long res = 0; | 91 | unsigned long res = 0; |
92 | int i; | 92 | int i; |
93 | 93 | ||
94 | for_each_cpu(i) { | 94 | for_each_possible_cpu(i) { |
95 | res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); | 95 | res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); |
96 | res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); | 96 | res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); |
97 | } | 97 | } |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 94fcbc5e5a1b..ff434821909f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -3083,7 +3083,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset, | |||
3083 | memcpy(dst, src, length); | 3083 | memcpy(dst, src, length); |
3084 | 3084 | ||
3085 | /* Add the other cpus in, one int at a time */ | 3085 | /* Add the other cpus in, one int at a time */ |
3086 | for_each_cpu(i) { | 3086 | for_each_possible_cpu(i) { |
3087 | unsigned int j; | 3087 | unsigned int j; |
3088 | 3088 | ||
3089 | src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; | 3089 | src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 21eb725e885f..1044b6fce0d5 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -717,7 +717,7 @@ int __init icmpv6_init(struct net_proto_family *ops) | |||
717 | struct sock *sk; | 717 | struct sock *sk; |
718 | int err, i, j; | 718 | int err, i, j; |
719 | 719 | ||
720 | for_each_cpu(i) { | 720 | for_each_possible_cpu(i) { |
721 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, | 721 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, |
722 | &per_cpu(__icmpv6_socket, i)); | 722 | &per_cpu(__icmpv6_socket, i)); |
723 | if (err < 0) { | 723 | if (err < 0) { |
@@ -763,7 +763,7 @@ void icmpv6_cleanup(void) | |||
763 | { | 763 | { |
764 | int i; | 764 | int i; |
765 | 765 | ||
766 | for_each_cpu(i) { | 766 | for_each_possible_cpu(i) { |
767 | sock_release(per_cpu(__icmpv6_socket, i)); | 767 | sock_release(per_cpu(__icmpv6_socket, i)); |
768 | } | 768 | } |
769 | inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); | 769 | inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); |
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 00f3fadfcca7..05eb67def39f 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c | |||
@@ -290,7 +290,7 @@ static void ipcomp6_free_scratches(void) | |||
290 | if (!scratches) | 290 | if (!scratches) |
291 | return; | 291 | return; |
292 | 292 | ||
293 | for_each_cpu(i) { | 293 | for_each_possible_cpu(i) { |
294 | void *scratch = *per_cpu_ptr(scratches, i); | 294 | void *scratch = *per_cpu_ptr(scratches, i); |
295 | 295 | ||
296 | vfree(scratch); | 296 | vfree(scratch); |
@@ -313,7 +313,7 @@ static void **ipcomp6_alloc_scratches(void) | |||
313 | 313 | ||
314 | ipcomp6_scratches = scratches; | 314 | ipcomp6_scratches = scratches; |
315 | 315 | ||
316 | for_each_cpu(i) { | 316 | for_each_possible_cpu(i) { |
317 | void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); | 317 | void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); |
318 | if (!scratch) | 318 | if (!scratch) |
319 | return NULL; | 319 | return NULL; |
@@ -344,7 +344,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms) | |||
344 | if (!tfms) | 344 | if (!tfms) |
345 | return; | 345 | return; |
346 | 346 | ||
347 | for_each_cpu(cpu) { | 347 | for_each_possible_cpu(cpu) { |
348 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); | 348 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); |
349 | crypto_free_tfm(tfm); | 349 | crypto_free_tfm(tfm); |
350 | } | 350 | } |
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name) | |||
384 | if (!tfms) | 384 | if (!tfms) |
385 | goto error; | 385 | goto error; |
386 | 386 | ||
387 | for_each_cpu(cpu) { | 387 | for_each_possible_cpu(cpu) { |
388 | struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); | 388 | struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); |
389 | if (!tfm) | 389 | if (!tfm) |
390 | goto error; | 390 | goto error; |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 3ecf2db841f8..642b4b11464f 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -788,7 +788,7 @@ translate_table(const char *name, | |||
788 | } | 788 | } |
789 | 789 | ||
790 | /* And one copy for every other CPU */ | 790 | /* And one copy for every other CPU */ |
791 | for_each_cpu(i) { | 791 | for_each_possible_cpu(i) { |
792 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) | 792 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) |
793 | memcpy(newinfo->entries[i], entry0, newinfo->size); | 793 | memcpy(newinfo->entries[i], entry0, newinfo->size); |
794 | } | 794 | } |
@@ -841,7 +841,7 @@ get_counters(const struct xt_table_info *t, | |||
841 | counters, | 841 | counters, |
842 | &i); | 842 | &i); |
843 | 843 | ||
844 | for_each_cpu(cpu) { | 844 | for_each_possible_cpu(cpu) { |
845 | if (cpu == curcpu) | 845 | if (cpu == curcpu) |
846 | continue; | 846 | continue; |
847 | i = 0; | 847 | i = 0; |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 4238b1ed8860..779ddf77f4d4 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto) | |||
38 | int res = 0; | 38 | int res = 0; |
39 | int cpu; | 39 | int cpu; |
40 | 40 | ||
41 | for_each_cpu(cpu) | 41 | for_each_possible_cpu(cpu) |
42 | res += proto->stats[cpu].inuse; | 42 | res += proto->stats[cpu].inuse; |
43 | 43 | ||
44 | return res; | 44 | return res; |
@@ -140,7 +140,7 @@ fold_field(void *mib[], int offt) | |||
140 | unsigned long res = 0; | 140 | unsigned long res = 0; |
141 | int i; | 141 | int i; |
142 | 142 | ||
143 | for_each_cpu(i) { | 143 | for_each_possible_cpu(i) { |
144 | res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); | 144 | res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); |
145 | res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); | 145 | res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); |
146 | } | 146 | } |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 56389c83557c..e581190fb6c3 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -146,7 +146,7 @@ static void nf_ct_event_cache_flush(void) | |||
146 | struct nf_conntrack_ecache *ecache; | 146 | struct nf_conntrack_ecache *ecache; |
147 | int cpu; | 147 | int cpu; |
148 | 148 | ||
149 | for_each_cpu(cpu) { | 149 | for_each_possible_cpu(cpu) { |
150 | ecache = &per_cpu(nf_conntrack_ecache, cpu); | 150 | ecache = &per_cpu(nf_conntrack_ecache, cpu); |
151 | if (ecache->ct) | 151 | if (ecache->ct) |
152 | nf_ct_put(ecache->ct); | 152 | nf_ct_put(ecache->ct); |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index feb8a9e066b0..00cf0a4f4d92 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -413,7 +413,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) | |||
413 | 413 | ||
414 | newinfo->size = size; | 414 | newinfo->size = size; |
415 | 415 | ||
416 | for_each_cpu(cpu) { | 416 | for_each_possible_cpu(cpu) { |
417 | if (size <= PAGE_SIZE) | 417 | if (size <= PAGE_SIZE) |
418 | newinfo->entries[cpu] = kmalloc_node(size, | 418 | newinfo->entries[cpu] = kmalloc_node(size, |
419 | GFP_KERNEL, | 419 | GFP_KERNEL, |
@@ -436,7 +436,7 @@ void xt_free_table_info(struct xt_table_info *info) | |||
436 | { | 436 | { |
437 | int cpu; | 437 | int cpu; |
438 | 438 | ||
439 | for_each_cpu(cpu) { | 439 | for_each_possible_cpu(cpu) { |
440 | if (info->size <= PAGE_SIZE) | 440 | if (info->size <= PAGE_SIZE) |
441 | kfree(info->entries[cpu]); | 441 | kfree(info->entries[cpu]); |
442 | else | 442 | else |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index d47a52c303a8..5b3b0e0ae7e5 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -69,7 +69,7 @@ fold_field(void *mib[], int nr) | |||
69 | unsigned long res = 0; | 69 | unsigned long res = 0; |
70 | int i; | 70 | int i; |
71 | 71 | ||
72 | for_each_cpu(i) { | 72 | for_each_possible_cpu(i) { |
73 | res += | 73 | res += |
74 | *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + | 74 | *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + |
75 | sizeof (unsigned long) * nr)); | 75 | sizeof (unsigned long) * nr)); |
diff --git a/net/socket.c b/net/socket.c index b807f360e02c..00cdfd2088db 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2136,7 +2136,7 @@ void socket_seq_show(struct seq_file *seq) | |||
2136 | int cpu; | 2136 | int cpu; |
2137 | int counter = 0; | 2137 | int counter = 0; |
2138 | 2138 | ||
2139 | for_each_cpu(cpu) | 2139 | for_each_possible_cpu(cpu) |
2140 | counter += per_cpu(sockets_in_use, cpu); | 2140 | counter += per_cpu(sockets_in_use, cpu); |
2141 | 2141 | ||
2142 | /* It can be negative, by the way. 8) */ | 2142 | /* It can be negative, by the way. 8) */ |