author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2006-04-11 01:52:52 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>                2006-04-11 09:18:31 -0400
commit     a283a52520569195c2d26d75455cddab758f530b (patch)
tree       3ea10360b06dad909dc5b9e48b7236bcf23f3fab
parent     fff8efe7b71efd88829782be64dc42c25c70ad53 (diff)
[PATCH] for_each_possible_cpu: sparc64
for_each_cpu() actually iterates across all possible CPUs.  We've had
mistakes in the past where people were using for_each_cpu() where they
should have been iterating across only online or present CPUs.  This is
inefficient and possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this
in the future.

This patch replaces for_each_cpu() with for_each_possible_cpu() for
sparc64.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   arch/sparc64/kernel/pci_sun4v.c   2
-rw-r--r--   arch/sparc64/kernel/setup.c       2
-rw-r--r--   arch/sparc64/kernel/smp.c         6
-rw-r--r--   include/asm-sparc64/percpu.h      2
4 files changed, 6 insertions(+), 6 deletions(-)
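
As an aside (not part of the patch): a minimal sketch of the distinction the commit message describes.  Per-CPU storage exists for every possible CPU, so one-time setup walks the possible mask, while runtime work usually wants only the CPUs that are actually online.  The for_each_online_cpu() helper and the DEFINE_PER_CPU()/per_cpu() accessors used here are assumed from the generic kernel API of this period; they are not touched by this patch.

	#include <linux/cpumask.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, example_counter);

	/* Setup touches every possible CPU: per-CPU storage exists for
	 * all of them, even ones that never come online. */
	static void example_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(example_counter, cpu) = 0;
	}

	/* Runtime accounting only cares about CPUs that are actually up. */
	static unsigned long example_sum(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_online_cpu(cpu)
			sum += per_cpu(example_counter, cpu);
		return sum;
	}
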
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 9372d4f376d5..9e94db2573a2 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1092,7 +1092,7 @@ void sun4v_pci_init(int node, char *model_name)
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
 		if (!page)
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 7d0e67c1ce50..005167f82419 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -535,7 +535,7 @@ static int __init topology_init(void)
 	while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
 		ncpus_probed++;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
 		if (p) {
 			register_cpu(p, i, NULL);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index eb36f7988ff7..90eaca3ec9a6 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1280,7 +1280,7 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1308,12 +1308,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (tlb_type == hypervisor) {
 			int j;
 
 			/* XXX get this mapping from machine description */
-			for_each_cpu(j) {
+			for_each_possible_cpu(j) {
 				if ((j >> 2) == (i >> 2))
 					cpu_set(j, cpu_sibling_map[i]);
 			}
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index 82032e159a76..baef13b58952 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -26,7 +26,7 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size)		\
 do {							\
 	unsigned int __i;				\
-	for_each_cpu(__i)				\
+	for_each_possible_cpu(__i)			\
 		memcpy((pcpudst)+__per_cpu_offset(__i),	\
 		       (src), (size));			\
 } while (0)