diff options
author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2006-03-28 17:50:51 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2006-03-28 21:44:15 -0500 |
commit | 0e5519548fdc8eadc3eacb49b1908d44d347fb2b (patch) | |
tree | 176d01babed4ece005f6c4715c464411c4262e9b | |
parent | bab70a4af737f623de5b034976a311055308ab86 (diff) |
[PATCH] for_each_possible_cpu: powerpc
for_each_cpu() actually iterates across all possible CPUs. We've had mistakes
in the past where people used for_each_cpu() when they should have been
iterating across only online or present CPUs. This is inefficient and
possibly buggy.
We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.
This patch replaces for_each_cpu with for_each_possible_cpu.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r-- | arch/powerpc/kernel/irq.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/lparcfg.c | 4 | ||||
-rw-r--r-- | arch/powerpc/kernel/rtas.c | 4 | ||||
-rw-r--r-- | arch/powerpc/kernel/setup-common.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/setup_32.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/setup_64.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kernel/smp.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/sysfs.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kernel/time.c | 4 | ||||
-rw-r--r-- | arch/powerpc/mm/stab.c | 2 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/interrupt.c | 2 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/pervasive.c | 2 | ||||
-rw-r--r-- | arch/powerpc/platforms/pseries/xics.c | 2 | ||||
-rw-r--r-- | include/asm-powerpc/percpu.h | 2 |
14 files changed, 21 insertions, 21 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 771a59cbd213..bb5c9501234c 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -379,7 +379,7 @@ void irq_ctx_init(void) | |||
379 | struct thread_info *tp; | 379 | struct thread_info *tp; |
380 | int i; | 380 | int i; |
381 | 381 | ||
382 | for_each_cpu(i) { | 382 | for_each_possible_cpu(i) { |
383 | memset((void *)softirq_ctx[i], 0, THREAD_SIZE); | 383 | memset((void *)softirq_ctx[i], 0, THREAD_SIZE); |
384 | tp = softirq_ctx[i]; | 384 | tp = softirq_ctx[i]; |
385 | tp->cpu = i; | 385 | tp->cpu = i; |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index e789fef4eb8a..1b73508ecb2b 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -56,7 +56,7 @@ static unsigned long get_purr(void) | |||
56 | unsigned long sum_purr = 0; | 56 | unsigned long sum_purr = 0; |
57 | int cpu; | 57 | int cpu; |
58 | 58 | ||
59 | for_each_cpu(cpu) { | 59 | for_each_possible_cpu(cpu) { |
60 | sum_purr += lppaca[cpu].emulated_time_base; | 60 | sum_purr += lppaca[cpu].emulated_time_base; |
61 | 61 | ||
62 | #ifdef PURR_DEBUG | 62 | #ifdef PURR_DEBUG |
@@ -222,7 +222,7 @@ static unsigned long get_purr(void) | |||
222 | int cpu; | 222 | int cpu; |
223 | struct cpu_usage *cu; | 223 | struct cpu_usage *cu; |
224 | 224 | ||
225 | for_each_cpu(cpu) { | 225 | for_each_possible_cpu(cpu) { |
226 | cu = &per_cpu(cpu_usage_array, cpu); | 226 | cu = &per_cpu(cpu_usage_array, cpu); |
227 | sum_purr += cu->current_tb; | 227 | sum_purr += cu->current_tb; |
228 | } | 228 | } |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 4b78ee0e5867..06636c927a7e 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -593,7 +593,7 @@ static void rtas_percpu_suspend_me(void *info) | |||
593 | data->waiting = 0; | 593 | data->waiting = 0; |
594 | data->args->args[data->args->nargs] = | 594 | data->args->args[data->args->nargs] = |
595 | rtas_call(ibm_suspend_me_token, 0, 1, NULL); | 595 | rtas_call(ibm_suspend_me_token, 0, 1, NULL); |
596 | for_each_cpu(i) | 596 | for_each_possible_cpu(i) |
597 | plpar_hcall_norets(H_PROD,i); | 597 | plpar_hcall_norets(H_PROD,i); |
598 | } else { | 598 | } else { |
599 | data->waiting = -EBUSY; | 599 | data->waiting = -EBUSY; |
@@ -626,7 +626,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) | |||
626 | /* Prod each CPU. This won't hurt, and will wake | 626 | /* Prod each CPU. This won't hurt, and will wake |
627 | * anyone we successfully put to sleep with H_Join | 627 | * anyone we successfully put to sleep with H_Join |
628 | */ | 628 | */ |
629 | for_each_cpu(i) | 629 | for_each_possible_cpu(i) |
630 | plpar_hcall_norets(H_PROD, i); | 630 | plpar_hcall_norets(H_PROD, i); |
631 | 631 | ||
632 | return data.waiting; | 632 | return data.waiting; |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 3473cb9cb0ab..c607f3b9ca17 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -431,7 +431,7 @@ void __init smp_setup_cpu_maps(void) | |||
431 | /* | 431 | /* |
432 | * Do the sibling map; assume only two threads per processor. | 432 | * Do the sibling map; assume only two threads per processor. |
433 | */ | 433 | */ |
434 | for_each_cpu(cpu) { | 434 | for_each_possible_cpu(cpu) { |
435 | cpu_set(cpu, cpu_sibling_map[cpu]); | 435 | cpu_set(cpu, cpu_sibling_map[cpu]); |
436 | if (cpu_has_feature(CPU_FTR_SMT)) | 436 | if (cpu_has_feature(CPU_FTR_SMT)) |
437 | cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); | 437 | cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index ae9c33d70731..a72bf5dceeee 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -226,7 +226,7 @@ int __init ppc_init(void) | |||
226 | if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); | 226 | if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); |
227 | 227 | ||
228 | /* register CPU devices */ | 228 | /* register CPU devices */ |
229 | for_each_cpu(i) | 229 | for_each_possible_cpu(i) |
230 | register_cpu(&cpu_devices[i], i, NULL); | 230 | register_cpu(&cpu_devices[i], i, NULL); |
231 | 231 | ||
232 | /* call platform init */ | 232 | /* call platform init */ |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 05b152299396..59aa92cd6fa4 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -474,7 +474,7 @@ static void __init irqstack_early_init(void) | |||
474 | * interrupt stacks must be under 256MB, we cannot afford to take | 474 | * interrupt stacks must be under 256MB, we cannot afford to take |
475 | * SLB misses on them. | 475 | * SLB misses on them. |
476 | */ | 476 | */ |
477 | for_each_cpu(i) { | 477 | for_each_possible_cpu(i) { |
478 | softirq_ctx[i] = (struct thread_info *) | 478 | softirq_ctx[i] = (struct thread_info *) |
479 | __va(lmb_alloc_base(THREAD_SIZE, | 479 | __va(lmb_alloc_base(THREAD_SIZE, |
480 | THREAD_SIZE, 0x10000000)); | 480 | THREAD_SIZE, 0x10000000)); |
@@ -507,7 +507,7 @@ static void __init emergency_stack_init(void) | |||
507 | */ | 507 | */ |
508 | limit = min(0x10000000UL, lmb.rmo_size); | 508 | limit = min(0x10000000UL, lmb.rmo_size); |
509 | 509 | ||
510 | for_each_cpu(i) | 510 | for_each_possible_cpu(i) |
511 | paca[i].emergency_sp = | 511 | paca[i].emergency_sp = |
512 | __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE; | 512 | __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE; |
513 | } | 513 | } |
@@ -624,7 +624,7 @@ void __init setup_per_cpu_areas(void) | |||
624 | size = PERCPU_ENOUGH_ROOM; | 624 | size = PERCPU_ENOUGH_ROOM; |
625 | #endif | 625 | #endif |
626 | 626 | ||
627 | for_each_cpu(i) { | 627 | for_each_possible_cpu(i) { |
628 | ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size); | 628 | ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size); |
629 | if (!ptr) | 629 | if (!ptr) |
630 | panic("Cannot allocate cpu data for CPU %d\n", i); | 630 | panic("Cannot allocate cpu data for CPU %d\n", i); |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 805eaedbc308..530f7dba0bd2 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -362,7 +362,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
362 | 362 | ||
363 | smp_space_timers(max_cpus); | 363 | smp_space_timers(max_cpus); |
364 | 364 | ||
365 | for_each_cpu(cpu) | 365 | for_each_possible_cpu(cpu) |
366 | if (cpu != boot_cpuid) | 366 | if (cpu != boot_cpuid) |
367 | smp_create_idle(cpu); | 367 | smp_create_idle(cpu); |
368 | } | 368 | } |
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index aca2f09cd842..73560ef6f802 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c | |||
@@ -74,7 +74,7 @@ static int __init smt_setup(void) | |||
74 | val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay", | 74 | val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay", |
75 | NULL); | 75 | NULL); |
76 | if (!smt_snooze_cmdline && val) { | 76 | if (!smt_snooze_cmdline && val) { |
77 | for_each_cpu(cpu) | 77 | for_each_possible_cpu(cpu) |
78 | per_cpu(smt_snooze_delay, cpu) = *val; | 78 | per_cpu(smt_snooze_delay, cpu) = *val; |
79 | } | 79 | } |
80 | 80 | ||
@@ -93,7 +93,7 @@ static int __init setup_smt_snooze_delay(char *str) | |||
93 | smt_snooze_cmdline = 1; | 93 | smt_snooze_cmdline = 1; |
94 | 94 | ||
95 | if (get_option(&str, &snooze)) { | 95 | if (get_option(&str, &snooze)) { |
96 | for_each_cpu(cpu) | 96 | for_each_possible_cpu(cpu) |
97 | per_cpu(smt_snooze_delay, cpu) = snooze; | 97 | per_cpu(smt_snooze_delay, cpu) = snooze; |
98 | } | 98 | } |
99 | 99 | ||
@@ -347,7 +347,7 @@ static int __init topology_init(void) | |||
347 | 347 | ||
348 | register_cpu_notifier(&sysfs_cpu_nb); | 348 | register_cpu_notifier(&sysfs_cpu_nb); |
349 | 349 | ||
350 | for_each_cpu(cpu) { | 350 | for_each_possible_cpu(cpu) { |
351 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 351 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
352 | 352 | ||
353 | #ifdef CONFIG_NUMA | 353 | #ifdef CONFIG_NUMA |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 4a27218a086c..24e3ad756de0 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -261,7 +261,7 @@ void snapshot_timebases(void) | |||
261 | 261 | ||
262 | if (!cpu_has_feature(CPU_FTR_PURR)) | 262 | if (!cpu_has_feature(CPU_FTR_PURR)) |
263 | return; | 263 | return; |
264 | for_each_cpu(cpu) | 264 | for_each_possible_cpu(cpu) |
265 | spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock); | 265 | spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock); |
266 | on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); | 266 | on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); |
267 | } | 267 | } |
@@ -751,7 +751,7 @@ void __init smp_space_timers(unsigned int max_cpus) | |||
751 | * systems works better if the two threads' timebase interrupts | 751 | * systems works better if the two threads' timebase interrupts |
752 | * are staggered by half a jiffy with respect to each other. | 752 | * are staggered by half a jiffy with respect to each other. |
753 | */ | 753 | */ |
754 | for_each_cpu(i) { | 754 | for_each_possible_cpu(i) { |
755 | if (i == boot_cpuid) | 755 | if (i == boot_cpuid) |
756 | continue; | 756 | continue; |
757 | if (i == (boot_cpuid ^ 1)) | 757 | if (i == (boot_cpuid ^ 1)) |
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 91d25fb27f89..4a9291d9fef8 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -239,7 +239,7 @@ void stabs_alloc(void) | |||
239 | if (cpu_has_feature(CPU_FTR_SLB)) | 239 | if (cpu_has_feature(CPU_FTR_SLB)) |
240 | return; | 240 | return; |
241 | 241 | ||
242 | for_each_cpu(cpu) { | 242 | for_each_possible_cpu(cpu) { |
243 | unsigned long newstab; | 243 | unsigned long newstab; |
244 | 244 | ||
245 | if (cpu == 0) | 245 | if (cpu == 0) |
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index ae62f5d5c31b..978be1c30c1b 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -364,7 +364,7 @@ void iic_init_IRQ(void) | |||
364 | setup_iic_hardcoded(); | 364 | setup_iic_hardcoded(); |
365 | 365 | ||
366 | irq_offset = 0; | 366 | irq_offset = 0; |
367 | for_each_cpu(cpu) { | 367 | for_each_possible_cpu(cpu) { |
368 | iic = &per_cpu(iic, cpu); | 368 | iic = &per_cpu(iic, cpu); |
369 | if (iic->regs) | 369 | if (iic->regs) |
370 | out_be64(&iic->regs->prio, 0xff); | 370 | out_be64(&iic->regs->prio, 0xff); |
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c index 58baeb52f6fc..7eed8c624517 100644 --- a/arch/powerpc/platforms/cell/pervasive.c +++ b/arch/powerpc/platforms/cell/pervasive.c | |||
@@ -217,7 +217,7 @@ void __init cell_pervasive_init(void) | |||
217 | if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) | 217 | if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) |
218 | return; | 218 | return; |
219 | 219 | ||
220 | for_each_cpu(cpu) { | 220 | for_each_possible_cpu(cpu) { |
221 | p = &cbe_pervasive[cpu]; | 221 | p = &cbe_pervasive[cpu]; |
222 | ret = cbe_find_pmd_mmio(cpu, p); | 222 | ret = cbe_find_pmd_mmio(cpu, p); |
223 | if (ret) | 223 | if (ret) |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index c60d3ff25a2f..4864cb32be25 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -541,7 +541,7 @@ nextnode: | |||
541 | ops = &pSeriesLP_ops; | 541 | ops = &pSeriesLP_ops; |
542 | else { | 542 | else { |
543 | #ifdef CONFIG_SMP | 543 | #ifdef CONFIG_SMP |
544 | for_each_cpu(i) { | 544 | for_each_possible_cpu(i) { |
545 | int hard_id; | 545 | int hard_id; |
546 | 546 | ||
547 | /* FIXME: Do this dynamically! --RR */ | 547 | /* FIXME: Do this dynamically! --RR */ |
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index 464301cd0d03..184a7a4d2fdf 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h | |||
@@ -27,7 +27,7 @@ | |||
27 | #define percpu_modcopy(pcpudst, src, size) \ | 27 | #define percpu_modcopy(pcpudst, src, size) \ |
28 | do { \ | 28 | do { \ |
29 | unsigned int __i; \ | 29 | unsigned int __i; \ |
30 | for_each_cpu(__i) \ | 30 | for_each_possible_cpu(__i) \ |
31 | memcpy((pcpudst)+__per_cpu_offset(__i), \ | 31 | memcpy((pcpudst)+__per_cpu_offset(__i), \ |
32 | (src), (size)); \ | 32 | (src), (size)); \ |
33 | } while (0) | 33 | } while (0) |