author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>    2011-04-28 01:07:23 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2011-05-04 01:22:59 -0400
commit     104699c0ab473535793b5fea156adaf309afd29b (patch)
tree       6fb55df112b7beb3af4840378b5b3bb55565659b /arch/powerpc/kernel
parent     48404f2e95ef0ffd8134d89c8abcd1a15e15f1b0 (diff)
powerpc: Convert old cpumask API into new one
Adapt to the new cpumask API. Almost all of the changes are trivial; the most
important one is the line below, because we plan to change the
task->cpus_allowed implementation.

- ctx->cpus_allowed = current->cpus_allowed;
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
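
For reference, the conversion pattern applied throughout this patch maps the old
value-based cpumask macros onto helpers that take a struct cpumask pointer. A
minimal sketch of that mapping (illustrative only; my_mask and
cpumask_api_example are hypothetical names, not part of the patch):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical mask, for illustration only. */
static struct cpumask my_mask;

static void cpumask_api_example(unsigned int cpu)
{
	cpumask_clear(&my_mask);			/* was: my_mask = CPU_MASK_NONE; */
	cpumask_set_cpu(cpu, &my_mask);			/* was: cpu_set(cpu, my_mask); */

	if (cpumask_test_cpu(cpu, &my_mask))		/* was: cpu_isset(cpu, my_mask) */
		pr_info("cpus set: %u\n",
			cpumask_weight(&my_mask));	/* was: cpus_weight(my_mask) */

	cpumask_clear_cpu(cpu, &my_mask);		/* was: cpu_clear(cpu, my_mask); */
}

The smp.c hunk below applies the same idea to the task affinity mask, reading it
through tsk_cpus_allowed(current) instead of taking &current->cpus_allowed
directly.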
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/crash.c         32
-rw-r--r--   arch/powerpc/kernel/setup-common.c   4
-rw-r--r--   arch/powerpc/kernel/smp.c            4
-rw-r--r--   arch/powerpc/kernel/traps.c          2
4 files changed, 21 insertions, 21 deletions
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 5b5e1f002a8e..ccc2198e6b23 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
 		return;
 
 	hard_irq_disable();
-	if (!cpu_isset(cpu, cpus_in_crash))
+	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
 		crash_save_cpu(regs, cpu);
-	cpu_set(cpu, cpus_in_crash);
+	cpumask_set_cpu(cpu, &cpus_in_crash);
 
 	/*
 	 * Entered via soft-reset - could be the kdump
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * Tell the kexec CPU that entered via soft-reset and ready
 	 * to go down.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr)) {
-		cpu_clear(cpu, cpus_in_sr);
+	if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
+		cpumask_clear_cpu(cpu, &cpus_in_sr);
 		atomic_inc(&enter_on_soft_reset);
 	}
 
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * This barrier is needed to make sure that all CPUs are stopped.
 	 * If not, soft-reset will be invoked to bring other CPUs.
 	 */
-	while (!cpu_isset(crashing_cpu, cpus_in_crash))
+	while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
 		cpu_relax();
 
 	if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
 {
 	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
-	cpu_clear(cpu, cpus_in_sr);
+	cpumask_clear_cpu(cpu, &cpus_in_sr);
 	while (atomic_read(&enter_on_soft_reset) != ncpus)
 		cpu_relax();
 }
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 */
 	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
@@ -144,20 +144,20 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 * user to do soft reset such that we get all.
 	 * Soft-reset will be used until better mechanism is implemented.
 	 */
-	if (cpus_weight(cpus_in_crash) < ncpus) {
+	if (cpumask_weight(&cpus_in_crash) < ncpus) {
 		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-			ncpus - cpus_weight(cpus_in_crash));
+			ncpus - cpumask_weight(&cpus_in_crash));
 		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
-		cpus_in_sr = CPU_MASK_NONE;
+		cpumask_clear(&cpus_in_sr);
 		atomic_set(&enter_on_soft_reset, 0);
-		while (cpus_weight(cpus_in_crash) < ncpus)
+		while (cpumask_weight(&cpus_in_crash) < ncpus)
 			cpu_relax();
 	}
 	/*
 	 * Make sure all CPUs are entered via soft-reset if the kdump is
 	 * invoked using soft-reset.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr))
+	if (cpumask_test_cpu(cpu, &cpus_in_sr))
 		crash_soft_reset_check(cpu);
 	/* Leave the IPI callback set */
 }
@@ -210,7 +210,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 		 * exited using 'x'(exit and recover) or
 		 * kexec_should_crash() failed for all running tasks.
 		 */
-		cpu_clear(cpu, cpus_in_sr);
+		cpumask_clear_cpu(cpu, &cpus_in_sr);
 		local_irq_restore(flags);
 		return;
 	}
@@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 	 * then start kexec boot.
 	 */
 	crash_soft_reset_check(cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 	machine_kexec(kexec_crash_image);
@@ -253,7 +253,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 
 void crash_kexec_secondary(struct pt_regs *regs)
 {
-	cpus_in_sr = CPU_MASK_NONE;
+	cpumask_clear(&cpus_in_sr);
 }
 #endif /* CONFIG_SMP */
 
@@ -345,7 +345,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crashing_cpu = smp_processor_id();
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 	crash_kexec_wait_realmode(crashing_cpu);
 
 	machine_kexec_mask_interrupts();
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 21f30cb68077..1475df6e403f 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
 	int i;
 
 	threads_per_core = tpc;
-	threads_core_mask = CPU_MASK_NONE;
+	cpumask_clear(&threads_core_mask);
 
 	/* This implementation only supports power of 2 number of threads
 	 * for simplicity and performance
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
 	BUG_ON(tpc != (1 << threads_shift));
 
 	for (i = 0; i < tpc; i++)
-		cpu_set(i, threads_core_mask);
+		cpumask_set_cpu(i, &threads_core_mask);
 
 	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
 	       tpc, tpc > 1 ? "s" : "");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index b6083f4f39b1..87517ab6d365 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -513,7 +513,7 @@ int cpu_first_thread_of_core(int core)
 }
 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
 
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -614,7 +614,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	 * se we pin us down to CPU 0 for a short while
 	 */
 	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-	cpumask_copy(old_mask, &current->cpus_allowed);
+	cpumask_copy(old_mask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 
 	if (smp_ops && smp_ops->setup_cpu)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4a6a109b6816..06b9d457d0a7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -221,7 +221,7 @@ void system_reset_exception(struct pt_regs *regs)
 	}
 
 #ifdef CONFIG_KEXEC
-	cpu_set(smp_processor_id(), cpus_in_sr);
+	cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
 #endif
 
 	die("System Reset", regs, SIGABRT);