Diffstat (limited to 'arch/powerpc/kernel/crash.c')
 arch/powerpc/kernel/crash.c | 164 ++++++------------
 1 file changed, 43 insertions(+), 121 deletions(-)
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 4457382f8667..4e6ee944495a 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,7 +48,7 @@ int crashing_cpu = -1;
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
-#define CRASH_HANDLER_MAX 2
+#define CRASH_HANDLER_MAX 3
 /* NULL terminated list of shutdown handles */
 static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
 static DEFINE_SPINLOCK(crash_handlers_lock);
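
Note: CRASH_HANDLER_MAX grows from 2 to 3 to make room for one more registered crash-shutdown hook; the array keeps one extra slot so the list stays NULL-terminated. Plausibly the new customer is the SPU code that a later hunk moves out of this file, which would then re-attach itself through the registration API. A minimal sketch of registering such a hook, assuming the crash_shutdown_register()/crash_shutdown_t interface from <asm/kexec.h>; the handler name is hypothetical:

    #include <asm/kexec.h>

    /* Hypothetical hook: quiesce a device before the kdump kernel boots.
     * It runs on the crashing CPU with interrupts hard-disabled, so it
     * must not sleep or take contended locks. */
    static void example_crash_shutdown(void)
    {
    }

    static int __init example_init(void)
    {
        /* Fails (non-zero) once all CRASH_HANDLER_MAX slots are taken. */
        return crash_shutdown_register(example_crash_shutdown);
    }
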
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
 		return;
 
 	hard_irq_disable();
-	if (!cpu_isset(cpu, cpus_in_crash))
+	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
 		crash_save_cpu(regs, cpu);
-	cpu_set(cpu, cpus_in_crash);
+	cpumask_set_cpu(cpu, &cpus_in_crash);
 
 	/*
 	 * Entered via soft-reset - could be the kdump
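
Note: this hunk and most of those below are a mechanical conversion from the old cpu_* helpers, which operated on a cpumask_t directly, to the cpumask_* accessors that take a struct cpumask pointer (the form that also works for off-stack masks). The mapping, as a self-contained sketch using only <linux/cpumask.h>:

    #include <linux/cpumask.h>

    static cpumask_t demo_mask = CPU_MASK_NONE;

    static void demo(int cpu)
    {
        cpumask_set_cpu(cpu, &demo_mask);          /* was cpu_set() */
        if (cpumask_test_cpu(cpu, &demo_mask))     /* was cpu_isset() */
            cpumask_clear_cpu(cpu, &demo_mask);    /* was cpu_clear() */
        cpumask_clear(&demo_mask);    /* was demo_mask = CPU_MASK_NONE */
    }
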
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * Tell the kexec CPU that entered via soft-reset and ready
 	 * to go down.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr)) {
-		cpu_clear(cpu, cpus_in_sr);
+	if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
+		cpumask_clear_cpu(cpu, &cpus_in_sr);
 		atomic_inc(&enter_on_soft_reset);
 	}
 
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * This barrier is needed to make sure that all CPUs are stopped.
 	 * If not, soft-reset will be invoked to bring other CPUs.
 	 */
-	while (!cpu_isset(crashing_cpu, cpus_in_crash))
+	while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
 		cpu_relax();
 
 	if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
 {
 	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
-	cpu_clear(cpu, cpus_in_sr);
+	cpumask_clear_cpu(cpu, &cpus_in_sr);
 	while (atomic_read(&enter_on_soft_reset) != ncpus)
 		cpu_relax();
 }
@@ -125,14 +125,14 @@ static void crash_kexec_prepare_cpus(int cpu)
 	smp_wmb();
 
 	/*
-	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
+	 * FIXME: Until we will have the way to stop other CPUs reliably,
 	 * the crash CPU will send an IPI and wait for other CPUs to
 	 * respond.
 	 * Delay of at least 10 seconds.
 	 */
 	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
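
Note: the loop above is a bounded busy-wait: each pass burns roughly one millisecond in mdelay(), so msecs = 10000 yields the ten-second ceiling the comment promises, and cpumask_weight() counts how many CPUs have checked into cpus_in_crash. Restated as a stand-alone helper (a sketch; the helper and its name are ours, not the kernel's):

    /* Spin until `ncpus` CPUs show up in `mask` or `timeout_ms` expires.
     * mdelay() busy-waits because timer interrupts may already be dead. */
    static void wait_for_mask(const struct cpumask *mask,
                              unsigned int ncpus, unsigned int timeout_ms)
    {
        while (cpumask_weight(mask) < ncpus && --timeout_ms > 0) {
            cpu_relax();
            mdelay(1);
        }
    }
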
@@ -144,52 +144,24 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 * user to do soft reset such that we get all.
 	 * Soft-reset will be used until better mechanism is implemented.
 	 */
-	if (cpus_weight(cpus_in_crash) < ncpus) {
+	if (cpumask_weight(&cpus_in_crash) < ncpus) {
 		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-			ncpus - cpus_weight(cpus_in_crash));
+			ncpus - cpumask_weight(&cpus_in_crash));
 		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
-		cpus_in_sr = CPU_MASK_NONE;
+		cpumask_clear(&cpus_in_sr);
 		atomic_set(&enter_on_soft_reset, 0);
-		while (cpus_weight(cpus_in_crash) < ncpus)
+		while (cpumask_weight(&cpus_in_crash) < ncpus)
 			cpu_relax();
 	}
 	/*
 	 * Make sure all CPUs are entered via soft-reset if the kdump is
 	 * invoked using soft-reset.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr))
+	if (cpumask_test_cpu(cpu, &cpus_in_sr))
 		crash_soft_reset_check(cpu);
 	/* Leave the IPI callback set */
 }
 
-/* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#ifdef CONFIG_PPC_STD_MMU_64
-static void crash_kexec_wait_realmode(int cpu)
-{
-	unsigned int msecs;
-	int i;
-
-	msecs = 10000;
-	for (i=0; i < NR_CPUS && msecs > 0; i++) {
-		if (i == cpu)
-			continue;
-
-		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
-			barrier();
-			if (!cpu_possible(i)) {
-				break;
-			}
-			if (!cpu_online(i)) {
-				break;
-			}
-			msecs--;
-			mdelay(1);
-		}
-	}
-	mb();
-}
-#endif
-
 /*
  * This function will be called by secondary cpus or by kexec cpu
  * if soft-reset is activated to stop some CPUs.
@@ -210,7 +182,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 		 * exited using 'x'(exit and recover) or
 		 * kexec_should_crash() failed for all running tasks.
 		 */
-		cpu_clear(cpu, cpus_in_sr);
+		cpumask_clear_cpu(cpu, &cpus_in_sr);
 		local_irq_restore(flags);
 		return;
 	}
@@ -224,7 +196,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 	 * then start kexec boot.
 	 */
 	crash_soft_reset_check(cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 	machine_kexec(kexec_crash_image);
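
Note: the (1, 0) arguments to ppc_md.kexec_cpu_down() read as (crash_shutdown, secondary), assuming the hook keeps its usual machdep signature: 1 = this is a crash shutdown, 0 = we are the kexec/primary CPU, while the secondaries in crash_ipi_callback() pass (1, 1). A sketch of the call pattern:

    #include <asm/machdep.h>

    /* Assumed hook shape in struct machdep_calls:
     *     void (*kexec_cpu_down)(int crash_shutdown, int secondary); */
    static void demo_cpu_down(int secondary)
    {
        if (ppc_md.kexec_cpu_down)
            ppc_md.kexec_cpu_down(1, secondary);
    }
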
@@ -233,7 +205,8 @@ void crash_kexec_secondary(struct pt_regs *regs)
 	crash_ipi_callback(regs);
 }
 
-#else
+#else	/* ! CONFIG_SMP */
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
@@ -251,75 +224,39 @@ static void crash_kexec_prepare_cpus(int cpu)
 
 void crash_kexec_secondary(struct pt_regs *regs)
 {
-	cpus_in_sr = CPU_MASK_NONE;
+	cpumask_clear(&cpus_in_sr);
 }
-#endif
-#ifdef CONFIG_SPU_BASE
-
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-struct crash_spu_info {
-	struct spu *spu;
-	u32 saved_spu_runcntl_RW;
-	u32 saved_spu_status_R;
-	u32 saved_spu_npc_RW;
-	u64 saved_mfc_sr1_RW;
-	u64 saved_mfc_dar;
-	u64 saved_mfc_dsisr;
-};
+#endif	/* CONFIG_SMP */
 
-#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
-static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
-
-static void crash_kexec_stop_spus(void)
+/* wait for all the CPUs to hit real mode but timeout if they don't come in */
+#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
+static void crash_kexec_wait_realmode(int cpu)
 {
-	struct spu *spu;
+	unsigned int msecs;
 	int i;
-	u64 tmp;
 
-	for (i = 0; i < CRASH_NUM_SPUS; i++) {
-		if (!crash_spu_info[i].spu)
-			continue;
-
-		spu = crash_spu_info[i].spu;
-
-		crash_spu_info[i].saved_spu_runcntl_RW =
-			in_be32(&spu->problem->spu_runcntl_RW);
-		crash_spu_info[i].saved_spu_status_R =
-			in_be32(&spu->problem->spu_status_R);
-		crash_spu_info[i].saved_spu_npc_RW =
-			in_be32(&spu->problem->spu_npc_RW);
-
-		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
-		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
-		tmp = spu_mfc_sr1_get(spu);
-		crash_spu_info[i].saved_mfc_sr1_RW = tmp;
-
-		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
-		spu_mfc_sr1_set(spu, tmp);
-
-		__delay(200);
-	}
-}
-
-void crash_register_spus(struct list_head *list)
-{
-	struct spu *spu;
-
-	list_for_each_entry(spu, list, full_list) {
-		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
+	msecs = 10000;
+	for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
+		if (i == cpu)
 			continue;
 
-		crash_spu_info[spu->number].spu = spu;
+		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+			barrier();
+			if (!cpu_possible(i)) {
+				break;
+			}
+			if (!cpu_online(i)) {
+				break;
+			}
+			msecs--;
+			mdelay(1);
+		}
 	}
+	mb();
 }
-
 #else
-static inline void crash_kexec_stop_spus(void)
-{
-}
-#endif /* CONFIG_SPU_BASE */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+#endif	/* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */
 
 /*
  * Register a function to be called on shutdown. Only use this if you
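
Note: two independent cleanups land in this hunk. First, the Cell/SPU register save/stop code leaves the generic crash path; presumably it moves into the Cell platform code and re-attaches via crash_shutdown_register(), which would also explain the CRASH_HANDLER_MAX bump in the first hunk. Second, crash_kexec_wait_realmode() from the earlier removal reappears here, now iterating up to nr_cpu_ids instead of NR_CPUS and paired with an empty !SMP stub; the wait polls each CPU's paca until the secondary advances paca[i].kexec_state to KEXEC_STATE_REAL_MODE. The stub pattern in isolation, which is what lets the caller drop its #ifdef (compare the next hunk):

    #if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
    static void crash_kexec_wait_realmode(int cpu)
    {
        /* real wait loop, polling paca[i].kexec_state as above */
    }
    #else
    static inline void crash_kexec_wait_realmode(int cpu) {}
    #endif

    /* ...so the call site compiles unguarded in every configuration:
     *     crash_kexec_wait_realmode(crashing_cpu); */
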
@@ -409,23 +346,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crashing_cpu = smp_processor_id();
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 	crash_kexec_wait_realmode(crashing_cpu);
-#endif
-
-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
-
-		if (!desc || !desc->chip || !desc->chip->eoi)
-			continue;
-
-		if (desc->status & IRQ_INPROGRESS)
-			desc->chip->eoi(i);
 
-		if (!(desc->status & IRQ_DISABLED))
-			desc->chip->shutdown(i);
-	}
+	machine_kexec_mask_interrupts();
 
 	/*
 	 * Call registered shutdown routines savely. Swap out
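
Note: the open-coded irq_desc walk is replaced by machine_kexec_mask_interrupts(), a helper shared with the rest of the powerpc kexec code. Judging purely by the lines removed above, the helper has to perform essentially this walk (a sketch reconstructed from the removed code; the real helper lives elsewhere in arch/powerpc/kernel/ and may differ in detail):

    #include <linux/irq.h>

    static void mask_interrupts_sketch(void)
    {
        unsigned int i;

        for_each_irq(i) {
            struct irq_desc *desc = irq_to_desc(i);

            if (!desc || !desc->chip || !desc->chip->eoi)
                continue;

            /* Ack anything still in flight... */
            if (desc->status & IRQ_INPROGRESS)
                desc->chip->eoi(i);

            /* ...then shut the source down for the kdump kernel. */
            if (!(desc->status & IRQ_DISABLED))
                desc->chip->shutdown(i);
        }
    }
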
@@ -450,8 +374,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;
 
-	crash_kexec_stop_spus();
-
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 }
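
Note: the "swap out" that the trailing comment refers to is the fault-handler guard around the handler loop, visible in the context lines restoring __debugger_fault_handler. Paraphrased from the surrounding file (not part of this diff), the idea is that each registered routine runs under a setjmp() guard, so a hook that faults is abandoned via longjmp() from the temporary fault handler instead of wedging the dump:

    crash_shutdown_cpu = smp_processor_id();
    old_handler = __debugger_fault_handler;
    __debugger_fault_handler = handle_fault;    /* longjmp()s back here */
    for (i = 0; crash_shutdown_handles[i]; i++) {
        if (setjmp(crash_shutdown_buf) == 0)
            crash_shutdown_handles[i]();
    }
    crash_shutdown_cpu = -1;
    __debugger_fault_handler = old_handler;
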
