Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--   arch/s390/kernel/smp.c   68
1 file changed, 34 insertions, 34 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2d337cbb9329..006ed5016eb4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
+#include <linux/irqflags.h>
 #include <linux/cpu.h>
 #include <linux/timex.h>
 #include <linux/bootmem.h>
@@ -50,12 +51,6 @@
 #include <asm/vdso.h>
 #include "entry.h"
 
-/*
- * An array with a pointer the lowcore of every CPU.
- */
-struct _lowcore *lowcore_ptr[NR_CPUS];
-EXPORT_SYMBOL(lowcore_ptr);
-
 static struct task_struct *current_set[NR_CPUS];
 
 static u8 smp_cpu_type;
@@ -81,9 +76,7 @@ void smp_send_stop(void)
 
         /* Disable all interrupts/machine checks */
         __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
-
-        /* write magic number to zero page (absolute 0) */
-        lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
+        trace_hardirqs_off();
 
         /* stop all processors */
         for_each_online_cpu(cpu) {
@@ -233,7 +226,7 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
  */
 #define CPU_INIT_NO 1
 
-#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+#ifdef CONFIG_ZFCPDUMP
 
 /*
  * zfcpdump_prefix_array holds prefix registers for the following scenario:
@@ -274,7 +267,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
 
 static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
 
-#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
+#endif /* CONFIG_ZFCPDUMP */
 
 static int cpu_stopped(int cpu)
 {
@@ -304,8 +297,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
 {
         int cpu_id, logical_cpu;
 
-        logical_cpu = first_cpu(avail);
-        if (logical_cpu == NR_CPUS)
+        logical_cpu = cpumask_first(&avail);
+        if (logical_cpu >= nr_cpu_ids)
                 return 0;
         for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
                 if (cpu_known(cpu_id))
@@ -316,8 +309,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
                         continue;
                 cpu_set(logical_cpu, cpu_present_map);
                 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
-                logical_cpu = next_cpu(logical_cpu, avail);
-                if (logical_cpu == NR_CPUS)
+                logical_cpu = cpumask_next(logical_cpu, &avail);
+                if (logical_cpu >= nr_cpu_ids)
                         break;
         }
         return 0;
@@ -329,8 +322,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
         int cpu_id, logical_cpu, cpu;
         int rc;
 
-        logical_cpu = first_cpu(avail);
-        if (logical_cpu == NR_CPUS)
+        logical_cpu = cpumask_first(&avail);
+        if (logical_cpu >= nr_cpu_ids)
                 return 0;
         info = kmalloc(sizeof(*info), GFP_KERNEL);
         if (!info)
@@ -351,8 +344,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
                         smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
                 else
                         smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
-                logical_cpu = next_cpu(logical_cpu, avail);
-                if (logical_cpu == NR_CPUS)
+                logical_cpu = cpumask_next(logical_cpu, &avail);
+                if (logical_cpu >= nr_cpu_ids)
                         break;
         }
 out:
@@ -379,7 +372,7 @@ static void __init smp_detect_cpus(void)
 
         c_cpus = 1;
         s_cpus = 0;
-        boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+        boot_cpu_addr = __cpu_logical_map[0];
         info = kmalloc(sizeof(*info), GFP_KERNEL);
         if (!info)
                 panic("smp_detect_cpus failed to allocate memory\n");
@@ -453,7 +446,7 @@ int __cpuinit start_secondary(void *cpuvoid)
         /* Switch on interrupts */
         local_irq_enable();
         /* Print info about this processor */
-        print_cpu_info(&S390_lowcore.cpu_data);
+        print_cpu_info();
         /* cpu_idle will call schedule for us */
         cpu_idle();
         return 0;
@@ -515,7 +508,6 @@ out:
         return -ENOMEM;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void smp_free_lowcore(int cpu)
 {
         struct _lowcore *lowcore;
@@ -534,7 +526,6 @@ static void smp_free_lowcore(int cpu)
         free_pages((unsigned long) lowcore, lc_order);
         lowcore_ptr[cpu] = NULL;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /* Upping and downing of CPUs */
 int __cpuinit __cpu_up(unsigned int cpu)
@@ -543,16 +534,23 @@ int __cpuinit __cpu_up(unsigned int cpu)
         struct _lowcore *cpu_lowcore;
         struct stack_frame *sf;
         sigp_ccode ccode;
+        u32 lowcore;
 
         if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
                 return -EIO;
         if (smp_alloc_lowcore(cpu))
                 return -ENOMEM;
-
-        ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
-                                   cpu, sigp_set_prefix);
-        if (ccode)
-                return -EIO;
+        do {
+                ccode = signal_processor(cpu, sigp_initial_cpu_reset);
+                if (ccode == sigp_busy)
+                        udelay(10);
+                if (ccode == sigp_not_operational)
+                        goto err_out;
+        } while (ccode == sigp_busy);
+
+        lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
+        while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
+                udelay(10);
 
         idle = current_set[cpu];
         cpu_lowcore = lowcore_ptr[cpu];
@@ -571,9 +569,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
                 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
         cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
         cpu_lowcore->current_task = (unsigned long) idle;
-        cpu_lowcore->cpu_data.cpu_nr = cpu;
+        cpu_lowcore->cpu_nr = cpu;
         cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
-        cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
         eieio();
 
         while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -582,6 +579,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
         while (!cpu_online(cpu))
                 cpu_relax();
         return 0;
+
+err_out:
+        smp_free_lowcore(cpu);
+        return -EIO;
 }
 
 static int __init setup_possible_cpus(char *s)
@@ -589,9 +590,8 @@ static int __init setup_possible_cpus(char *s)
         int pcpus, cpu;
 
         pcpus = simple_strtoul(s, NULL, 0);
-        cpu_possible_map = cpumask_of_cpu(0);
-        for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
-                cpu_set(cpu, cpu_possible_map);
+        for (cpu = 0; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
+                set_cpu_possible(cpu, true);
         return 0;
 }
 early_param("possible_cpus", setup_possible_cpus);
@@ -663,7 +663,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         /* request the 0x1201 emergency signal external interrupt */
         if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                 panic("Couldn't request external interrupt 0x1201");
-        print_cpu_info(&S390_lowcore.cpu_data);
+        print_cpu_info();
 
         /* Reallocate current lowcore, but keep its contents. */
         lc_order = sizeof(long) == 8 ? 1 : 0;