Diffstat (limited to 'arch/s390/kernel/smp.c')
 arch/s390/kernel/smp.c | 93
 1 file changed, 52 insertions(+), 41 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8127ebd59c4d..1d55c95f617c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -23,6 +23,7 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -43,7 +44,6 @@
 #include <asm/sigp.h>
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
-#include <asm/s390_ext.h>
 #include <asm/cpcmd.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
@@ -156,18 +156,20 @@ void smp_send_stop(void)
  * cpus are handled.
  */
 
-static void do_ext_call_interrupt(__u16 code)
+static void do_ext_call_interrupt(unsigned int ext_int_code,
+                                  unsigned int param32, unsigned long param64)
 {
         unsigned long bits;
 
+        kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
         /*
          * handle bit signal external calls
-         *
-         * For the ec_schedule signal we have to do nothing. All the work
-         * is done automatically when we return from the interrupt.
          */
         bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
+        if (test_bit(ec_schedule, &bits))
+                scheduler_ipi();
+
         if (test_bit(ec_call_function, &bits))
                 generic_smp_call_function_interrupt();
 
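The handler above drains pending signals atomically: xchg() on ext_call_fast returns every bit raised so far and clears the word in one operation, so a signal set between the read and the clear cannot be lost. With the generic scheduler IPI rework, the ec_schedule bit must now be handled explicitly via scheduler_ipi() instead of relying on the return-from-interrupt path. A minimal userspace sketch of the same fetch-and-clear dispatch pattern (hypothetical names, GCC atomic builtins standing in for the kernel's xchg()):

#include <stdio.h>

enum { ec_schedule, ec_call_function };

static unsigned long ext_call_mailbox;

static void handle_ext_call(void)
{
        /* Fetch and clear all pending bits in one atomic operation,
         * mirroring xchg(&S390_lowcore.ext_call_fast, 0). */
        unsigned long bits = __atomic_exchange_n(&ext_call_mailbox, 0,
                                                 __ATOMIC_SEQ_CST);

        if (bits & (1UL << ec_schedule))
                printf("scheduler_ipi()\n");
        if (bits & (1UL << ec_call_function))
                printf("generic_smp_call_function_interrupt()\n");
}

int main(void)
{
        ext_call_mailbox |= 1UL << ec_schedule;  /* sender side */
        handle_ext_call();                       /* receiver side */
        return 0;
}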
@@ -260,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit)
 
         memset(&parms.orvals, 0, sizeof(parms.orvals));
         memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-        parms.orvals[cr] = 1 << bit;
+        parms.orvals[cr] = 1UL << bit;
         on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -274,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 
         memset(&parms.orvals, 0, sizeof(parms.orvals));
         memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-        parms.andvals[cr] = ~(1L << bit);
+        parms.andvals[cr] = ~(1UL << bit);
         on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
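The widening from 1 << bit to 1UL << bit in the two hunks above is a correctness fix, not cosmetics: the control-register masks are unsigned long (64-bit on s390x), while 1 << bit is an int expression, so bit 31 overflows into the sign bit and bits 32-63 cannot be set at all. A standalone sketch of the failure mode (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
        int bit = 31;
        /* int shift: 1 << 31 overflows int; on common ABIs the negative
         * result then sign-extends when widened to 64 bits. */
        unsigned long bad = 1 << bit;
        /* unsigned long shift: sets exactly bit 31 and nothing else. */
        unsigned long good = 1UL << bit;

        printf("bad  = %#lx\n", bad);   /* 0xffffffff80000000 (typical) */
        printf("good = %#lx\n", good);  /* 0x80000000 */
        return 0;
}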
@@ -332,7 +334,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
                 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
                 if (!cpu_stopped(logical_cpu))
                         continue;
-                cpu_set(logical_cpu, cpu_present_map);
+                set_cpu_present(logical_cpu, true);
                 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
                 logical_cpu = cpumask_next(logical_cpu, &avail);
                 if (logical_cpu >= nr_cpu_ids)
@@ -364,7 +366,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
                         continue;
                 __cpu_logical_map[logical_cpu] = cpu_id;
                 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
-                cpu_set(logical_cpu, cpu_present_map);
+                set_cpu_present(logical_cpu, true);
                 if (cpu >= info->configured)
                         smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
                 else
@@ -382,7 +384,7 @@ static int __smp_rescan_cpus(void)
 {
         cpumask_t avail;
 
-        cpus_xor(avail, cpu_possible_map, cpu_present_map);
+        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
         if (smp_use_sigp_detection)
                 return smp_rescan_cpus_sigp(avail);
         else
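The cpu_set()/cpus_xor()-style conversions throughout this patch follow one pattern: the old macros take cpumask_t values, while the cpumask_*() and set_cpu_*() replacements take pointers, which is what lets CONFIG_CPUMASK_OFFSTACK keep large masks off the stack. A sketch of the pointer-based idiom (hypothetical helper, real cpumask API):

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

static void report_absent_cpus(void)
{
        cpumask_var_t avail;    /* allocated off-stack with CONFIG_CPUMASK_OFFSTACK */
        int cpu;

        if (!alloc_cpumask_var(&avail, GFP_KERNEL))
                return;
        /* possible XOR present = cpus that could exist but are not present */
        cpumask_xor(avail, cpu_possible_mask, cpu_present_mask);
        for_each_cpu(cpu, avail)
                pr_info("cpu %d is possible but not present\n", cpu);
        free_cpumask_var(avail);
}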
@@ -464,29 +466,29 @@ int __cpuinit start_secondary(void *cpuvoid)
         notify_cpu_starting(smp_processor_id());
         /* Mark this cpu as online */
         ipi_call_lock();
-        cpu_set(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), true);
         ipi_call_unlock();
         /* Switch on interrupts */
         local_irq_enable();
-        /* Print info about this processor */
-        print_cpu_info();
         /* cpu_idle will call schedule for us */
         cpu_idle();
         return 0;
 }
 
-static void __init smp_create_idle(unsigned int cpu)
+struct create_idle {
+        struct work_struct work;
+        struct task_struct *idle;
+        struct completion done;
+        int cpu;
+};
+
+static void __cpuinit smp_fork_idle(struct work_struct *work)
 {
-        struct task_struct *p;
+        struct create_idle *c_idle;
 
-        /*
-         * don't care about the psw and regs settings since we'll never
-         * reschedule the forked task.
-         */
-        p = fork_idle(cpu);
-        if (IS_ERR(p))
-                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-        current_set[cpu] = p;
+        c_idle = container_of(work, struct create_idle, work);
+        c_idle->idle = fork_idle(c_idle->cpu);
+        complete(&c_idle->done);
 }
 
 static int __cpuinit smp_alloc_lowcore(int cpu)
@@ -550,6 +552,7 @@ static void smp_free_lowcore(int cpu)
 int __cpuinit __cpu_up(unsigned int cpu)
 {
         struct _lowcore *cpu_lowcore;
+        struct create_idle c_idle;
         struct task_struct *idle;
         struct stack_frame *sf;
         u32 lowcore;
@@ -557,6 +560,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
         if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
                 return -EIO;
+        idle = current_set[cpu];
+        if (!idle) {
+                c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
+                INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
+                c_idle.cpu = cpu;
+                schedule_work(&c_idle.work);
+                wait_for_completion(&c_idle.done);
+                if (IS_ERR(c_idle.idle))
+                        return PTR_ERR(c_idle.idle);
+                idle = c_idle.idle;
+                current_set[cpu] = c_idle.idle;
+        }
+        init_idle(idle, cpu);
         if (smp_alloc_lowcore(cpu))
                 return -ENOMEM;
         do {
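The new idle-task creation above is the standard on-stack work-plus-completion idiom: __cpu_up() queues the fork onto a kernel worker and blocks until it signals completion, apparently so that fork_idle() runs from a kernel thread rather than from whichever task happened to trigger CPU hotplug. The same pattern in isolation (hypothetical names; a sketch, not part of the patch):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct fork_req {
        struct work_struct work;
        struct completion done;
        long result;
};

static void fork_req_fn(struct work_struct *work)
{
        struct fork_req *req = container_of(work, struct fork_req, work);

        req->result = 42;       /* stand-in for fork_idle(req->cpu) */
        complete(&req->done);   /* wake the waiter */
}

static long run_in_worker(void)
{
        struct fork_req req;

        req.done = COMPLETION_INITIALIZER_ONSTACK(req.done);
        INIT_WORK_ONSTACK(&req.work, fork_req_fn);
        schedule_work(&req.work);       /* execute in a kernel worker thread */
        wait_for_completion(&req.done);
        return req.result;
}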
@@ -571,7 +587,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
         while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
                 udelay(10);
 
-        idle = current_set[cpu];
         cpu_lowcore = lowcore_ptr[cpu];
         cpu_lowcore->kernel_stack = (unsigned long)
                 task_stack_page(idle) + THREAD_SIZE;
@@ -593,6 +608,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
         cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
         cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
         cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
+        memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
+               MAX_FACILITY_BIT/8);
         eieio();
 
         while (sigp(cpu, sigp_restart) == sigp_busy)
@@ -626,7 +643,7 @@ int __cpu_disable(void)
         struct ec_creg_mask_parms cr_parms;
         int cpu = smp_processor_id();
 
-        cpu_clear(cpu, cpu_online_map);
+        set_cpu_online(cpu, false);
 
         /* Disable pfault pseudo page faults on this cpu. */
         pfault_fini();
@@ -636,8 +653,8 @@ int __cpu_disable(void)
 
         /* disable all external interrupts */
         cr_parms.orvals[0] = 0;
-        cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
-                                1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
+        cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
+                                1 << 10 | 1 << 9 | 1 << 6 | 1 << 4);
         /* disable all I/O interrupts */
         cr_parms.orvals[6] = 0;
         cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
@@ -661,10 +678,9 @@ void __cpu_die(unsigned int cpu)
                 udelay(10);
         smp_free_lowcore(cpu);
         atomic_dec(&init_mm.context.attach_count);
-        pr_info("Processor %d stopped\n", cpu);
 }
 
-void cpu_die(void)
+void __noreturn cpu_die(void)
 {
         idle_task_exit();
         while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
@@ -681,14 +697,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #endif
         unsigned long async_stack, panic_stack;
         struct _lowcore *lowcore;
-        unsigned int cpu;
 
         smp_detect_cpus();
 
         /* request the 0x1201 emergency signal external interrupt */
         if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                 panic("Couldn't request external interrupt 0x1201");
-        print_cpu_info();
 
         /* Reallocate current lowcore, but keep its contents. */
         lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -716,9 +730,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
                 BUG();
 #endif
-        for_each_possible_cpu(cpu)
-                if (cpu != smp_processor_id())
-                        smp_create_idle(cpu);
 }
 
 void __init smp_prepare_boot_cpu(void)
@@ -726,8 +737,8 @@ void __init smp_prepare_boot_cpu(void)
         BUG_ON(smp_processor_id() != 0);
 
         current_thread_info()->cpu = 0;
-        cpu_set(0, cpu_present_map);
-        cpu_set(0, cpu_online_map);
+        set_cpu_present(0, true);
+        set_cpu_online(0, true);
         S390_lowcore.percpu_offset = __per_cpu_offset[0];
         current_set[0] = current;
         smp_cpu_state[0] = CPU_STATE_CONFIGURED;
@@ -1004,21 +1015,21 @@ int __ref smp_rescan_cpus(void)
 
         get_online_cpus();
         mutex_lock(&smp_cpu_state_mutex);
-        newcpus = cpu_present_map;
+        cpumask_copy(&newcpus, cpu_present_mask);
         rc = __smp_rescan_cpus();
         if (rc)
                 goto out;
-        cpus_andnot(newcpus, cpu_present_map, newcpus);
-        for_each_cpu_mask(cpu, newcpus) {
+        cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
+        for_each_cpu(cpu, &newcpus) {
                 rc = smp_add_present_cpu(cpu);
                 if (rc)
-                        cpu_clear(cpu, cpu_present_map);
+                        set_cpu_present(cpu, false);
         }
         rc = 0;
 out:
         mutex_unlock(&smp_cpu_state_mutex);
         put_online_cpus();
-        if (!cpus_empty(newcpus))
+        if (!cpumask_empty(&newcpus))
                 topology_schedule_update();
         return rc;
 }