| author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2008-02-19 09:29:29 -0500 |
| --- | --- | --- |
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-02-19 09:29:33 -0500 |
| commit | 591bb4f637e48950d35e2ca01e6b962c519d96e5 (patch) | |
| tree | ecc2dc149daccfa336e4cfc462574a9f3dc6c76d /arch/s390/kernel | |
| parent | a22fb7ff1259e6ee87d0ba3559c9f7b7d0cb20d2 (diff) | |
[S390] Initialize per cpu lowcores on cpu hotplug.
Copy only the first 512 read-only bytes of the current cpu's lowcore
when a new cpu is onlined. The rest is zeroed out and must be
initialized explicitly. So far the code copied the entire lowcore and
initialized only the fields that needed it, which lets missing
initializations go unnoticed; the new approach should reveal such bugs
in future enhancements quite early.
Also, replacing the lowcore of the first cpu is now done atomically,
with interrupts and machine checks disabled.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/smp.c | 53
1 file changed, 38 insertions(+), 15 deletions(-)
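
Before the diff itself, a minimal userspace sketch of the pattern the commit message describes: copy only the read-only 512-byte prefix of an existing lowcore and zero the remainder, so every other field has to be set up explicitly. `struct demo_lowcore`, `LOWCORE_RO_PREFIX` and `init_new_lowcore()` are illustrative names, not kernel identifiers; the real code operates on `struct _lowcore` and uses `copy_page()` instead of `memcpy()` (see the comment in the first hunk below).

```c
#include <string.h>

#define LOWCORE_RO_PREFIX 512   /* read-only bytes inherited from the running cpu */

/* Illustrative stand-in for the kernel's struct _lowcore. */
struct demo_lowcore {
        unsigned char data[4096];
};

/*
 * Copy only the first LOWCORE_RO_PREFIX bytes from the source and clear
 * the rest, so a field that is not initialized explicitly afterwards
 * reads as zero instead of inheriting stale state from the boot cpu.
 */
static void init_new_lowcore(struct demo_lowcore *dst,
                             const struct demo_lowcore *src)
{
        memcpy(dst, src, LOWCORE_RO_PREFIX);
        memset(dst->data + LOWCORE_RO_PREFIX, 0,
               sizeof(dst->data) - LOWCORE_RO_PREFIX);
}

int main(void)
{
        static struct demo_lowcore boot, fresh;

        boot.data[0] = 0x42;    /* pretend read-only prefix content */
        boot.data[1024] = 0x99; /* pretend stale per-cpu state */
        init_new_lowcore(&fresh, &boot);
        /* fresh.data[0] == 0x42, fresh.data[1024] == 0 */
        return 0;
}
```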
```diff
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 85060659fb12..818bd09c0260 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -626,13 +626,17 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 	if (!lowcore)
 		return -ENOMEM;
 	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-	if (!async_stack)
-		goto out_async_stack;
 	panic_stack = __get_free_page(GFP_KERNEL);
-	if (!panic_stack)
-		goto out_panic_stack;
-
-	*lowcore = S390_lowcore;
+	if (!panic_stack || !async_stack)
+		goto out;
+	/*
+	 * Only need to copy the first 512 bytes from address 0. But since
+	 * the compiler emits a warning if src == NULL for memcpy use copy_page
+	 * instead. Copies more than needed but this code is not performance
+	 * critical.
+	 */
+	copy_page(lowcore, &S390_lowcore);
+	memset((void *)lowcore + 512, 0, sizeof(*lowcore) - 512);
 	lowcore->async_stack = async_stack + ASYNC_SIZE;
 	lowcore->panic_stack = panic_stack + PAGE_SIZE;
 
@@ -653,9 +657,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 out_save_area:
 	free_page(panic_stack);
 #endif
-out_panic_stack:
+out:
 	free_pages(async_stack, ASYNC_ORDER);
-out_async_stack:
 	free_pages((unsigned long) lowcore, lc_order);
 	return -ENOMEM;
 }
@@ -719,8 +722,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
-	cpu_lowcore->softirq_pending = 0;
-	cpu_lowcore->ext_call_fast = 0;
+	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
+	cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
 	eieio();
 
 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -797,23 +800,43 @@ void cpu_die(void)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+#ifndef CONFIG_64BIT
+	unsigned long save_area = 0;
+#endif
+	unsigned long async_stack, panic_stack;
+	struct _lowcore *lowcore;
 	unsigned int cpu;
+	int lc_order;
 
 	smp_detect_cpus();
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
 	print_cpu_info(&S390_lowcore.cpu_data);
-	smp_alloc_lowcore(smp_processor_id());
 
+	/* Reallocate current lowcore, but keep its contents. */
+	lc_order = sizeof(long) == 8 ? 1 : 0;
+	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+	panic_stack = __get_free_page(GFP_KERNEL);
+	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
-		ctl_set_bit(14, 29); /* enable extended save area */
+		save_area = get_zeroed_page(GFP_KERNEL);
 #endif
-	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
-
+	local_irq_disable();
+	local_mcck_disable();
+	lowcore_ptr[smp_processor_id()] = lowcore;
+	*lowcore = S390_lowcore;
+	lowcore->panic_stack = panic_stack + PAGE_SIZE;
+	lowcore->async_stack = async_stack + ASYNC_SIZE;
+#ifndef CONFIG_64BIT
+	if (MACHINE_HAS_IEEE)
+		lowcore->extended_save_area_addr = (u32) save_area;
+#endif
+	set_prefix((u32)(unsigned long) lowcore);
+	local_mcck_enable();
+	local_irq_enable();
 	for_each_possible_cpu(cpu)
 		if (cpu != smp_processor_id())
 			smp_create_idle(cpu);
```
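
The last hunk above is the part the commit message calls atomic: the boot cpu's lowcore is swapped with both external interrupts and machine checks disabled, so nothing can be delivered through the old lowcore while the prefix register is switched. Condensed into a single hypothetical helper (`replace_boot_lowcore()` does not exist in the patch; the statements inside it are taken from the hunk above, with allocation, error handling and the 31-bit extended save area left out), the sequence looks like this:

```c
/*
 * Not standalone, buildable code; a condensed restatement of the
 * sequence added to smp_prepare_cpus() in the hunk above.
 */
static void replace_boot_lowcore(struct _lowcore *lowcore,
                                 unsigned long async_stack,
                                 unsigned long panic_stack)
{
        local_irq_disable();
        local_mcck_disable();           /* no machine checks during the switch */
        lowcore_ptr[smp_processor_id()] = lowcore;
        *lowcore = S390_lowcore;        /* keep the current contents */
        lowcore->panic_stack = panic_stack + PAGE_SIZE;
        lowcore->async_stack = async_stack + ASYNC_SIZE;
        set_prefix((u32)(unsigned long) lowcore);
        local_mcck_enable();
        local_irq_enable();
}
```

Ordering matters here: the new lowcore is fully populated before set_prefix() points the hardware at it, and only then are machine checks and interrupts re-enabled.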