| author    | Heiko Carstens <heiko.carstens@de.ibm.com>                 | 2008-01-26 08:11:14 -0500 |
|-----------|------------------------------------------------------------|---------------------------|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com>                | 2008-01-26 08:11:22 -0500 |
| commit    | 1cb6bb4bbdfd7b6bbdd148c4a34c02066339806d (patch)           |                           |
| tree      | 0beb7b2eb75fbe3fdce84ecdf5ce8c2fc8ebc179 /arch/s390/kernel |                           |
| parent    | c11ca97ee9d2ed593ab7b5523def7787b46f398f (diff)            |                           |
[S390] Allocate and free cpu lowcores and stacks when needed/possible.
There is no need to preallocate the per-cpu lowcores and stacks; they are now allocated when a cpu comes online and freed again when it goes offline. Savings are 28-32k per offline cpu.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
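For readers unfamiliar with the goto-based unwind idiom used by the new smp_alloc_lowcore() below, here is a minimal, self-contained sketch of the same allocate-then-roll-back pattern. All names (cpu_resources, alloc_cpu_resources, the buffer sizes) are illustrative assumptions, not kernel code; only the shape of the error handling mirrors the patch.

```c
/*
 * Sketch of the goto-unwind allocation pattern: allocate several
 * resources in order, and on any failure free exactly the ones
 * already obtained by falling through the labels in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

struct cpu_resources {
	void *lowcore;     /* stands in for the per-cpu lowcore pages */
	void *async_stack; /* stands in for the async interrupt stack */
	void *panic_stack; /* stands in for the panic stack page */
};

/* Allocate everything a cpu needs; on failure, undo what was done. */
static int alloc_cpu_resources(struct cpu_resources *r)
{
	r->lowcore = malloc(8192);
	if (!r->lowcore)
		return -1;
	r->async_stack = malloc(16384);
	if (!r->async_stack)
		goto out_lowcore;
	r->panic_stack = malloc(4096);
	if (!r->panic_stack)
		goto out_async_stack;
	return 0;

out_async_stack:
	free(r->async_stack);
out_lowcore:
	free(r->lowcore);
	return -1;
}

/* Counterpart, called once the cpu is gone again. */
static void free_cpu_resources(struct cpu_resources *r)
{
	free(r->panic_stack);
	free(r->async_stack);
	free(r->lowcore);
}

int main(void)
{
	struct cpu_resources r;

	if (alloc_cpu_resources(&r)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	puts("resources allocated, simulating cpu offline");
	free_cpu_resources(&r);
	return 0;
}
```

With this shape, __cpu_up() only has to check the return value of smp_alloc_lowcore(), and __cpu_die() calls smp_free_lowcore() once the target cpu has stopped, as the hunks below show.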
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/smp.c | 106
1 file changed, 72 insertions, 34 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d300a7fdf711..040406dbe9aa 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -589,8 +589,72 @@ static void __init smp_create_idle(unsigned int cpu)
         spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
 }
 
+static int __cpuinit smp_alloc_lowcore(int cpu)
+{
+        unsigned long async_stack, panic_stack;
+        struct _lowcore *lowcore;
+        int lc_order;
+
+        lc_order = sizeof(long) == 8 ? 1 : 0;
+        lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+        if (!lowcore)
+                return -ENOMEM;
+        async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+        if (!async_stack)
+                goto out_async_stack;
+        panic_stack = __get_free_page(GFP_KERNEL);
+        if (!panic_stack)
+                goto out_panic_stack;
+
+        *lowcore = S390_lowcore;
+        lowcore->async_stack = async_stack + ASYNC_SIZE;
+        lowcore->panic_stack = panic_stack + PAGE_SIZE;
+
+#ifndef CONFIG_64BIT
+        if (MACHINE_HAS_IEEE) {
+                unsigned long save_area;
+
+                save_area = get_zeroed_page(GFP_KERNEL);
+                if (!save_area)
+                        goto out_save_area;
+                lowcore->extended_save_area_addr = (u32) save_area;
+        }
+#endif
+        lowcore_ptr[cpu] = lowcore;
+        return 0;
+
+#ifndef CONFIG_64BIT
+out_save_area:
+        free_page(panic_stack);
+#endif
+out_panic_stack:
+        free_pages(async_stack, ASYNC_ORDER);
+out_async_stack:
+        free_pages((unsigned long) lowcore, lc_order);
+        return -ENOMEM;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_free_lowcore(int cpu)
+{
+        struct _lowcore *lowcore;
+        int lc_order;
+
+        lc_order = sizeof(long) == 8 ? 1 : 0;
+        lowcore = lowcore_ptr[cpu];
+#ifndef CONFIG_64BIT
+        if (MACHINE_HAS_IEEE)
+                free_page((unsigned long) lowcore->extended_save_area_addr);
+#endif
+        free_page(lowcore->panic_stack - PAGE_SIZE);
+        free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
+        free_pages((unsigned long) lowcore, lc_order);
+        lowcore_ptr[cpu] = NULL;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /* Upping and downing of CPUs */
-int __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
 {
         struct task_struct *idle;
         struct _lowcore *cpu_lowcore;
@@ -599,6 +663,8 @@ int __cpu_up(unsigned int cpu)
 
         if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
                 return -EIO;
+        if (smp_alloc_lowcore(cpu))
+                return -ENOMEM;
 
         ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
                                    cpu, sigp_set_prefix);
@@ -613,6 +679,7 @@ int __cpu_up(unsigned int cpu)
         cpu_lowcore = lowcore_ptr[cpu];
         cpu_lowcore->kernel_stack = (unsigned long)
                 task_stack_page(idle) + THREAD_SIZE;
+        cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
         sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
                                      - sizeof(struct pt_regs)
                                      - sizeof(struct stack_frame));
@@ -626,6 +693,8 @@ int __cpu_up(unsigned int cpu)
         cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
         cpu_lowcore->current_task = (unsigned long) idle;
         cpu_lowcore->cpu_data.cpu_nr = cpu;
+        cpu_lowcore->softirq_pending = 0;
+        cpu_lowcore->ext_call_fast = 0;
         eieio();
 
         while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -686,6 +755,7 @@ void __cpu_die(unsigned int cpu)
         /* Wait until target cpu is down */
         while (!smp_cpu_not_running(cpu))
                 cpu_relax();
+        smp_free_lowcore(cpu);
         printk(KERN_INFO "Processor %d spun down\n", cpu);
 }
 
@@ -699,15 +769,9 @@ void cpu_die(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-/*
- * Cycle through the processors and setup structures.
- */
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-        unsigned long stack;
         unsigned int cpu;
-        int i;
 
         smp_detect_cpus();
 
@@ -715,35 +779,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                 panic("Couldn't request external interrupt 0x1201");
         memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
-        /*
-         * Initialize prefix pages and stacks for all possible cpus
-         */
         print_cpu_info(&S390_lowcore.cpu_data);
+        smp_alloc_lowcore(smp_processor_id());
 
-        for_each_possible_cpu(i) {
-                lowcore_ptr[i] = (struct _lowcore *)
-                        __get_free_pages(GFP_KERNEL | GFP_DMA,
-                                         sizeof(void*) == 8 ? 1 : 0);
-                stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-                if (!lowcore_ptr[i] || !stack)
-                        panic("smp_boot_cpus failed to allocate memory\n");
-
-                *(lowcore_ptr[i]) = S390_lowcore;
-                lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
-                stack = __get_free_pages(GFP_KERNEL, 0);
-                if (!stack)
-                        panic("smp_boot_cpus failed to allocate memory\n");
-                lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
-#ifndef CONFIG_64BIT
-                if (MACHINE_HAS_IEEE) {
-                        lowcore_ptr[i]->extended_save_area_addr =
-                                (__u32) __get_free_pages(GFP_KERNEL, 0);
-                        if (!lowcore_ptr[i]->extended_save_area_addr)
-                                panic("smp_boot_cpus failed to "
-                                      "allocate memory\n");
-                }
-#endif
-        }
 #ifndef CONFIG_64BIT
         if (MACHINE_HAS_IEEE)
                 ctl_set_bit(14, 29); /* enable extended save area */