author     Heiko Carstens <heiko.carstens@de.ibm.com>              2011-01-05 06:48:08 -0500
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2011-01-05 06:47:30 -0500
commit     f230886b0b0f0ce604395481bea05f3c0ad8fc9e (patch)
tree       939fd68ab2ed93420dcb0b439245b553763c2284 /arch/s390
parent     09a8e7adcf960bd6a7204f3f3b377a89ce22efbf (diff)
[S390] smp: delay idle task creation
Delay idle task creation until a cpu is set online instead of
creating idle tasks for all possible cpus at system startup.
For a single-cpu system this should save more than 1 MB.
On my debug system with lots of debug stuff enabled this saves 2 MB.
Same as on x86.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
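
The mechanism mirrors the x86 one named in the message: __cpu_up() queues a
work item and blocks on a completion, so fork_idle() runs on a workqueue
worker instead of in the context of whatever task triggered the CPU online.
On the savings, a rough check (assuming, for illustration, 64 possible CPUs
and a 16 KB kernel stack per idle task; neither figure is from the commit):
63 never-used idle tasks cost about 63 x 16 KB, roughly 1 MB, consistent with
the quoted number. Below is a minimal userspace C sketch of the queue-and-wait
handoff, with pthreads standing in for the kernel's workqueue and completion
primitives; all names are illustrative, not from the patch.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the patch's struct create_idle: a request plus a
 * "completion" the requester can block on. */
struct create_idle {
	pthread_mutex_t lock;
	pthread_cond_t done;   /* plays the role of struct completion */
	int finished;
	int cpu;
	long idle;             /* result slot, like c_idle->idle */
};

/* Runs on the worker thread, like smp_fork_idle() on a workqueue. */
static void *fork_idle_worker(void *arg)
{
	struct create_idle *c = arg;

	pthread_mutex_lock(&c->lock);
	c->idle = 1000 + c->cpu;        /* pretend result of fork_idle() */
	c->finished = 1;
	pthread_cond_signal(&c->done);  /* complete(&c_idle->done) */
	pthread_mutex_unlock(&c->lock);
	return NULL;
}

int main(void)
{
	struct create_idle c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
		.cpu = 2,
	};
	pthread_t worker;

	/* schedule_work(&c_idle.work) ... */
	pthread_create(&worker, NULL, fork_idle_worker, &c);

	/* ... wait_for_completion(&c_idle.done) */
	pthread_mutex_lock(&c.lock);
	while (!c.finished)
		pthread_cond_wait(&c.done, &c.lock);
	pthread_mutex_unlock(&c.lock);

	printf("idle task for cpu %d: %ld\n", c.cpu, c.idle);
	pthread_join(worker, NULL);
	return 0;
}

Compile with -pthread. The kernel version differs in that the work runs on an
existing workqueue worker rather than a thread created per request, but the
blocking handoff is the same.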
Diffstat (limited to 'arch/s390')
 arch/s390/kernel/smp.c | 41 ++++++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a9702df22f3a..8e84b5af49ba 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -23,6 +23,7 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -477,18 +478,20 @@ int __cpuinit start_secondary(void *cpuvoid)
 	return 0;
 }
 
-static void __init smp_create_idle(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit smp_fork_idle(struct work_struct *work)
 {
-	struct task_struct *p;
+	struct create_idle *c_idle;
 
-	/*
-	 * don't care about the psw and regs settings since we'll never
-	 * reschedule the forked task.
-	 */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-	current_set[cpu] = p;
+	c_idle = container_of(work, struct create_idle, work);
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
 }
 
 static int __cpuinit smp_alloc_lowcore(int cpu)
@@ -552,6 +555,7 @@ static void smp_free_lowcore(int cpu)
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct _lowcore *cpu_lowcore;
+	struct create_idle c_idle;
 	struct task_struct *idle;
 	struct stack_frame *sf;
 	u32 lowcore;
@@ -559,6 +563,18 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 		return -EIO;
+	idle = current_set[cpu];
+	if (!idle) {
+		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
+		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
+		c_idle.cpu = cpu;
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		if (IS_ERR(c_idle.idle))
+			return PTR_ERR(c_idle.idle);
+		idle = c_idle.idle;
+		current_set[cpu] = c_idle.idle;
+	}
 	if (smp_alloc_lowcore(cpu))
 		return -ENOMEM;
 	do {
@@ -573,7 +589,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 
-	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
 		task_stack_page(idle) + THREAD_SIZE;
@@ -685,7 +700,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	unsigned long async_stack, panic_stack;
 	struct _lowcore *lowcore;
-	unsigned int cpu;
 
 	smp_detect_cpus();
 
@@ -720,9 +734,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
 		BUG();
 #endif
-	for_each_possible_cpu(cpu)
-		if (cpu != smp_processor_id())
-			smp_create_idle(cpu);
 }
 
 void __init smp_prepare_boot_cpu(void)
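
Note the behavioral change in the error path: the old smp_create_idle() would
panic() when fork_idle() failed at boot, whereas the new code in __cpu_up()
returns the error to the caller of the online operation. fork_idle() reports
failure through the kernel's error-pointer convention from <linux/err.h>,
encoding a small negative errno in the pointer value itself, which is what the
IS_ERR()/PTR_ERR() pair in the new hunk decodes. A minimal userspace rendition
of that convention (fake_fork_idle() is a hypothetical stand-in, not kernel
code):

#include <errno.h>
#include <stdio.h>

/* Userspace rendition of the kernel's <linux/err.h> helpers: a pointer
 * whose value falls in the top 4095 addresses encodes a negative errno. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for fork_idle(): fails for odd cpu numbers. */
static void *fake_fork_idle(int cpu)
{
	static int task;

	if (cpu & 1)
		return ERR_PTR(-EAGAIN);
	return &task;
}

int main(void)
{
	for (int cpu = 0; cpu < 3; cpu++) {
		void *idle = fake_fork_idle(cpu);

		if (IS_ERR(idle))  /* error path, like the new __cpu_up() */
			printf("cpu %d: error %ld\n", cpu, PTR_ERR(idle));
		else
			printf("cpu %d: idle task at %p\n", cpu, idle);
	}
	return 0;
}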