diff options
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 44 |
1 files changed, 38 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3dc716f6d8ad..31e92aee6242 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
 	struct rq *rq = this_rq();
-	struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-	if (!list)
-		return;
 
 	raw_spin_lock(&rq->lock);
 
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
 	raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-	sched_ttwu_pending();
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	/*
+	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+	 * traditionally all their work was done from the interrupt return
+	 * path. Now that we actually do some work, we need to make sure
+	 * we do call them.
+	 *
+	 * Some archs already do call them, luckily irq_enter/exit nest
+	 * properly.
+	 *
+	 * Arguably we should visit all archs and update all handlers,
+	 * however a fair share of IPIs are still resched only so this would
+	 * somewhat pessimize the simple resched case.
+	 */
+	irq_enter();
+	sched_ttwu_do_pending(list);
+	irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)