path: root/kernel/sched
author		Thomas Gleixner <tglx@linutronix.de>	2016-03-10 06:54:16 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-05-06 08:58:25 -0400
commit		e9cd8fa4fcfd67c95db9b87c0fff88fa23cb00e5
tree		5e4250398f2787d9fcaf4752622f5bbb497d9940 /kernel/sched
parent		94baf7a5d882cde0b4d591f4ab89cc32ee39ac6a
sched/migration: Move calc_load_migrate() into CPU_DYING
It really does not matter when we fold the load for the outgoing cpu. It's almost dead anyway, so there is no harm if we fail to fold the few microseconds which are required for going fully away.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160310120025.328739226@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
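To make the timing argument concrete: in the notifier-based hotplug scheme of this era, CPU_DYING callbacks run on the outgoing CPU itself (interrupts disabled, under stop-machine), while CPU_DEAD callbacks run later on a surviving CPU. Below is a pared-down, hypothetical sketch of the dispatch shape this patch touches; the real migration_call() in kernel/sched/core.c handles many more actions, and the empty case bodies here stand in for the real work:

#include <linux/cpu.h>
#include <linux/notifier.h>

static int migration_call(struct notifier_block *nfb,
			  unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		/* Runs on the CPU that is going down, IRQs off.
		 * After this patch the load sample is folded here,
		 * a few microseconds earlier than before. */
		break;
	case CPU_DEAD:
		/* Runs afterwards on a CPU that stays online;
		 * the old home of calc_load_migrate(). */
		break;
	}
	return NOTIFY_OK;
}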
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c | 3 ---
1 file changed, 0 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bafc308e6d45..688e8a83208c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5447,9 +5447,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_tasks(rq);
 		BUG_ON(rq->nr_running != 1);	/* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		break;
-
-	case CPU_DEAD:
 		calc_load_migrate(rq);
 		break;
 #endif
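Since the hunk is deletion-only, the merged result has to be pieced together mentally; reconstructed from the context lines above, the tail of the CPU_DYING case reads roughly as follows (the enclosing switch and the #ifdef, presumably CONFIG_HOTPLUG_CPU, are elided):

	case CPU_DYING:
		/* ... earlier CPU_DYING work elided ... */
		migrate_tasks(rq);
		BUG_ON(rq->nr_running != 1);	/* the migration thread */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		/* Fold the outgoing CPU's load contribution right away
		 * instead of waiting for CPU_DEAD. */
		calc_load_migrate(rq);
		break;

Note that calc_load_migrate() is still called after rq->lock is dropped, just as it was in the old CPU_DEAD path, so the locking context of the call does not change.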