 kernel/sched/core.c | 31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fbf1fd098dc6..207a81c769d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5304,27 +5304,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-	rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-	rq->calc_load_active = 0;
+	long delta = calc_load_fold_active(rq);
+	if (delta)
+		atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
@@ -5618,8 +5608,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-		migrate_nr_uninterruptible(rq);
-		calc_global_load_remove(rq);
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
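
For context, the new helper leans entirely on calc_load_fold_active(), whose body is not part of this diff. The standalone userspace model below is a sketch of how that folding keeps the global load-average sum consistent across CPU-down; the struct rq fields shown and the plain long standing in for the calc_load_tasks atomic are simplifications assumed for illustration, not the in-tree definitions.

/*
 * Userspace model of the fold logic calc_load_migrate() relies on.
 * Sketch only: struct rq here carries just the fields relevant to the
 * load-average sample, and atomic_long_add() is replaced by a plain
 * global, so this is not the in-tree implementation.
 */
#include <stdio.h>

struct rq {
	long nr_running;		/* runnable tasks on this CPU          */
	long nr_uninterruptible;	/* D-state tasks accounted on this CPU */
	long calc_load_active;		/* contribution folded at last sample  */
};

static long calc_load_tasks;		/* models the global atomic counter */

/* Fold the change in this rq's active count since the last sample. */
static long calc_load_fold_active(struct rq *rq)
{
	long nr_active = rq->nr_running + rq->nr_uninterruptible;
	long delta = 0;

	if (nr_active != rq->calc_load_active) {
		delta = nr_active - rq->calc_load_active;
		rq->calc_load_active = nr_active;
	}
	return delta;
}

/* Mirrors the new helper in the diff: fold once, add the delta globally. */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq);

	if (delta)
		calc_load_tasks += delta;
}

int main(void)
{
	/*
	 * A CPU going down after migrate_tasks(): nothing is runnable any
	 * more, but a previously folded contribution of 3 tasks is still
	 * recorded in calc_load_active and in the global counter.
	 */
	struct rq dead_cpu = { .nr_running = 0, .nr_uninterruptible = 0,
			       .calc_load_active = 3 };

	calc_load_tasks = 3;
	calc_load_migrate(&dead_cpu);

	/* The stale contribution is folded out; the global sum stays consistent. */
	printf("calc_load_tasks = %ld\n", calc_load_tasks);	/* prints 0 */
	return 0;
}

Run as an ordinary C program this prints calc_load_tasks = 0: the dead CPU's leftover contribution (including any nonzero ->nr_uninterruptible) is folded into the global counter in a single step, which is why the separate migrate_nr_uninterruptible()/calc_global_load_remove() pair can be dropped.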