author | Peter Zijlstra <peterz@infradead.org> | 2012-08-20 05:26:57 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-09-04 08:30:18 -0400 |
commit | f319da0c6894fcf55e21320e40506418a2aad629 | |
tree | d1ae1cb6eb190e9feb6b9122bd62091cf6095809 /kernel | |
parent | 5b716ac728bcc01b1f2a7ed6e437196602237c27 | |
sched: Fix load avg vs cpu-hotplug
Rakib and Paul reported two different issues related to the same few
lines of code.
Rakib's issue is that the nr_uninterruptible migration code is wrong in
that he sees artifacts due to this (Rakib, please do expand in more
detail).
Paul's issue is that this code, as it stands, relies on us using
stop_machine() for unplug; we would all like to remove this assumption
so that eventually we can remove this stop_machine() usage altogether.
The only reason we'd have to migrate nr_uninterruptible is so that we
could use for_each_online_cpu() loops instead of
for_each_possible_cpu() loops; however, since nr_uninterruptible() is
the only such loop and it's using the possible mask, let's not bother
at all.
The problem Rakib sees is (probably) caused by the fact that by
migrating nr_uninterruptible we screw up rq->calc_load_active for both
rqs involved.
So don't bother with fancy migration schemes (meaning we now have to
keep using for_each_possible_cpu()) and instead fold any nr_active delta
after we migrate all tasks away to make sure we don't have any skewed
nr_active accounting.
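The folding goes through the existing calc_load_fold_active() helper,
which in the v3.6-era kernel/sched/core.c looks roughly like this (a
from-memory sketch, not part of this patch):

static long calc_load_fold_active(struct rq *this_rq)
{
        long nr_active, delta = 0;

        /* Runnable plus (loosely per-cpu tracked) uninterruptible tasks. */
        nr_active = this_rq->nr_running;
        nr_active += (long) this_rq->nr_uninterruptible;

        /* Fold only the change since the last fold. */
        if (nr_active != this_rq->calc_load_active) {
                delta = nr_active - this_rq->calc_load_active;
                this_rq->calc_load_active = nr_active;
        }

        return delta;
}

By the time the new calc_load_migrate() calls it, migrate_tasks() has
already moved every movable task off the dying rq, so the rq's
nr_active count is stable and the returned delta is exactly what still
needs to be added to the global calc_load_tasks.
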
Reported-by: Rakib Mullick <rakib.mullick@gmail.com>
Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1345454817.23018.27.camel@twins
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c | 31 |
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fbf1fd098dc6..207a81c769d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5304,27 +5304,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-        struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-        rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-        rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-        atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-        rq->calc_load_active = 0;
+        long delta = calc_load_fold_active(rq);
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
@@ -5618,8 +5608,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 BUG_ON(rq->nr_running != 1); /* the migration thread */
                 raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-                migrate_nr_uninterruptible(rq);
-                calc_global_load_remove(rq);
+                calc_load_migrate(rq);
                 break;
 #endif
         }