author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2008-03-14 15:55:51 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-03-14 22:02:49 -0400
commit     3fe69747dab906cd6a8523230276a9820d6a514f
tree       bab43c856bd5c23a43fe641be3d1a0e85d3dd604 /kernel
parent     0e1f34833bd9170ccc93ab759e48e695917fa48f
sched: min_vruntime fix
Current min_vruntime tracking is incorrect and will cause serious
problems when we don't run the leftmost task for some reason.
min_vruntime serves two purposes: 1) it is used to determine the forward
direction when the u64 vruntime wraps, and 2) it tracks the leftmost
vruntime in the tree, which is used to position newly enqueued tasks.
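Purpose 1) relies on comparing vruntimes through a signed delta, so that
"ahead" remains well-defined even after the u64 counter wraps. Below is a
minimal, self-contained sketch in the style of the max_vruntime() /
min_vruntime() helpers that already live in kernel/sched_fair.c (their
exact kernel form is assumed from memory, so treat this as an
approximation, not a quote of the file):

typedef unsigned long long u64;	/* stand-ins for the kernel's types */
typedef long long s64;

/* Monotonic update: only ever moves min_vruntime forward. */
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);

	if (delta > 0)		/* vruntime is "ahead", even across a u64 wrap */
		min_vruntime = vruntime;

	return min_vruntime;
}

/* Counterpart used to take the true minimum of two vruntimes. */
static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);

	if (delta < 0)		/* vruntime is "behind" */
		min_vruntime = vruntime;

	return min_vruntime;
}

The signed cast is what keeps the ordering sane across wraparound; a
plain u64 comparison would invert as soon as one value wraps and the
other has not.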
The current logic advances min_vruntime whenever the current task's
vruntime advances. Because the current task may pass the leftmost task
that is still waiting, we fail the second goal: new tasks are placed too
far ahead, which penalizes their runtime.
Fix this by making min_vruntime track the minimum vruntime of the
waiting tasks, updating it on enqueue/dequeue, and by comparing against
the current task's vruntime to obtain the absolute minimum when placing
new tasks (a distilled sketch follows below).
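Distilled, the resulting scheme looks like the hypothetical helper below
(effective_min_vruntime() does not exist in the tree; first_fair(),
__pick_next_entity() and the min_vruntime() helper are names taken from
the diff that follows):

/*
 * Sketch only: cfs_rq->min_vruntime now monotonically tracks the
 * leftmost queued vruntime, and placement additionally checks the
 * actual leftmost waiting task to obtain the absolute minimum.
 */
static u64 effective_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (first_fair(cfs_rq))	/* a task is waiting in the tree */
		vruntime = min_vruntime(vruntime,
				__pick_next_entity(cfs_rq)->vruntime);

	return vruntime;
}

The extra min() matters because the monotonic min_vruntime can
legitimately be ahead of a newly woken leftmost task; without it, new
tasks would again be placed too far to the right.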
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c | 46 ++++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e2a530515619..9d003c9d2a48 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -175,8 +175,15 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Maintain a cache of leftmost tree entries (it is frequently
 	 * used):
 	 */
-	if (leftmost)
+	if (leftmost) {
 		cfs_rq->rb_leftmost = &se->run_node;
+		/*
+		 * maintain cfs_rq->min_vruntime to be a monotonic increasing
+		 * value tracking the leftmost vruntime in the tree.
+		 */
+		cfs_rq->min_vruntime =
+			max_vruntime(cfs_rq->min_vruntime, se->vruntime);
+	}
 
 	rb_link_node(&se->run_node, parent, link);
 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -184,8 +191,21 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	if (cfs_rq->rb_leftmost == &se->run_node)
-		cfs_rq->rb_leftmost = rb_next(&se->run_node);
+	if (cfs_rq->rb_leftmost == &se->run_node) {
+		struct rb_node *next_node;
+		struct sched_entity *next;
+
+		next_node = rb_next(&se->run_node);
+		cfs_rq->rb_leftmost = next_node;
+
+		if (next_node) {
+			next = rb_entry(next_node,
+					struct sched_entity, run_node);
+			cfs_rq->min_vruntime =
+				max_vruntime(cfs_rq->min_vruntime,
+						next->vruntime);
+		}
+	}
 
 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
@@ -303,7 +323,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	      unsigned long delta_exec)
 {
 	unsigned long delta_exec_weighted;
-	u64 vruntime;
 
 	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
@@ -315,19 +334,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 				&curr->load);
 	}
 	curr->vruntime += delta_exec_weighted;
-
-	/*
-	 * maintain cfs_rq->min_vruntime to be a monotonic increasing
-	 * value tracking the leftmost vruntime in the tree.
-	 */
-	if (first_fair(cfs_rq)) {
-		vruntime = min_vruntime(curr->vruntime,
-				__pick_next_entity(cfs_rq)->vruntime);
-	} else
-		vruntime = curr->vruntime;
-
-	cfs_rq->min_vruntime =
-		max_vruntime(cfs_rq->min_vruntime, vruntime);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -493,7 +499,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
 	u64 vruntime;
 
-	vruntime = cfs_rq->min_vruntime;
+	if (first_fair(cfs_rq)) {
+		vruntime = min_vruntime(cfs_rq->min_vruntime,
+				__pick_next_entity(cfs_rq)->vruntime);
+	} else
+		vruntime = cfs_rq->min_vruntime;
 
 	if (sched_feat(TREE_AVG)) {
 		struct sched_entity *last = __pick_last_entity(cfs_rq);