author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-10-24 05:06:13 -0400
committer Ingo Molnar <mingo@elte.hu>                2008-10-24 06:51:00 -0400
commit    1af5f730fc1bf7c62ec9fb2d307206e18bf40a69
tree      6b9a1e5aa380abaaca68d3ba427ecec1a3b51a88 /kernel/sched_fair.c
parent    01c8c57d668d94f1036d9ab11a22aa24ca16a35d
sched: more accurate min_vruntime accounting
Mike noticed that the current min_vruntime tracking can go wrong and skip the current task. If the only remaining task in the tree is a nice 19 task with a huge vruntime, new tasks will also be inserted too far to the right, causing interactivity issues.

min_vruntime can only change due to the leftmost entry disappearing (dequeue_entity()), or by the leftmost entry being incremented past the next entry, which elects a new leftmost (__update_curr()).

Because the current entry is not part of the actual tree, we have to compare the leftmost tree entry with the current entry, and take the leftmost of these two.

So create an update_min_vruntime() function that computes the leftmost vruntime in the system (either the tree's leftmost or current) and increases cfs_rq->min_vruntime if the computed value is larger than the previously found min_vruntime, and call it from the two sites identified above that can change min_vruntime.

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  49
1 file changed, 25 insertions(+), 24 deletions(-)
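For reference, the new update_min_vruntime() in the diff below leans on the wrap-safe vruntime comparison helpers that already live in kernel/sched_fair.c. A sketch of them, as they read in kernels of this era (reproduced here for context, not part of this patch):

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        /* signed delta keeps the comparison correct across u64 wrap */
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta > 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

The signed subtraction is what keeps the max_vruntime() clamp meaningful even when the unsigned vruntime values wrap around.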
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 42d211f08c94..b27ccc52f6aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -223,6 +223,27 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return se->vruntime - cfs_rq->min_vruntime;
 }
 
+static void update_min_vruntime(struct cfs_rq *cfs_rq)
+{
+	u64 vruntime = cfs_rq->min_vruntime;
+
+	if (cfs_rq->curr)
+		vruntime = cfs_rq->curr->vruntime;
+
+	if (cfs_rq->rb_leftmost) {
+		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
+						   struct sched_entity,
+						   run_node);
+
+		if (vruntime == cfs_rq->min_vruntime)
+			vruntime = se->vruntime;
+		else
+			vruntime = min_vruntime(vruntime, se->vruntime);
+	}
+
+	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+}
+
 /*
  * Enqueue an entity into the rb-tree:
  */
@@ -256,15 +277,8 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Maintain a cache of leftmost tree entries (it is frequently
 	 * used):
 	 */
-	if (leftmost) {
+	if (leftmost)
 		cfs_rq->rb_leftmost = &se->run_node;
-		/*
-		 * maintain cfs_rq->min_vruntime to be a monotonic increasing
-		 * value tracking the leftmost vruntime in the tree.
-		 */
-		cfs_rq->min_vruntime =
-			max_vruntime(cfs_rq->min_vruntime, se->vruntime);
-	}
 
 	rb_link_node(&se->run_node, parent, link);
 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -274,18 +288,9 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->rb_leftmost == &se->run_node) {
 		struct rb_node *next_node;
-		struct sched_entity *next;
 
 		next_node = rb_next(&se->run_node);
 		cfs_rq->rb_leftmost = next_node;
-
-		if (next_node) {
-			next = rb_entry(next_node,
-					struct sched_entity, run_node);
-			cfs_rq->min_vruntime =
-				max_vruntime(cfs_rq->min_vruntime,
-					     next->vruntime);
-		}
 	}
 
 	if (cfs_rq->next == se)
@@ -424,6 +429,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 	curr->vruntime += delta_exec_weighted;
+	update_min_vruntime(cfs_rq);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -613,13 +619,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 vruntime;
-
-	if (first_fair(cfs_rq)) {
-		vruntime = min_vruntime(cfs_rq->min_vruntime,
-				__pick_next_entity(cfs_rq)->vruntime);
-	} else
-		vruntime = cfs_rq->min_vruntime;
+	u64 vruntime = cfs_rq->min_vruntime;
 
 	/*
 	 * The 'current' period is already promised to the current tasks,
@@ -696,6 +696,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
+	update_min_vruntime(cfs_rq);
 }
 
 /*
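To see why the new scheme stays monotonic, here is a minimal standalone sketch (plain userspace C, not kernel code: struct rq and struct entity are simplified stand-ins for cfs_rq and sched_entity, and min_vruntime_of() stands in for the kernel's min_vruntime() helper) that mirrors the update_min_vruntime() logic above:

#include <stdint.h>
#include <stdio.h>

struct entity {
        uint64_t vruntime;
};

struct rq {
        uint64_t min_vruntime;
        struct entity *curr;     /* running entity; not kept in the tree */
        struct entity *leftmost; /* cached leftmost tree entity, may be NULL */
};

/* wrap-safe "max" via signed distance, mirroring the kernel helper */
static uint64_t max_vruntime(uint64_t min_vruntime, uint64_t vruntime)
{
        if ((int64_t)(vruntime - min_vruntime) > 0)
                min_vruntime = vruntime;
        return min_vruntime;
}

/* wrap-safe "min", likewise */
static uint64_t min_vruntime_of(uint64_t min_vruntime, uint64_t vruntime)
{
        if ((int64_t)(vruntime - min_vruntime) < 0)
                min_vruntime = vruntime;
        return min_vruntime;
}

static void update_min_vruntime(struct rq *rq)
{
        uint64_t vruntime = rq->min_vruntime;

        /* start from current, since it is not part of the tree */
        if (rq->curr)
                vruntime = rq->curr->vruntime;

        if (rq->leftmost) {
                if (vruntime == rq->min_vruntime)
                        vruntime = rq->leftmost->vruntime;
                else
                        vruntime = min_vruntime_of(vruntime,
                                                   rq->leftmost->vruntime);
        }

        /* monotonicity: min_vruntime never moves backwards */
        rq->min_vruntime = max_vruntime(rq->min_vruntime, vruntime);
}

int main(void)
{
        struct entity curr = { .vruntime = 100 };
        struct entity left = { .vruntime = 150 };
        struct rq rq = { .min_vruntime = 90, .curr = &curr, .leftmost = &left };

        update_min_vruntime(&rq);       /* min(curr, leftmost) = 100 */
        printf("min_vruntime = %llu\n", (unsigned long long)rq.min_vruntime);

        rq.curr = NULL;                 /* only the tree remains */
        update_min_vruntime(&rq);       /* leftmost (150) now drives it */
        printf("min_vruntime = %llu\n", (unsigned long long)rq.min_vruntime);

        return 0;
}

Both branches get exercised: while a current task runs, min_vruntime advances to min(current, leftmost); once only the tree remains, the leftmost entry drives it, and the max_vruntime() clamp guarantees the value never moves backwards.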