 kernel/sched_fair.c | 49 +++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 42d211f08c94..b27ccc52f6aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -223,6 +223,27 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return se->vruntime - cfs_rq->min_vruntime;
 }
 
+static void update_min_vruntime(struct cfs_rq *cfs_rq)
+{
+	u64 vruntime = cfs_rq->min_vruntime;
+
+	if (cfs_rq->curr)
+		vruntime = cfs_rq->curr->vruntime;
+
+	if (cfs_rq->rb_leftmost) {
+		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
+						   struct sched_entity,
+						   run_node);
+
+		if (vruntime == cfs_rq->min_vruntime)
+			vruntime = se->vruntime;
+		else
+			vruntime = min_vruntime(vruntime, se->vruntime);
+	}
+
+	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+}
+
 /*
  * Enqueue an entity into the rb-tree:
  */
@@ -256,15 +277,8 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Maintain a cache of leftmost tree entries (it is frequently
 	 * used):
 	 */
-	if (leftmost) {
+	if (leftmost)
 		cfs_rq->rb_leftmost = &se->run_node;
-		/*
-		 * maintain cfs_rq->min_vruntime to be a monotonic increasing
-		 * value tracking the leftmost vruntime in the tree.
-		 */
-		cfs_rq->min_vruntime =
-			max_vruntime(cfs_rq->min_vruntime, se->vruntime);
-	}
 
 	rb_link_node(&se->run_node, parent, link);
 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -274,18 +288,9 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->rb_leftmost == &se->run_node) {
 		struct rb_node *next_node;
-		struct sched_entity *next;
 
 		next_node = rb_next(&se->run_node);
 		cfs_rq->rb_leftmost = next_node;
-
-		if (next_node) {
-			next = rb_entry(next_node,
-					struct sched_entity, run_node);
-			cfs_rq->min_vruntime =
-				max_vruntime(cfs_rq->min_vruntime,
-					     next->vruntime);
-		}
 	}
 
 	if (cfs_rq->next == se)
@@ -424,6 +429,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 	curr->vruntime += delta_exec_weighted;
+	update_min_vruntime(cfs_rq);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -613,13 +619,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 vruntime;
-
-	if (first_fair(cfs_rq)) {
-		vruntime = min_vruntime(cfs_rq->min_vruntime,
-				__pick_next_entity(cfs_rq)->vruntime);
-	} else
-		vruntime = cfs_rq->min_vruntime;
+	u64 vruntime = cfs_rq->min_vruntime;
 
 	/*
 	 * The 'current' period is already promised to the current tasks,
@@ -696,6 +696,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
+	update_min_vruntime(cfs_rq);
 }
 
 /*
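
For reference, a minimal standalone sketch (not kernel code) of the clamp semantics that the new update_min_vruntime() helper above implements: min_vruntime follows min(curr->vruntime, leftmost->vruntime) but is only ever allowed to move forward. The toy_* names and the modelling of cfs_rq->curr and the cached rb_leftmost entity as optional vruntime values are simplifications made for illustration, not the scheduler's real data structures.

/*
 * Illustrative sketch only: simplified stand-ins for cfs_rq, with
 * "curr" and "leftmost" as optional (possibly NULL) vruntime values.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* signed-difference comparisons so the values may wrap, as in sched_fair.c */
static u64 max_vruntime(u64 a, u64 b)
{
	return ((int64_t)(a - b) > 0) ? a : b;
}

static u64 min_vruntime(u64 a, u64 b)
{
	return ((int64_t)(a - b) < 0) ? a : b;
}

struct toy_cfs_rq {
	u64 min_vruntime;
	const u64 *curr;	/* vruntime of the running entity, or NULL */
	const u64 *leftmost;	/* vruntime of the leftmost queued entity, or NULL */
};

static void toy_update_min_vruntime(struct toy_cfs_rq *rq)
{
	u64 vruntime = rq->min_vruntime;

	if (rq->curr)
		vruntime = *rq->curr;

	if (rq->leftmost) {
		if (vruntime == rq->min_vruntime)
			vruntime = *rq->leftmost;
		else
			vruntime = min_vruntime(vruntime, *rq->leftmost);
	}

	/* never let min_vruntime go backwards */
	rq->min_vruntime = max_vruntime(rq->min_vruntime, vruntime);
}

int main(void)
{
	u64 curr = 1300, leftmost = 1200;
	struct toy_cfs_rq rq = { .min_vruntime = 1000,
				 .curr = &curr, .leftmost = &leftmost };

	toy_update_min_vruntime(&rq);	/* advances to min(1300, 1200) = 1200 */
	printf("min_vruntime = %llu\n", (unsigned long long)rq.min_vruntime);

	leftmost = 900;			/* a lagging entity cannot pull it back */
	toy_update_min_vruntime(&rq);	/* stays at 1200 */
	printf("min_vruntime = %llu\n", (unsigned long long)rq.min_vruntime);
	return 0;
}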
