path: root/kernel/sched/fair.c
author    Paul Turner <pjt@google.com>    2012-10-04 07:18:31 -0400
committer Ingo Molnar <mingo@kernel.org>  2012-10-24 04:27:29 -0400
commit    f269ae0469fc882332bdfb5db15d3c1315fe2a10 (patch)
tree      af5e130745392fa9fdd32f59b10272e269192b95 /kernel/sched/fair.c
parent    48a1675323fa1b7844e479ad2a4469f4558c0f79 (diff)
sched: Update_cfs_shares at period edge
Now that our measurement intervals are small (~1ms) we can amortize the posting of update_shares() to be about each period overflow. This is a large cost saving for frequently switching tasks.

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141507.200772172@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
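
As a rough illustration of the amortization described above, here is a toy user-space sketch (not kernel code; PERIOD_NS, the helper names and the numbers are illustrative assumptions): the costly recomputation is posted only when the ~1ms averaging period rolls over, so many enqueue/dequeue events within one period share a single update.

/*
 * Toy user-space sketch of the amortization idea (not kernel code; the
 * names and constants are illustrative assumptions): the expensive work
 * runs once per period edge instead of once per enqueue/dequeue.
 */
#include <stdio.h>
#include <stdint.h>

#define PERIOD_NS 1000000ULL			/* assumed ~1ms averaging period */

static uint64_t last_period = (uint64_t)-1;	/* force an update on the first event */
static unsigned long expensive_updates;		/* how often the costly path ran */

/* stand-in for the expensive work (update_cfs_shares() in the patch) */
static void recompute_shares(void)
{
	expensive_updates++;
}

/* called on every enqueue/dequeue/tick with the current time in ns */
static void account_event(uint64_t now_ns)
{
	uint64_t period = now_ns / PERIOD_NS;

	if (period != last_period) {		/* period edge: post the update */
		recompute_shares();
		last_period = period;
	}
	/* cheap per-event accounting would go here */
}

int main(void)
{
	uint64_t t;

	/* 10,000 events spread over 10ms: only 10 expensive updates run */
	for (t = 0; t < 10000000ULL; t += 1000)
		account_event(t);

	printf("events: 10000, expensive updates: %lu\n", expensive_updates);
	return 0;
}
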
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dcc27d8ae6ba..002a7697f437 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1187,6 +1187,7 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 	}
 
 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
+	update_cfs_shares(cfs_rq);
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
@@ -1396,9 +1397,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
 	account_entity_enqueue(cfs_rq, se);
-	update_cfs_shares(cfs_rq);
+	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
@@ -1471,7 +1471,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
 
 	update_stats_dequeue(cfs_rq, se);
 	if (flags & DEQUEUE_SLEEP) {
@@ -1491,8 +1490,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
-	se->on_rq = 0;
 	account_entity_dequeue(cfs_rq, se);
+	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1506,7 +1505,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	return_cfs_rq_runtime(cfs_rq);
 
 	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq);
+	se->on_rq = 0;
 }
 
 /*
@@ -2518,8 +2517,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_cfs_shares(cfs_rq);
 		update_entity_load_avg(se, 1);
+		update_cfs_rq_blocked_load(cfs_rq, 0);
 	}
 
 	if (!se) {
@@ -2579,8 +2578,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_cfs_shares(cfs_rq);
 		update_entity_load_avg(se, 1);
+		update_cfs_rq_blocked_load(cfs_rq, 0);
 	}
 
 	if (!se) {
@@ -5639,8 +5638,11 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		se = tg->se[i];
 		/* Propagate contribution to hierarchy */
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		for_each_sched_entity(se)
+		for_each_sched_entity(se) {
 			update_cfs_shares(group_cfs_rq(se));
+			/* update contribution to parent */
+			update_entity_load_avg(se, 1);
+		}
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 