author     Paul Turner <pjt@google.com>   2010-12-15 22:10:17 -0500
committer  Ingo Molnar <mingo@elte.hu>    2010-12-19 10:36:22 -0500
commit     43365bd7ff37979d2afdccbe953299ed64a4649b
tree       b90aa8813d0f92cae88bc26604e48dea0ee9f87d /kernel/sched_fair.c
parent     ca680888d5d0d03862ec311a83c6a1c7a1e00a01
sched: Move periodic share updates to entity_tick()
Long-running entities that do not block (dequeue) require periodic updates to maintain accurate share values. (Note: group entities with several threads are quite likely to be non-blocking in many circumstances.)

By virtue of being long-running, however, we will see entity ticks (otherwise the required update occurs in dequeue/put and we are done). Thus we can move the detection (and associated work) for these updates into the periodic path.

This restores the 'atomicity' of update_curr() with respect to accounting.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101216031038.067028969@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  21
1 file changed, 17 insertions(+), 4 deletions(-)
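Before the raw diff, here is a minimal userspace sketch of the pattern this patch applies: the hot accounting path (__update_curr) only accumulates unaccounted runtime, while the threshold check and the share refresh move into the once-per-tick path. The model_* names and SHARES_WINDOW_NS are hypothetical stand-ins for the kernel's helpers and sysctl_sched_shares_window; the toy also clears the counter itself, bookkeeping the kernel keeps inside its update helpers.

#include <stdio.h>

/* Stand-in for sysctl_sched_shares_window (illustrative value, in ns). */
#define SHARES_WINDOW_NS 10000000ULL

struct cfs_rq_model {
        unsigned long long load_unacc_exec_time;
};

/* Hot path: runs on every accounting update; after the patch it only
 * accumulates, with no threshold branch or share-update work. */
static void model_update_curr(struct cfs_rq_model *cfs_rq,
                              unsigned long long delta_exec)
{
        cfs_rq->load_unacc_exec_time += delta_exec;
}

/* Periodic path: runs once per tick; performs the deferred detection
 * and the (relatively heavier) share refresh when the window elapses. */
static void model_update_entity_shares_tick(struct cfs_rq_model *cfs_rq)
{
        if (cfs_rq->load_unacc_exec_time > SHARES_WINDOW_NS) {
                printf("tick: refreshing shares (%llu ns unaccounted)\n",
                       cfs_rq->load_unacc_exec_time);
                /* Restart the window; the kernel does this inside its
                 * update helpers rather than here. */
                cfs_rq->load_unacc_exec_time = 0;
        }
}

int main(void)
{
        struct cfs_rq_model rq = { 0 };
        int i;

        /* Many cheap accounting updates between ticks... */
        for (i = 0; i < 8; i++)
                model_update_curr(&rq, 2000000ULL);

        /* ...and one periodic tick notices the elapsed window. */
        model_update_entity_shares_tick(&rq);
        return 0;
}

The payoff mirrors the commit message: the accounting function stays a pure accumulator (update_curr()'s 'atomicity' is restored), and the heavier update work runs at most once per tick for entities that never dequeue.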
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c88671718bc9..16ee398d8a4e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -564,10 +564,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
         cfs_rq->load_unacc_exec_time += delta_exec;
-        if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-                update_cfs_load(cfs_rq, 0);
-                update_cfs_shares(cfs_rq, 0);
-        }
 #endif
 }
 
@@ -809,6 +805,14 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 
         reweight_entity(cfs_rq_of(se), se, shares);
 }
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+        if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+                update_cfs_load(cfs_rq, 0);
+                update_cfs_shares(cfs_rq, 0);
+        }
+}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -817,6 +821,10 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 }
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1133,6 +1141,11 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         */
         update_curr(cfs_rq);
 
+        /*
+         * Update share accounting for long-running entities.
+         */
+        update_entity_shares_tick(cfs_rq);
+
 #ifdef CONFIG_SCHED_HRTICK
         /*
          * queued ticks are scheduled to match the slice, so don't bother