author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2007-10-15 11:00:07 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:07 -0400
commit	02e0431a3db554019b816936b597d618256b705d (patch)
tree	2d3d9c5d95e9a2ccc248d78a3ffe950be0e77b3b
parent	35a6ff5417bf94c9e19b6b55a9eb6eea14cc7be7 (diff)
sched: better min_vruntime tracking
Better min_vruntime tracking: update it every time 'curr' is updated,
not just when a task is enqueued into the tree. (A self-contained
sketch of the new max_vruntime() helper follows the diffstat below.)
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
 kernel/sched_fair.c | 40 +++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 33 insertions(+), 7 deletions(-)
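For illustration, the helper this patch introduces can be read on its
own; below is a minimal, self-contained sketch of it (the main()
harness is illustrative only, not part of the patch). The two
thresholds form a wraparound heuristic: when min_vruntime sits above
2^61, close to u64 overflow, while the candidate vruntime is below
2^50, the candidate is assumed to have wrapped past zero and is
treated as the logically larger value.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/*
 * Same comparison as the max_vruntime() added by this patch: prefer
 * the numerically larger vruntime, unless min_vruntime is about to
 * overflow while vruntime is tiny -- then vruntime has presumably
 * wrapped around and is logically ahead.
 */
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	if ((vruntime > min_vruntime) ||
	    (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
		min_vruntime = vruntime;

	return min_vruntime;
}

int main(void)
{
	/* Ordinary case: the larger value wins -> prints 200. */
	printf("%llu\n", (unsigned long long)max_vruntime(100, 200));

	/* Wraparound case: a tiny post-wrap value beats one near 2^64 -> prints 5. */
	printf("%llu\n", (unsigned long long)max_vruntime(~0ULL - 10, 5));

	return 0;
}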
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e3081fb65d63..ec445cadbb01 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -116,22 +116,28 @@ static inline struct task_struct *task_of(struct sched_entity *se)
  * Scheduling class tree data structure manipulation methods:
  */
 
+static inline u64
+max_vruntime(u64 min_vruntime, u64 vruntime)
+{
+	if ((vruntime > min_vruntime) ||
+	    (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
+		min_vruntime = vruntime;
+
+	return min_vruntime;
+}
+
 static inline void
 set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
 {
 	struct sched_entity *se;
 
 	cfs_rq->rb_leftmost = leftmost;
-	if (leftmost) {
+	if (leftmost)
 		se = rb_entry(leftmost, struct sched_entity, run_node);
-		if ((se->vruntime > cfs_rq->min_vruntime) ||
-		    (cfs_rq->min_vruntime > (1ULL << 61) &&
-		     se->vruntime < (1ULL << 50)))
-			cfs_rq->min_vruntime = se->vruntime;
-	}
 }
 
-s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static inline s64
+entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->fair_key - cfs_rq->min_vruntime;
 }
@@ -254,6 +260,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	      unsigned long delta_exec)
 {
 	unsigned long delta_exec_weighted;
+	u64 next_vruntime, min_vruntime;
 
 	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
@@ -265,6 +272,25 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 			&curr->load);
 	}
 	curr->vruntime += delta_exec_weighted;
+
+	/*
+	 * maintain cfs_rq->min_vruntime to be a monotonic increasing
+	 * value tracking the leftmost vruntime in the tree.
+	 */
+	if (first_fair(cfs_rq)) {
+		next_vruntime = __pick_next_entity(cfs_rq)->vruntime;
+
+		/* min_vruntime() := !max_vruntime() */
+		min_vruntime = max_vruntime(curr->vruntime, next_vruntime);
+		if (min_vruntime == next_vruntime)
+			min_vruntime = curr->vruntime;
+		else
+			min_vruntime = next_vruntime;
+	} else
+		min_vruntime = curr->vruntime;
+
+	cfs_rq->min_vruntime =
+		max_vruntime(cfs_rq->min_vruntime, min_vruntime);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
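Taken together, the tail this patch appends to __update_curr() picks
the smaller of curr->vruntime and the leftmost queued vruntime, then
ratchets cfs_rq->min_vruntime up to it. Below is a simplified
standalone model of that logic, under the assumption that plain
parameters (have_leftmost, leftmost_vruntime) stand in for the
kernel's first_fair() and __pick_next_entity()->vruntime plumbing:

#include <stdint.h>

typedef uint64_t u64;

/* Wraparound-aware comparison, as introduced by the patch above. */
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	if ((vruntime > min_vruntime) ||
	    (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
		min_vruntime = vruntime;

	return min_vruntime;
}

/*
 * Model of the new tail of __update_curr(): compute the minimum of
 * curr's vruntime and the leftmost queued vruntime by inverting
 * max_vruntime() ("min_vruntime() := !max_vruntime()"), then ratchet
 * the runqueue-wide minimum forward, never backward.
 */
static u64 track_min_vruntime(u64 rq_min_vruntime, u64 curr_vruntime,
			      int have_leftmost, u64 leftmost_vruntime)
{
	u64 min_vruntime = curr_vruntime;

	if (have_leftmost) {
		/* Whichever value max_vruntime() rejects is the minimum. */
		min_vruntime = max_vruntime(curr_vruntime, leftmost_vruntime);
		if (min_vruntime == leftmost_vruntime)
			min_vruntime = curr_vruntime;
		else
			min_vruntime = leftmost_vruntime;
	}

	/* Monotonic: the tracked minimum only ever moves forward. */
	return max_vruntime(rq_min_vruntime, min_vruntime);
}

Because the final assignment itself goes through max_vruntime(), a
momentarily smaller computed minimum can never move
cfs_rq->min_vruntime backwards, which is the "monotonic increasing"
property the in-code comment describes.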