author	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:06 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:06 -0400
commit	bbdba7c0e1161934ae881ad00e4db49830f5ef59 (patch)
tree	1c5c5e9c9c0c6d6cb72b843121e7a38f2768356a /kernel/sched_fair.c
parent	e22f5bbf86d8cce710d5c8ba5bf57832e73aab8c (diff)
sched: remove wait_runtime fields and features
remove wait_runtime-based fields and features, now that the CFS
math has been changed over to the vruntime metric.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
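
For context on the vruntime metric the message refers to: in CFS, an entity's virtual runtime advances by its executed CPU time scaled relative to its load weight, so heavier-weighted (higher-priority) tasks accumulate vruntime more slowly and the entity with the smallest vruntime runs next. The following standalone C sketch is an illustrative userspace model only, not kernel code; NICE_0_LOAD and the field names mirror the kernel but the arithmetic is simplified.

/*
 * Minimal model of vruntime accounting: virtual time advances by
 * delta_exec scaled by NICE_0_LOAD / weight, so a heavier entity
 * ages more slowly in virtual time.
 */
#include <stdio.h>

#define NICE_0_LOAD 1024ULL	/* weight of a nice-0 task */

struct sched_entity_model {
	unsigned long long vruntime;	/* virtual runtime, ns (scaled) */
	unsigned long long weight;	/* load weight from the nice level */
};

static void update_vruntime(struct sched_entity_model *se,
			    unsigned long long delta_exec)
{
	/* heavier tasks accumulate vruntime more slowly */
	se->vruntime += delta_exec * NICE_0_LOAD / se->weight;
}

int main(void)
{
	struct sched_entity_model nice0 = { 0, 1024 };
	struct sched_entity_model heavy = { 0, 2048 };

	update_vruntime(&nice0, 1000000);	/* both ran 1 ms of real time */
	update_vruntime(&heavy, 1000000);

	/* the heavier entity advances half as fast in virtual time */
	printf("nice0 vruntime=%llu heavy vruntime=%llu\n",
	       nice0.vruntime, heavy.vruntime);
	return 0;
}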
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	74
1 file changed, 4 insertions(+), 70 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a94189c42d1a..2df5a6467812 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -178,8 +178,6 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running++;
 	se->on_rq = 1;
-
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static void
@@ -192,8 +190,6 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running--;
 	se->on_rq = 0;
-
-	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -249,13 +245,6 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return period;
 }
 
-static void
-add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
-{
-	se->wait_runtime += delta;
-	schedstat_add(cfs_rq, wait_runtime, delta);
-}
-
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -264,9 +253,7 @@ static inline void
 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	      unsigned long delta_exec)
 {
-	unsigned long delta_fair, delta_mine, delta_exec_weighted;
-	struct load_weight *lw = &cfs_rq->load;
-	unsigned long load = lw->weight;
+	unsigned long delta_exec_weighted;
 
 	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
@@ -278,25 +265,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 			&curr->load);
 	}
 	curr->vruntime += delta_exec_weighted;
-
-	if (!sched_feat(FAIR_SLEEPERS))
-		return;
-
-	if (unlikely(!load))
-		return;
-
-	delta_fair = calc_delta_fair(delta_exec, lw);
-	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
-
-	cfs_rq->fair_clock += delta_fair;
-	/*
-	 * We executed delta_exec amount of time on the CPU,
-	 * but we were only entitled to delta_mine amount of
-	 * time during that period (if nr_running == 1 then
-	 * the two values are equal)
-	 * [Note: delta_mine - delta_exec is negative]:
-	 */
-	add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -322,7 +290,6 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	se->wait_start_fair = cfs_rq->fair_clock;
 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
@@ -354,35 +321,11 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->fair_key = se->vruntime;
 }
 
-/*
- * Note: must be called with a freshly updated rq->fair_clock.
- */
-static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
-			unsigned long delta_fair)
-{
-	schedstat_set(se->wait_max, max(se->wait_max,
-			rq_of(cfs_rq)->clock - se->wait_start));
-
-	delta_fair = calc_weighted(delta_fair, se);
-
-	add_wait_runtime(cfs_rq, se, delta_fair);
-}
-
 static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long delta_fair;
-
-	if (unlikely(!se->wait_start_fair))
-		return;
-
-	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
-			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
-
-	__update_stats_wait_end(cfs_rq, se, delta_fair);
-
-	se->wait_start_fair = 0;
+	schedstat_set(se->wait_max, max(se->wait_max,
+			rq_of(cfs_rq)->clock - se->wait_start));
 	schedstat_set(se->wait_start, 0);
 }
 
@@ -552,9 +495,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	/*
 	 * Any task has to be enqueued before it get to execute on
 	 * a CPU. So account for the time it spent waiting on the
-	 * runqueue. (note, here we rely on pick_next_task() having
-	 * done a put_prev_task_fair() shortly before this, which
-	 * updated rq->fair_clock - used by update_stats_wait_end())
+	 * runqueue.
 	 */
 	update_stats_wait_end(cfs_rq, se);
 	update_stats_curr_start(cfs_rq, se);
@@ -989,13 +930,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	update_curr(cfs_rq);
 	place_entity(cfs_rq, se, 1);
 
-	/*
-	 * The statistical average of wait_runtime is about
-	 * -granularity/2, so initialize the task with that:
-	 */
-	if (sched_feat(START_DEBIT))
-		se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2);
-
 	if (sysctl_sched_child_runs_first &&
 			curr->vruntime < se->vruntime) {
 
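
As the right-hand side of the last hunks shows, after this commit update_stats_wait_end() only records wall-clock wait statistics (wait_max) through schedstats; it no longer converts fair-clock wait time into wait_runtime credit. The standalone C sketch below is a rough model of that simplified accounting under assumed, simplified names; it is not the kernel's actual code.

/*
 * Minimal model of the post-change wait accounting: only the wall-clock
 * wait duration and its maximum are tracked; no runtime credit is granted.
 */
#include <stdio.h>

struct wait_stats {
	unsigned long long wait_start;	/* clock when the entity began waiting */
	unsigned long long wait_max;	/* longest observed wait, in ns */
};

static void stats_wait_start(struct wait_stats *ws, unsigned long long now)
{
	ws->wait_start = now;
}

static void stats_wait_end(struct wait_stats *ws, unsigned long long now)
{
	unsigned long long waited = now - ws->wait_start;

	if (waited > ws->wait_max)
		ws->wait_max = waited;
	ws->wait_start = 0;
}

int main(void)
{
	struct wait_stats ws = { 0, 0 };

	stats_wait_start(&ws, 1000);
	stats_wait_end(&ws, 4500);	/* waited 3500 ns on the runqueue */
	printf("wait_max=%llu\n", ws.wait_max);
	return 0;
}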