author:    Ingo Molnar <mingo@elte.hu>, 2007-08-09 05:16:47 -0400
committer: Ingo Molnar <mingo@elte.hu>, 2007-08-09 05:16:47 -0400
commit:    b7cc089657c12340077fe937380f9e54bbd6b300
tree:      822be822d637541a8f4e6c0a6d14111bc82b722b /kernel
parent:    5cef9eca3837a8dcf605a360e213c4179a07c41a
sched: remove the 'u64 now' parameter from update_curr()
remove the 'u64 now' parameter from update_curr().
(identity transformation that causes no change in functionality.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
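
For context, the pattern this commit applies is visible in the diff below: instead of every caller sampling the runqueue clock and threading a 'u64 now' argument through, update_curr() derives the timestamp itself from the runqueue it can already reach via rq_of(). Here is a minimal, self-contained C sketch of that pattern — not the real scheduler code; the struct layouts and function bodies are simplified stand-ins:

```c
/*
 * Minimal sketch of the refactoring pattern, NOT the real kernel
 * code: struct rq, struct cfs_rq and the function bodies below are
 * simplified stand-ins for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct rq { uint64_t clock; };
struct cfs_rq { struct rq *rq; };

static struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* Before: the caller samples the clock and passes it in. */
static void update_curr_old(struct cfs_rq *cfs_rq, uint64_t now)
{
	(void)cfs_rq;
	printf("old style: now = %llu\n", (unsigned long long)now);
}

/* After: the callee fetches the same clock through rq_of(). */
static void update_curr_new(struct cfs_rq *cfs_rq)
{
	uint64_t now = rq_of(cfs_rq)->clock;

	printf("new style: now = %llu\n", (unsigned long long)now);
}

int main(void)
{
	struct rq rq = { .clock = 1000 };
	struct cfs_rq cfs_rq = { .rq = &rq };

	update_curr_old(&cfs_rq, rq.clock);	/* prints 1000 */
	update_curr_new(&cfs_rq);		/* prints 1000 as well */
	return 0;
}
```

Since each cfs_rq belongs to exactly one runqueue, reading the clock through rq_of() yields the same value the callers used to pass in, which is why the commit message calls this an identity transformation.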
Diffstat (limited to 'kernel'):

 kernel/sched_fair.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 025ac532b27a..798759882822 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -281,7 +281,7 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
  * are not in our scheduling class.
  */
 static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	unsigned long delta, delta_exec, delta_fair, delta_mine;
 	struct load_weight *lw = &cfs_rq->load;
@@ -320,7 +320,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
 		add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
 }
 
-static void update_curr(struct cfs_rq *cfs_rq, u64 now)
+static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
 	unsigned long delta_exec;
@@ -338,7 +338,7 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
 	curr->delta_exec += delta_exec;
 
 	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
-		__update_curr(cfs_rq, curr, now);
+		__update_curr(cfs_rq, curr);
 		curr->delta_exec = 0;
 	}
 	curr->exec_start = rq_of(cfs_rq)->clock;
@@ -453,7 +453,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
-	update_curr(cfs_rq, now);
+	update_curr(cfs_rq);
 	/*
 	 * Mark the end of the wait period if dequeueing a
 	 * waiting task:
@@ -579,7 +579,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	/*
 	 * Update the fair clock.
 	 */
-	update_curr(cfs_rq, now);
+	update_curr(cfs_rq);
 
 	if (wakeup)
 		enqueue_sleeper(cfs_rq, se, now);
@@ -660,7 +660,7 @@ put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev, u64 now)
 	 * was not called and update_curr() has to be done:
 	 */
 	if (prev->on_rq)
-		update_curr(cfs_rq, now);
+		update_curr(cfs_rq);
 
 	update_stats_curr_end(cfs_rq, prev, now);
 
@@ -851,7 +851,7 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
-		update_curr(cfs_rq, rq->clock);
+		update_curr(cfs_rq);
 		resched_task(curr);
 		return;
 	}