author     Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:03 -0400
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:03 -0400
commit     429d43bcc026b92b9dfaccd3577fec290f6a67ce
tree       d457d48080ce35566463f807e253edec95ddebd9
parent     62160e3f4a06d948ec89665d29f1173e551deedc
sched: cleanup: simplify cfs_rq_curr() methods
Now that the cfs_rq->curr pointer is unconditionally present, remove the
cfs_rq_curr() and set_cfs_rq_curr() wrappers and access the field directly.
kernel/sched.o:
text data bss dec hex filename
11784 224 2012 14020 36c4 sched.o.before
11784 224 2012 14020 36c4 sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
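For context, the accessor pattern removed here can be reduced to the
standalone sketch below (the struct definitions are trimmed to the one
relevant field for illustration; they are not the real kernel layouts).
Because both wrappers are static inline and merely forward to the field,
the compiler emits identical code for the wrapped and direct forms, which
is why the object-size comparison above shows no change:

/* Minimal user-space sketch of the removed wrappers; the trimmed
 * struct layouts below are illustrative, not the kernel's. */
#include <stdio.h>
#include <stddef.h>

struct sched_entity { int weight; };

struct cfs_rq {
	struct sched_entity *curr;	/* now unconditionally present */
};

/* Old style: trivial inline accessors around cfs_rq->curr ... */
static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
	return cfs_rq->curr;
}

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->curr = se;
}

int main(void)
{
	struct sched_entity se = { 0 };
	struct cfs_rq cfs_rq = { NULL };

	set_cfs_rq_curr(&cfs_rq, &se);	/* old: through the wrapper */
	cfs_rq.curr = &se;		/* new: direct field access */

	printf("curr set: %d\n", cfs_rq_curr(&cfs_rq) == &se);
	return 0;
}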
 kernel/sched_fair.c | 24 ++++++------------------
 1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 335faf06a561..74d47e65b9ea 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -111,18 +111,6 @@ extern struct sched_class fair_sched_class;
  * CFS operations on generic schedulable entities:
  */
 
-/* currently running entity (if any) on this cfs_rq */
-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
-	return cfs_rq->curr;
-}
-
-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->curr = se;
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* cpu runqueue to which this cfs_rq is attached */
@@ -382,7 +370,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 
 static void update_curr(struct cfs_rq *cfs_rq)
 {
-	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+	struct sched_entity *curr = cfs_rq->curr;
 	u64 now = rq_of(cfs_rq)->clock;
 	unsigned long delta_exec;
 
@@ -440,7 +428,7 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Are we enqueueing a waiting task? (for current tasks
 	 * a dequeue/enqueue event is a NOP)
 	 */
-	if (se != cfs_rq_curr(cfs_rq))
+	if (se != cfs_rq->curr)
 		update_stats_wait_start(cfs_rq, se);
 	/*
 	 * Update the key:
@@ -511,7 +499,7 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Mark the end of the wait period if dequeueing a
 	 * waiting task:
 	 */
-	if (se != cfs_rq_curr(cfs_rq))
+	if (se != cfs_rq->curr)
 		update_stats_wait_end(cfs_rq, se);
 }
 
@@ -717,7 +705,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 */
 	update_stats_wait_end(cfs_rq, se);
 	update_stats_curr_start(cfs_rq, se);
-	set_cfs_rq_curr(cfs_rq, se);
+	cfs_rq->curr = se;
 #ifdef CONFIG_SCHEDSTATS
 	/*
 	 * Track our maximum slice length, if the CPU's load is at
@@ -754,7 +742,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 	if (prev->on_rq)
 		update_stats_wait_start(cfs_rq, prev);
-	set_cfs_rq_curr(cfs_rq, NULL);
+	cfs_rq->curr = NULL;
 }
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -1153,7 +1141,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
 static void task_new_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
+	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
 
 	sched_info_queued(p);
 