author     Alexey Dobriyan <adobriyan@sw.ru>  2007-10-15 11:00:13 -0400
committer  Ingo Molnar <mingo@elte.hu>        2007-10-15 11:00:13 -0400
commit     a9957449b08ab561a33e1e038df06843b8d8dd9f (patch)
tree       066272181b3d563f8029c99c8c5587d04597ffda /kernel
parent     155bb293ae8387526e6e07d42b1691104e55d9a2 (diff)
sched: uninline scheduler
* save ~300 bytes
* activate_idle_task() was moved to avoid a warning (see the note below)
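
The warning in question is presumably gcc's "defined but not used": gcc keeps
quiet about an unused "static inline" function but warns for a plain "static"
one, and with CONFIG_HOTPLUG_CPU disabled activate_idle_task() would have no
caller at its old location. Moving it into the CPU-hotplug section next to its
only user sidesteps that. A minimal sketch of the behavior (the helper names
are made up, not from the patch):

    /* gcc -Wall: no warning, even though it is never called */
    static inline void helper_inline(void) { }

    /* gcc -Wall: warning: 'helper_plain' defined but not used */
    static void helper_plain(void) { }

    /* defining the function only alongside its caller avoids the warning */
    #ifdef CONFIG_HOTPLUG_CPU
    static void helper_hotplug(void) { }
    #endif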
bloat-o-meter output:
add/remove: 6/0 grow/shrink: 0/16 up/down: 438/-733 (-295) <===
function                      old     new   delta
__enqueue_entity                -     165    +165
finish_task_switch              -     110    +110
update_curr_rt                  -      79     +79
__load_balance_iterator         -      32     +32
__task_rq_unlock                -      28     +28
find_process_by_pid             -      24     +24
do_sched_setscheduler         133     123     -10
sys_sched_rr_get_interval     176     165     -11
sys_sched_getparam            156     145     -11
normalize_rt_tasks            482     470     -12
sched_getaffinity             112      99     -13
sys_sched_getscheduler         86      72     -14
sched_setaffinity             226     212     -14
sched_setscheduler            666     642     -24
load_balance_start_fair        33       9     -24
load_balance_next_fair         33       9     -24
dequeue_task_rt               133      67     -66
put_prev_task_rt               97      28     -69
schedule_tail                 133      50     -83
schedule                      682     594     -88
enqueue_entity                499     366    -133
task_new_fair                 317     180    -137
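
Output of this shape comes from the kernel's scripts/bloat-o-meter, run
against the images built before and after the patch; the vmlinux file names
below are illustrative:

    ./scripts/bloat-o-meter vmlinux.before vmlinux.after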
Signed-off-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       | 44
-rw-r--r--  kernel/sched_fair.c  |  2
-rw-r--r--  kernel/sched_rt.c    |  2
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4f13d379bea5..ce9bb7aa7c12 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -608,7 +608,7 @@ repeat_lock_task:
         return rq;
 }
 
-static inline void __task_rq_unlock(struct rq *rq)
+static void __task_rq_unlock(struct rq *rq)
         __releases(rq->lock)
 {
         spin_unlock(&rq->lock);
@@ -623,7 +623,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
-static inline struct rq *this_rq_lock(void)
+static struct rq *this_rq_lock(void)
         __acquires(rq->lock)
 {
         struct rq *rq;
@@ -986,20 +986,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 }
 
 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-        update_rq_clock(rq);
-
-        if (p->state == TASK_UNINTERRUPTIBLE)
-                rq->nr_uninterruptible--;
-
-        enqueue_task(rq, p, 0);
-        inc_nr_running(p, rq);
-}
-
-/*
  * deactivate_task - remove a task from the runqueue.
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
@@ -1206,7 +1192,7 @@ void kick_process(struct task_struct *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static unsigned long source_load(int cpu, int type)
 {
         struct rq *rq = cpu_rq(cpu);
         unsigned long total = weighted_cpuload(cpu);
@@ -1221,7 +1207,7 @@ static inline unsigned long source_load(int cpu, int type)
  * Return a high guess at the load of a migration-target cpu weighted
  * according to the scheduling class and "nice" value.
  */
-static inline unsigned long target_load(int cpu, int type)
+static unsigned long target_load(int cpu, int type)
 {
         struct rq *rq = cpu_rq(cpu);
         unsigned long total = weighted_cpuload(cpu);
@@ -1813,7 +1799,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         __releases(rq->lock)
 {
         struct mm_struct *mm = rq->prev_mm;
@@ -3020,7 +3006,7 @@ static DEFINE_SPINLOCK(balancing);
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
         int balance = 1;
         struct rq *rq = cpu_rq(cpu);
@@ -4140,7 +4126,7 @@ struct task_struct *idle_task(int cpu)
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static struct task_struct *find_process_by_pid(pid_t pid)
 {
         return pid ? find_task_by_pid(pid) : current;
 }
@@ -5157,6 +5143,20 @@ static void migrate_live_tasks(int src_cpu)
 }
 
 /*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+        update_rq_clock(rq);
+
+        if (p->state == TASK_UNINTERRUPTIBLE)
+                rq->nr_uninterruptible--;
+
+        enqueue_task(rq, p, 0);
+        inc_nr_running(p, rq);
+}
+
+/*
  * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
  * the _front_ of the runqueue. Used by CPU offline code.
@@ -6494,7 +6494,7 @@ int in_sched_functions(unsigned long addr)
                 && addr < (unsigned long)__sched_text_end);
 }
 
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
         cfs_rq->tasks_timeline = RB_ROOT;
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0856701db14e..48604eab7dad 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -892,7 +892,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * achieve that by always pre-iterating before returning
  * the current task:
  */
-static inline struct task_struct *
+static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
         struct task_struct *p;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index dbe4d8cf80d6..2f26c3d73506 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -7,7 +7,7 @@
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
-static inline void update_curr_rt(struct rq *rq)
+static void update_curr_rt(struct rq *rq)
 {
         struct task_struct *curr = rq->curr;
         u64 delta_exec;
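
A side note on the __releases(rq->lock) / __acquires(rq->lock) markers that
survive the uninlining in the hunks above: they are annotations for the sparse
static checker and expand to nothing in a normal build, so they add no code
size of their own. Roughly, from include/linux/compiler.h of this era (a close
paraphrase, so treat as approximate):

    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x,0,1)))
    # define __releases(x)  __attribute__((context(x,1,0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif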