author     Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:12 -0400
committer  Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:12 -0400
commit     5522d5d5f70005faeffff3ffc0cfa8eec0155de4 (patch)
tree       d05fc41d19b8a70102a0ad3ad7ed8f6fd2d1cf7d /kernel
parent     b9fa3df33f9166daf81bfa8253d339f5a7726122 (diff)
sched: mark scheduling classes as const
mark scheduling classes as const. This speeds up the code
a bit and shrinks it:

   text    data     bss     dec     hex filename
  40027    4018     292   44337    ad31 sched.o.before
  40190    3842     292   44324    ad24 sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
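The saving comes from two things visible in the diff below: the class method tables become const (so they can be placed in read-only data and the compiler can assume the function pointers are never rewritten), and the rt -> fair -> idle chain is now wired up with .next designated initializers at compile time, which lets the runtime "link up the scheduling class hierarchy" code in sched_init() go away. As a rough illustration only, here is a minimal standalone C sketch of that pattern; the names (toy_class, hi_class, lo_class, pick_next, pick_hi, pick_lo) are made up for the example and are not kernel identifiers:

    /*
     * Toy version of the pattern used by the patch: const method tables
     * chained at compile time via a .next designated initializer, so no
     * runtime linking step is needed and the tables can live in .rodata.
     */
    #include <stdio.h>

    struct toy_class {
            const struct toy_class *next;   /* compile-time priority chain */
            int (*pick_next)(void);         /* one "method" for the example */
    };

    static int pick_hi(void) { return 1; }
    static int pick_lo(void) { return 0; }

    /* Lowest-priority class terminates the chain; .next stays NULL. */
    static const struct toy_class lo_class = {
            /* .next is NULL */
            .pick_next      = pick_lo,
    };

    /* Higher-priority class points at the next lower one, like rt -> fair -> idle. */
    static const struct toy_class hi_class = {
            .next           = &lo_class,
            .pick_next      = pick_hi,
    };

    int main(void)
    {
            /* Walk the chain the way move_one_task()/pick_next_task() iterate classes. */
            for (const struct toy_class *class = &hi_class; class; class = class->next)
                    printf("picked %d\n", class->pick_next());
            return 0;
    }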
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c           17
-rw-r--r--  kernel/sched_fair.c       5
-rw-r--r--  kernel/sched_idletask.c   3
-rw-r--r--  kernel/sched_rt.c         3
4 files changed, 11 insertions, 17 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e1657e0c86d0..f582e2cedb09 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -847,9 +847,9 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      int *this_best_prio, struct rq_iterator *iterator);
 
 #include "sched_stats.h"
-#include "sched_rt.c"
-#include "sched_fair.c"
 #include "sched_idletask.c"
+#include "sched_fair.c"
+#include "sched_rt.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
@@ -2251,7 +2251,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
-	struct sched_class *class = sched_class_highest;
+	const struct sched_class *class = sched_class_highest;
 	unsigned long total_load_moved = 0;
 	int this_best_prio = this_rq->curr->prio;
 
@@ -2276,7 +2276,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 			 struct sched_domain *sd, enum cpu_idle_type idle)
 {
-	struct sched_class *class;
+	const struct sched_class *class;
 	int this_best_prio = MAX_PRIO;
 
 	for (class = sched_class_highest; class; class = class->next)
@@ -3432,7 +3432,7 @@ static inline void schedule_debug(struct task_struct *prev)
 static inline struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev)
 {
-	struct sched_class *class;
+	const struct sched_class *class;
 	struct task_struct *p;
 
 	/*
@@ -6504,13 +6504,6 @@ void __init sched_init(void)
 	int highest_cpu = 0;
 	int i, j;
 
-	/*
-	 * Link up the scheduling class hierarchy:
-	 */
-	rt_sched_class.next = &fair_sched_class;
-	fair_sched_class.next = &idle_sched_class;
-	idle_sched_class.next = NULL;
-
 	for_each_possible_cpu(i) {
 		struct rt_prio_array *array;
 		struct rq *rq;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index de13a6f5b977..32fd976f8566 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -76,8 +76,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  */
 const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
 
-extern struct sched_class fair_sched_class;
-
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -1031,7 +1029,8 @@ static void set_curr_task_fair(struct rq *rq)
 /*
  * All the scheduling class methods:
  */
-struct sched_class fair_sched_class __read_mostly = {
+static const struct sched_class fair_sched_class = {
+	.next			= &idle_sched_class,
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5ebf829cdd73..6e2ead41516e 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -57,7 +57,8 @@ static void set_curr_task_idle(struct rq *rq)
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-static struct sched_class idle_sched_class __read_mostly = {
+const struct sched_class idle_sched_class = {
+	/* .next is NULL */
 	/* no enqueue/yield_task for idle tasks */
 
 	/* dequeue is not valid, we print a debug message there: */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e1d5f1c8b532..dbe4d8cf80d6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -225,7 +225,8 @@ static void set_curr_task_rt(struct rq *rq)
 	p->se.exec_start = rq->clock;
 }
 
-static struct sched_class rt_sched_class __read_mostly = {
+const struct sched_class rt_sched_class = {
+	.next			= &fair_sched_class,
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
 	.yield_task		= yield_task_rt,