 include/linux/sched.h   |  4 ++--
 kernel/sched.c          | 17 +++++------------
 kernel/sched_fair.c     |  5 ++---
 kernel/sched_idletask.c |  3 ++-
 kernel/sched_rt.c       |  3 ++-
 5 files changed, 13 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 97f736b749c2..47e3717a0356 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -863,7 +863,7 @@ struct rq;
 struct sched_domain;
 
 struct sched_class {
-	struct sched_class *next;
+	const struct sched_class *next;
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
@@ -949,7 +949,7 @@ struct task_struct {
 
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	struct sched_class *sched_class;
+	const struct sched_class *sched_class;
 	struct sched_entity se;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
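The const qualifier only forbids writing to the class descriptor; dispatching through it is unchanged. Below is a minimal user-space sketch of that dispatch pattern, not kernel code: the struct rq/task_struct stand-ins, the printf body and the NULL run-queue argument are illustration only.

#include <stdio.h>

struct rq;			/* opaque in this sketch */
struct task_struct;

/* Stripped-down stand-in for the kernel's sched_class. */
struct sched_class {
	const struct sched_class *next;
	void (*enqueue_task)(struct rq *rq, struct task_struct *p, int wakeup);
};

static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	(void)rq; (void)p; (void)wakeup;
	printf("enqueued via the fair class\n");
}

static const struct sched_class fair_sched_class = {
	/* .next chain omitted in this sketch */
	.enqueue_task	= enqueue_task_fair,
};

/* Stand-in for task_struct: only the const class pointer matters here. */
struct task_struct {
	const struct sched_class *sched_class;
};

int main(void)
{
	struct task_struct p = { .sched_class = &fair_sched_class };

	/* Calls through the const descriptor are fine; a write such as
	 * p.sched_class->next = NULL would now be rejected at compile time. */
	p.sched_class->enqueue_task(NULL, &p, 1);
	return 0;
}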
diff --git a/kernel/sched.c b/kernel/sched.c
index e1657e0c86d0..f582e2cedb09 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -847,9 +847,9 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      int *this_best_prio, struct rq_iterator *iterator);
 
 #include "sched_stats.h"
-#include "sched_rt.c"
-#include "sched_fair.c"
 #include "sched_idletask.c"
+#include "sched_fair.c"
+#include "sched_rt.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
@@ -2251,7 +2251,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
-	struct sched_class *class = sched_class_highest;
+	const struct sched_class *class = sched_class_highest;
 	unsigned long total_load_moved = 0;
 	int this_best_prio = this_rq->curr->prio;
 
@@ -2276,7 +2276,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 			 struct sched_domain *sd, enum cpu_idle_type idle)
 {
-	struct sched_class *class;
+	const struct sched_class *class;
 	int this_best_prio = MAX_PRIO;
 
 	for (class = sched_class_highest; class; class = class->next)
@@ -3432,7 +3432,7 @@ static inline void schedule_debug(struct task_struct *prev)
 static inline struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev)
 {
-	struct sched_class *class;
+	const struct sched_class *class;
 	struct task_struct *p;
 
 	/*
@@ -6504,13 +6504,6 @@ void __init sched_init(void)
 	int highest_cpu = 0;
 	int i, j;
 
-	/*
-	 * Link up the scheduling class hierarchy:
-	 */
-	rt_sched_class.next = &fair_sched_class;
-	fair_sched_class.next = &idle_sched_class;
-	idle_sched_class.next = NULL;
-
 	for_each_possible_cpu(i) {
 		struct rt_prio_array *array;
 		struct rq *rq;
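With the descriptors const, sched_init() can no longer link the classes at runtime; each class instead names its successor in a compile-time initializer, which is also why the include order above is reversed (a .next initializer needs its target defined first). A minimal user-space sketch of the resulting chain follows; the name field, the printf walk and the sched_class_highest definition are illustration only, though the iteration pattern is the same one used by move_one_task() and pick_next_task() above.

#include <stdio.h>

struct sched_class {
	const struct sched_class *next;
	const char *name;	/* illustration only; not in the kernel struct */
};

/* Definition order mirrors the new include order in sched.c: the
 * lowest-priority class comes first, so every .next target is already
 * defined when its address is taken in an initializer. */
static const struct sched_class idle_sched_class = {
	/* .next is NULL */
	.name = "idle",
};

static const struct sched_class fair_sched_class = {
	.next = &idle_sched_class,
	.name = "fair",
};

static const struct sched_class rt_sched_class = {
	.next = &fair_sched_class,
	.name = "rt",
};

/* Head of the chain, walked highest-priority first. */
#define sched_class_highest (&rt_sched_class)

int main(void)
{
	const struct sched_class *class;

	for (class = sched_class_highest; class; class = class->next)
		printf("%s\n", class->name);	/* rt, fair, idle */

	return 0;
}

The same .next initializers appear in the real sched_fair.c, sched_idletask.c and sched_rt.c hunks below.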
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index de13a6f5b977..32fd976f8566 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -76,8 +76,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  */
 const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
 
-extern struct sched_class fair_sched_class;
-
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -1031,7 +1029,8 @@ static void set_curr_task_fair(struct rq *rq)
 /*
  * All the scheduling class methods:
  */
-struct sched_class fair_sched_class __read_mostly = {
+static const struct sched_class fair_sched_class = {
+	.next			= &idle_sched_class,
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5ebf829cdd73..6e2ead41516e 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -57,7 +57,8 @@ static void set_curr_task_idle(struct rq *rq)
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-static struct sched_class idle_sched_class __read_mostly = {
+const struct sched_class idle_sched_class = {
+	/* .next is NULL */
 	/* no enqueue/yield_task for idle tasks */
 
 	/* dequeue is not valid, we print a debug message there: */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e1d5f1c8b532..dbe4d8cf80d6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -225,7 +225,8 @@ static void set_curr_task_rt(struct rq *rq)
 	p->se.exec_start = rq->clock;
 }
 
-static struct sched_class rt_sched_class __read_mostly = {
+const struct sched_class rt_sched_class = {
+	.next			= &fair_sched_class,
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
 	.yield_task		= yield_task_rt,