 include/linux/sched.h | 3 +--
 kernel/sched.c        | 2 +-
 kernel/sched_fair.c   | 3 +--
 kernel/sched_rt.c     | 3 +--
 4 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62ddddb49db3..b11dedfbab6e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -855,8 +855,7 @@ struct sched_domain;
 struct sched_class {
 	struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
-			      int wakeup, u64 now);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
 			      int sleep, u64 now);
 	void (*yield_task) (struct rq *rq, struct task_struct *p);
diff --git a/kernel/sched.c b/kernel/sched.c
index 49a5fb0cdea0..43ae1566b8fc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -852,7 +852,7 @@ static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
 {
 	sched_info_queued(p);
-	p->sched_class->enqueue_task(rq, p, wakeup, now);
+	p->sched_class->enqueue_task(rq, p, wakeup);
 	p->se.on_rq = 1;
 }
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a11d18861a3c..81db9626b7ed 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -782,8 +782,7 @@ static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
  * increased. Here we update the fair scheduling stats and
  * then put the task into the rbtree:
  */
-static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index fa5a46273b79..1edaa99e0d3d 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -25,8 +25,7 @@ static inline void update_curr_rt(struct rq *rq)
 	curr->se.exec_start = rq->clock;
 }
 
-static void
-enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 
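For illustration only, not part of the patch: a minimal sketch of how a scheduling class might fill in the ->enqueue_task() hook under the new prototype shown above. The names my_enqueue_task and my_sched_class are hypothetical, and the fragment assumes it sits inside the scheduler core (a kernel/sched_*.c file) where struct rq is visible; ->dequeue_task is left out because it still takes the u64 now argument at this point in the series.

/*
 * Hypothetical scheduling-class stub: ->enqueue_task() now receives only
 * the runqueue, the task and the wakeup flag -- no 'u64 now' timestamp.
 */
static void my_enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
	/* class-specific enqueue work (e.g. linking p into a runqueue list) */
}

static struct sched_class my_sched_class = {
	.enqueue_task	= my_enqueue_task,
	/* other callbacks (.dequeue_task, .yield_task, ...) omitted */
};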