 include/linux/sched.h |  2 +-
 kernel/sched.c        |  2 +-
 kernel/sched_fair.c   | 10 +++++-----
 kernel/sched_rt.c     |  4 ++--
 4 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f776a30b403e..66169005f008 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -858,7 +858,7 @@ struct sched_class {
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
-	void (*yield_task) (struct rq *rq, struct task_struct *p);
+	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
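[Note, not part of the patch] Under the new prototype the yield hook no longer receives a task pointer; the yielding task is always the runqueue's current task, rq->curr. A rough sketch of how a scheduling class could wire the hook under the new signature; yield_task_example, requeue_task_example and example_sched_class are invented for illustration only:

static void yield_task_example(struct rq *rq)
{
	/* The task that called sched_yield() is implicitly rq->curr. */
	requeue_task_example(rq, rq->curr);	/* hypothetical per-class requeue */
}

static struct sched_class example_sched_class = {
	.yield_task	= yield_task_example,	/* new single-argument prototype */
	/* other hooks omitted from this sketch */
};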
diff --git a/kernel/sched.c b/kernel/sched.c
index 3b104635a8ea..e1f784f4b4db 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4537,7 +4537,7 @@ asmlinkage long sys_sched_yield(void)
 	struct rq *rq = this_rq_lock();
 
 	schedstat_inc(rq, yld_cnt);
-	current->sched_class->yield_task(rq, current);
+	current->sched_class->yield_task(rq);
 
 	/*
 	 * Since we are going to call schedule() anyway, there's
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 827a0636525a..4dd256d46853 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -722,11 +722,11 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  *
  * If compat_yield is turned on then we requeue to the end of the tree.
  */
-static void yield_task_fair(struct rq *rq, struct task_struct *p)
+static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-	struct sched_entity *rightmost, *se = &p->se;
+	struct sched_entity *rightmost, *se = &rq->curr->se;
 	struct rb_node *parent;
 
 	/*
@@ -741,8 +741,8 @@ static void yield_task_fair(struct rq *rq, struct task_struct *p)
 	 * Dequeue and enqueue the task to update its
 	 * position within the tree:
 	 */
-	dequeue_entity(cfs_rq, &p->se, 0);
-	enqueue_entity(cfs_rq, &p->se, 0);
+	dequeue_entity(cfs_rq, se, 0);
+	enqueue_entity(cfs_rq, se, 0);
 
 	return;
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 45b339f56aea..b86944c20f9f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -59,9 +59,9 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p)
 }
 
 static void
-yield_task_rt(struct rq *rq, struct task_struct *p)
+yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, p);
+	requeue_task_rt(rq, rq->curr);
 }
 
 /*
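[Note, not part of the patch] The prototype change is invisible to userspace: sched_yield() still enters sys_sched_yield(), which now calls current->sched_class->yield_task(rq) with only the runqueue, and for CFS the requeue-to-rightmost behaviour referenced in the yield_task_fair() comment is still controlled by the sched_compat_yield sysctl in this kernel series. A minimal userspace sketch that exercises this path:

#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Give up the CPU; in-kernel this reaches the per-class yield_task(rq) hook. */
	if (sched_yield() != 0) {
		perror("sched_yield");
		return 1;
	}
	printf("yielded and resumed\n");
	return 0;
}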