Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       |  8 ++++----
-rw-r--r--  kernel/sched_fair.c  | 25 +++++++++++++++++++------
-rw-r--r--  kernel/sched_stats.h | 20 +-------------------
3 files changed, 24 insertions(+), 29 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d42992bccdfa..aa14a56f9d03 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8510,12 +8510,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	set_task_rq(tsk, task_cpu(tsk));
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (tsk->sched_class->moved_group)
-		tsk->sched_class->moved_group(tsk, on_rq);
+	if (tsk->sched_class->task_move_group)
+		tsk->sched_class->task_move_group(tsk, on_rq);
+	else
 #endif
+		set_task_rq(tsk, task_cpu(tsk));
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
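The point of the reshuffle above is that the group-scheduling hook now has to do work both before and after the runqueue switch, so set_task_rq() moves inside the hook and becomes the else-branch fallback for classes that don't provide one. A minimal standalone sketch of that hook-or-fallback dispatch, with illustrative names rather than the kernel's actual types:

/* toy model of the dispatch pattern in sched_move_task(); not kernel code */
#include <stdio.h>

struct task;

struct sched_class {
	/* optional hook; when present it must switch runqueues itself */
	void (*task_move_group)(struct task *t, int on_rq);
};

struct task {
	const struct sched_class *class;
};

static void set_task_rq(struct task *t)
{
	printf("default: switch runqueue\n");
}

static void fair_move_group(struct task *t, int on_rq)
{
	printf("fair: fix up vruntime, switch runqueue, fix up vruntime\n");
}

static const struct sched_class fair_class = { .task_move_group = fair_move_group };
static const struct sched_class other_class = { 0 };	/* no hook */

static void move_task(struct task *t, int on_rq)
{
	if (t->class->task_move_group)
		t->class->task_move_group(t, on_rq);	/* hook owns the switch */
	else
		set_task_rq(t);				/* plain fallback */
}

int main(void)
{
	struct task a = { &fair_class }, b = { &other_class };
	move_task(&a, 0);
	move_task(&b, 0);
	return 0;
}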
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 933f3d1b62ea..f4f6a8326dd0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3869,13 +3869,26 @@ static void set_curr_task_fair(struct rq *rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-
-	update_curr(cfs_rq);
+	/*
+	 * If the task was not on the rq at the time of this cgroup movement
+	 * it must have been asleep, sleeping tasks keep their ->vruntime
+	 * absolute on their old rq until wakeup (needed for the fair sleeper
+	 * bonus in place_entity()).
+	 *
+	 * If it was on the rq, we've just 'preempted' it, which does convert
+	 * ->vruntime to a relative base.
+	 *
+	 * Make sure both cases convert their relative position when migrating
+	 * to another cgroup's rq. This does somewhat interfere with the
+	 * fair sleeper stuff for the first placement, but who cares.
+	 */
+	if (!on_rq)
+		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+	set_task_rq(p, task_cpu(p));
 	if (!on_rq)
+		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
-		place_entity(cfs_rq, &p->se, 1);
 }
 #endif
 
@@ -3927,7 +3940,7 @@ static const struct sched_class fair_sched_class = {
 	.get_rr_interval	= get_rr_interval_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	.moved_group		= moved_group_fair,
+	.task_move_group	= task_move_group_fair,
 #endif
 };
 
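The comment in task_move_group_fair() above is easiest to follow with concrete numbers: a sleeping task's ->vruntime is made relative by subtracting the old group's min_vruntime, the runqueue switch happens, and it is made absolute again against the new group's min_vruntime, so the task's relative position carries over. A standalone arithmetic sketch of just that conversion, with made-up values rather than kernel code:

#include <stdio.h>

int main(void)
{
	/* invented min_vruntime values for the old and new cfs_rq */
	unsigned long long old_min = 1000, new_min = 5000;
	unsigned long long vruntime = 1300;	/* absolute on the old cfs_rq */

	vruntime -= old_min;	/* relative position: 300 */
	/* ... set_task_rq() switches the entity to the new cfs_rq ... */
	vruntime += new_min;	/* absolute on the new cfs_rq: 5300 */

	printf("vruntime after move: %llu\n", vruntime);
	return 0;
}

Either way the task keeps the same 300 units of lead over its queue's minimum, whichever group it lands in.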
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 25c2f962f6fc..48ddf431db0e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -157,15 +157,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
 }
 
 /*
- * Called when a process is dequeued from the active array and given
- * the cpu. We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue. (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * Though we are interested in knowing how long it was from the *first* time a
+ * We are interested in knowing how long it was from the *first* time a
  * task was queued to the time that it finally hit a cpu, we call this routine
  * from dequeue_task() to account for possible rq->clock skew across cpus. The
  * delta taken on each cpu would annul the skew.
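The skew argument in the surviving comment: each rq->clock may carry a per-cpu offset, but as long as both timestamps of a wait interval are read on the same cpu, the offset cancels in the subtraction; breaking the interval at dequeue points keeps every sub-delta on one cpu. A toy model of that cancellation, with invented offsets rather than kernel code:

#include <stdio.h>

static unsigned long long now;	/* 'true' time */

/* each cpu's clock reads with its own constant offset */
static unsigned long long rq_clock(int cpu)
{
	static const unsigned long long skew[2] = { 0, 700 };
	return now + skew[cpu];
}

int main(void)
{
	now = 10000;
	unsigned long long queued = rq_clock(1);	/* stamped when queued on cpu 1 */
	now += 250;					/* the task waits 250 units */
	unsigned long long arrive = rq_clock(1);	/* stamped on the same cpu */

	/* prints 250 exactly: the 700 of skew cancels out; taking the two
	 * stamps on different cpus would instead give 250 +/- 700 */
	printf("wait delta: %llu\n", arrive - queued);
	return 0;
}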
@@ -203,16 +195,6 @@ static void sched_info_arrive(struct task_struct *t)
 }
 
 /*
- * Called when a process is queued into either the active or expired
- * array. The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu. Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
  * This function is only called from enqueue_task(), but also only updates
  * the timestamp if it is already not set. It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.
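The "only updates the timestamp if it is already not set" behavior pairs the two functions described in the comments above: enqueue stamps only a clear slot, dequeue clears it, so the stamp always marks the *first* time the task was queued. A simplified sketch of that pairing; the names mirror the kernel's, but this is a model, not the real implementation:

struct sched_info {
	unsigned long long last_queued;	/* 0 means "no stamp" */
};

/* enqueue path: keep the first queue time, ignore repeat enqueues */
static void sched_info_queued(struct sched_info *si, unsigned long long clock)
{
	if (!si->last_queued)
		si->last_queued = clock;
}

/* dequeue path: clear the stamp so the next enqueue starts a new interval */
static void sched_info_dequeued(struct sched_info *si)
{
	si->last_queued = 0;
}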