 include/linux/init_task.h | 12
 include/linux/sched.h     |  5
 kernel/sched/core.c       |  9
 kernel/sched/sched.h      | 23
 4 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9e65eff6af3b..b806b821e735 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -123,8 +123,17 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk)					\
+	.sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
 #ifdef CONFIG_PERF_EVENTS
 # define INIT_PERF_EVENTS(tsk)					\
 	.perf_event_mutex =					\
		__MUTEX_INITIALIZER(tsk.perf_event_mutex),	\
 	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
@@ -161,6 +170,7 @@ extern struct cred init_cred;
 	},							\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),		\
 	INIT_PUSHABLE_TASKS(tsk)				\
+	INIT_CGROUP_SCHED(tsk)					\
 	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),		\
 	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),	\
 	.real_parent	= &tsk,					\
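
The INIT_CGROUP_SCHED macro above follows a common kernel idiom: a config
option either contributes a fragment to a designated initializer or expands
to nothing, so INIT_TASK itself needs no #ifdef. A minimal standalone sketch
of the same pattern (not kernel code; every name below is invented for the
demo):

/*
 * Conditional designated-initializer fragment, standalone sketch.
 */
#include <stdio.h>

#define HAVE_GROUPS 1			/* stands in for CONFIG_CGROUP_SCHED */

struct group { const char *name; };

static struct group root_group = { "root" };	/* like root_task_group */

struct task {
	int pid;
#if HAVE_GROUPS
	struct group *grp;		/* like tsk->sched_task_group */
#endif
};

#if HAVE_GROUPS
# define INIT_GROUP(tsk)	.grp = &root_group,
#else
# define INIT_GROUP(tsk)
#endif

#define INIT_TASK(tsk)						\
{								\
	.pid	= 0,						\
	INIT_GROUP(tsk)						\
}

static struct task init_task = INIT_TASK(init_task);

int main(void)
{
#if HAVE_GROUPS
	printf("init task starts in group '%s'\n", init_task.grp->name);
#endif
	return 0;
}
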
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bc9952991710..fd9436a3a545 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1245,6 +1245,9 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -2724,7 +2727,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
 				struct task_struct *tsk);
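
Since the new sched_task_group member exists only under CONFIG_CGROUP_SCHED,
code touching it has to compile in both configurations. A hedged userspace
sketch of the member-plus-accessor shape this produces (not kernel code; all
names invented):

#include <stdio.h>

#define CONFIG_GROUPS 1		/* flip to 0 to compile the member out */

struct group { int id; };

struct task {
	int prio;
#if CONFIG_GROUPS
	struct group *group;	/* the cached copy, like sched_task_group */
#endif
};

static inline struct group *task_group_of(struct task *t)
{
#if CONFIG_GROUPS
	return t->group;	/* a plain load of the cached pointer */
#else
	(void)t;
	return NULL;		/* feature compiled out: no groups exist */
#endif
}

int main(void)
{
	static struct group g = { .id = 1 };
	struct task t = {
		.prio = 120,
#if CONFIG_GROUPS
		.group = &g,
#endif
	};
	struct group *grp = task_group_of(&t);
	printf("group id: %d\n", grp ? grp->id : 0);
	return 0;
}
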
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 536b213f0ce5..5d011ef4c0df 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1096,7 +1096,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
-	 * see set_task_rq().
+	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
@@ -7658,6 +7658,7 @@ void sched_destroy_group(struct task_group *tg)
  */
 void sched_move_task(struct task_struct *tsk)
 {
+	struct task_group *tg;
 	int on_rq, running;
 	unsigned long flags;
 	struct rq *rq;
@@ -7672,6 +7673,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
+	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+				lockdep_is_held(&tsk->sighand->siglock)),
+			  struct task_group, css);
+	tg = autogroup_task_group(tsk, tg);
+	tsk->sched_task_group = tg;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
 		tsk->sched_class->task_move_group(tsk, on_rq);
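
The heart of the fix is visible in this hunk: sched_move_task() recomputes
the task's group from the cgroup subsystem state and stores it into
tsk->sched_task_group while holding both task_struct::pi_lock and rq->lock
(both are taken by task_rq_lock()), which is what lets a reader holding
either lock alone trust the value. A userspace analogue of that
writer-takes-both, reader-takes-either discipline (not kernel code; invented
names, pthread mutexes in place of the kernel's raw spinlocks):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

static int group_id;		/* stands in for tsk->sched_task_group */

/* Writer: both locks held across the update, as in sched_move_task(). */
static void move_task(int new_group)
{
	pthread_mutex_lock(&pi_lock);
	pthread_mutex_lock(&rq_lock);
	group_id = new_group;
	pthread_mutex_unlock(&rq_lock);
	pthread_mutex_unlock(&pi_lock);
}

/* Reader on the wakeup path: ->pi_lock alone pins the value. */
static int read_group_waking(void)
{
	pthread_mutex_lock(&pi_lock);
	int g = group_id;
	pthread_mutex_unlock(&pi_lock);
	return g;
}

/* Reader on the scheduling path: rq->lock alone pins it just as well. */
static int read_group_runnable(void)
{
	pthread_mutex_lock(&rq_lock);
	int g = group_id;
	pthread_mutex_unlock(&rq_lock);
	return g;
}

int main(void)
{
	move_task(42);
	printf("waking: %d, runnable: %d\n",
	       read_group_waking(), read_group_runnable());
	return 0;
}
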
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 55844f24435a..c35a1a7dd4d6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -538,22 +538,19 @@ extern int group_balance_cpu(struct sched_group *sg);
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
+ * We cannot use task_subsys_state() and friends because the cgroup
+ * subsystem changes that value before the cgroup_subsys::attach() method
+ * is called, therefore we cannot pin it and might observe the wrong value.
+ *
+ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
+ * core changes this before calling sched_move_task().
+ *
+ * Instead we use a 'copy' which is updated from sched_move_task() while
+ * holding both task_struct::pi_lock and rq::lock.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
-	struct task_group *tg;
-	struct cgroup_subsys_state *css;
-
-	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock) ||
-			lockdep_is_held(&task_rq(p)->lock));
-	tg = container_of(css, struct task_group, css);
-
-	return autogroup_task_group(p, tg);
+	return p->sched_task_group;
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
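
The rewritten comment names the actual race: the cgroup core (and likewise
autogroup) retargets the task's visible group pointer before the ->attach()
callback runs, so task_group() could return a group the scheduler had not
yet moved the task into. The patch therefore keeps a per-task copy that only
sched_move_task() updates, and task_group() becomes a plain load. A
standalone sketch of that live-versus-copied pointer scheme (not kernel
code; all names invented):

#include <stdio.h>

struct group { int id; };

struct task {
	struct group *live_group;	/* flipped early by the cgroup core */
	struct group *copied_group;	/* updated only by the move path    */
};

/* cgroup core, step 1: retarget the live pointer (too early to trust) */
static void cgroup_retarget(struct task *t, struct group *g)
{
	t->live_group = g;
}

/* cgroup core, step 2 (the ->attach() callback): the scheduler refreshes
 * its private copy; in the kernel this runs under pi_lock and rq->lock. */
static void sched_move(struct task *t)
{
	t->copied_group = t->live_group;
}

/* post-fix task_group(): a plain load of the copy, stable under either lock */
static struct group *task_group_of(struct task *t)
{
	return t->copied_group;
}

int main(void)
{
	struct group old_g = { .id = 1 }, new_g = { .id = 2 };
	struct task t = { .live_group = &old_g, .copied_group = &old_g };

	cgroup_retarget(&t, &new_g);
	/* window where the live pointer and the scheduler's view disagree */
	printf("live=%d copy=%d\n", t.live_group->id, task_group_of(&t)->id);

	sched_move(&t);
	printf("live=%d copy=%d\n", t.live_group->id, task_group_of(&t)->id);
	return 0;
}
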