diff options
Diffstat (limited to 'kernel/sched/auto_group.c')
-rw-r--r-- | kernel/sched/auto_group.c | 36 |
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index a5d966cb8891..f1c8fd566246 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c | |||
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) | |||
111 | { | 111 | { |
112 | if (tg != &root_task_group) | 112 | if (tg != &root_task_group) |
113 | return false; | 113 | return false; |
114 | |||
115 | /* | 114 | /* |
116 | * We can only assume the task group can't go away on us if | 115 | * If we race with autogroup_move_group() the caller can use the old |
117 | * autogroup_move_group() can see us on ->thread_group list. | 116 | * value of signal->autogroup but in this case sched_move_task() will |
117 | * be called again before autogroup_kref_put(). | ||
118 | * | ||
119 | * However, there is no way sched_autogroup_exit_task() could tell us | ||
120 | * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. | ||
118 | */ | 121 | */ |
119 | if (p->flags & PF_EXITING) | 122 | if (p->flags & PF_EXITING) |
120 | return false; | 123 | return false; |
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) | |||
122 | return true; | 125 | return true; |
123 | } | 126 | } |
124 | 127 | ||
128 | void sched_autogroup_exit_task(struct task_struct *p) | ||
129 | { | ||
130 | /* | ||
131 | * We are going to call exit_notify() and autogroup_move_group() can't | ||
132 | * see this thread after that: we can no longer use signal->autogroup. | ||
133 | * See the PF_EXITING check in task_wants_autogroup(). | ||
134 | */ | ||
135 | sched_move_task(p); | ||
136 | } | ||
137 | |||
125 | static void | 138 | static void |
126 | autogroup_move_group(struct task_struct *p, struct autogroup *ag) | 139 | autogroup_move_group(struct task_struct *p, struct autogroup *ag) |
127 | { | 140 | { |
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) | |||
138 | } | 151 | } |
139 | 152 | ||
140 | p->signal->autogroup = autogroup_kref_get(ag); | 153 | p->signal->autogroup = autogroup_kref_get(ag); |
141 | 154 | /* | |
142 | if (!READ_ONCE(sysctl_sched_autogroup_enabled)) | 155 | * We can't avoid sched_move_task() after we changed signal->autogroup, |
143 | goto out; | 156 | * this process can already run with task_group() == prev->tg or we can |
144 | 157 | * race with cgroup code which can read autogroup = prev under rq->lock. | |
158 | * In the latter case for_each_thread() can not miss a migrating thread, | ||
159 | * cpu_cgroup_attach() must not be possible after cgroup_exit() and it | ||
160 | * can't be removed from thread list, we hold ->siglock. | ||
161 | * | ||
162 | * If an exiting thread was already removed from thread list we rely on | ||
163 | * sched_autogroup_exit_task(). | ||
164 | */ | ||
145 | for_each_thread(p, t) | 165 | for_each_thread(p, t) |
146 | sched_move_task(t); | 166 | sched_move_task(t); |
147 | out: | 167 | |
148 | unlock_task_sighand(p, &flags); | 168 | unlock_task_sighand(p, &flags); |
149 | autogroup_kref_put(prev); | 169 | autogroup_kref_put(prev); |
150 | } | 170 | } |