about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            19
-rw-r--r--  kernel/cpuset.c            14
-rw-r--r--  kernel/fork.c               5
-rw-r--r--  kernel/nsproxy.c           27
-rw-r--r--  kernel/pid_namespace.c      4
-rw-r--r--  kernel/time/timer_list.c   41
-rw-r--r--  kernel/workqueue.c          9
7 files changed, 79 insertions, 40 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 781845a013ab..e91963302c0d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4480 struct dentry *d = cgrp->dentry; 4480 struct dentry *d = cgrp->dentry;
4481 struct cgroup_event *event, *tmp; 4481 struct cgroup_event *event, *tmp;
4482 struct cgroup_subsys *ss; 4482 struct cgroup_subsys *ss;
4483 struct cgroup *child;
4483 bool empty; 4484 bool empty;
4484 4485
4485 lockdep_assert_held(&d->d_inode->i_mutex); 4486 lockdep_assert_held(&d->d_inode->i_mutex);
@@ -4490,12 +4491,28 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4490 * @cgrp from being removed while __put_css_set() is in progress. 4491 * @cgrp from being removed while __put_css_set() is in progress.
4491 */ 4492 */
4492 read_lock(&css_set_lock); 4493 read_lock(&css_set_lock);
4493 empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children); 4494 empty = list_empty(&cgrp->cset_links);
4494 read_unlock(&css_set_lock); 4495 read_unlock(&css_set_lock);
4495 if (!empty) 4496 if (!empty)
4496 return -EBUSY; 4497 return -EBUSY;
4497 4498
4498 /* 4499 /*
4500 * Make sure there's no live children. We can't test ->children
4501 * emptiness as dead children linger on it while being destroyed;
4502 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
4503 */
4504 empty = true;
4505 rcu_read_lock();
4506 list_for_each_entry_rcu(child, &cgrp->children, sibling) {
4507 empty = cgroup_is_dead(child);
4508 if (!empty)
4509 break;
4510 }
4511 rcu_read_unlock();
4512 if (!empty)
4513 return -EBUSY;
4514
4515 /*
4499 * Block new css_tryget() by killing css refcnts. cgroup core 4516 * Block new css_tryget() by killing css refcnts. cgroup core
4500 * guarantees that, by the time ->css_offline() is invoked, no new 4517 * guarantees that, by the time ->css_offline() is invoked, no new
4501 * css reference will be given out via css_tryget(). We can't 4518 * css reference will be given out via css_tryget(). We can't
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 010a0083c0ae..ea1966db34f2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
475 475
476 /* 476 /*
477 * Cpusets with tasks - existing or newly being attached - can't 477 * Cpusets with tasks - existing or newly being attached - can't
478 * have empty cpus_allowed or mems_allowed. 478 * be changed to have empty cpus_allowed or mems_allowed.
479 */ 479 */
480 ret = -ENOSPC; 480 ret = -ENOSPC;
481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) && 481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
482 (cpumask_empty(trial->cpus_allowed) && 482 if (!cpumask_empty(cur->cpus_allowed) &&
483 nodes_empty(trial->mems_allowed))) 483 cpumask_empty(trial->cpus_allowed))
484 goto out; 484 goto out;
485 if (!nodes_empty(cur->mems_allowed) &&
486 nodes_empty(trial->mems_allowed))
487 goto out;
488 }
485 489
486 ret = 0; 490 ret = 0;
487out: 491out:
diff --git a/kernel/fork.c b/kernel/fork.c
index e23bb19e2a3e..bf46287c91a4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1177 * don't allow the creation of threads. 1177 * don't allow the creation of threads.
1178 */ 1178 */
1179 if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && 1179 if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
1180 (task_active_pid_ns(current) != current->nsproxy->pid_ns)) 1180 (task_active_pid_ns(current) !=
1181 current->nsproxy->pid_ns_for_children))
1181 return ERR_PTR(-EINVAL); 1182 return ERR_PTR(-EINVAL);
1182 1183
1183 retval = security_task_create(clone_flags); 1184 retval = security_task_create(clone_flags);
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1351 1352
1352 if (pid != &init_struct_pid) { 1353 if (pid != &init_struct_pid) {
1353 retval = -ENOMEM; 1354 retval = -ENOMEM;
1354 pid = alloc_pid(p->nsproxy->pid_ns); 1355 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
1355 if (!pid) 1356 if (!pid)
1356 goto bad_fork_cleanup_io; 1357 goto bad_fork_cleanup_io;
1357 } 1358 }
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 364ceab15f0c..997cbb951a3b 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -29,15 +29,15 @@
29static struct kmem_cache *nsproxy_cachep; 29static struct kmem_cache *nsproxy_cachep;
30 30
31struct nsproxy init_nsproxy = { 31struct nsproxy init_nsproxy = {
32 .count = ATOMIC_INIT(1), 32 .count = ATOMIC_INIT(1),
33 .uts_ns = &init_uts_ns, 33 .uts_ns = &init_uts_ns,
34#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) 34#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
35 .ipc_ns = &init_ipc_ns, 35 .ipc_ns = &init_ipc_ns,
36#endif 36#endif
37 .mnt_ns = NULL, 37 .mnt_ns = NULL,
38 .pid_ns = &init_pid_ns, 38 .pid_ns_for_children = &init_pid_ns,
39#ifdef CONFIG_NET 39#ifdef CONFIG_NET
40 .net_ns = &init_net, 40 .net_ns = &init_net,
41#endif 41#endif
42}; 42};
43 43
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
85 goto out_ipc; 85 goto out_ipc;
86 } 86 }
87 87
88 new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns); 88 new_nsp->pid_ns_for_children =
89 if (IS_ERR(new_nsp->pid_ns)) { 89 copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
90 err = PTR_ERR(new_nsp->pid_ns); 90 if (IS_ERR(new_nsp->pid_ns_for_children)) {
91 err = PTR_ERR(new_nsp->pid_ns_for_children);
91 goto out_pid; 92 goto out_pid;
92 } 93 }
93 94
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
100 return new_nsp; 101 return new_nsp;
101 102
102out_net: 103out_net:
103 if (new_nsp->pid_ns) 104 if (new_nsp->pid_ns_for_children)
104 put_pid_ns(new_nsp->pid_ns); 105 put_pid_ns(new_nsp->pid_ns_for_children);
105out_pid: 106out_pid:
106 if (new_nsp->ipc_ns) 107 if (new_nsp->ipc_ns)
107 put_ipc_ns(new_nsp->ipc_ns); 108 put_ipc_ns(new_nsp->ipc_ns);
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
174 put_uts_ns(ns->uts_ns); 175 put_uts_ns(ns->uts_ns);
175 if (ns->ipc_ns) 176 if (ns->ipc_ns)
176 put_ipc_ns(ns->ipc_ns); 177 put_ipc_ns(ns->ipc_ns);
177 if (ns->pid_ns) 178 if (ns->pid_ns_for_children)
178 put_pid_ns(ns->pid_ns); 179 put_pid_ns(ns->pid_ns_for_children);
179 put_net(ns->net_ns); 180 put_net(ns->net_ns);
180 kmem_cache_free(nsproxy_cachep, ns); 181 kmem_cache_free(nsproxy_cachep, ns);
181} 182}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 6917e8edb48e..601bb361c235 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
349 if (ancestor != active) 349 if (ancestor != active)
350 return -EINVAL; 350 return -EINVAL;
351 351
352 put_pid_ns(nsproxy->pid_ns); 352 put_pid_ns(nsproxy->pid_ns_for_children);
353 nsproxy->pid_ns = get_pid_ns(new); 353 nsproxy->pid_ns_for_children = get_pid_ns(new);
354 return 0; 354 return 0;
355} 355}
356 356
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 3bdf28323012..61ed862cdd37 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
265static int timer_list_show(struct seq_file *m, void *v) 265static int timer_list_show(struct seq_file *m, void *v)
266{ 266{
267 struct timer_list_iter *iter = v; 267 struct timer_list_iter *iter = v;
268 u64 now = ktime_to_ns(ktime_get());
269 268
270 if (iter->cpu == -1 && !iter->second_pass) 269 if (iter->cpu == -1 && !iter->second_pass)
271 timer_list_header(m, now); 270 timer_list_header(m, iter->now);
272 else if (!iter->second_pass) 271 else if (!iter->second_pass)
273 print_cpu(m, iter->cpu, iter->now); 272 print_cpu(m, iter->cpu, iter->now);
274#ifdef CONFIG_GENERIC_CLOCKEVENTS 273#ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -298,33 +297,41 @@ void sysrq_timer_list_show(void)
298 return; 297 return;
299} 298}
300 299
301static void *timer_list_start(struct seq_file *file, loff_t *offset) 300static void *move_iter(struct timer_list_iter *iter, loff_t offset)
302{ 301{
303 struct timer_list_iter *iter = file->private; 302 for (; offset; offset--) {
304 303 iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
305 if (!*offset) { 304 if (iter->cpu >= nr_cpu_ids) {
306 iter->cpu = -1;
307 iter->now = ktime_to_ns(ktime_get());
308 } else if (iter->cpu >= nr_cpu_ids) {
309#ifdef CONFIG_GENERIC_CLOCKEVENTS 305#ifdef CONFIG_GENERIC_CLOCKEVENTS
310 if (!iter->second_pass) { 306 if (!iter->second_pass) {
311 iter->cpu = -1; 307 iter->cpu = -1;
312 iter->second_pass = true; 308 iter->second_pass = true;
313 } else 309 } else
314 return NULL; 310 return NULL;
315#else 311#else
316 return NULL; 312 return NULL;
317#endif 313#endif
314 }
318 } 315 }
319 return iter; 316 return iter;
320} 317}
321 318
319static void *timer_list_start(struct seq_file *file, loff_t *offset)
320{
321 struct timer_list_iter *iter = file->private;
322
323 if (!*offset)
324 iter->now = ktime_to_ns(ktime_get());
325 iter->cpu = -1;
326 iter->second_pass = false;
327 return move_iter(iter, *offset);
328}
329
322static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset) 330static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)
323{ 331{
324 struct timer_list_iter *iter = file->private; 332 struct timer_list_iter *iter = file->private;
325 iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
326 ++*offset; 333 ++*offset;
327 return timer_list_start(file, offset); 334 return move_iter(iter, 1);
328} 335}
329 336
330static void timer_list_stop(struct seq_file *seq, void *v) 337static void timer_list_stop(struct seq_file *seq, void *v)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f5d4be22034..e93f7b9067d8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2201,6 +2201,15 @@ __acquires(&pool->lock)
2201 dump_stack(); 2201 dump_stack();
2202 } 2202 }
2203 2203
2204 /*
2205 * The following prevents a kworker from hogging CPU on !PREEMPT
2206 * kernels, where a requeueing work item waiting for something to
2207 * happen could deadlock with stop_machine as such work item could
2208 * indefinitely requeue itself while all other CPUs are trapped in
2209 * stop_machine.
2210 */
2211 cond_resched();
2212
2204 spin_lock_irq(&pool->lock); 2213 spin_lock_irq(&pool->lock);
2205 2214
2206 /* clear cpu intensive status */ 2215 /* clear cpu intensive status */