author     Milosz Tanski <milosz@adfin.com>   2013-09-06 12:41:20 -0400
committer  Milosz Tanski <milosz@adfin.com>   2013-09-06 12:41:20 -0400
commit     cd0a2df681ec2af45f50c555c2a39dc92a4dff71 (patch)
tree       35d2278a9494582025f3dac08feb2266adef6a4d /kernel
parent     c35455791c1131e7ccbf56ea6fbdd562401c2ce2 (diff)
parent     5a6f282a2052bb13171b53f03b34501cf72c33f1 (diff)
Merge tag 'fscache-fixes-for-ceph' into wip-fscache
Patches for Ceph FS-Cache support
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            19
-rw-r--r--  kernel/cpuset.c            20
-rw-r--r--  kernel/fork.c               5
-rw-r--r--  kernel/nsproxy.c           27
-rw-r--r--  kernel/pid_namespace.c      4
-rw-r--r--  kernel/power/qos.c         20
-rw-r--r--  kernel/time/sched_clock.c   2
-rw-r--r--  kernel/time/tick-sched.c    5
-rw-r--r--  kernel/time/timer_list.c   41
-rw-r--r--  kernel/wait.c               3
-rw-r--r--  kernel/workqueue.c          9
11 files changed, 100 insertions, 55 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 781845a013ab..e91963302c0d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	struct dentry *d = cgrp->dentry;
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	struct cgroup *child;
 	bool empty;
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
@@ -4490,12 +4491,28 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	 * @cgrp from being removed while __put_css_set() is in progress.
 	 */
 	read_lock(&css_set_lock);
-	empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
+	empty = list_empty(&cgrp->cset_links);
 	read_unlock(&css_set_lock);
 	if (!empty)
 		return -EBUSY;
 
 	/*
+	 * Make sure there's no live children.  We can't test ->children
+	 * emptiness as dead children linger on it while being destroyed;
+	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
+	 */
+	empty = true;
+	rcu_read_lock();
+	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
+		empty = cgroup_is_dead(child);
+		if (!empty)
+			break;
+	}
+	rcu_read_unlock();
+	if (!empty)
+		return -EBUSY;
+
+	/*
 	 * Block new css_tryget() by killing css refcnts. cgroup core
 	 * guarantees that, by the time ->css_offline() is invoked, no new
 	 * css reference will be given out via css_tryget().  We can't
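The check added above walks an RCU-protected list and stops at the first
live entry. A minimal sketch of that pattern, with hypothetical names
(my_node, my_list, node_is_dead) standing in for the cgroup types:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct my_node {
	struct list_head	link;	/* linked into my_list */
	bool			dead;
};

static bool node_is_dead(struct my_node *n)	/* hypothetical predicate */
{
	return n->dead;
}

/* Return true only if every entry on the list is already dead. */
static bool all_entries_dead(struct list_head *my_list)
{
	struct my_node *pos;
	bool empty = true;

	rcu_read_lock();	/* readers may race with concurrent removal */
	list_for_each_entry_rcu(pos, my_list, link) {
		empty = node_is_dead(pos);
		if (!empty)
			break;		/* found a live entry: -EBUSY case */
	}
	rcu_read_unlock();
	return empty;
}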
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fedd..ea1966db34f2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 
 	/*
 	 * Cpusets with tasks - existing or newly being attached - can't
-	 * have empty cpus_allowed or mems_allowed.
+	 * be changed to have empty cpus_allowed or mems_allowed.
 	 */
 	ret = -ENOSPC;
-	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
-	    (cpumask_empty(trial->cpus_allowed) &&
-	     nodes_empty(trial->mems_allowed)))
-		goto out;
+	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+		if (!cpumask_empty(cur->cpus_allowed) &&
+		    cpumask_empty(trial->cpus_allowed))
+			goto out;
+		if (!nodes_empty(cur->mems_allowed) &&
+		    nodes_empty(trial->mems_allowed))
+			goto out;
+	}
 
 	ret = 0;
 out:
@@ -1608,11 +1612,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
 	struct cpuset *cs = cgroup_cs(cgrp);
 	cpuset_filetype_t type = cft->private;
-	int retval = -ENODEV;
+	int retval = 0;
 
 	mutex_lock(&cpuset_mutex);
-	if (!is_cpuset_online(cs))
+	if (!is_cpuset_online(cs)) {
+		retval = -ENODEV;
 		goto out_unlock;
+	}
 
 	switch (type) {
 	case FILE_CPU_EXCLUSIVE:
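The validate_change() fix above changes the rule from "a populated cpuset
may never have both masks empty" to "a populated cpuset may not *become*
empty in either mask". A standalone restatement of that predicate, with
hypothetical boolean inputs in place of the cpumask/nodemask tests:

/* Reject a config change only if it would newly empty a mask while
 * tasks (or an in-progress attach) still depend on it; the kernel
 * applies this separately to cpus_allowed and mems_allowed. */
static bool change_allowed(bool has_users, bool was_empty, bool now_empty)
{
	if (has_users && !was_empty && now_empty)
		return false;			/* -ENOSPC */
	return true;				/* already-empty stays legal */
}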
diff --git a/kernel/fork.c b/kernel/fork.c
index e23bb19e2a3e..bf46287c91a4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * don't allow the creation of threads.
 	 */
 	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-	    (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+	    (task_active_pid_ns(current) !=
+	     current->nsproxy->pid_ns_for_children))
 		return ERR_PTR(-EINVAL);
 
 	retval = security_task_create(clone_flags);
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	if (pid != &init_struct_pid) {
 		retval = -ENOMEM;
-		pid = alloc_pid(p->nsproxy->pid_ns);
+		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 		if (!pid)
 			goto bad_fork_cleanup_io;
 	}
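The pid_ns -> pid_ns_for_children rename running through this merge
reflects that nsproxy's pid namespace pointer only governs where *new
children* are created; the caller itself never moves. A userspace sketch
(not part of the patch) that exercises exactly the alloc_pid() path
changed above; it needs CAP_SYS_ADMIN, and error handling is trimmed:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWPID) < 0) {	/* sets pid_ns_for_children */
		perror("unshare");
		return 1;
	}
	pid_t child = fork();		/* first child of the new namespace */
	if (child == 0) {
		printf("child sees itself as pid %d\n", getpid()); /* 1 */
		return 0;
	}
	printf("parent is still pid %d in its old namespace\n", getpid());
	waitpid(child, NULL, 0);
	return 0;
}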
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 364ceab15f0c..997cbb951a3b 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -29,15 +29,15 @@
 static struct kmem_cache *nsproxy_cachep;
 
 struct nsproxy init_nsproxy = {
 	.count			= ATOMIC_INIT(1),
 	.uts_ns			= &init_uts_ns,
 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
 	.ipc_ns			= &init_ipc_ns,
 #endif
 	.mnt_ns			= NULL,
-	.pid_ns			= &init_pid_ns,
+	.pid_ns_for_children	= &init_pid_ns,
 #ifdef CONFIG_NET
 	.net_ns			= &init_net,
 #endif
 };
 
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 		goto out_ipc;
 	}
 
-	new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns);
-	if (IS_ERR(new_nsp->pid_ns)) {
-		err = PTR_ERR(new_nsp->pid_ns);
+	new_nsp->pid_ns_for_children =
+		copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
+	if (IS_ERR(new_nsp->pid_ns_for_children)) {
+		err = PTR_ERR(new_nsp->pid_ns_for_children);
 		goto out_pid;
 	}
 
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 	return new_nsp;
 
 out_net:
-	if (new_nsp->pid_ns)
-		put_pid_ns(new_nsp->pid_ns);
+	if (new_nsp->pid_ns_for_children)
+		put_pid_ns(new_nsp->pid_ns_for_children);
 out_pid:
 	if (new_nsp->ipc_ns)
 		put_ipc_ns(new_nsp->ipc_ns);
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
 		put_uts_ns(ns->uts_ns);
 	if (ns->ipc_ns)
 		put_ipc_ns(ns->ipc_ns);
-	if (ns->pid_ns)
-		put_pid_ns(ns->pid_ns);
+	if (ns->pid_ns_for_children)
+		put_pid_ns(ns->pid_ns_for_children);
 	put_net(ns->net_ns);
 	kmem_cache_free(nsproxy_cachep, ns);
 }
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 6917e8edb48e..601bb361c235 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
 	if (ancestor != active)
 		return -EINVAL;
 
-	put_pid_ns(nsproxy->pid_ns);
-	nsproxy->pid_ns = get_pid_ns(new);
+	put_pid_ns(nsproxy->pid_ns_for_children);
+	nsproxy->pid_ns_for_children = get_pid_ns(new);
 	return 0;
 }
 
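pidns_install() above is the kernel side of setns(2) on a pid-namespace
fd: it swaps pid_ns_for_children, so the calling process keeps its own
pid and only future children land in the target namespace. A hedged
userspace illustration (the /proc path and pid are placeholders):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* fd for some other process's pid namespace (placeholder pid) */
	int fd = open("/proc/1234/ns/pid", O_RDONLY);

	if (fd < 0 || setns(fd, CLONE_NEWPID) < 0) {
		perror("setns");
		return 1;
	}
	/* unchanged: setns(CLONE_NEWPID) never migrates the caller */
	printf("still pid %d in the original namespace\n", getpid());
	return 0;
}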
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9c..a394297f8b2f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
 }
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
+static void __pm_qos_update_request(struct pm_qos_request *req,
+				    s32 new_value)
+{
+	trace_pm_qos_update_request(req->pm_qos_class, new_value);
+
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
 /**
  * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
  * @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
 					  struct pm_qos_request,
 					  work);
 
-	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
 }
 
 /**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
 	}
 
 	cancel_delayed_work_sync(&req->work);
-
-	trace_pm_qos_update_request(req->pm_qos_class, new_value);
-	if (new_value != req->node.prio)
-		pm_qos_update_target(
-			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+	__pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
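The refactoring above exists because pm_qos_work_fn() is itself the
delayed work item: routing it through pm_qos_update_request() would make
it call cancel_delayed_work_sync() on the very work currently executing,
which deadlocks. The extracted __pm_qos_update_request() skips the
cancel. A reduced sketch of the pattern, with hypothetical req/update
names around the real workqueue API:

#include <linux/workqueue.h>

struct req {
	struct delayed_work	work;
	int			value;
};

static void __update(struct req *r, int val)
{
	r->value = val;		/* the bare update, no cancellation */
}

static void update(struct req *r, int val)
{
	/* fine from normal context; would self-deadlock from timeout_fn */
	cancel_delayed_work_sync(&r->work);
	__update(r, val);
}

static void timeout_fn(struct work_struct *w)
{
	struct req *r = container_of(to_delayed_work(w), struct req, work);

	__update(r, 0);		/* must NOT call update(): we ARE r->work */
}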
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..0b479a6a22bb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	BUG_ON(bits > 32);
 	WARN_ON(!irqs_disabled());
 	read_sched_clock = read;
-	sched_clock_mask = (1 << bits) - 1;
+	sched_clock_mask = (1ULL << bits) - 1;
 	cd.rate = rate;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
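The small-looking fix above matters because setup_sched_clock() accepts
bits up to 32, and shifting a 32-bit int by its full width is undefined
behaviour in C, so (1 << 32) - 1 does not produce the intended all-ones
mask. A standalone demonstration of the corrected expression:

#include <stdio.h>

int main(void)
{
	int bits = 32;

	/* (1 << bits) would be UB here; 1ULL widens the shift to 64 bits */
	unsigned long long mask = (1ULL << bits) - 1;

	printf("mask = %#llx\n", mask);		/* prints 0xffffffff */
	return 0;
}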
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e77edc97e036..e8a1516cc0a3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
 		 * Don't allow the user to think they can get
 		 * full NO_HZ with this machine.
 		 */
-		WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+		WARN_ONCE(have_nohz_full_mask,
+			  "NO_HZ FULL will not work with unstable sched clock");
 		return false;
 	}
 #endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
 
 void __init tick_nohz_init(void)
 {
-	int cpu;
-
 	if (!have_nohz_full_mask) {
 		if (tick_nohz_init_all() < 0)
 			return;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 3bdf28323012..61ed862cdd37 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
 static int timer_list_show(struct seq_file *m, void *v)
 {
 	struct timer_list_iter *iter = v;
-	u64 now = ktime_to_ns(ktime_get());
 
 	if (iter->cpu == -1 && !iter->second_pass)
-		timer_list_header(m, now);
+		timer_list_header(m, iter->now);
 	else if (!iter->second_pass)
 		print_cpu(m, iter->cpu, iter->now);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -298,33 +297,41 @@ void sysrq_timer_list_show(void)
 		return;
 }
 
-static void *timer_list_start(struct seq_file *file, loff_t *offset)
+static void *move_iter(struct timer_list_iter *iter, loff_t offset)
 {
-	struct timer_list_iter *iter = file->private;
-
-	if (!*offset) {
-		iter->cpu = -1;
-		iter->now = ktime_to_ns(ktime_get());
-	} else if (iter->cpu >= nr_cpu_ids) {
+	for (; offset; offset--) {
+		iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
+		if (iter->cpu >= nr_cpu_ids) {
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
-		if (!iter->second_pass) {
-			iter->cpu = -1;
-			iter->second_pass = true;
-		} else
-			return NULL;
+			if (!iter->second_pass) {
+				iter->cpu = -1;
+				iter->second_pass = true;
+			} else
+				return NULL;
 #else
-		return NULL;
+			return NULL;
 #endif
+		}
 	}
 	return iter;
 }
 
+static void *timer_list_start(struct seq_file *file, loff_t *offset)
+{
+	struct timer_list_iter *iter = file->private;
+
+	if (!*offset)
+		iter->now = ktime_to_ns(ktime_get());
+	iter->cpu = -1;
+	iter->second_pass = false;
+	return move_iter(iter, *offset);
+}
+
 static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)
 {
 	struct timer_list_iter *iter = file->private;
-	iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
 	++*offset;
-	return timer_list_start(file, offset);
+	return move_iter(iter, 1);
 }
 
 static void timer_list_stop(struct seq_file *seq, void *v)
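The rework above restores the seq_file iterator contract that the old
code broke: ->start() must be able to reposition from any *pos, because
userspace may read /proc/timer_list in several chunks, and ->next() must
advance by exactly one element. A minimal sketch of that shape with a
hypothetical iterator over a fixed range:

#include <linux/seq_file.h>

#define DEMO_MAX 8			/* hypothetical element count */

struct demo_iter {
	int pos;			/* -1 = header, 0..DEMO_MAX-1 = items */
};

/* Advance by 'offset' steps; NULL once the sequence is exhausted. */
static void *demo_move(struct demo_iter *it, loff_t offset)
{
	for (; offset; offset--)
		if (++it->pos >= DEMO_MAX)
			return NULL;
	return it;
}

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	struct demo_iter *it = m->private;

	it->pos = -1;			/* always rewind to a known state */
	return demo_move(it, *pos);	/* ...then replay *pos steps */
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return demo_move(m->private, 1);
}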
diff --git a/kernel/wait.c b/kernel/wait.c
index dec68bd4e9d8..d550920e040c 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
 
 /**
  * wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
  *
  * Wake up anyone waiting for the atomic_t to go to zero.
  *
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f5d4be22034..e93f7b9067d8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2201,6 +2201,15 @@ __acquires(&pool->lock)
 		dump_stack();
 	}
 
+	/*
+	 * The following prevents a kworker from hogging CPU on !PREEMPT
+	 * kernels, where a requeueing work item waiting for something to
+	 * happen could deadlock with stop_machine as such work item could
+	 * indefinitely requeue itself while all other CPUs are trapped in
+	 * stop_machine.
+	 */
+	cond_resched();
+
 	spin_lock_irq(&pool->lock);
 
 	/* clear cpu intensive status */
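For context, the kind of (hypothetical) work item the new cond_resched()
protects against - on a !PREEMPT kernel this would otherwise pin its
kworker while stop_machine waits for the CPU:

#include <linux/workqueue.h>

static bool done;			/* set elsewhere when work is over */

static bool condition_met(void)
{
	return READ_ONCE(done);
}

static void poll_fn(struct work_struct *w)
{
	if (!condition_met())
		queue_work(system_wq, w);	/* immediately requeue */
	/* without the cond_resched() in process_one_work(), this
	 * requeue loop never yields the CPU on !PREEMPT kernels */
}

static DECLARE_WORK(poll_work, poll_fn);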