author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2010-05-19 13:12:41 -0400
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2010-05-19 13:12:41 -0400
commit    8d0bc2b456103a34c11e01305cd1aed1cde579e5 (patch)
tree      5e1e6ad55cc9e2b5c5617f6f320114b8cff9e3f3 /kernel
parent    30ba3ead05763b172acaa65ae1be71af2a878940 (diff)
parent    e40152ee1e1c7a63f4777791863215e3faa37a86 (diff)
Merge commit 'v2.6.34' into next
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c            17
-rw-r--r--  kernel/cgroup.c          62
-rw-r--r--  kernel/cgroup_freezer.c   5
-rw-r--r--  kernel/cred.c             4
-rw-r--r--  kernel/fork.c             2
-rw-r--r--  kernel/kexec.c            6
-rw-r--r--  kernel/perf_event.c       2
-rw-r--r--  kernel/power/user.c       2
-rw-r--r--  kernel/profile.c          4
-rw-r--r--  kernel/rcupdate.c        18
-rw-r--r--  kernel/sched.c           18
-rw-r--r--  kernel/sched_debug.c      2
-rw-r--r--  kernel/sys.c              2
-rw-r--r--  kernel/workqueue.c        2
14 files changed, 108 insertions, 38 deletions
diff --git a/kernel/acct.c b/kernel/acct.c
index 24f8c81fc48d..e4c0e1fee9b0 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -353,17 +353,18 @@ restart:
 
 void acct_exit_ns(struct pid_namespace *ns)
 {
-	struct bsd_acct_struct *acct;
+	struct bsd_acct_struct *acct = ns->bacct;
 
-	spin_lock(&acct_lock);
-	acct = ns->bacct;
-	if (acct != NULL) {
-		if (acct->file != NULL)
-			acct_file_reopen(acct, NULL, NULL);
+	if (acct == NULL)
+		return;
 
-		kfree(acct);
-	}
+	del_timer_sync(&acct->timer);
+	spin_lock(&acct_lock);
+	if (acct->file != NULL)
+		acct_file_reopen(acct, NULL, NULL);
 	spin_unlock(&acct_lock);
+
+	kfree(acct);
 }
 
 /*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2769e13980c..6d870f2d1228 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1646,7 +1646,9 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
 int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 {
 	char *start;
-	struct dentry *dentry = rcu_dereference(cgrp->dentry);
+	struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
+						      rcu_read_lock_held() ||
+						      cgroup_lock_is_held());
 
 	if (!dentry || cgrp == dummytop) {
 		/*
@@ -1662,13 +1664,17 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 	*--start = '\0';
 	for (;;) {
 		int len = dentry->d_name.len;
+
 		if ((start -= len) < buf)
 			return -ENAMETOOLONG;
-		memcpy(start, cgrp->dentry->d_name.name, len);
+		memcpy(start, dentry->d_name.name, len);
 		cgrp = cgrp->parent;
 		if (!cgrp)
 			break;
-		dentry = rcu_dereference(cgrp->dentry);
+
+		dentry = rcu_dereference_check(cgrp->dentry,
+					       rcu_read_lock_held() ||
+					       cgroup_lock_is_held());
 		if (!cgrp->parent)
 			continue;
 		if (--start < buf)
@@ -4429,7 +4435,15 @@ __setup("cgroup_disable=", cgroup_disable);
  */
 unsigned short css_id(struct cgroup_subsys_state *css)
 {
-	struct css_id *cssid = rcu_dereference(css->id);
+	struct css_id *cssid;
+
+	/*
+	 * This css_id() can return correct value when somone has refcnt
+	 * on this or this is under rcu_read_lock(). Once css->id is allocated,
+	 * it's unchanged until freed.
+	 */
+	cssid = rcu_dereference_check(css->id,
+			rcu_read_lock_held() || atomic_read(&css->refcnt));
 
 	if (cssid)
 		return cssid->id;
@@ -4439,7 +4453,10 @@ EXPORT_SYMBOL_GPL(css_id);
 
 unsigned short css_depth(struct cgroup_subsys_state *css)
 {
-	struct css_id *cssid = rcu_dereference(css->id);
+	struct css_id *cssid;
+
+	cssid = rcu_dereference_check(css->id,
+			rcu_read_lock_held() || atomic_read(&css->refcnt));
 
 	if (cssid)
 		return cssid->depth;
@@ -4447,15 +4464,36 @@ unsigned short css_depth(struct cgroup_subsys_state *css)
 }
 EXPORT_SYMBOL_GPL(css_depth);
 
+/**
+ * css_is_ancestor - test "root" css is an ancestor of "child"
+ * @child: the css to be tested.
+ * @root: the css supporsed to be an ancestor of the child.
+ *
+ * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
+ * this function reads css->id, this use rcu_dereference() and rcu_read_lock().
+ * But, considering usual usage, the csses should be valid objects after test.
+ * Assuming that the caller will do some action to the child if this returns
+ * returns true, the caller must take "child";s reference count.
+ * If "child" is valid object and this returns true, "root" is valid, too.
+ */
+
 bool css_is_ancestor(struct cgroup_subsys_state *child,
 		    const struct cgroup_subsys_state *root)
 {
-	struct css_id *child_id = rcu_dereference(child->id);
-	struct css_id *root_id = rcu_dereference(root->id);
+	struct css_id *child_id;
+	struct css_id *root_id;
+	bool ret = true;
 
-	if (!child_id || !root_id || (child_id->depth < root_id->depth))
-		return false;
-	return child_id->stack[root_id->depth] == root_id->id;
+	rcu_read_lock();
+	child_id = rcu_dereference(child->id);
+	root_id = rcu_dereference(root->id);
+	if (!child_id
+	    || !root_id
+	    || (child_id->depth < root_id->depth)
+	    || (child_id->stack[root_id->depth] != root_id->id))
+		ret = false;
+	rcu_read_unlock();
+	return ret;
 }
 
 static void __free_css_id_cb(struct rcu_head *head)
@@ -4555,13 +4593,13 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
 {
 	int subsys_id, i, depth = 0;
 	struct cgroup_subsys_state *parent_css, *child_css;
-	struct css_id *child_id, *parent_id = NULL;
+	struct css_id *child_id, *parent_id;
 
 	subsys_id = ss->subsys_id;
 	parent_css = parent->subsys[subsys_id];
 	child_css = child->subsys[subsys_id];
-	depth = css_depth(parent_css) + 1;
 	parent_id = parent_css->id;
+	depth = parent_id->depth;
 
 	child_id = get_new_cssid(ss, depth);
 	if (IS_ERR(child_id))
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index da5e13975531..e5c0244962b0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	 * No lock is needed, since the task isn't on tasklist yet,
 	 * so it can't be moved to another cgroup, which means the
 	 * freezer won't be removed and will be valid during this
-	 * function call.
+	 * function call. Nevertheless, apply RCU read-side critical
+	 * section to suppress RCU lockdep false positives.
 	 */
+	rcu_read_lock();
 	freezer = task_freezer(task);
+	rcu_read_unlock();
 
 	/*
 	 * The root cgroup is non-freezable, so we can skip the
diff --git a/kernel/cred.c b/kernel/cred.c
index e1dbe9eef800..62af1816c235 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -398,6 +398,8 @@ struct cred *prepare_usermodehelper_creds(void)
 
 error:
 	put_cred(new);
+	return NULL;
+
 free_tgcred:
 #ifdef CONFIG_KEYS
 	kfree(tgcred);
@@ -791,8 +793,6 @@ bool creds_are_invalid(const struct cred *cred)
 {
 	if (cred->magic != CRED_MAGIC)
 		return true;
-	if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
-		return true;
 #ifdef CONFIG_SECURITY_SELINUX
 	if (selinux_is_enabled()) {
 		if ((unsigned long) cred->security < PAGE_SIZE)
diff --git a/kernel/fork.c b/kernel/fork.c
index 44b0791b0a2e..4c14942a0ee3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1114,8 +1114,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	p->bts = NULL;
 
-	p->stack_start = stack_start;
-
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 87ebe8adc474..474a84715eac 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1134,11 +1134,9 @@ int crash_shrink_memory(unsigned long new_size)
 
 	free_reserved_phys_range(end, crashk_res.end);
 
-	if (start == end) {
-		crashk_res.end = end;
+	if (start == end)
 		release_resource(&crashk_res);
-	} else
-		crashk_res.end = end - 1;
+	crashk_res.end = end - 1;
 
 unlock:
 	mutex_unlock(&kexec_mutex);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2f3fbf84215a..3d1552d3c12b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4897,7 +4897,7 @@ err_fput_free_put_context:
 
 err_free_put_context:
 	if (err < 0)
-		kfree(event);
+		free_event(event);
 
 err_put_context:
 	if (err < 0)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4d2289626a84..a8c96212bc1b 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		 * User space encodes device types as two-byte values,
 		 * so we need to recode them
 		 */
-		swdev = old_decode_dev(swap_area.dev);
+		swdev = new_decode_dev(swap_area.dev);
 		if (swdev) {
 			offset = swap_area.offset;
 			data->swap = swap_type_of(swdev, offset, NULL);
diff --git a/kernel/profile.c b/kernel/profile.c
index a55d3a367ae8..dfadc5b729f1 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -127,8 +127,10 @@ int __ref profile_init(void)
 		return 0;
 
 	prof_buffer = vmalloc(buffer_bytes);
-	if (prof_buffer)
+	if (prof_buffer) {
+		memset(prof_buffer, 0, buffer_bytes);
 		return 0;
+	}
 
 	free_cpumask_var(prof_cpu_mask);
 	return -ENOMEM;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 63fe25433980..49d808e833b0 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -69,6 +69,13 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
+int debug_lockdep_rcu_enabled(void)
+{
+	return rcu_scheduler_active && debug_locks &&
+	       current->lockdep_recursion == 0;
+}
+EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+
 /**
  * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
  *
@@ -115,3 +122,14 @@ void wakeme_after_rcu(struct rcu_head *head)
 	rcu = container_of(head, struct rcu_synchronize, head);
 	complete(&rcu->completion);
 }
+
+#ifdef CONFIG_PROVE_RCU
+/*
+ * wrapper function to avoid #include problems.
+ */
+int rcu_my_thread_group_empty(void)
+{
+	return thread_group_empty(current);
+}
+EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
+#endif /* #ifdef CONFIG_PROVE_RCU */
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de70..3c2a54f70ffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
@@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;
 
 	/*
 	 * We need to validate that we can do a
 	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;
 
 	rq = cpu_rq(cpu);
 
@@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 9b49db144037..19be00ba6123 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -114,7 +114,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
 	char path[64];
 
+	rcu_read_lock();
 	cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
+	rcu_read_unlock();
 	SEQ_printf(m, " %s", path);
 }
 #endif
diff --git a/kernel/sys.c b/kernel/sys.c
index 6d1a7e0f9d5b..7cb426a58965 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1118,7 +1118,7 @@ DECLARE_RWSEM(uts_sem);
 
 #ifdef COMPAT_UTS_MACHINE
 #define override_architecture(name) \
-	(current->personality == PER_LINUX32 && \
+	(personality(current->personality) == PER_LINUX32 && \
 	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
 		      sizeof(COMPAT_UTS_MACHINE)))
 #else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dee48658805c..5bfb213984b2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -774,7 +774,7 @@ void flush_delayed_work(struct delayed_work *dwork)
 {
 	if (del_timer_sync(&dwork->timer)) {
 		struct cpu_workqueue_struct *cwq;
-		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
 		__queue_work(cwq, &dwork->work);
 		put_cpu();
 	}