Diffstat (limited to 'kernel')

 kernel/cgroup.c         | 16
 kernel/cgroup_freezer.c |  5
 kernel/perf_event.c     |  2
 kernel/rcupdate.c       | 11
 kernel/sched.c          | 18
 kernel/sched_debug.c    |  2
 kernel/workqueue.c      |  2
 7 files changed, 44 insertions(+), 12 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2769e13980c..3a53c771e503 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1646,7 +1646,9 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
 int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 {
         char *start;
-        struct dentry *dentry = rcu_dereference(cgrp->dentry);
+        struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
+                                                      rcu_read_lock_held() ||
+                                                      cgroup_lock_is_held());
 
         if (!dentry || cgrp == dummytop) {
                 /*
@@ -1662,13 +1664,17 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
         *--start = '\0';
         for (;;) {
                 int len = dentry->d_name.len;
+
                 if ((start -= len) < buf)
                         return -ENAMETOOLONG;
-                memcpy(start, cgrp->dentry->d_name.name, len);
+                memcpy(start, dentry->d_name.name, len);
                 cgrp = cgrp->parent;
                 if (!cgrp)
                         break;
-                dentry = rcu_dereference(cgrp->dentry);
+
+                dentry = rcu_dereference_check(cgrp->dentry,
+                                               rcu_read_lock_held() ||
+                                               cgroup_lock_is_held());
                 if (!cgrp->parent)
                         continue;
                 if (--start < buf)
@@ -4555,13 +4561,13 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
 {
         int subsys_id, i, depth = 0;
         struct cgroup_subsys_state *parent_css, *child_css;
-        struct css_id *child_id, *parent_id = NULL;
+        struct css_id *child_id, *parent_id;
 
         subsys_id = ss->subsys_id;
         parent_css = parent->subsys[subsys_id];
         child_css = child->subsys[subsys_id];
-        depth = css_depth(parent_css) + 1;
         parent_id = parent_css->id;
+        depth = parent_id->depth;
 
         child_id = get_new_cssid(ss, depth);
         if (IS_ERR(child_id))
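
Note on the rcu_dereference_check() idiom used in the two cgroup_path() hunks above: its second argument is a lockdep expression naming every condition under which the dereference is legal, so CONFIG_PROVE_RCU only warns when none of them hold (here: neither the RCU read lock nor cgroup_mutex). A minimal sketch of the same idiom follows; the struct, pointer, and mutex are hypothetical and not part of this patch.

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct foo {
        int val;
};

static struct foo *foo_ptr;             /* published with rcu_assign_pointer() */
static DEFINE_MUTEX(foo_mutex);         /* writers hold this mutex */

/* Readers may hold either rcu_read_lock() or foo_mutex. */
static int foo_read_val(void)
{
        struct foo *f;

        f = rcu_dereference_check(foo_ptr,
                                  rcu_read_lock_held() ||
                                  lockdep_is_held(&foo_mutex));
        return f ? f->val : -1;
}

The check mirrors the update-side locking: writers replace foo_ptr under foo_mutex, so holding that mutex makes the unprotected read just as safe as an RCU read-side critical section.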
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index da5e13975531..e5c0244962b0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
          * No lock is needed, since the task isn't on tasklist yet,
          * so it can't be moved to another cgroup, which means the
          * freezer won't be removed and will be valid during this
-         * function call.
+         * function call. Nevertheless, apply RCU read-side critical
+         * section to suppress RCU lockdep false positives.
          */
+        rcu_read_lock();
         freezer = task_freezer(task);
+        rcu_read_unlock();
 
         /*
          * The root cgroup is non-freezable, so we can skip the
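
For context, task_freezer() is roughly the wrapper below (paraphrased from kernel/cgroup_freezer.c of this era). task_subsys_state() is an rcu_dereference() user, which is why the read-side critical section above quiets CONFIG_PROVE_RCU even though the freezer genuinely cannot go away at this point.

static inline struct freezer *task_freezer(struct task_struct *task)
{
        return container_of(task_subsys_state(task, freezer_subsys_id),
                            struct freezer, css);
}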
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2f3fbf84215a..3d1552d3c12b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4897,7 +4897,7 @@ err_fput_free_put_context:
 
 err_free_put_context:
         if (err < 0)
-                kfree(event);
+                free_event(event);
 
 err_put_context:
         if (err < 0)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 03a7ea1579f6..49d808e833b0 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,3 +122,14 @@ void wakeme_after_rcu(struct rcu_head *head)
         rcu = container_of(head, struct rcu_synchronize, head);
         complete(&rcu->completion);
 }
+
+#ifdef CONFIG_PROVE_RCU
+/*
+ * wrapper function to avoid #include problems.
+ */
+int rcu_my_thread_group_empty(void)
+{
+        return thread_group_empty(current);
+}
+EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
+#endif /* #ifdef CONFIG_PROVE_RCU */
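
rcu_my_thread_group_empty() exists so that lockdep conditions written in headers which cannot pull in <linux/sched.h> can still test whether current is single-threaded. A hypothetical consumer (not part of this patch; the object and its field are made up) might look like:

/* Hypothetical header-side macro; "obj" and its "data" member are illustrative only. */
#define my_data_dereference(obj)                                \
        rcu_dereference_check((obj)->data,                      \
                              rcu_read_lock_held() ||           \
                              rcu_my_thread_group_empty())

The idea is that if current has no other threads, nothing else can be updating the per-task field concurrently, so the lockless access is safe and lockdep should stay quiet.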
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de70..3c2a54f70ffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+        /*
+         * Strictly speaking this rcu_read_lock() is not needed since the
+         * task_group is tied to the cgroup, which in turn can never go away
+         * as long as there are tasks attached to it.
+         *
+         * However since task_group() uses task_subsys_state() which is an
+         * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+         */
+        rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
         p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
         p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
         p->rt.rt_rq = task_group(p)->rt_rq[cpu];
         p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+        rcu_read_unlock();
 }
 
 #else
@@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
          * the mutex owner just released it and exited.
          */
         if (probe_kernel_address(&owner->cpu, cpu))
-                goto out;
+                return 0;
 #else
         cpu = owner->cpu;
 #endif
@@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
          * the cpu field may no longer be valid.
          */
         if (cpu >= nr_cpumask_bits)
-                goto out;
+                return 0;
 
         /*
          * We need to validate that we can do a
          * get_cpu() and that we have the percpu area.
          */
         if (!cpu_online(cpu))
-                goto out;
+                return 0;
 
         rq = cpu_rq(cpu);
 
@@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
                 cpu_relax();
         }
-out:
+
         return 1;
 }
 #endif
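
The comment added to set_task_rq() refers to task_group() going through task_subsys_state(). With CONFIG_CGROUP_SCHED that helper is roughly the container_of() wrapper below (paraphrased for illustration, not part of this patch); the rcu_dereference() buried in task_subsys_state() is what CONFIG_PROVE_RCU flags without the surrounding rcu_read_lock()/rcu_read_unlock().

static inline struct task_group *task_group(struct task_struct *p)
{
        return container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
                            struct task_group, css);
}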
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 9b49db144037..19be00ba6123 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -114,7 +114,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
         char path[64];
 
+        rcu_read_lock();
         cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
+        rcu_read_unlock();
         SEQ_printf(m, " %s", path);
 }
 #endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dee48658805c..5bfb213984b2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -774,7 +774,7 @@ void flush_delayed_work(struct delayed_work *dwork)
 {
         if (del_timer_sync(&dwork->timer)) {
                 struct cpu_workqueue_struct *cwq;
-                cwq = wq_per_cpu(keventd_wq, get_cpu());
+                cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
                 __queue_work(cwq, &dwork->work);
                 put_cpu();
         }
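
The old flush_delayed_work() always requeued the work on keventd_wq, which is wrong for delayed work that was queued on a private workqueue; get_wq_data(&dwork->work)->wq recovers the workqueue the work was actually queued on. A hypothetical driver-style sketch of the case the fix matters for (all names are made up):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct delayed_work my_dwork;

static void my_work_fn(struct work_struct *work)
{
        /* deferred processing runs here, on my_wq */
}

static int __init my_init(void)
{
        my_wq = create_singlethread_workqueue("my_wq");
        if (!my_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&my_dwork, my_work_fn);
        queue_delayed_work(my_wq, &my_dwork, HZ);
        return 0;
}

static void __exit my_exit(void)
{
        /* With the fix, a still-pending my_dwork is pushed onto my_wq, not keventd_wq. */
        flush_delayed_work(&my_dwork);
        destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");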