Diffstat (limited to 'kernel')
 kernel/cgroup_freezer.c |  5 ++++-
 kernel/perf_event.c     |  7 ++++++-
 kernel/sched.c          | 18 ++++++++++++++----
 kernel/workqueue.c      |  2 +-
 4 files changed, 25 insertions(+), 7 deletions(-)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index da5e13975531..e5c0244962b0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	 * No lock is needed, since the task isn't on tasklist yet,
 	 * so it can't be moved to another cgroup, which means the
 	 * freezer won't be removed and will be valid during this
-	 * function call.
+	 * function call. Nevertheless, apply RCU read-side critical
+	 * section to suppress RCU lockdep false positives.
 	 */
+	rcu_read_lock();
 	freezer = task_freezer(task);
+	rcu_read_unlock();
 
 	/*
 	 * The root cgroup is non-freezable, so we can skip the
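
With the hunk applied, the touched part of freezer_fork() reads as below. This is a minimal sketch for orientation, not the full function; the declaration of freezer and the rest of the body are assumed from kernel/cgroup_freezer.c:

	struct freezer *freezer;

	/*
	 * No lock is needed: the task isn't on the tasklist yet, so it
	 * can't be moved to another cgroup and the freezer can't go away.
	 * The rcu_read_lock() pair exists only to satisfy CONFIG_PROVE_RCU,
	 * because task_freezer() resolves through task_subsys_state(), an
	 * rcu_dereference() user.
	 */
	rcu_read_lock();
	freezer = task_freezer(task);
	rcu_read_unlock();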
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9dbe8cdaf145..49d8be5a45e3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -342,6 +342,9 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	if (event->state > PERF_EVENT_STATE_OFF)
 		event->state = PERF_EVENT_STATE_OFF;
 
+	if (event->state > PERF_EVENT_STATE_FREE)
+		return;
+
 	/*
 	 * If this was a group event with sibling events then
 	 * upgrade the siblings to singleton events by adding them
@@ -1861,6 +1864,8 @@ int perf_event_release_kernel(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 
+	event->state = PERF_EVENT_STATE_FREE;
+
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 	perf_event_remove_from_context(event);
@@ -5021,7 +5026,7 @@ err_fput_free_put_context:
 
 err_free_put_context:
 	if (err < 0)
-		kfree(event);
+		free_event(event);
 
 err_put_context:
 	if (err < 0)
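
Two of the perf_event.c hunks cooperate: perf_event_release_kernel() now marks the event PERF_EVENT_STATE_FREE before detaching it, and list_del_event() skips the group-sibling upgrade for any event not in that state, so only the release path performs it. The comparison reads correctly because FREE sorts below every other state in the perf_event_active_state enum; the values below are reproduced from the contemporary include/linux/perf_event.h as an assumption for context, FREE being the state this patch introduces:

enum perf_event_active_state {
	PERF_EVENT_STATE_FREE		= -3,	/* introduced by this patch (value assumed) */
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

So event->state > PERF_EVENT_STATE_FREE in list_del_event() means the event has not been claimed for release. The third hunk is independent: the perf_event_open() error path now releases the half-constructed event with free_event() rather than a bare kfree().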
diff --git a/kernel/sched.c b/kernel/sched.c
index b0bbadc24955..b11b80a3eed3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
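
The CONFIG_PROVE_RCU complaint that this set_task_rq() change silences originates one level down, in task_group(). The accessor shape below is reproduced from the 2.6.33-era kernel/sched.c as an assumption for context only; it is not part of the patch:

static inline struct task_group *task_group(struct task_struct *p)
{
	/*
	 * task_subsys_state() performs an rcu_dereference() internally,
	 * which lockdep flags unless it runs under rcu_read_lock().
	 */
	return container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
			    struct task_group, css);
}

Because the cgroup, and with it the task_group, cannot disappear while tasks are attached, the added critical section changes no lifetimes; it only gives lockdep the read-side marker it expects.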
@@ -3737,7 +3747,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3747,14 +3757,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;
 
 	/*
 	 * We need to validate that we can do a
 	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;
 
 	rq = cpu_rq(cpu);
 
@@ -3773,7 +3783,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
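
The mutex_spin_on_owner() hunks change every bail-out from "goto out" (which fell through to the final return 1, telling the caller to keep spinning) into an immediate return 0, so optimistic spinning stops whenever the owner's CPU cannot be validated, for instance because it went offline. Condensed to the decision points visible in the hunks, the function now reads roughly as follows; the spin loop is elided, and the CONFIG_DEBUG_PAGEALLOC guard is assumed from the surrounding source rather than shown in the diff:

int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
	unsigned int cpu;
	struct rq *rq;

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* the mutex owner may just have released the lock and exited */
	if (probe_kernel_address(&owner->cpu, cpu))
		return 0;			/* was: goto out, i.e. return 1 */
#else
	cpu = owner->cpu;
#endif

	/* the cpu field may no longer be valid */
	if (cpu >= nr_cpumask_bits)
		return 0;

	/* we must be able to do get_cpu() and have the percpu area */
	if (!cpu_online(cpu))
		return 0;

	rq = cpu_rq(cpu);

	/* ... spin with cpu_relax() while the owner keeps running ... */

	return 1;
}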
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dee48658805c..5bfb213984b2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -774,7 +774,7 @@ void flush_delayed_work(struct delayed_work *dwork)
 {
 	if (del_timer_sync(&dwork->timer)) {
 		struct cpu_workqueue_struct *cwq;
-		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
 		__queue_work(cwq, &dwork->work);
 		put_cpu();
 	}
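
The flush_delayed_work() fix matters when the delayed work was queued on a workqueue other than the generic events queue: the old code re-queued it on keventd_wq unconditionally, while the new code recovers the owning workqueue from the work item via get_wq_data(). With the hunk applied the function reads roughly as below; the trailing flush_work() call is assumed from the surrounding 2.6.33 source and is not part of this hunk:

void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;

		/*
		 * Re-queue on the workqueue the work was originally aimed
		 * at, not blindly on keventd_wq.
		 */
		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);	/* assumed: unchanged tail of the function */
}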