diff options
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	18

1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de70..3c2a54f70ffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
@@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;
 
 	/*
 	 * We need to validate that we can do a
 	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;
 
 	rq = cpu_rq(cpu);
 
@@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
