diff options
author | Ingo Molnar <mingo@elte.hu> | 2010-05-07 05:30:29 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-05-07 05:30:30 -0400 |
commit | cce913178118b0b36742eb7544c2b38a0c957ee7 (patch) | |
tree | 25a6d7b4e01fea2932e6e2962a75f7a3d8c19a4f /kernel/sched.c | |
parent | d9f599e1e6d019968b35d2dc63074b9e8964fa69 (diff) | |
parent | 4fd38e4595e2f6c9d27732c042a0e16b2753049c (diff) |
Merge branch 'perf/urgent' into perf/core
Merge reason: Resolve patch dependency
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 18 |
1 file changed, 14 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index b0bbadc24955..b11b80a3eed3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p) | |||
323 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ | 323 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ |
324 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) | 324 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) |
325 | { | 325 | { |
326 | /* | ||
327 | * Strictly speaking this rcu_read_lock() is not needed since the | ||
328 | * task_group is tied to the cgroup, which in turn can never go away | ||
329 | * as long as there are tasks attached to it. | ||
330 | * | ||
331 | * However since task_group() uses task_subsys_state() which is an | ||
332 | * rcu_dereference() user, this quiets CONFIG_PROVE_RCU. | ||
333 | */ | ||
334 | rcu_read_lock(); | ||
326 | #ifdef CONFIG_FAIR_GROUP_SCHED | 335 | #ifdef CONFIG_FAIR_GROUP_SCHED |
327 | p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; | 336 | p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; |
328 | p->se.parent = task_group(p)->se[cpu]; | 337 | p->se.parent = task_group(p)->se[cpu]; |
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu) | |||
332 | p->rt.rt_rq = task_group(p)->rt_rq[cpu]; | 341 | p->rt.rt_rq = task_group(p)->rt_rq[cpu]; |
333 | p->rt.parent = task_group(p)->rt_se[cpu]; | 342 | p->rt.parent = task_group(p)->rt_se[cpu]; |
334 | #endif | 343 | #endif |
344 | rcu_read_unlock(); | ||
335 | } | 345 | } |
336 | 346 | ||
337 | #else | 347 | #else |
@@ -3737,7 +3747,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
3737 | * the mutex owner just released it and exited. | 3747 | * the mutex owner just released it and exited. |
3738 | */ | 3748 | */ |
3739 | if (probe_kernel_address(&owner->cpu, cpu)) | 3749 | if (probe_kernel_address(&owner->cpu, cpu)) |
3740 | goto out; | 3750 | return 0; |
3741 | #else | 3751 | #else |
3742 | cpu = owner->cpu; | 3752 | cpu = owner->cpu; |
3743 | #endif | 3753 | #endif |
@@ -3747,14 +3757,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
3747 | * the cpu field may no longer be valid. | 3757 | * the cpu field may no longer be valid. |
3748 | */ | 3758 | */ |
3749 | if (cpu >= nr_cpumask_bits) | 3759 | if (cpu >= nr_cpumask_bits) |
3750 | goto out; | 3760 | return 0; |
3751 | 3761 | ||
3752 | /* | 3762 | /* |
3753 | * We need to validate that we can do a | 3763 | * We need to validate that we can do a |
3754 | * get_cpu() and that we have the percpu area. | 3764 | * get_cpu() and that we have the percpu area. |
3755 | */ | 3765 | */ |
3756 | if (!cpu_online(cpu)) | 3766 | if (!cpu_online(cpu)) |
3757 | goto out; | 3767 | return 0; |
3758 | 3768 | ||
3759 | rq = cpu_rq(cpu); | 3769 | rq = cpu_rq(cpu); |
3760 | 3770 | ||
@@ -3773,7 +3783,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
3773 | 3783 | ||
3774 | cpu_relax(); | 3784 | cpu_relax(); |
3775 | } | 3785 | } |
3776 | out: | 3786 | |
3777 | return 1; | 3787 | return 1; |
3778 | } | 3788 | } |
3779 | #endif | 3789 | #endif |