Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 61
 1 file changed, 14 insertions(+), 47 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b531d7934083..78554dd0d1a4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
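
The comment added in the hunks above documents a common CONFIG_PROVE_RCU pattern: task_group() resolves the group via task_subsys_state(), which is an rcu_dereference() user, so lockdep expects the caller to sit inside an RCU read-side critical section even though the task_group cannot actually go away here. A minimal sketch of that pattern follows; the example_group types and example_group_weight() accessor are hypothetical illustrations, not code from kernel/sched.c.

#include <linux/rcupdate.h>

/* Hypothetical types, for illustration only. */
struct example_group {
	int weight;
};

static struct example_group __rcu *example_group_ptr;

static int example_group_weight(void)
{
	struct example_group *grp;
	int weight;

	/*
	 * rcu_read_lock() exists here purely to satisfy the
	 * rcu_dereference() checking done under CONFIG_PROVE_RCU,
	 * mirroring what the hunk above does around task_group().
	 */
	rcu_read_lock();
	grp = rcu_dereference(example_group_ptr);
	weight = grp ? grp->weight : 0;
	rcu_read_unlock();

	return weight;
}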
@@ -2058,49 +2068,6 @@ static bool migrate_task(struct task_struct *p, int dest_cpu)
 }
 
 /*
- * wait_task_context_switch - wait for a thread to complete at least one
- * context switch.
- *
- * @p must not be current.
- */
-void wait_task_context_switch(struct task_struct *p)
-{
-	unsigned long nvcsw, nivcsw, flags;
-	int running;
-	struct rq *rq;
-
-	nvcsw = p->nvcsw;
-	nivcsw = p->nivcsw;
-	for (;;) {
-		/*
-		 * The runqueue is assigned before the actual context
-		 * switch. We need to take the runqueue lock.
-		 *
-		 * We could check initially without the lock but it is
-		 * very likely that we need to take the lock in every
-		 * iteration.
-		 */
-		rq = task_rq_lock(p, &flags);
-		running = task_running(rq, p);
-		task_rq_unlock(rq, &flags);
-
-		if (likely(!running))
-			break;
-		/*
-		 * The switch count is incremented before the actual
-		 * context switch. We thus wait for two switches to be
-		 * sure at least one completed.
-		 */
-		if ((p->nvcsw - nvcsw) > 1)
-			break;
-		if ((p->nivcsw - nivcsw) > 1)
-			break;
-
-		cpu_relax();
-	}
-}
-
-/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and
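
The deleted helper's comments carry a small counting argument worth keeping in mind: the nvcsw/nivcsw counters are bumped before the context switch itself, so seeing the counter advance by one only proves a switch has started, while an advance of more than one guarantees at least one switch fully completed. A stand-alone sketch of that argument in plain user-space C (hypothetical switch_count counter, not kernel code):

#include <stdio.h>

/* Hypothetical counter that, like p->nvcsw, is incremented *before*
 * the event it counts actually completes. */
static unsigned long switch_count;

static int at_least_one_completed(unsigned long snapshot)
{
	/*
	 * count == snapshot + 1 only proves a switch is in progress;
	 * count >= snapshot + 2 proves the first one finished.
	 */
	return (switch_count - snapshot) > 1;
}

int main(void)
{
	unsigned long snap = switch_count;

	switch_count++;	/* first switch begins... */
	printf("after one increment: %d\n", at_least_one_completed(snap));
	switch_count++;	/* ...second begins, so the first must have ended */
	printf("after two increments: %d\n", at_least_one_completed(snap));
	return 0;
}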
@@ -3724,7 +3691,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3734,14 +3701,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;
 
 	/*
 	 * We need to validate that we can do a
 	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;
 
 	rq = cpu_rq(cpu);
 
@@ -3760,7 +3727,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
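
Taken together, the three mutex_spin_on_owner() hunks drop the shared out: label (which returned 1) and make the bail-out paths return 0 directly, so the function reads as a series of early returns. A rough sketch of the resulting shape, assembled from the hunks above; the CONFIG_DEBUG_PAGEALLOC guard, the local declarations, and the elided spin loop are not visible in the hunks and are assumed here:

int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
	unsigned int cpu;
	struct rq *rq;

#ifdef CONFIG_DEBUG_PAGEALLOC	/* guard assumed; not shown in the hunks */
	/* owner may have exited; probe its cpu field safely */
	if (probe_kernel_address(&owner->cpu, cpu))
		return 0;
#else
	cpu = owner->cpu;
#endif

	/* bail out early instead of jumping to a shared label */
	if (cpu >= nr_cpumask_bits)
		return 0;

	if (!cpu_online(cpu))
		return 0;

	rq = cpu_rq(cpu);

	/* ... spin while the owner keeps running (unchanged loop elided) ... */

	return 1;
}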