diff options
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 13 |
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 76c0e9691fc0..e88689522e66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq) | |||
| 676 | 676 | ||
| 677 | /** | 677 | /** |
| 678 | * runqueue_is_locked | 678 | * runqueue_is_locked |
| 679 | * @cpu: the processor in question. | ||
| 679 | * | 680 | * |
| 680 | * Returns true if the current cpu runqueue is locked. | 681 | * Returns true if the current cpu runqueue is locked. |
| 681 | * This interface allows printk to be called with the runqueue lock | 682 | * This interface allows printk to be called with the runqueue lock |
| @@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
| 2311 | { | 2312 | { |
| 2312 | int cpu, orig_cpu, this_cpu, success = 0; | 2313 | int cpu, orig_cpu, this_cpu, success = 0; |
| 2313 | unsigned long flags; | 2314 | unsigned long flags; |
| 2314 | struct rq *rq; | 2315 | struct rq *rq, *orig_rq; |
| 2315 | 2316 | ||
| 2316 | if (!sched_feat(SYNC_WAKEUPS)) | 2317 | if (!sched_feat(SYNC_WAKEUPS)) |
| 2317 | wake_flags &= ~WF_SYNC; | 2318 | wake_flags &= ~WF_SYNC; |
| @@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
| 2319 | this_cpu = get_cpu(); | 2320 | this_cpu = get_cpu(); |
| 2320 | 2321 | ||
| 2321 | smp_wmb(); | 2322 | smp_wmb(); |
| 2322 | rq = task_rq_lock(p, &flags); | 2323 | rq = orig_rq = task_rq_lock(p, &flags); |
| 2323 | update_rq_clock(rq); | 2324 | update_rq_clock(rq); |
| 2324 | if (!(p->state & state)) | 2325 | if (!(p->state & state)) |
| 2325 | goto out; | 2326 | goto out; |
| @@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
| 2350 | set_task_cpu(p, cpu); | 2351 | set_task_cpu(p, cpu); |
| 2351 | 2352 | ||
| 2352 | rq = task_rq_lock(p, &flags); | 2353 | rq = task_rq_lock(p, &flags); |
| 2354 | |||
| 2355 | if (rq != orig_rq) | ||
| 2356 | update_rq_clock(rq); | ||
| 2357 | |||
| 2353 | WARN_ON(p->state != TASK_WAKING); | 2358 | WARN_ON(p->state != TASK_WAKING); |
| 2354 | cpu = task_cpu(p); | 2359 | cpu = task_cpu(p); |
| 2355 | 2360 | ||
| @@ -3656,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu) | |||
| 3656 | 3661 | ||
| 3657 | /** | 3662 | /** |
| 3658 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. | 3663 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. |
| 3664 | * @sd: The sched_domain whose statistics are to be updated. | ||
| 3659 | * @group: sched_group whose statistics are to be updated. | 3665 | * @group: sched_group whose statistics are to be updated. |
| 3660 | * @this_cpu: Cpu for which load balance is currently performed. | 3666 | * @this_cpu: Cpu for which load balance is currently performed. |
| 3661 | * @idle: Idle status of this_cpu | 3667 | * @idle: Idle status of this_cpu |
| @@ -6718,9 +6724,6 @@ EXPORT_SYMBOL(yield); | |||
| 6718 | /* | 6724 | /* |
| 6719 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so | 6725 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
| 6720 | * that process accounting knows that this is a task in IO wait state. | 6726 | * that process accounting knows that this is a task in IO wait state. |
| 6721 | * | ||
| 6722 | * But don't do that if it is a deliberate, throttling IO wait (this task | ||
| 6723 | * has set its backing_dev_info: the queue against which it should throttle) | ||
| 6724 | */ | 6727 | */ |
| 6725 | void __sched io_schedule(void) | 6728 | void __sched io_schedule(void) |
| 6726 | { | 6729 | { |
