Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c | 33 +++++++++++++++++----------------
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1535f3884b88..e88689522e66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
@@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
         int cpu, orig_cpu, this_cpu, success = 0;
         unsigned long flags;
-        struct rq *rq;
+        struct rq *rq, *orig_rq;
 
         if (!sched_feat(SYNC_WAKEUPS))
                 wake_flags &= ~WF_SYNC;
@@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         this_cpu = get_cpu();
 
         smp_wmb();
-        rq = task_rq_lock(p, &flags);
+        rq = orig_rq = task_rq_lock(p, &flags);
         update_rq_clock(rq);
         if (!(p->state & state))
                 goto out;
@@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         set_task_cpu(p, cpu);
 
         rq = task_rq_lock(p, &flags);
+
+        if (rq != orig_rq)
+                update_rq_clock(rq);
+
         WARN_ON(p->state != TASK_WAKING);
         cpu = task_cpu(p);
 
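Note: after set_task_cpu() the second task_rq_lock() may return a different runqueue whose clock has not been refreshed during this wakeup, so the hunk above updates it before the enqueue path reads rq->clock. A minimal userspace analogue of the re-lock-then-refresh pattern (hypothetical pthread code, not kernel source; names mirror the kernel's for readability):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct rq {
        pthread_mutex_t lock;
        long long clock;                /* cached timestamp, ns */
};

static void update_rq_clock(struct rq *rq)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        rq->clock = ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Wake path: lock the original queue, drop it while a new CPU is
 * chosen, then re-lock whatever queue the task landed on. */
static void wake(struct rq *orig_rq, struct rq *rq)
{
        pthread_mutex_lock(&orig_rq->lock);
        update_rq_clock(orig_rq);
        pthread_mutex_unlock(&orig_rq->lock);   /* task may migrate here */

        pthread_mutex_lock(&rq->lock);
        if (rq != orig_rq)              /* landed on another queue:  */
                update_rq_clock(rq);    /* its cached clock is stale */
        printf("enqueue at clock %lld\n", rq->clock);
        pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
        struct rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct rq b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        wake(&a, &b);                   /* cross-queue wakeup */
        return 0;
}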
@@ -2515,22 +2520,17 @@ void sched_fork(struct task_struct *p, int clone_flags)
         __sched_fork(p);
 
         /*
-         * Make sure we do not leak PI boosting priority to the child.
-         */
-        p->prio = current->normal_prio;
-
-        /*
          * Revert to default priority/policy on fork if requested.
          */
         if (unlikely(p->sched_reset_on_fork)) {
-                if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+                if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
                         p->policy = SCHED_NORMAL;
-
-                if (p->normal_prio < DEFAULT_PRIO)
-                        p->prio = DEFAULT_PRIO;
+                        p->normal_prio = p->static_prio;
+                }
 
                 if (PRIO_TO_NICE(p->static_prio) < 0) {
                         p->static_prio = NICE_TO_PRIO(0);
+                        p->normal_prio = p->static_prio;
                         set_load_weight(p);
                 }
 
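For reference, the priority arithmetic the hunk above relies on: the kernel maps nice values onto static priorities around DEFAULT_PRIO (120), so PRIO_TO_NICE(p->static_prio) < 0 detects a boosted (negative-nice) parent. A standalone sketch of the reset, assuming the 2.6-era constants from include/linux/sched.h:

#include <stdio.h>

/* Mirrors the kernel's macros (include/linux/sched.h, 2.6.x era). */
#define MAX_RT_PRIO             100
#define DEFAULT_PRIO            (MAX_RT_PRIO + 20)      /* 120 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)

int main(void)
{
        int static_prio = NICE_TO_PRIO(-5);     /* boosted parent: 115 */

        printf("parent: static_prio %d (nice %d)\n",
               static_prio, PRIO_TO_NICE(static_prio));

        /* What sched_fork() now does under sched_reset_on_fork: */
        if (PRIO_TO_NICE(static_prio) < 0)
                static_prio = NICE_TO_PRIO(0);  /* back to DEFAULT_PRIO */

        printf("child:  static_prio %d (nice %d)\n",
               static_prio, PRIO_TO_NICE(static_prio));
        return 0;
}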
@@ -2541,6 +2541,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
                 p->sched_reset_on_fork = 0;
         }
 
+        /*
+         * Make sure we do not leak PI boosting priority to the child.
+         */
+        p->prio = current->normal_prio;
+
         if (!rt_prio(p->prio))
                 p->sched_class = &fair_sched_class;
 
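The sched_reset_on_fork path modified above is driven from userspace by ORing SCHED_RESET_ON_FORK (Linux 2.6.32+) into the policy passed to sched_setscheduler(2). A hypothetical demo, assuming root or CAP_SYS_NICE to set SCHED_FIFO:

#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK 0x40000000  /* value from linux/sched.h */
#endif

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };

        /* Parent becomes SCHED_FIFO, but asks the kernel to give its
         * children the default policy/priority on fork. */
        if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
                perror("sched_setscheduler");

        if (fork() == 0) {
                /* Child: reverts to SCHED_OTHER (0), nice 0. */
                printf("child policy:  %d\n", sched_getscheduler(0));
                _exit(0);
        }
        printf("parent policy: %d (SCHED_FIFO is %d)\n",
               sched_getscheduler(0) & ~SCHED_RESET_ON_FORK, SCHED_FIFO);
        wait(NULL);
        return 0;
}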
@@ -2581,8 +2586,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
         BUG_ON(p->state != TASK_RUNNING);
         update_rq_clock(rq);
 
-        p->prio = effective_prio(p);
-
         if (!p->sched_class->task_new || !current->se.on_rq) {
                 activate_task(rq, p, 0);
         } else {
@@ -3658,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -6720,9 +6724,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {