Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 55
1 file changed, 28 insertions(+), 27 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1535f3884b8..a455dca884a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
@@ -1563,11 +1564,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1577,12 +1574,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1617,7 +1614,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1626,11 +1623,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
@@ -1651,7 +1648,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
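Not part of the patch: the hunks above replace a DEFINE_PER_CPU struct
carrying a compile-time NR_CPUS-sized array with a plain pointer into a
dynamically allocated per-cpu region (the allocation itself is in the
sched_init() hunk at the bottom of this diff). Below is a minimal sketch
of the same allocate-then-address pattern, written as a hypothetical
module; demo_weights and the demo_* functions are invented names:

    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    /* Each CPU gets its own scratch array of nr_cpu_ids weights. */
    static unsigned long *demo_weights;

    static int __init demo_init(void)
    {
            unsigned long *w;
            int cpu;

            demo_weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
                                          __alignof__(unsigned long));
            if (!demo_weights)
                    return -ENOMEM;

            cpu = get_cpu();                        /* pin to this CPU */
            w = per_cpu_ptr(demo_weights, cpu);     /* this CPU's copy */
            w[cpu] = 1;                             /* use one slot of it */
            put_cpu();

            return 0;
    }

    static void __exit demo_exit(void)
    {
            free_percpu(demo_weights);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

tg_shares_up() relies on local_irq_save() rather than get_cpu() to stay
on one CPU, but the addressing step is the same per_cpu_ptr() call.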
@@ -2311,7 +2308,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq;
+	struct rq *rq, *orig_rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2319,7 +2316,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	rq = orig_rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2350,6 +2347,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	set_task_cpu(p, cpu);
 
 	rq = task_rq_lock(p, &flags);
+
+	if (rq != orig_rq)
+		update_rq_clock(rq);
+
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
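Note on the three try_to_wake_up() hunks: the first update_rq_clock()
call runs on the runqueue the task was locked to on entry. If
set_task_cpu() migrated the task while the lock was dropped, the second
task_rq_lock() returns a different runqueue whose clock has not been
refreshed on this path, so the new lines bring it up to date; the
rq != orig_rq check skips the redundant call when the task stayed put.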
@@ -2515,22 +2516,17 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	__sched_fork(p);
 
 	/*
-	 * Make sure we do not leak PI boosting priority to the child.
-	 */
-	p->prio = current->normal_prio;
-
-	/*
 	 * Revert to default priority/policy on fork if requested.
 	 */
 	if (unlikely(p->sched_reset_on_fork)) {
-		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
 			p->policy = SCHED_NORMAL;
-
-		if (p->normal_prio < DEFAULT_PRIO)
-			p->prio = DEFAULT_PRIO;
+			p->normal_prio = p->static_prio;
+		}
 
 		if (PRIO_TO_NICE(p->static_prio) < 0) {
 			p->static_prio = NICE_TO_PRIO(0);
+			p->normal_prio = p->static_prio;
 			set_load_weight(p);
 		}
 
@@ -2541,6 +2537,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 		p->sched_reset_on_fork = 0;
 	}
 
+	/*
+	 * Make sure we do not leak PI boosting priority to the child.
+	 */
+	p->prio = current->normal_prio;
+
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
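The two sched_fork() hunks reorder the priority logic: the reset-on-fork
block now also resets normal_prio, and the "do not leak PI boosting"
assignment moves after it, so p->prio = current->normal_prio has the
last word on what the child inherits. A small userspace sketch of the
leak that assignment prevents, using the real prio macros; the struct
and the scenario are illustrative, not kernel code:

    #include <stdio.h>

    /* Real macros from include/linux/sched.h. */
    #define MAX_RT_PRIO             100
    #define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20) /* nice 0 -> 120 */

    /* Illustrative stand-in for the priority fields involved. */
    struct task {
            int static_prio;        /* derived from the nice level */
            int normal_prio;        /* policy/nice priority, never PI-boosted */
            int prio;               /* effective priority, may be PI-boosted */
    };

    int main(void)
    {
            /* A nice-0 parent currently PI-boosted by an rt_mutex. */
            struct task parent = {
                    .static_prio = NICE_TO_PRIO(0),  /* 120 */
                    .normal_prio = NICE_TO_PRIO(0),  /* 120 */
                    .prio        = 60,               /* boosted into RT range */
            };
            struct task child = parent;     /* fork() starts from a copy */

            /* Reset-on-fork handling would adjust child.static_prio and
             * child.normal_prio here, as in the hunk above. */

            /* Last step of sched_fork(): take the un-boosted priority. */
            child.prio = parent.normal_prio;

            printf("child prio = %d\n", child.prio);    /* 120, not 60 */
            return 0;
    }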
@@ -2581,8 +2582,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
 
-	p->prio = effective_prio(p);
-
 	if (!p->sched_class->task_new || !current->se.on_rq) {
 		activate_task(rq, p, 0);
 	} else {
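With sched_fork() now settling the child's priority once, the
effective_prio() recomputation here appears redundant, and removing it
keeps wake_up_new_task() from overriding what sched_fork() chose.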
@@ -3658,6 +3657,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -6720,9 +6720,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -9406,6 +9403,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
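Design note on the allocation above: sizing by nr_cpu_ids, the number of
CPU ids actually possible on the booted system, rather than by the
compile-time NR_CPUS of the old per-cpu struct, means kernels built with
a large NR_CPUS no longer carry NR_CPUS * sizeof(unsigned long) of
static per-cpu data on every CPU; presumably that footprint is what
motivated the change.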