Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 68
 1 file changed, 51 insertions, 17 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e88689522e66..6ae2739b8f19 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
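These two hunks move the #ifdef CONFIG_FAIR_GROUP_SCHED guard up so that root_task_group_empty() is only built when fair-group scheduling is enabled. The resulting nesting, sketched from the context shown above (the function body is elided by the diff and only stubbed here):

#ifdef CONFIG_FAIR_GROUP_SCHED

#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
        return 1;       /* placeholder: real body unchanged, not shown in this hunk */
}
#endif

#ifdef CONFIG_USER_SCHED
# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
#else /* !CONFIG_USER_SCHED */
        /* ... continues as before ... */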
@@ -1564,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-        unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1578,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
                                     unsigned long sd_shares,
                                     unsigned long sd_rq_weight,
-                                    struct update_shares_data *usd)
+                                    unsigned long *usd_rq_weight)
 {
         unsigned long shares, rq_weight;
         int boost = 0;
 
-        rq_weight = usd->rq_weight[cpu];
+        rq_weight = usd_rq_weight[cpu];
         if (!rq_weight) {
                 boost = 1;
                 rq_weight = NICE_0_LOAD;
@@ -1618,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
         unsigned long weight, rq_weight = 0, shares = 0;
-        struct update_shares_data *usd;
+        unsigned long *usd_rq_weight;
         struct sched_domain *sd = data;
         unsigned long flags;
         int i;
@@ -1627,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
                 return 0;
 
         local_irq_save(flags);
-        usd = &__get_cpu_var(update_shares_data);
+        usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
         for_each_cpu(i, sched_domain_span(sd)) {
                 weight = tg->cfs_rq[i]->load.weight;
-                usd->rq_weight[i] = weight;
+                usd_rq_weight[i] = weight;
 
                 /*
                  * If there are currently no tasks on the cpu pretend there
@@ -1652,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
         shares = tg->shares;
 
         for_each_cpu(i, sched_domain_span(sd))
-                update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+                update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
         local_irq_restore(flags);
 
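The hunks above, together with the sched_init() hunk further down, replace the static DEFINE_PER_CPU() struct holding an NR_CPUS-sized rq_weight[] array with a pointer to per-CPU storage allocated at boot and sized by nr_cpu_ids. A minimal sketch of the same pattern, using made-up names (scratch_weights and fill_scratch_weights are illustrative, not from the patch):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>

/* Sketch only: a per-CPU scratch array of nr_cpu_ids counters,
 * mirroring how update_shares_data is used above. */
static __read_mostly unsigned long *scratch_weights;

static int __init scratch_weights_init(void)
{
        /* one nr_cpu_ids-long array for every possible CPU */
        scratch_weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
                                         __alignof__(unsigned long));
        return scratch_weights ? 0 : -ENOMEM;
}

static void fill_scratch_weights(void)
{
        unsigned long *buf;
        int i;

        /* caller keeps irqs disabled, as tg_shares_up() does via
         * local_irq_save(), so smp_processor_id() is stable here */
        buf = per_cpu_ptr(scratch_weights, smp_processor_id());
        for_each_possible_cpu(i)
                buf[i] = 0;     /* stand-in for the real per-rq weight */
}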
@@ -1996,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                 p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+        struct rq *rq = cpu_rq(cpu);
+        unsigned long flags;
+
+        /* Must have done schedule() in kthread() before we set_task_cpu */
+        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+                WARN_ON(1);
+                return;
+        }
+
+        spin_lock_irqsave(&rq->lock, flags);
+        set_task_cpu(p, cpu);
+        p->cpus_allowed = cpumask_of_cpu(cpu);
+        p->rt.nr_cpus_allowed = 1;
+        p->flags |= PF_THREAD_BOUND;
+        spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
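kthread_bind() moves into sched.c here because it writes scheduler state under rq->lock. Its calling convention is unchanged: create the thread (it sits stopped after kthread_create()), bind it while it is still inactive, then wake it. A usage sketch; the thread function and its name are made up for illustration:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Illustrative thread function, not from the patch. */
static int demo_thread_fn(void *data)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();     /* placeholder: sleep until woken or stopped */
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static struct task_struct *start_bound_thread(unsigned int cpu)
{
        struct task_struct *p;

        p = kthread_create(demo_thread_fn, NULL, "demo/%u", cpu);
        if (IS_ERR(p))
                return p;

        /* The thread has not been woken yet, so binding it to a
         * possible-but-maybe-offline CPU is allowed, per the comment above. */
        kthread_bind(p, cpu);
        wake_up_process(p);
        return p;
}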
@@ -2008,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
         /*
          * Buddy candidates are cache hot:
          */
-        if (sched_feat(CACHE_HOT_BUDDY) &&
+        if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
                         (&p->se == cfs_rq_of(&p->se)->next ||
                          &p->se == cfs_rq_of(&p->se)->last))
                 return 1;
@@ -5452,7 +5481,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
@@ -9407,6 +9436,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+        update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+                                            __alignof__(unsigned long));
+#endif
         for_each_possible_cpu(i) {
                 struct rq *rq;
 
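The allocation above gives every possible CPU its own array of nr_cpu_ids weights, sized at boot instead of at compile time via NR_CPUS. Rough footprint, under stated assumptions:

/*
 * Back-of-the-envelope only (assumes a 64-bit build, sizeof(unsigned long) == 8,
 * and a distro kernel built with NR_CPUS=4096 booted on an 8-CPU machine):
 *
 *   old: DEFINE_PER_CPU array of NR_CPUS entries
 *        4096 * 8 bytes = 32 KiB of per-CPU data for each possible CPU
 *   new: __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), ...)
 *        8 * 8 bytes = 64 bytes for each possible CPU,
 *        since nr_cpu_ids is fixed at boot from the detected topology.
 */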
@@ -9532,13 +9565,13 @@ void __init sched_init(void)
         current->sched_class = &fair_sched_class;
 
         /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-        alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+        zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-        alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+        zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
         alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-        alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+        zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
         perf_event_init();
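The conversions from alloc_cpumask_var() to zalloc_cpumask_var() matter when CONFIG_CPUMASK_OFFSTACK=y: there, cpumask_var_t is heap-allocated and alloc_cpumask_var() hands back uninitialized bits, so a mask that is only ever read and OR-ed into must be cleared explicitly. A minimal sketch of the idiom, with an illustrative mask name:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/init.h>

static cpumask_var_t demo_mask;         /* illustrative, not from the patch */

static int __init demo_mask_setup(void)
{
        /*
         * With CONFIG_CPUMASK_OFFSTACK=y this mask is kmalloc'ed;
         * alloc_cpumask_var() would leave its bits uninitialized, so any
         * reader that tests bits before they are explicitly set would see
         * garbage.  zalloc_cpumask_var() returns the mask already cleared.
         */
        if (!zalloc_cpumask_var(&demo_mask, GFP_KERNEL))
                return -ENOMEM;
        return 0;
}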
@@ -10868,6 +10901,7 @@ void synchronize_sched_expedited(void)
                 spin_unlock_irqrestore(&rq->lock, flags);
         }
         rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+        synchronize_sched_expedited_count++;
         mutex_unlock(&rcu_sched_expedited_mutex);
         put_online_cpus();
         if (need_full_sync)
