Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	49
1 file changed, 33 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6625c3c4b10d..b7480fb5c3dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -398,9 +397,9 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -970,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
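The hunk above adds task_rq_unlock_wait(), which lets a caller wait until no CPU still holds the task's runqueue lock; the smp_mb() compensates for spin_unlock_wait() not being a full memory barrier. A minimal sketch of a plausible caller, assuming a task-teardown path (the example function is hypothetical and not part of this patch):

static void example_release_task(struct task_struct *p)
{
	/*
	 * Another CPU may still be inside a task_rq_lock()/unlock()
	 * critical section that dereferences p; wait for rq->lock to
	 * drop before the task_struct can safely be recycled.
	 */
	task_rq_unlock_wait(p);
	put_task_struct(p);	/* assumed final reference drop */
}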
@@ -1446,9 +1453,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
 
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	if (nr_running)
+		rq->avg_load_per_task = rq->load.weight / nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
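The change above reads rq->nr_running exactly once: with the old code, another CPU could drop nr_running to zero between the test and the division, faulting on a divide-by-zero. A self-contained sketch of the same read-once idiom (ACCESS_ONCE modelled with a volatile cast, as in the kernel; the names are illustrative):

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static unsigned long shared_nr_running;	/* updated concurrently elsewhere */

static unsigned long avg_load(unsigned long total_weight)
{
	/* One read feeds both the zero test and the division, so a
	 * concurrent drop to zero cannot fault the division. */
	unsigned long nr = ACCESS_ONCE(shared_nr_running);

	return nr ? total_weight / nr : 0;
}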
@@ -1806,7 +1816,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
@@ -3344,7 +3356,7 @@ small_imbalance:
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (max_load - this_load + 2*busiest_load_per_task >=
+	if (max_load - this_load + busiest_load_per_task >=
 				busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
@@ -5859,6 +5871,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5866,7 +5880,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
@@ -6876,15 +6889,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
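The loop above no longer advances unconditionally: when a degenerate parent is spliced out, tmp stays in place and re-tests the new parent, so a run of consecutive degenerate domains collapses in one pass instead of every other one being skipped. A generic, self-contained sketch of the idiom (simplified, without the child back-pointer fixup):

struct node {
	struct node *parent;
	int degenerate;
};

static void collapse_degenerate_parents(struct node *n)
{
	while (n) {
		struct node *parent = n->parent;

		if (!parent)
			break;

		if (parent->degenerate)
			n->parent = parent->parent;	/* unlink; re-test the new parent */
		else
			n = n->parent;			/* parent kept; walk up */
	}
}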
@@ -7673,6 +7688,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
@@ -7774,13 +7790,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
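Following the reworded comment, a hedged sketch of the documented fallback call, assuming the caller's allocation of a new domains array failed (the wrapper function below is illustrative, not from this patch):

static void example_rebuild_fallback_domains(void)
{
	get_online_cpus();	/* hotplug lock, as required above */
	/* doms_new == NULL && ndoms_new == 1: rebuild onto 'fallback_doms'. */
	partition_sched_domains(1, NULL, NULL);
	put_online_cpus();
}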
