Diffstat (limited to 'kernel/sched.c')

 kernel/sched.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0a4dc3b1300b..2a106b6b78b0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -398,9 +397,9 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
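
Taken together, the two struct cfs_rq hunks above swap the old pair-running bookkeeping (pair_start) for a second buddy pointer: 'next' marks an entity explicitly nominated to run next (typically at wakeup), while 'last' remembers the entity that ran most recently, so the scheduler can hand the CPU back to a preempted, still cache-warm task. The nr_spread_over debug counter is also narrowed to unsigned int, which is ample for its range. A minimal sketch of a buddy-aware picker follows; it is illustrative only, and the in-tree pick_next_entity() in kernel/sched_fair.c additionally rejects a buddy whose vruntime lead would make the choice unfair:

	/* Illustrative sketch, not the code from this patch. */
	static struct sched_entity *pick_next_sketch(struct cfs_rq *cfs_rq)
	{
		struct sched_entity *se = __pick_next_entity(cfs_rq); /* leftmost */

		if (cfs_rq->last)	/* ran last: cache still warm */
			se = cfs_rq->last;
		if (cfs_rq->next)	/* explicitly nominated successor wins */
			se = cfs_rq->next;

		return se;
	}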
@@ -970,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
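
The new task_rq_unlock_wait() helper spins until whoever currently holds the task's runqueue lock drops it, without ever taking the lock itself. The explicit smp_mb() matters because spin_unlock_wait() is not a full memory barrier: without it, accesses issued before the call could be reordered past the sampling of rq->lock. A hedged usage sketch (this caller is an illustration, not part of the hunk) shows the intended pattern of unpublishing an object, waiting out any rq->lock critical section that might still dereference it, and only then freeing it:

	/* Hypothetical caller for illustration only. */
	detach_object(p);		/* unpublish: no new users */
	task_rq_unlock_wait(p);		/* order the store above, then wait
					 * for the current lock holder */
	kfree(obj);			/* safe: no rq->lock section sees it */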
@@ -1806,7 +1813,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
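
Marking both buddies as cache hot means the load balancer's migration filter now leaves the 'last' buddy in place too, preserving its warm cache for when it is picked again. For context, task_hot() is consumed by can_migrate_task(); a simplified sketch of that check (the in-tree version carries more conditions):

	/* Simplified sketch of the consumer, not the full function. */
	if (task_hot(p, rq->clock, sd) &&
	    sd->nr_balance_failed <= sd->cache_nice_tries)
		return 0;	/* too costly to migrate right now */
	return 1;		/* cold enough, or balancing is desperate */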
@@ -3344,7 +3353,7 @@ small_imbalance:
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (max_load - this_load + 2*busiest_load_per_task >=
+	if (max_load - this_load + busiest_load_per_task >=
 				busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
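
This condition decides whether moving a single task of weight busiest_load_per_task is worthwhile. The old 2*busiest_load_per_task slack was aggressive enough to shuffle tasks between queues that were already balanced; the new slack of one task-weight restores the intended threshold. A worked example with made-up numbers, taking imbn == 2, max_load == this_load == 1024 and busiest_load_per_task == 1024:

	old:  0 + 2*1024 = 2048 >= 2048  ->  move a task (queues already even!)
	new:  0 +   1024 = 1024 <  2048  ->  correctly leave them alone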
@@ -6876,15 +6885,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
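
The old loop advanced tmp = tmp->parent unconditionally in the for statement, so after a degenerate parent was spliced out of the chain, the newly adopted parent was skipped without being tested; runs of consecutive degenerate domains could survive. The fix advances only when nothing was removed. The same pattern in generic list form (illustrative code, not from the kernel) may make it clearer:

	/* When the successor is spliced out, stay put so the new
	 * successor is examined too; advance only when keeping it. */
	while (node && node->next) {
		if (should_drop(node->next))
			node->next = node->next->next;	/* splice, don't advance */
		else
			node = node->next;		/* advance */
	}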
@@ -7673,6 +7684,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
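
The added kfree(rd) plugs a memory leak on the failure path: __build_sched_domains() allocates its root_domain up front, and the error exit previously released the sched groups and cpumasks but not that allocation. A simplified sketch of the function's shape (details elided, assuming the usual alloc_rootdomain() entry of this era):

	rd = alloc_rootdomain();
	if (!rd)
		return -ENOMEM;
	/* ... domain setup; on failure: goto error ... */
error:
	free_sched_groups(cpu_map, tmpmask);
	SCHED_CPUMASK_FREE((void *)allmasks);
	kfree(rd);	/* the fix: release the root_domain as well */
	return -ENOMEM;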