Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6625c3c4b10d..57c933ffbee1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -398,7 +397,7 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
 	unsigned long nr_spread_over;
 
@@ -1806,7 +1805,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
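
The three hunks above replace the old pair_start stamp with a second buddy pointer: each cfs_rq now tracks both a 'next' buddy (the entity we would prefer to run next) and a 'last' buddy (the entity that just ran), and task_hot() treats either buddy as cache hot so the load balancer is reluctant to migrate them. The code that actually sets these pointers lives in the fair class (kernel/sched_fair.c) and is outside this kernel/sched.c-only view; a plausible sketch of such setters, assuming the kernel's for_each_sched_entity() and cfs_rq_of() helpers, would be:

static void set_last_buddy(struct sched_entity *se)
{
	/* mark se as the "last" buddy on every cfs_rq up the group hierarchy */
	for_each_sched_entity(se)
		cfs_rq_of(se)->last = se;
}

static void set_next_buddy(struct sched_entity *se)
{
	/* likewise for the "next" buddy */
	for_each_sched_entity(se)
		cfs_rq_of(se)->next = se;
}
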
@@ -3344,7 +3345,7 @@ small_imbalance:
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (max_load - this_load + 2*busiest_load_per_task >=
+	if (max_load - this_load + busiest_load_per_task >=
 				busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
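
The small_imbalance hunk drops the 2* factor, making the "just move one task" shortcut stricter: the load gap plus a single task's load must now reach busiest_load_per_task * imbn on its own. With illustrative numbers, say max_load = 1536, this_load = 1024, busiest_load_per_task = 1024 and imbn = 2: the old test gave 1536 - 1024 + 2*1024 = 2560 >= 2048 and would migrate a whole task to close a gap of only 512, whereas the new test gives 1536 - 1024 + 1024 = 1536 < 2048 and declines.
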
@@ -6876,15 +6877,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
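
The cpu_attach_domain() hunk fixes the degenerate-domain pruning walk. The old loop advanced tmp = tmp->parent on every iteration, so after splicing out one degenerate parent it moved past the newly attached parent without re-testing it against the same child; a run of consecutive degenerate domains was therefore only partially removed. The rewritten loop stays put after a removal and advances only when the parent was kept. A standalone userspace sketch of the same remove-while-iterating pattern on a plain singly linked list (the node type, keep() predicate and values are hypothetical, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

static int keep(const struct node *n)
{
	return n->val % 2 == 0;		/* keep even values only */
}

static void prune_successors(struct node *head)
{
	struct node *cur;

	for (cur = head; cur; ) {
		struct node *next = cur->next;

		if (!next)
			break;

		if (!keep(next)) {
			/* splice out and stay on cur: re-test the new successor */
			cur->next = next->next;
			free(next);
		} else
			cur = cur->next;	/* advance only when nothing was removed */
	}
}

int main(void)
{
	int vals[] = { 2, 5, 3, 8 };
	struct node *head = NULL, **pp = &head;
	size_t i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		*pp = malloc(sizeof(**pp));
		if (!*pp)
			return 1;
		(*pp)->val = vals[i];
		(*pp)->next = NULL;
		pp = &(*pp)->next;
	}

	prune_successors(head);		/* 2 -> 5 -> 3 -> 8 becomes 2 -> 8 */

	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);
	printf("\n");
	return 0;
}

A loop that advanced unconditionally would have skipped the 3 after freeing the 5; staying on the current node until its successor survives is exactly what the new for (tmp = sd; tmp; ) form achieves.
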
@@ -7673,6 +7676,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
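
The final hunk plugs a memory leak in __build_sched_domains(): the root domain rd is allocated earlier in the function, and before this change a failed later allocation jumped to the error label without releasing it. The single-error-label idiom relies on everything freed there being either successfully allocated or still NULL (kfree(NULL), like free(NULL), is a no-op). A minimal userspace sketch of the same shape, with hypothetical struct ctx and ctx_init() names:

#include <errno.h>
#include <stdlib.h>

struct ctx {
	int *a;
	int *b;
};

static int ctx_init(struct ctx *c)
{
	/* start from NULL so the error path can free unconditionally */
	c->a = NULL;
	c->b = NULL;

	c->a = malloc(sizeof(*c->a));
	if (!c->a)
		goto error;

	c->b = malloc(sizeof(*c->b));
	if (!c->b)
		goto error;

	return 0;

error:
	free(c->b);	/* free(NULL) is a no-op */
	free(c->a);
	return -ENOMEM;
}
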