aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-11-07 04:29:58 -0500
committerIngo Molnar <mingo@elte.hu>2008-11-07 04:29:58 -0500
commit258594a138f4ca9adf214f5272592d7f21def610 (patch)
treed97ee71c997b0412f79b9ec4150cb52ce838fe13 /kernel/sched.c
parenta87d091434ed2a34d647979ab12084139ee1fe41 (diff)
parentca3273f9646694e0419cfb9d6c12deb1c9aff27c (diff)
Merge branch 'sched/urgent' into sched/core
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 213cad5e50aa..b24e57a10f6f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,7 +397,7 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
 	unsigned long nr_spread_over;
 
@@ -1785,7 +1785,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
@@ -6832,15 +6834,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
@@ -7629,6 +7633,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }