Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 92992e287b10..204d0662b438 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,9 +399,9 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -971,6 +971,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
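The new task_rq_unlock_wait() lets a path that is about to free task-related state spin until any concurrent holder of that task's runqueue lock has dropped it; the explicit smp_mb() is needed because spin_unlock_wait() by itself is not a full memory barrier. A minimal sketch of how a caller might use it, with free_task_state() as a purely hypothetical placeholder:

/* Illustrative caller only; not part of this diff. free_task_state()
 * is a hypothetical placeholder for whatever per-task state must not
 * be freed while another CPU still holds this task's rq->lock. */
static void example_release_task(struct task_struct *p)
{
	/* Wait until no CPU holds p's runqueue lock any more. */
	task_rq_unlock_wait(p);

	free_task_state(p);	/* hypothetical teardown */
}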
@@ -1450,6 +1458,8 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 	if (rq->nr_running)
 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
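Without the new else branch, rq->avg_load_per_task keeps whatever value it had the last time the runqueue was non-empty, so callers could read a stale average once the queue drains. For reference, the helper reads roughly as follows after the patch; only the if/else is quoted from the hunk, the surrounding lines are assumed from context:

/* Sketch of the updated helper, not a verbatim copy. */
static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->nr_running)
		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
	else
		rq->avg_load_per_task = 0;	/* empty queue: report 0, not a stale value */

	return rq->avg_load_per_task;
}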
@@ -1807,7 +1817,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
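With cfs_rq gaining a last pointer, task_hot() now treats both the next and the last buddy as cache hot, so the load balancer avoids migrating either of them. An equivalent, slightly more readable form of the new test, for illustration only (se_is_buddy() is not part of this diff):

/* Equivalent rewrite of the new buddy test; same logic as the hunk. */
static inline int se_is_buddy(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	return se == cfs_rq->next || se == cfs_rq->last;
}

	/* ...task_hot() could then read: */
	if (sched_feat(CACHE_HOT_BUDDY) && se_is_buddy(&p->se))
		return 1;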
@@ -5874,6 +5886,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5881,7 +5895,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
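Taken together, the two init_idle() hunks move spin_lock_irqsave() from just before the rq->curr/rq->idle assignment to the top of the function, so __sched_fork(), the sched_clock() read and __set_task_cpu() now also run under rq->lock with interrupts disabled. The resulting shape, with lines outside the two hunks paraphrased from context rather than quoted:

void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);	/* now covers the setup below */

	__sched_fork(idle);
	idle->se.exec_start = sched_clock();
	/* ... */
	__set_task_cpu(idle, cpu);

	rq->curr = rq->idle = idle;
	/* ... eventually released with spin_unlock_irqrestore(&rq->lock, flags) ... */
}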
@@ -6891,15 +6904,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
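The reworked loop in cpu_attach_domain() only advances tmp when the parent domain is kept; when a degenerate parent is spliced out, tmp stays where it is, so the newly attached tmp->parent is checked on the next iteration instead of being skipped (the old for-loop increment always moved up a level). An annotated copy of the new loop:

	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			/* Splice 'parent' out; do not advance tmp, so the
			 * new tmp->parent is examined again next pass. */
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
		} else
			tmp = tmp->parent;	/* parent kept: move up one level */
	}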
@@ -7688,6 +7703,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
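The added kfree(rd) releases the root_domain that __build_sched_domains() allocates earlier in the function, which was previously leaked whenever domain/group setup bailed out through the error label. A rough sketch of the alloc/free pairing; the alloc_rootdomain() call and the surrounding structure are assumed from context, not shown in this hunk:

/* Sketch only: names outside the hunk (alloc_rootdomain, tmpmask,
 * allmasks) come from surrounding code this diff does not show. */
	rd = alloc_rootdomain();
	if (!rd)
		return -ENOMEM;
	/* ... build domains and groups; any failure jumps to 'error' ... */
error:
	free_sched_groups(cpu_map, tmpmask);
	SCHED_CPUMASK_FREE((void *)allmasks);
	kfree(rd);	/* new: the root_domain is no longer leaked on failure */
	return -ENOMEM;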