path: root/kernel/sched.c
author    David Vrabel <david.vrabel@csr.com>  2008-11-19 09:48:07 -0500
committer David Vrabel <david.vrabel@csr.com>  2008-11-19 09:48:07 -0500
commit    dba0a918722ee0f0ba3442575e4448c3ab622be4
tree      fdb466cf09e7916135098d651b18924b2fe9ba5f
parent    0996e6382482ce9014787693d3884e9468153a5c
parent    7f0f598a0069d1ab072375965a4b69137233169c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 44 ++++++++++++++++++++++++++++++--------------
 1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6625c3c4b10d..9b1e79371c20 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -398,9 +397,9 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -970,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
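
The hunk above introduces task_rq_unlock_wait(), which issues a full barrier and then spins until any concurrent holder of the task's runqueue lock has released it. A hedged caller sketch, assuming a teardown path that must not free per-task state while another CPU may still hold rq->lock; the function name below is hypothetical and not part of this merge:

#include <linux/sched.h>

/* Hypothetical teardown helper: wait for any holder of p's rq->lock to
 * drop it before freeing state that the holder might still dereference. */
static void example_wait_before_free(struct task_struct *p)
{
	task_rq_unlock_wait(p);	/* smp_mb() + spin_unlock_wait(&rq->lock) */
	/* ...now it is safe to free the per-task state in question... */
}
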
@@ -1449,6 +1456,8 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 	if (rq->nr_running)
 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
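
For context on the hunk above: without the new else branch, cpu_avg_load_per_task() kept returning the last cached average after the runqueue went idle. A minimal, self-contained illustration of that stale-value behaviour (plain C, toy types, names hypothetical, not kernel code):

#include <stdio.h>

/* Toy model of the cached per-cpu average. */
struct toy_rq { unsigned long load_weight, nr_running, avg_load_per_task; };

static unsigned long avg_load(struct toy_rq *rq, int reset_when_idle)
{
	if (rq->nr_running)
		rq->avg_load_per_task = rq->load_weight / rq->nr_running;
	else if (reset_when_idle)
		rq->avg_load_per_task = 0;	/* behaviour added by the patch */
	return rq->avg_load_per_task;
}

int main(void)
{
	struct toy_rq rq = { .load_weight = 2048, .nr_running = 2 };

	avg_load(&rq, 0);			/* caches 1024 */
	rq.load_weight = rq.nr_running = 0;	/* runqueue goes idle */
	printf("old behaviour: %lu\n", avg_load(&rq, 0));	/* stale 1024 */
	printf("new behaviour: %lu\n", avg_load(&rq, 1));	/* 0 */
	return 0;
}
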
@@ -1806,7 +1815,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
@@ -3344,7 +3355,7 @@ small_imbalance:
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (max_load - this_load + 2*busiest_load_per_task >=
+	if (max_load - this_load + busiest_load_per_task >=
 				busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
@@ -5859,6 +5870,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5866,7 +5879,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
@@ -6876,15 +6888,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
@@ -7673,6 +7687,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
@@ -7774,13 +7789,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
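
Per the updated comment, a caller whose allocation of a new domains array failed can still force a rebuild against the single default partition. A hedged sketch of that fallback call, assuming the 2.6.28-era signature partition_sched_domains(ndoms_new, doms_new, dattr_new); the wrapper name is hypothetical:

#include <linux/cpu.h>
#include <linux/sched.h>

/* Hypothetical wrapper: doms_new == NULL && ndoms_new == 1 makes
 * partition_sched_domains() fall back to 'fallback_doms' and forces
 * the sched domains to be rebuilt. */
static void example_rebuild_default_domains(void)
{
	get_online_cpus();		/* "Call with hotplug lock held" */
	partition_sched_domains(1, NULL, NULL);
	put_online_cpus();
}
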