author	David S. Miller <davem@davemloft.net>	2008-11-11 18:43:02 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-11 18:43:02 -0500
commit	7e452baf6b96b5aeba097afd91501d33d390cc97 (patch)
tree	9b0e062d3677d50d731ffd0fba47423bfdee9253 /kernel/sched.c
parent	3ac38c3a2e7dac3f8f35a56eb85c27881a4c3833 (diff)
parent	f21f237cf55494c3a4209de323281a3b0528da10 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/message/fusion/mptlan.c
	drivers/net/sfc/ethtool.c
	net/mac80211/debugfs_sta.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 82cc839c9210..50a21f964679 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,7 +399,7 @@ struct cfs_rq {
 	 */
 	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
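
The first hunk narrows nr_spread_over from unsigned long to unsigned int. The commit does not state a rationale; one plausible reading (an assumption, not taken from this patch) is that the value is a debug counter that never needs 64 bits, and a 32-bit field can pack against a neighbouring int member on LP64 targets. A minimal userspace sketch of that packing effect, with illustrative struct names:

#include <stdio.h>

/* Illustrative layouts only; these are not the kernel structs. */
struct counters_long { unsigned long nr_spread_over; unsigned int other; };
struct counters_int  { unsigned int  nr_spread_over; unsigned int other; };

int main(void)
{
	/*
	 * On LP64, the unsigned long version occupies 16 bytes
	 * (8 for the counter, 4 for 'other', 4 of tail padding),
	 * while the unsigned int version packs both into 8 bytes.
	 */
	printf("unsigned long counter: %zu bytes\n", sizeof(struct counters_long));
	printf("unsigned int  counter: %zu bytes\n", sizeof(struct counters_int));
	return 0;
}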
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
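
The new task_rq_unlock_wait() never takes the runqueue lock; it only waits until any current holder has released it, and the preceding smp_mb() compensates for spin_unlock_wait() not being a full memory barrier. A userspace analogue of the pattern, using C11 atomics instead of the kernel primitives (the toy_* names are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

/* Toy spinlock mirroring the pattern; not the kernel implementation. */
typedef struct { atomic_int locked; } toy_spinlock_t;

static void toy_lock(toy_spinlock_t *l)
{
	int expected = 0;
	while (!atomic_compare_exchange_weak(&l->locked, &expected, 1))
		expected = 0;	/* spin until we take the lock */
}

static void toy_unlock(toy_spinlock_t *l)
{
	atomic_store(&l->locked, 0);
}

/*
 * Analogue of spin_unlock_wait(): do not take the lock, just wait
 * until whoever currently holds it has dropped it.
 */
static void toy_unlock_wait(toy_spinlock_t *l)
{
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() in the hunk */
	while (atomic_load(&l->locked))
		;	/* spin until the holder releases */
}

int main(void)
{
	toy_spinlock_t l = { 0 };
	toy_lock(&l);
	toy_unlock(&l);
	toy_unlock_wait(&l);	/* returns immediately: lock is free */
	puts("lock observed free");
	return 0;
}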
@@ -6877,15 +6885,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
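
The loop change fixes the walk that unlinks degenerate sched domains: the old version advanced tmp = tmp->parent even after splicing a parent out, so the newly attached parent was never re-checked against tmp; the new version only advances when nothing was removed, so a run of consecutive degenerate parents is fully pruned. A standalone sketch of the same pruning logic on a toy parent-linked list (the node type and its degenerate flag stand in for struct sched_domain and sd_parent_degenerate()):

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for struct sched_domain's parent/child links. */
struct node {
	const char *name;
	bool degenerate;	/* stand-in for sd_parent_degenerate() */
	struct node *parent, *child;
};

/* Mirrors the fixed loop: only advance when nothing was spliced out. */
static void prune(struct node *sd)
{
	struct node *tmp;

	for (tmp = sd; tmp; ) {
		struct node *parent = tmp->parent;
		if (!parent)
			break;

		if (parent->degenerate) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/* do NOT advance: re-check the new parent next pass */
		} else
			tmp = tmp->parent;
	}
}

int main(void)
{
	/* base -> a (degenerate) -> b (degenerate) -> top */
	struct node top  = { "top",  false, NULL, NULL };
	struct node b    = { "b",    true,  &top, NULL };
	struct node a    = { "a",    true,  &b,   NULL };
	struct node base = { "base", false, &a,   NULL };
	top.child = &b; b.child = &a; a.child = &base;

	prune(&base);

	for (struct node *n = &base; n; n = n->parent)
		printf("%s\n", n->name);	/* prints: base, top */
	return 0;
}

With the old always-advance loop, only one of the two consecutive degenerate parents would be removed per walk; the fixed loop removes both in a single pass.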
@@ -7674,6 +7684,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
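
The kfree(rd) addition plugs a leak on the error path of __build_sched_domains(): the root domain allocated earlier in the function was not released when a later allocation failed. A minimal userspace sketch of the same goto-based cleanup idiom (names are illustrative, not the kernel code):

#include <stdlib.h>

struct root_domain { int refcount; };	/* illustrative stand-in */

static int build_domains(void)
{
	struct root_domain *rd = malloc(sizeof(*rd));
	void *masks;

	if (!rd)
		return -1;

	masks = malloc(4096);	/* stand-in for the later cpumask allocations */
	if (!masks)
		goto error;

	free(masks);
	free(rd);
	return 0;

error:
	free(rd);	/* without this, rd leaks -- the bug the hunk fixes */
	return -1;
}

int main(void)
{
	return build_domains() ? 1 : 0;
}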