author		Ingo Molnar <mingo@elte.hu>	2010-11-26 09:03:27 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-26 09:05:21 -0500
commit		22a867d81707b0a2720bb5f65255265b95d30526 (patch)
tree		7ec19b155b50b13ae95244c2bfa16aea4920c4f6 /kernel/sched.c
parent		5bb6b1ea67a73f0665a41726dd7138977b992c6c (diff)
parent		3561d43fd289f590fdae672e5eb831b8d5cf0bf6 (diff)
Merge commit 'v2.6.37-rc3' into sched/core
Merge reason: Pick up latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 324afce0e223..3e8a7db951a6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -557,18 +557,8 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
-	/*
-	 * A queue event has occurred, and we're going to schedule.  In
-	 * this case, we can save a useless back to back clock update.
-	 */
-	if (test_tsk_need_resched(p))
-		rq->skip_clock_update = 1;
-}
 
 static inline int cpu_of(struct rq *rq)
 {
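
The hunk above reduces check_preempt_curr() to a bare forward declaration; its body reappears, extended with cross-class handling, in the next hunk further down the file.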
@@ -1980,6 +1970,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+	const struct sched_class *class;
+
+	if (p->sched_class == rq->curr->sched_class) {
+		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+	} else {
+		for_each_class(class) {
+			if (class == rq->curr->sched_class)
+				break;
+			if (class == p->sched_class) {
+				resched_task(rq->curr);
+				break;
+			}
+		}
+	}
+
+	/*
+	 * A queue event has occurred, and we're going to schedule.  In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(rq->curr))
+		rq->skip_clock_update = 1;
+}
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
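
The relocated check_preempt_curr() now resolves wakeup preemption across scheduling classes itself: a same-class wakeup is still delegated to the class's own check_preempt_curr hook, while for a cross-class wakeup for_each_class() walks the classes in priority order, so resched_task() fires only if the waking task's class is reached before the running task's. A minimal userspace sketch of that ordering rule follows (plain C, not kernel code; the classes[] table, cross_class_preempts() and the class names are illustrative stand-ins for the kernel's sched_class list):

#include <stdio.h>

struct sched_class_demo { const char *name; };

/* Highest-priority class first, mirroring for_each_class() order. */
static const struct sched_class_demo classes[] = {
	{ "rt" }, { "fair" }, { "idle" },
};
#define NCLASSES (sizeof(classes) / sizeof(classes[0]))

/* 1 if a waking task of class 'waking' outranks the running 'cur'. */
static int cross_class_preempts(const struct sched_class_demo *cur,
				const struct sched_class_demo *waking)
{
	for (size_t i = 0; i < NCLASSES; i++) {
		if (&classes[i] == cur)
			return 0;	/* current class seen first: no preempt */
		if (&classes[i] == waking)
			return 1;	/* waker's class seen first: preempt */
	}
	return 0;
}

int main(void)
{
	const struct sched_class_demo *rt = &classes[0];
	const struct sched_class_demo *fair = &classes[1];

	/* An RT task wakes while a fair (CFS) task runs: prints 1. */
	printf("%s preempts %s: %d\n", rt->name, fair->name,
	       cross_class_preempts(fair, rt));
	/* A fair task wakes while an RT task runs: prints 0. */
	printf("%s preempts %s: %d\n", fair->name, rt->name,
	       cross_class_preempts(rt, fair));
	return 0;
}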
@@ -6737,6 +6752,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	if (cpu != group_first_cpu(sd->groups))
 		return;
 
+	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
 	child = sd->child;
 
 	sd->groups->cpu_power = 0;
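
The final hunk stores the group's CPU count in sd->groups->group_weight while the sched domains are being set up, letting later load-balancing code read the cached weight rather than recounting the cpumask each time. A toy illustration of the same caching pattern (userspace C; group_demo, mask_weight() and the plain unsigned long mask are made-up stand-ins for struct sched_group and struct cpumask):

#include <stdio.h>

struct group_demo {
	unsigned long cpumask;		/* stand-in for struct cpumask */
	unsigned int group_weight;	/* cached bit count of cpumask */
};

/* Counting mask bits is O(width), so do it once, not on every use.
 * __builtin_popcountl is a GCC/Clang builtin. */
static unsigned int mask_weight(unsigned long mask)
{
	return (unsigned int)__builtin_popcountl(mask);
}

int main(void)
{
	struct group_demo g = { .cpumask = 0xF0UL };	/* CPUs 4-7 */

	/* Computed once at build time, as init_sched_groups_power() does. */
	g.group_weight = mask_weight(g.cpumask);

	/* Hot paths then read the cached value instead of recounting. */
	printf("group weight: %u\n", g.group_weight);	/* prints 4 */
	return 0;
}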