path: root/kernel/sched.c
author     Christoph Lameter <clameter@sgi.com>    2006-12-10 05:20:23 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>    2006-12-10 12:55:42 -0500
commit     e418e1c2bf1a253916b569370653414eb28597b6 (patch)
tree       cecaba4e3ce408bd4eeaff33abec2d73d99dba9c /kernel/sched.c
parent     7835b98bc6de2ca10afa45572d272304b000b048 (diff)
[PATCH] sched: move idle status calculation into rebalance_tick()
Perform the idle state determination in rebalance_tick(). If we separate
balancing from sched_tick() then we also need to determine the idle state
in rebalance_tick().

V2->V3: Remove the useless idle != 0 check. Checking nr_running seems to
be sufficient. Thanks Suresh.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
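Before the diff itself, a minimal self-contained C sketch of the idea described above; the cut-down struct rq, the two enum idle_type values and the main() driver here are illustrative stand-ins rather than the kernel's real definitions. The point is that the caller no longer works out or passes the idle state: rebalance_tick() derives it from nr_running alone, which holds even while the CPU is running its idle thread, because that thread is not counted in nr_running.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's types (not the real definitions). */
enum idle_type { SCHED_IDLE, NOT_IDLE };

struct rq {
        unsigned long nr_running;       /* runnable tasks; the idle thread is not counted */
};

/* After the patch: the idle state is determined here, not by the caller. */
static void rebalance_tick(int this_cpu, struct rq *this_rq)
{
        enum idle_type idle = !this_rq->nr_running ? SCHED_IDLE : NOT_IDLE;

        printf("cpu %d: balancing as %s\n", this_cpu,
               idle == SCHED_IDLE ? "SCHED_IDLE" : "NOT_IDLE");
}

int main(void)
{
        struct rq busy  = { .nr_running = 3 };
        struct rq quiet = { .nr_running = 0 };

        rebalance_tick(0, &busy);       /* prints: cpu 0: balancing as NOT_IDLE */
        rebalance_tick(1, &quiet);      /* prints: cpu 1: balancing as SCHED_IDLE */
        return 0;
}

The actual patch additionally drops the now-unneeded return value of wake_priority_sleeper() and the idle bookkeeping in scheduler_tick(), as the diff below shows.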
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c1b74aa1b07..14a8d9050cd4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2867,10 +2867,16 @@ static void update_load(struct rq *this_rq)
  */
 
 static void
-rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
+rebalance_tick(int this_cpu, struct rq *this_rq)
 {
         unsigned long interval;
         struct sched_domain *sd;
+        /*
+         * We are idle if there are no processes running. This
+         * is valid even if we are the idle process (SMT).
+         */
+        enum idle_type idle = !this_rq->nr_running ?
+                                SCHED_IDLE : NOT_IDLE;
 
         for_each_domain(this_cpu, sd) {
                 if (!(sd->flags & SD_LOAD_BALANCE))
@@ -2902,37 +2908,26 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 /*
  * on UP we do not need to balance between CPUs:
  */
-static inline void rebalance_tick(int cpu, struct rq *rq)
-{
-}
 static inline void idle_balance(int cpu, struct rq *rq)
 {
 }
-static inline void update_load(struct rq *this_rq)
-{
-}
 #endif
 
-static inline int wake_priority_sleeper(struct rq *rq)
+static inline void wake_priority_sleeper(struct rq *rq)
 {
-        int ret = 0;
-
 #ifdef CONFIG_SCHED_SMT
         if (!rq->nr_running)
-                return 0;
+                return;
 
         spin_lock(&rq->lock);
         /*
          * If an SMT sibling task has been put to sleep for priority
          * reasons reschedule the idle task to see if it can now run.
          */
-        if (rq->nr_running) {
+        if (rq->nr_running)
                 resched_task(rq->idle);
-                ret = 1;
-        }
         spin_unlock(&rq->lock);
 #endif
-        return ret;
 }
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -3148,20 +3143,20 @@ void scheduler_tick(void)
         struct task_struct *p = current;
         int cpu = smp_processor_id();
         struct rq *rq = cpu_rq(cpu);
-        enum idle_type idle = NOT_IDLE;
 
         update_cpu_clock(p, rq, now);
 
         rq->timestamp_last_tick = now;
 
-        if (p == rq->idle) {
+        if (p == rq->idle)
                 /* Task on the idle queue */
-                if (!wake_priority_sleeper(rq))
-                        idle = SCHED_IDLE;
-        } else
+                wake_priority_sleeper(rq);
+        else
                 task_running_tick(rq, p);
+#ifdef CONFIG_SMP
         update_load(rq);
-        rebalance_tick(cpu, rq, idle);
+        rebalance_tick(cpu, rq);
+#endif
 }
 
 #ifdef CONFIG_SCHED_SMT