author     Chen Shang <shangcs@gmail.com>               2005-06-25 17:57:31 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 19:24:44 -0400
commit     a3464a102a69a4e00efb0a763e274ce290995b4b
tree       63a9301d4a02dfcefd8dff70f033c634aa93bb2f /kernel
parent     77391d71681d05d2f4502f91ad62618522abf624
[PATCH] sched: micro-optimize task requeueing in schedule()
micro-optimize task requeueing in schedule() & clean up recalc_task_prio().

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
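In short: recalc_task_prio() now returns the recalculated priority instead of writing it into the task, so schedule() can compare the result with the task's current priority and skip the dequeue_task()/enqueue_task() pair whenever nothing changed, calling the cheaper requeue_task() instead. Below is a minimal, self-contained sketch of that control flow only; the toy_* types, the reinsert() helper and the bonus rule inside the toy recalc_task_prio() are invented for illustration and are not the kernel's data structures.

#include <stdio.h>
#include <string.h>

#define TOY_PRIOS 8

struct toy_task {
        int prio;                       /* index into struct toy_array.queue[] */
        struct toy_task *next;
};

struct toy_array {
        struct toy_task *queue[TOY_PRIOS];      /* one FIFO per priority */
};

/* append p to the tail of its priority list */
static void enqueue_task(struct toy_task *p, struct toy_array *a)
{
        struct toy_task **pp = &a->queue[p->prio];

        while (*pp)
                pp = &(*pp)->next;
        p->next = NULL;
        *pp = p;
}

/* unlink p from its priority list */
static void dequeue_task(struct toy_task *p, struct toy_array *a)
{
        struct toy_task **pp = &a->queue[p->prio];

        while (*pp && *pp != p)
                pp = &(*pp)->next;
        if (*pp)
                *pp = p->next;
        p->next = NULL;
}

/*
 * Move p to the tail of the list it is already on.  This toy version just
 * reuses dequeue/enqueue; it only models the fast path of the patch.
 */
static void requeue_task(struct toy_task *p, struct toy_array *a)
{
        dequeue_task(p, a);
        enqueue_task(p, a);
}

/*
 * Stand-in for the patched recalc_task_prio(): it returns the new priority
 * and writes nothing.  The "bonus" rule is invented purely for this demo.
 */
static int recalc_task_prio(struct toy_task *p, unsigned long long sleep_ns)
{
        int bonus = sleep_ns > 100 ? 1 : 0;
        int prio = p->prio - bonus;

        return prio < 0 ? 0 : prio;
}

/* mirrors the new hunk in schedule(): only switch lists if the prio changed */
static void reinsert(struct toy_task *next, struct toy_array *array,
                     unsigned long long sleep_ns)
{
        int new_prio = recalc_task_prio(next, sleep_ns);

        if (next->prio != new_prio) {
                dequeue_task(next, array);      /* slow path: list changes */
                next->prio = new_prio;
                enqueue_task(next, array);
        } else {
                requeue_task(next, array);      /* fast path: same list */
        }
}

int main(void)
{
        struct toy_array rq;
        struct toy_task a = { 3, NULL }, b = { 3, NULL };

        memset(&rq, 0, sizeof(rq));
        enqueue_task(&a, &rq);
        enqueue_task(&b, &rq);

        reinsert(&a, &rq, 50);          /* unchanged prio: requeue only */
        reinsert(&b, &rq, 500);         /* prio drops 3 -> 2: dequeue/enqueue */

        printf("a: prio %d, b: prio %d\n", a.prio, b.prio);
        return 0;
}

In the real O(1) scheduler the fast path matters because requeue_task() is a plain list move within the same priority list, while dequeue_task()/enqueue_task() also update the run queue's priority bitmap and accounting.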
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c   19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6e452eb95ac3..a3d1c8e43d34 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -673,7 +673,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
         rq->nr_running++;
 }
 
-static void recalc_task_prio(task_t *p, unsigned long long now)
+static int recalc_task_prio(task_t *p, unsigned long long now)
 {
         /* Caller must always ensure 'now >= p->timestamp' */
         unsigned long long __sleep_time = now - p->timestamp;
@@ -732,7 +732,7 @@ static void recalc_task_prio(task_t *p, unsigned long long now)
                 }
         }
 
-        p->prio = effective_prio(p);
+        return effective_prio(p);
 }
 
 /*
@@ -755,7 +755,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
         }
 #endif
 
-        recalc_task_prio(p, now);
+        p->prio = recalc_task_prio(p, now);
 
         /*
          * This checks to make sure it's not an uninterruptible task
@@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
         struct list_head *queue;
         unsigned long long now;
         unsigned long run_time;
-        int cpu, idx;
+        int cpu, idx, new_prio;
 
         /*
          * Test if we are atomic. Since do_exit() needs to call into
@@ -2873,9 +2873,14 @@ go_idle:
                         delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 
                 array = next->array;
-                dequeue_task(next, array);
-                recalc_task_prio(next, next->timestamp + delta);
-                enqueue_task(next, array);
+                new_prio = recalc_task_prio(next, next->timestamp + delta);
+
+                if (unlikely(next->prio != new_prio)) {
+                        dequeue_task(next, array);
+                        next->prio = new_prio;
+                        enqueue_task(next, array);
+                } else
+                        requeue_task(next, array);
         }
         next->activated = 0;
 switch_tasks: