author:    Christoph Lameter <clameter@sgi.com>  2006-12-10 05:20:27 -0500
committer: Linus Torvalds <torvalds@woody.osdl.org>  2006-12-10 12:55:43 -0500
commit:    1bd77f2da58e9cdd1f159217887343dadd9af417 (patch)
tree:      91104dab07ea6bb7a7fff9da16ac2ba41bba1686 /kernel/sched.c
parent:    c9819f4593e8d052b41a89f47140f5c5e7e30582 (diff)
[PATCH] sched: call tasklet less frequently
Trigger softirq less frequently.

We trigger the softirq before this patch using an offset of sd->interval.
However, if the queue is busy then it is sufficient to schedule the softirq
with sd->interval * busy_factor.

So we modify the calculation of the next time to balance by taking the
interval added to last_balance again. This is only the right value if the
idle/busy situation continues as is.

There are two potential trouble spots:

- If the queue was idle and now gets busy then we call rebalance early.
  However, that is not a problem because we will then use the longer
  interval for the next period.

- If the queue was busy and becomes idle then we potentially wait too long
  before rebalancing. However, when the task goes idle then idle_balance is
  called. We add another calculation of the next balance time based on
  sd->interval in idle_balance so that we will rebalance soon.

V2->V3:
- Calculate the rebalance time based on current jiffies and not based on the
  jiffies at the last time we load balanced. We no longer rely on staggering
  and therefore we can afford to do this now.

V3->V4:
- Use functions to do jiffy comparisons.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
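The scheme the message describes can be illustrated with a minimal, standalone
userspace sketch. The values used for balance_interval and busy_factor below
are made up for illustration, the interval is treated directly as jiffies
rather than going through msecs_to_jiffies() as the scheduler does, and
time_after() is written out with the same overflow-safe comparison the kernel
macro uses.

/* sketch.c: illustrate busy_factor scaling and the next_balance calculation */
#include <stdio.h>

#define HZ 250
/* overflow-safe comparison, same idea as the kernel's time_after() */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = 100000;		/* pretend "now" */
	unsigned long balance_interval = 64;	/* stand-in for sd->balance_interval */
	unsigned int busy_factor = 32;		/* stand-in for sd->busy_factor */
	int queue_busy = 1;

	/* A busy queue only needs to rebalance every interval * busy_factor. */
	unsigned long interval = balance_interval;
	if (queue_busy)
		interval *= busy_factor;

	/*
	 * After balancing, last_balance is set to the current jiffies
	 * (the V2->V3 change), so the next balance time is computed from
	 * "now" rather than from a stale last_balance value.
	 */
	unsigned long last_balance = jiffies;
	unsigned long next_balance = jiffies + 60 * HZ;

	if (time_after(next_balance, last_balance + interval))
		next_balance = last_balance + interval;

	printf("rebalance again in %lu jiffies\n", next_balance - jiffies);
	return 0;
}

Compiled with a plain cc sketch.c, this prints a next-balance offset of 2048
jiffies (interval * busy_factor), well short of the 60*HZ cap, which is the
less frequent rebalancing the patch aims for on a busy queue.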
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0a3e748d737d..0a4a26b21f69 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2774,14 +2774,28 @@ out_balanced:
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
+	int pulled_task = 0;
+	unsigned long next_balance = jiffies + 60 * HZ;
 
 	for_each_domain(this_cpu, sd) {
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
 			/* If we've pulled tasks over stop searching: */
-			if (load_balance_newidle(this_cpu, this_rq, sd))
+			pulled_task = load_balance_newidle(this_cpu,
+							this_rq, sd);
+			if (time_after(next_balance,
+					sd->last_balance + sd->balance_interval))
+				next_balance = sd->last_balance
+						+ sd->balance_interval;
+			if (pulled_task)
 				break;
 		}
 	}
+	if (!pulled_task)
+		/*
+		 * We are going idle. next_balance may be set based on
+		 * a busy processor. So reset next_balance.
+		 */
+		this_rq->next_balance = next_balance;
 }
 
 /*
@@ -2904,7 +2918,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 				 */
 				idle = NOT_IDLE;
 			}
-			sd->last_balance += interval;
+			sd->last_balance = jiffies;
 		}
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;