author		Christoph Lameter <clameter@sgi.com>	2006-12-10 05:20:25 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-10 12:55:42 -0500
commit		c9819f4593e8d052b41a89f47140f5c5e7e30582 (patch)
tree		85da690ecd1e14506336009281e5c369d8457325 /kernel
parent		e418e1c2bf1a253916b569370653414eb28597b6 (diff)
[PATCH] sched: use softirq for load balancing
Call rebalance_tick (renamed to run_rebalance_domains) from a newly introduced
softirq.
We calculate the earliest time for each layer of sched domains to be rescanned
(this is the rescan time for idle) and use the earliest of those to schedule
the softirq via a new field "next_balance" added to struct rq.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
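
The bookkeeping this introduces is small enough to demonstrate on its own: each domain remembers when it was last balanced, the runqueue caches the earliest time any domain is due again, and the tick raises the softirq only once that time has passed. Below is a minimal userspace sketch of that logic, not part of the patch: struct dom, run_rebalance() and the main() loop are simplified stand-ins invented for illustration, while the 60*HZ cap and the time_after()/time_after_eq() comparisons mirror the patch.

#include <stdio.h>

#define HZ 1000

/* Wraparound-safe jiffies comparisons, as in include/linux/jiffies.h
 * (the kernel versions also typecheck their arguments) */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

struct dom {				/* stand-in for struct sched_domain */
	unsigned long last_balance;	/* jiffies at the last balancing pass */
	unsigned long interval;		/* balancing interval, in jiffies */
};

/* Balance whatever is overdue and return the earliest time any domain
 * is due again -- the value the patch caches in rq->next_balance. */
static unsigned long run_rebalance(struct dom *doms, int n,
				   unsigned long jiffies)
{
	unsigned long next_balance = jiffies + 60 * HZ;	/* rescan within 60s */
	int i;

	for (i = 0; i < n; i++) {
		struct dom *sd = &doms[i];

		if (time_after_eq(jiffies, sd->last_balance + sd->interval))
			sd->last_balance += sd->interval; /* load_balance() here */
		if (time_after(next_balance, sd->last_balance + sd->interval))
			next_balance = sd->last_balance + sd->interval;
	}
	return next_balance;
}

int main(void)
{
	struct dom doms[] = { { 0, 8 }, { 0, 64 } };	/* two domain levels */
	unsigned long jiffies, next_balance = 0;

	for (jiffies = 0; jiffies < 200; jiffies++) {
		/* scheduler_tick(): raise the softirq only when work is due */
		if (time_after_eq(jiffies, next_balance))
			next_balance = run_rebalance(doms, 2, jiffies);
	}
	printf("next balance due at jiffy %lu\n", next_balance);
	return 0;
}

Tracking only the minimum keeps the per-tick cost at a single comparison against rq->next_balance; the full walk over the domain hierarchy happens in softirq context, and only when something is actually due.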
Diffstat (limited to 'kernel')
 kernel/sched.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 14a8d9050cd4..0a3e748d737d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -227,6 +227,7 @@ struct rq {
 	unsigned long expired_timestamp;
 	unsigned long long timestamp_last_tick;
 	struct task_struct *curr, *idle;
+	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 	struct prio_array *active, *expired, arrays[2];
 	int best_expired_prio;
@@ -2858,7 +2859,7 @@ static void update_load(struct rq *this_rq)
 }
 
 /*
- * rebalance_tick will get called every timer tick, on every CPU.
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
  *
  * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
@@ -2866,9 +2867,10 @@ static void update_load(struct rq *this_rq)
 * Balancing parameters are set up in arch_init_sched_domains.
 */
 
-static void
-rebalance_tick(int this_cpu, struct rq *this_rq)
+static void run_rebalance_domains(struct softirq_action *h)
 {
+	int this_cpu = smp_processor_id();
+	struct rq *this_rq = cpu_rq(this_cpu);
 	unsigned long interval;
 	struct sched_domain *sd;
 	/*
@@ -2877,6 +2879,8 @@ rebalance_tick(int this_cpu, struct rq *this_rq)
 	 */
 	enum idle_type idle = !this_rq->nr_running ?
 				SCHED_IDLE : NOT_IDLE;
+	/* Earliest time when we have to call run_rebalance_domains again */
+	unsigned long next_balance = jiffies + 60*HZ;
 
 	for_each_domain(this_cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -2891,7 +2895,7 @@ rebalance_tick(int this_cpu, struct rq *this_rq)
 		if (unlikely(!interval))
 			interval = 1;
 
-		if (jiffies - sd->last_balance >= interval) {
+		if (time_after_eq(jiffies, sd->last_balance + interval)) {
 			if (load_balance(this_cpu, this_rq, sd, idle)) {
 				/*
 				 * We've pulled tasks over so either we're no
@@ -2902,7 +2906,10 @@ rebalance_tick(int this_cpu, struct rq *this_rq)
 			}
 			sd->last_balance += interval;
 		}
+		if (time_after(next_balance, sd->last_balance + interval))
+			next_balance = sd->last_balance + interval;
 	}
+	this_rq->next_balance = next_balance;
 }
 #else
 /*
@@ -3155,7 +3162,8 @@ void scheduler_tick(void)
 		task_running_tick(rq, p);
 #ifdef CONFIG_SMP
 	update_load(rq);
-	rebalance_tick(cpu, rq);
+	if (time_after_eq(jiffies, rq->next_balance))
+		raise_softirq(SCHED_SOFTIRQ);
 #endif
 }
 
@@ -6859,6 +6867,10 @@ void __init sched_init(void)
 
 	set_load_weight(&init_task);
 
+#ifdef CONFIG_SMP
+	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 	plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
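
A side note on the hunk at @@ -2891,7 +2895,7 @@ above: the open-coded "jiffies - sd->last_balance >= interval" test becomes time_after_eq(). The macro compares the signed distance between the two timestamps, so it stays correct across a jiffies wraparound and, unlike the unsigned subtraction, treats a timestamp that lies in the future as not yet due. A tiny standalone check of that difference (the macro body is copied from include/linux/jiffies.h, minus its typechecking; the values are made up for the demo):

#include <stdio.h>

#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = 1000;
	unsigned long last_balance = 1003;	/* nudged past "now" */
	unsigned long interval = 10;

	/* unsigned subtraction wraps to a huge value: fires 13 ticks early */
	printf("open-coded:    %d\n", jiffies - last_balance >= interval);
	/* signed distance is negative: correctly reports "not due yet" */
	printf("time_after_eq: %d\n",
	       time_after_eq(jiffies, last_balance + interval));
	return 0;
}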