diffstat:
 include/linux/interrupt.h |  3 ++-
 kernel/sched.c            | 22 +++++++++++++++-----
 2 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index de7593f4e895..e36e86c869fb 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -231,7 +231,8 @@ enum
 	NET_TX_SOFTIRQ,
 	NET_RX_SOFTIRQ,
 	BLOCK_SOFTIRQ,
-	TASKLET_SOFTIRQ
+	TASKLET_SOFTIRQ,
+	SCHED_SOFTIRQ,
 };
 
 /* softirq mask and active fields moved to irq_cpustat_t in
diff --git a/kernel/sched.c b/kernel/sched.c
index 14a8d9050cd4..0a3e748d737d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -227,6 +227,7 @@ struct rq {
 	unsigned long expired_timestamp;
 	unsigned long long timestamp_last_tick;
 	struct task_struct *curr, *idle;
+	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 	struct prio_array *active, *expired, arrays[2];
 	int best_expired_prio;
@@ -2858,7 +2859,7 @@ static void update_load(struct rq *this_rq)
 }
 
 /*
- * rebalance_tick will get called every timer tick, on every CPU.
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
  *
  * It checks each scheduling domain to see if it is due to be balanced,
  * and initiates a balancing operation if so.
@@ -2866,9 +2867,10 @@ static void update_load(struct rq *this_rq)
  * Balancing parameters are set up in arch_init_sched_domains.
  */
 
-static void
-rebalance_tick(int this_cpu, struct rq *this_rq)
+static void run_rebalance_domains(struct softirq_action *h)
 {
+	int this_cpu = smp_processor_id();
+	struct rq *this_rq = cpu_rq(this_cpu);
 	unsigned long interval;
 	struct sched_domain *sd;
 	/*
@@ -2877,6 +2879,8 @@ rebalance_tick(int this_cpu, struct rq *this_rq)
 	 */
 	enum idle_type idle = !this_rq->nr_running ?
 				SCHED_IDLE : NOT_IDLE;
+	/* Earliest time when we have to call run_rebalance_domains again */
+	unsigned long next_balance = jiffies + 60*HZ;
 
 	for_each_domain(this_cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -2891,7 +2895,7 @@ rebalance_tick(int this_cpu, struct rq *this_rq)
 		if (unlikely(!interval))
 			interval = 1;
 
-		if (jiffies - sd->last_balance >= interval) {
+		if (time_after_eq(jiffies, sd->last_balance + interval)) {
 			if (load_balance(this_cpu, this_rq, sd, idle)) {
 				/*
 				 * We've pulled tasks over so either we're no
@@ -2902,7 +2906,10 @@ rebalance_tick(int this_cpu, struct rq *this_rq)
 			}
 			sd->last_balance += interval;
 		}
+		if (time_after(next_balance, sd->last_balance + interval))
+			next_balance = sd->last_balance + interval;
 	}
+	this_rq->next_balance = next_balance;
 }
 #else
 /*
@@ -3155,7 +3162,8 @@ void scheduler_tick(void)
 	task_running_tick(rq, p);
 #ifdef CONFIG_SMP
 	update_load(rq);
-	rebalance_tick(cpu, rq);
+	if (time_after_eq(jiffies, rq->next_balance))
+		raise_softirq(SCHED_SOFTIRQ);
 #endif
 }
 
@@ -6859,6 +6867,10 @@ void __init sched_init(void)
 
 	set_load_weight(&init_task);
 
+#ifdef CONFIG_SMP
+	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 	plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
 #endif