author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2012-02-22 06:47:19 -0500
committer  Ingo Molnar <mingo@elte.hu>                2012-03-01 04:51:22 -0500
commit     8e45cb545d98bc58e75b7de89ec8d3e5c8459ee6 (patch)
tree       b855602a267eb38bc5d511050fb40477e5a607f1 /kernel/sched/fair.c
parent     3c7d51843b03a6839e9ec7cda724e54d2319a63a (diff)
sched: Move load-balancing arguments into helper struct
Passing large sets of similar arguments all around the load-balancer
gets tiresome when you want to modify something. Stick them all in a
helper structure and pass the structure around.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: pjt@google.com
Link: http://lkml.kernel.org/n/tip-5slqz0vhsdzewrfk9eza1aon@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
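The refactoring applied here is the classic "parameter object" pattern:
collect a long, repeated argument list into one struct and pass a pointer
to it. Below is a minimal, self-contained C sketch of that pattern, not
the patch's code; all names (lb_env_demo, can_migrate_before,
can_migrate_after) are illustrative stand-ins:

#include <stdio.h>

/* Hypothetical stand-in for the patch's struct lb_env. */
struct lb_env_demo {
        int             this_cpu;
        int             busiest_cpu;
        unsigned long   max_load_move;
        unsigned int    flags;
};

/*
 * Before: every helper repeats the full parameter list, so adding or
 * removing an argument means touching every signature and call site.
 */
static int can_migrate_before(int this_cpu, int busiest_cpu,
                              unsigned long max_load_move,
                              unsigned int *flags)
{
        return max_load_move > 0 && this_cpu != busiest_cpu;
}

/*
 * After: one pointer; a new field changes only the struct definition
 * and the places that actually use it.
 */
static int can_migrate_after(struct lb_env_demo *env)
{
        return env->max_load_move > 0 && env->this_cpu != env->busiest_cpu;
}

int main(void)
{
        struct lb_env_demo env = {
                .this_cpu       = 0,
                .busiest_cpu    = 1,
                .max_load_move  = 128,
        };

        printf("before: %d, after: %d\n",
               can_migrate_before(env.this_cpu, env.busiest_cpu,
                                  env.max_load_move, &env.flags),
               can_migrate_after(&env));
        return 0;
}

One consequence visible in the diff below: callers whose inputs vary per
iteration (load_balance(), move_tasks()) now update fields such as
max_load_move and busiest_rq on the shared struct before each call,
instead of rebuilding an argument list.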
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 177
1 file changed, 93 insertions(+), 84 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 79e9e13c31ab..55b1f117419a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3135,13 +3135,25 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 #define LBF_HAD_BREAKS  0x0C    /* count HAD_BREAKs overflows into ABORT */
 #define LBF_ABORT       0x10
 
+struct lb_env {
+        struct sched_domain     *sd;
+
+        int                     this_cpu;
+        struct rq               *this_rq;
+
+        struct rq               *busiest_rq;
+        struct cfs_rq           *busiest_cfs_rq;
+
+        enum cpu_idle_type      idle;
+        unsigned long           max_load_move;
+        unsigned int            flags;
+};
+
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
-int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
-                     struct sched_domain *sd, enum cpu_idle_type idle,
-                     int *lb_flags)
+int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
         int tsk_cache_hot = 0;
         /*
@@ -3150,13 +3162,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
          * 2) cannot be migrated to this CPU due to cpus_allowed, or
          * 3) are cache-hot on their current CPU.
          */
-        if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
+        if (!cpumask_test_cpu(env->this_cpu, tsk_cpus_allowed(p))) {
                 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
                 return 0;
         }
-        *lb_flags &= ~LBF_ALL_PINNED;
+        env->flags &= ~LBF_ALL_PINNED;
 
-        if (task_running(rq, p)) {
+        if (task_running(env->busiest_rq, p)) {
                 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
                 return 0;
         }
@@ -3167,12 +3179,12 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
          * 2) too many balance attempts have failed.
          */
 
-        tsk_cache_hot = task_hot(p, rq->clock_task, sd);
+        tsk_cache_hot = task_hot(p, env->busiest_rq->clock_task, env->sd);
         if (!tsk_cache_hot ||
-                sd->nr_balance_failed > sd->cache_nice_tries) {
+                env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 #ifdef CONFIG_SCHEDSTATS
                 if (tsk_cache_hot) {
-                        schedstat_inc(sd, lb_hot_gained[idle]);
+                        schedstat_inc(env->sd, lb_hot_gained[env->idle]);
                         schedstat_inc(p, se.statistics.nr_forced_migrations);
                 }
 #endif
@@ -3193,31 +3205,27 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
  *
  * Called with both runqueues locked.
  */
-static int
-move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-              struct sched_domain *sd, enum cpu_idle_type idle)
+static int move_one_task(struct lb_env *env)
 {
         struct task_struct *p, *n;
         struct cfs_rq *cfs_rq;
-        int pinned = 0;
 
-        for_each_leaf_cfs_rq(busiest, cfs_rq) {
+        for_each_leaf_cfs_rq(env->busiest_rq, cfs_rq) {
                 list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
                         if (throttled_lb_pair(task_group(p),
-                                              busiest->cpu, this_cpu))
+                                              env->busiest_rq->cpu, env->this_cpu))
                                 break;
 
-                        if (!can_migrate_task(p, busiest, this_cpu,
-                                                sd, idle, &pinned))
+                        if (!can_migrate_task(p, env))
                                 continue;
 
-                        pull_task(busiest, p, this_rq, this_cpu);
+                        pull_task(env->busiest_rq, p, env->this_rq, env->this_cpu);
                         /*
                          * Right now, this is only the second place pull_task()
                          * is called, so we can safely collect pull_task()
                          * stats here rather than inside pull_task().
                          */
-                        schedstat_inc(sd, lb_gained[idle]);
+                        schedstat_inc(env->sd, lb_gained[env->idle]);
                         return 1;
                 }
         }
@@ -3225,31 +3233,26 @@ move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
         return 0;
 }
 
-static unsigned long
-balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-              unsigned long max_load_move, struct sched_domain *sd,
-              enum cpu_idle_type idle, int *lb_flags,
-              struct cfs_rq *busiest_cfs_rq)
+static unsigned long balance_tasks(struct lb_env *env)
 {
         int loops = 0, pulled = 0;
-        long rem_load_move = max_load_move;
+        long rem_load_move = env->max_load_move;
         struct task_struct *p, *n;
 
-        if (max_load_move == 0)
+        if (env->max_load_move == 0)
                 goto out;
 
-        list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
+        list_for_each_entry_safe(p, n, &env->busiest_cfs_rq->tasks, se.group_node) {
                 if (loops++ > sysctl_sched_nr_migrate) {
-                        *lb_flags |= LBF_NEED_BREAK;
+                        env->flags |= LBF_NEED_BREAK;
                         break;
                 }
 
                 if ((p->se.load.weight >> 1) > rem_load_move ||
-                    !can_migrate_task(p, busiest, this_cpu, sd, idle,
-                                      lb_flags))
+                    !can_migrate_task(p, env))
                         continue;
 
-                pull_task(busiest, p, this_rq, this_cpu);
+                pull_task(env->busiest_rq, p, env->this_rq, env->this_cpu);
                 pulled++;
                 rem_load_move -= p->se.load.weight;
 
@@ -3259,8 +3262,8 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  * kernels will stop after the first task is pulled to minimize
                  * the critical section.
                  */
-                if (idle == CPU_NEWLY_IDLE) {
-                        *lb_flags |= LBF_ABORT;
+                if (env->idle == CPU_NEWLY_IDLE) {
+                        env->flags |= LBF_ABORT;
                         break;
                 }
 #endif
@@ -3278,9 +3281,9 @@ out:
          * so we can safely collect pull_task() stats here rather than
          * inside pull_task().
          */
-        schedstat_add(sd, lb_gained[idle], pulled);
+        schedstat_add(env->sd, lb_gained[env->idle], pulled);
 
-        return max_load_move - rem_load_move;
+        return env->max_load_move - rem_load_move;
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3363,40 +3366,39 @@ static void update_h_load(long cpu)
         walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                  unsigned long max_load_move,
-                  struct sched_domain *sd, enum cpu_idle_type idle,
-                  int *lb_flags)
+static unsigned long load_balance_fair(struct lb_env *env)
 {
-        long rem_load_move = max_load_move;
-        struct cfs_rq *busiest_cfs_rq;
+        unsigned long max_load_move = env->max_load_move;
+        long rem_load_move = env->max_load_move;
 
         rcu_read_lock();
-        update_h_load(cpu_of(busiest));
+        update_h_load(cpu_of(env->busiest_rq));
 
-        for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
-                unsigned long busiest_h_load = busiest_cfs_rq->h_load;
-                unsigned long busiest_weight = busiest_cfs_rq->load.weight;
+        for_each_leaf_cfs_rq(env->busiest_rq, env->busiest_cfs_rq) {
+                unsigned long busiest_h_load = env->busiest_cfs_rq->h_load;
+                unsigned long busiest_weight = env->busiest_cfs_rq->load.weight;
                 u64 rem_load, moved_load;
 
-                if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+                if (env->flags & (LBF_NEED_BREAK|LBF_ABORT))
                         break;
 
                 /*
                  * empty group or part of a throttled hierarchy
                  */
-                if (!busiest_cfs_rq->task_weight ||
-                    throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu))
+                if (!env->busiest_cfs_rq->task_weight)
+                        continue;
+
+                if (throttled_lb_pair(env->busiest_cfs_rq->tg,
+                                      cpu_of(env->busiest_rq),
+                                      env->this_cpu))
                         continue;
 
                 rem_load = (u64)rem_load_move * busiest_weight;
                 rem_load = div_u64(rem_load, busiest_h_load + 1);
 
-                moved_load = balance_tasks(this_rq, this_cpu, busiest,
-                                rem_load, sd, idle, lb_flags,
-                                busiest_cfs_rq);
+                env->max_load_move = rem_load;
 
+                moved_load = balance_tasks(env);
                 if (!moved_load)
                         continue;
 
@@ -3416,15 +3418,10 @@ static inline void update_shares(int cpu)
 {
 }
 
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                  unsigned long max_load_move,
-                  struct sched_domain *sd, enum cpu_idle_type idle,
-                  int *lb_flags)
+static unsigned long load_balance_fair(struct lb_env *env)
 {
-        return balance_tasks(this_rq, this_cpu, busiest,
-                        max_load_move, sd, idle, lb_flags,
-                        &busiest->cfs);
+        env->busiest_cfs_rq = &env->busiest_rq->cfs;
+        return balance_tasks(env);
 }
 #endif
 
@@ -3435,21 +3432,17 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
  *
  * Called with both runqueues locked.
  */
-static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                      unsigned long max_load_move,
-                      struct sched_domain *sd, enum cpu_idle_type idle,
-                      int *lb_flags)
+static int move_tasks(struct lb_env *env)
 {
+        unsigned long max_load_move = env->max_load_move;
         unsigned long total_load_moved = 0, load_moved;
 
         do {
-                load_moved = load_balance_fair(this_rq, this_cpu, busiest,
-                                max_load_move - total_load_moved,
-                                sd, idle, lb_flags);
-
+                env->max_load_move = max_load_move - total_load_moved;
+                load_moved = load_balance_fair(env);
                 total_load_moved += load_moved;
 
-                if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+                if (env->flags & (LBF_NEED_BREAK|LBF_ABORT))
                         break;
 
 #ifdef CONFIG_PREEMPT
@@ -3458,8 +3451,8 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  * kernels will stop after the first task is pulled to minimize
                  * the critical section.
                  */
-                if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) {
-                        *lb_flags |= LBF_ABORT;
+                if (env->idle == CPU_NEWLY_IDLE && env->this_rq->nr_running) {
+                        env->flags |= LBF_ABORT;
                         break;
                 }
 #endif
@@ -4459,13 +4452,20 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                         struct sched_domain *sd, enum cpu_idle_type idle,
                         int *balance)
 {
-        int ld_moved, lb_flags = 0, active_balance = 0;
+        int ld_moved, active_balance = 0;
         struct sched_group *group;
         unsigned long imbalance;
         struct rq *busiest;
         unsigned long flags;
         struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
+        struct lb_env env = {
+                .sd             = sd,
+                .this_cpu       = this_cpu,
+                .this_rq        = this_rq,
+                .idle           = idle,
+        };
+
         cpumask_copy(cpus, cpu_active_mask);
 
         schedstat_inc(sd, lb_count[idle]);
@@ -4500,11 +4500,13 @@ redo:
          * still unbalanced. ld_moved simply stays zero, so it is
          * correctly treated as an imbalance.
          */
-        lb_flags |= LBF_ALL_PINNED;
+        env.flags |= LBF_ALL_PINNED;
+        env.max_load_move = imbalance;
+        env.busiest_rq = busiest;
+
         local_irq_save(flags);
         double_rq_lock(this_rq, busiest);
-        ld_moved = move_tasks(this_rq, this_cpu, busiest,
-                              imbalance, sd, idle, &lb_flags);
+        ld_moved = move_tasks(&env);
         double_rq_unlock(this_rq, busiest);
         local_irq_restore(flags);
 
@@ -4514,18 +4516,18 @@ redo:
         if (ld_moved && this_cpu != smp_processor_id())
                 resched_cpu(this_cpu);
 
-        if (lb_flags & LBF_ABORT)
+        if (env.flags & LBF_ABORT)
                 goto out_balanced;
 
-        if (lb_flags & LBF_NEED_BREAK) {
-                lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
-                if (lb_flags & LBF_ABORT)
+        if (env.flags & LBF_NEED_BREAK) {
+                env.flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
+                if (env.flags & LBF_ABORT)
                         goto out_balanced;
                 goto redo;
         }
 
         /* All tasks on this runqueue were pinned by CPU affinity */
-        if (unlikely(lb_flags & LBF_ALL_PINNED)) {
+        if (unlikely(env.flags & LBF_ALL_PINNED)) {
                 cpumask_clear_cpu(cpu_of(busiest), cpus);
                 if (!cpumask_empty(cpus))
                         goto redo;
@@ -4555,7 +4557,7 @@ redo:
                                 tsk_cpus_allowed(busiest->curr))) {
                         raw_spin_unlock_irqrestore(&busiest->lock,
                                                     flags);
-                        lb_flags |= LBF_ALL_PINNED;
+                        env.flags |= LBF_ALL_PINNED;
                         goto out_one_pinned;
                 }
 
@@ -4608,7 +4610,7 @@ out_balanced:
 
 out_one_pinned:
         /* tune up the balancing interval */
-        if (((lb_flags & LBF_ALL_PINNED) &&
+        if (((env.flags & LBF_ALL_PINNED) &&
                 sd->balance_interval < MAX_PINNED_INTERVAL) ||
                         (sd->balance_interval < sd->max_interval))
                 sd->balance_interval *= 2;
@@ -4718,10 +4720,17 @@ static int active_load_balance_cpu_stop(void *data)
         }
 
         if (likely(sd)) {
+                struct lb_env env = {
+                        .sd             = sd,
+                        .this_cpu       = target_cpu,
+                        .this_rq        = target_rq,
+                        .busiest_rq     = busiest_rq,
+                        .idle           = CPU_IDLE,
+                };
+
                 schedstat_inc(sd, alb_count);
 
-                if (move_one_task(target_rq, target_cpu, busiest_rq,
-                                  sd, CPU_IDLE))
+                if (move_one_task(&env))
                         schedstat_inc(sd, alb_pushed);
                 else
                         schedstat_inc(sd, alb_failed);