author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-09-22 09:23:13 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-12-21 04:34:45 -0500
commit	5b54b56be5b540a9cb12682c4d0df5454c098a38 (patch)
tree	837b7084c5e2659968ea64ba2c6520ff6256567a /kernel/sched
parent	518cd62341786aa4e3839810832af2fbc0de1ea4 (diff)
sched: Replace all_pinned with a generic flags field
Replace the all_pinned argument with a flags field so that we can add some extra controls throughout that entire call chain.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-33kevm71m924ok1gpxd720v3@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
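The pattern is easy to demonstrate in isolation: a single int carries bit flags instead of a one-off boolean out-parameter, so later patches can thread new controls through the same call chain without touching every prototype again. Below is a minimal, self-contained sketch of that idea; migrate_one() and do_balance() are hypothetical stand-ins for the scheduler's call chain, not kernel functions, and only LBF_ALL_PINNED mirrors the flag added here.

#include <stdio.h>

#define LBF_ALL_PINNED	0x01	/* mirrors the flag introduced in this patch */
/* further bits (0x02, 0x04, ...) can be added later without new parameters */

/* Hypothetical leaf helper: clears ALL_PINNED once any task is movable. */
static int migrate_one(int task_is_pinned, int *lb_flags)
{
	if (task_is_pinned)
		return 0;
	*lb_flags &= ~LBF_ALL_PINNED;	/* at least one task could move */
	return 1;
}

/* Hypothetical caller: seeds the flag, then inspects it afterwards. */
static void do_balance(const int *pinned, int ntasks)
{
	int lb_flags = LBF_ALL_PINNED;	/* assume everything is pinned ... */
	int moved = 0;

	for (int i = 0; i < ntasks; i++)
		moved += migrate_one(pinned[i], &lb_flags);

	if (lb_flags & LBF_ALL_PINNED)	/* ... unless a migration cleared it */
		printf("all %d tasks pinned, back off\n", ntasks);
	else
		printf("moved %d task(s)\n", moved);
}

int main(void)
{
	int all_pinned[] = { 1, 1, 1 };
	int mixed[]      = { 1, 0, 1 };

	do_balance(all_pinned, 3);	/* prints: all 3 tasks pinned, back off */
	do_balance(mixed, 3);		/* prints: moved 1 task(s) */
	return 0;
}

The caller seeds the flag, any successful migration clears it, and testing the bit afterwards replaces the old all_pinned boolean; extra bits can be OR-ed in later without another signature change.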
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2237ffefdbce..be47ce6da2a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3131,13 +3131,15 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	return delta < (s64)sysctl_sched_migration_cost;
 }
 
+#define LBF_ALL_PINNED	0x01
+
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 		     struct sched_domain *sd, enum cpu_idle_type idle,
-		     int *all_pinned)
+		     int *lb_flags)
 {
 	int tsk_cache_hot = 0;
 	/*
@@ -3150,7 +3152,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
-	*all_pinned = 0;
+	*lb_flags &= ~LBF_ALL_PINNED;
 
 	if (task_running(rq, p)) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
@@ -3224,7 +3226,7 @@ move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *all_pinned,
+	      enum cpu_idle_type idle, int *lb_flags,
 	      struct cfs_rq *busiest_cfs_rq)
 {
 	int loops = 0, pulled = 0;
@@ -3240,7 +3242,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
 		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
-				      all_pinned))
+				      lb_flags))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -3359,7 +3361,7 @@ static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned)
+		  int *lb_flags)
 {
 	long rem_load_move = max_load_move;
 	struct cfs_rq *busiest_cfs_rq;
@@ -3383,7 +3385,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
 		moved_load = balance_tasks(this_rq, this_cpu, busiest,
-				rem_load, sd, idle, all_pinned,
+				rem_load, sd, idle, lb_flags,
 				busiest_cfs_rq);
 
 		if (!moved_load)
@@ -3409,10 +3411,10 @@ static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned)
+		  int *lb_flags)
 {
 	return balance_tasks(this_rq, this_cpu, busiest,
-			max_load_move, sd, idle, all_pinned,
+			max_load_move, sd, idle, lb_flags,
 			&busiest->cfs);
 }
 #endif
@@ -3427,14 +3429,14 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      unsigned long max_load_move,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
-		      int *all_pinned)
+		      int *lb_flags)
 {
 	unsigned long total_load_moved = 0, load_moved;
 
 	do {
 		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
 				max_load_move - total_load_moved,
-				sd, idle, all_pinned);
+				sd, idle, lb_flags);
 
 		total_load_moved += load_moved;
 
@@ -4439,7 +4441,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *balance)
 {
-	int ld_moved, all_pinned = 0, active_balance = 0;
+	int ld_moved, lb_flags = 0, active_balance = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
@@ -4480,11 +4482,11 @@ redo:
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
-		all_pinned = 1;
+		lb_flags |= LBF_ALL_PINNED;
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
-				      imbalance, sd, idle, &all_pinned);
+				      imbalance, sd, idle, &lb_flags);
 		double_rq_unlock(this_rq, busiest);
 		local_irq_restore(flags);
 
@@ -4495,7 +4497,7 @@ redo:
 			resched_cpu(this_cpu);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
-		if (unlikely(all_pinned)) {
+		if (unlikely(lb_flags & LBF_ALL_PINNED)) {
 			cpumask_clear_cpu(cpu_of(busiest), cpus);
 			if (!cpumask_empty(cpus))
 				goto redo;
@@ -4525,7 +4527,7 @@ redo:
 					tsk_cpus_allowed(busiest->curr))) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
 							    flags);
-				all_pinned = 1;
+				lb_flags |= LBF_ALL_PINNED;
 				goto out_one_pinned;
 			}
 
@@ -4578,7 +4580,8 @@ out_balanced:
 
 out_one_pinned:
 	/* tune up the balancing interval */
-	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
+	if (((lb_flags & LBF_ALL_PINNED) &&
+			sd->balance_interval < MAX_PINNED_INTERVAL) ||
 			(sd->balance_interval < sd->max_interval))
 		sd->balance_interval *= 2;
 