path: root/kernel/sched
author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2012-02-22 13:27:40 -0500
committer Ingo Molnar <mingo@elte.hu>                2012-03-01 04:51:23 -0500
commit    ddcdf6e7d9919d139031fa2a6addd9544a9a833e (patch)
tree      5c6883bdc1128a7c89b6d7e3ac13aa1a54f5c3c0 /kernel/sched
parent    8e45cb545d98bc58e75b7de89ec8d3e5c8459ee6 (diff)
sched: Rename load-balancing fields
s/env->this_/env->dst_/g
s/env->busiest_/env->src_/g
s/pull_task/move_task/g

Makes everything clearer.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: pjt@google.com
Link: http://lkml.kernel.org/n/tip-0yvgms8t8x962drpvl0fu0kk@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
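For orientation, the sketch below contrasts the two calling conventions this rename produces: pull_task(src_rq, p, this_rq, this_cpu) becomes move_task(p, env), with the source and destination runqueue/CPU pair carried in struct lb_env. It is a minimal user-space mock with stub types; struct rq, struct task_struct and their fields here are placeholders, not the kernel definitions, and only the lb_env field names and the move_task() shape are taken from the patch.

/*
 * Minimal user-space sketch (stub types, not the kernel code) of the
 * lb_env-based move_task() interface introduced by this patch.
 */
#include <stdio.h>

struct rq { int cpu; int nr_running; };                /* stub, not kernel struct rq */
struct task_struct { const char *comm; int cpu; };     /* stub, not kernel task_struct */

/* After this patch: one descriptor names both ends of the migration. */
struct lb_env {
	int		src_cpu;
	struct rq	*src_rq;
	int		dst_cpu;
	struct rq	*dst_rq;
};

/* Old shape: pull_task(src_rq, p, this_rq, this_cpu); new shape below. */
static void move_task(struct task_struct *p, struct lb_env *env)
{
	env->src_rq->nr_running--;	/* stands in for deactivate_task() */
	p->cpu = env->dst_cpu;		/* stands in for set_task_cpu() */
	env->dst_rq->nr_running++;	/* stands in for activate_task() */
}

int main(void)
{
	struct rq busiest = { .cpu = 1, .nr_running = 2 };
	struct rq this_rq = { .cpu = 0, .nr_running = 0 };
	struct task_struct p = { .comm = "worker", .cpu = 1 };

	struct lb_env env = {
		.src_cpu = busiest.cpu, .src_rq = &busiest,
		.dst_cpu = this_rq.cpu, .dst_rq = &this_rq,
	};

	move_task(&p, &env);
	printf("%s now on CPU%d; dst nr_running=%d\n", p.comm, p.cpu, this_rq.nr_running);
	return 0;
}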
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	118
1 files changed, 60 insertions, 58 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 55b1f117419..233d05171bf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2918,7 +2918,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 		return;
 
 	/*
-	 * This is possible from callers such as pull_task(), in which we
+	 * This is possible from callers such as move_task(), in which we
 	 * unconditionally check_prempt_curr() after an enqueue (which may have
 	 * lead to a throttle). This both saves work and prevents false
 	 * next-buddy nomination below.
@@ -3084,17 +3084,37 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
 
 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
 
+#define LBF_ALL_PINNED	0x01
+#define LBF_NEED_BREAK	0x02	/* clears into HAD_BREAK */
+#define LBF_HAD_BREAK	0x04
+#define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs overflows into ABORT */
+#define LBF_ABORT	0x10
+
+struct lb_env {
+	struct sched_domain	*sd;
+
+	int			src_cpu;
+	struct rq		*src_rq;
+	struct cfs_rq		*src_cfs_rq;
+
+	int			dst_cpu;
+	struct rq		*dst_rq;
+
+	enum cpu_idle_type	idle;
+	unsigned long		max_load_move;
+	unsigned int		flags;
+};
+
 /*
- * pull_task - move a task from a remote runqueue to the local runqueue.
+ * move_task - move a task from one runqueue to another runqueue.
  * Both runqueues must be locked.
  */
-static void pull_task(struct rq *src_rq, struct task_struct *p,
-		      struct rq *this_rq, int this_cpu)
+static void move_task(struct task_struct *p, struct lb_env *env)
 {
-	deactivate_task(src_rq, p, 0);
-	set_task_cpu(p, this_cpu);
-	activate_task(this_rq, p, 0);
-	check_preempt_curr(this_rq, p, 0);
+	deactivate_task(env->src_rq, p, 0);
+	set_task_cpu(p, env->dst_cpu);
+	activate_task(env->dst_rq, p, 0);
+	check_preempt_curr(env->dst_rq, p, 0);
 }
 
 /*
@@ -3129,26 +3149,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	return delta < (s64)sysctl_sched_migration_cost;
 }
 
-#define LBF_ALL_PINNED	0x01
-#define LBF_NEED_BREAK	0x02	/* clears into HAD_BREAK */
-#define LBF_HAD_BREAK	0x04
-#define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs overflows into ABORT */
-#define LBF_ABORT	0x10
-
-struct lb_env {
-	struct sched_domain	*sd;
-
-	int			this_cpu;
-	struct rq		*this_rq;
-
-	struct rq		*busiest_rq;
-	struct cfs_rq		*busiest_cfs_rq;
-
-	enum cpu_idle_type	idle;
-	unsigned long		max_load_move;
-	unsigned int		flags;
-};
-
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
@@ -3162,13 +3162,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpumask_test_cpu(env->this_cpu, tsk_cpus_allowed(p))) {
+	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
 	env->flags &= ~LBF_ALL_PINNED;
 
-	if (task_running(env->busiest_rq, p)) {
+	if (task_running(env->src_rq, p)) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
 		return 0;
 	}
@@ -3179,7 +3179,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	 * 2) too many balance attempts have failed.
 	 */
 
-	tsk_cache_hot = task_hot(p, env->busiest_rq->clock_task, env->sd);
+	tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
 	if (!tsk_cache_hot ||
 		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 #ifdef CONFIG_SCHEDSTATS
@@ -3210,20 +3210,20 @@ static int move_one_task(struct lb_env *env)
 	struct task_struct *p, *n;
 	struct cfs_rq *cfs_rq;
 
-	for_each_leaf_cfs_rq(env->busiest_rq, cfs_rq) {
+	for_each_leaf_cfs_rq(env->src_rq, cfs_rq) {
 		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
 			if (throttled_lb_pair(task_group(p),
-					      env->busiest_rq->cpu, env->this_cpu))
+					      env->src_cpu, env->dst_cpu))
 				break;
 
 			if (!can_migrate_task(p, env))
 				continue;
 
-			pull_task(env->busiest_rq, p, env->this_rq, env->this_cpu);
+			move_task(p, env);
 			/*
-			 * Right now, this is only the second place pull_task()
-			 * is called, so we can safely collect pull_task()
-			 * stats here rather than inside pull_task().
+			 * Right now, this is only the second place move_task()
+			 * is called, so we can safely collect move_task()
+			 * stats here rather than inside move_task().
 			 */
 			schedstat_inc(env->sd, lb_gained[env->idle]);
 			return 1;
@@ -3242,7 +3242,7 @@ static unsigned long balance_tasks(struct lb_env *env)
 	if (env->max_load_move == 0)
 		goto out;
 
-	list_for_each_entry_safe(p, n, &env->busiest_cfs_rq->tasks, se.group_node) {
+	list_for_each_entry_safe(p, n, &env->src_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate) {
 			env->flags |= LBF_NEED_BREAK;
 			break;
@@ -3252,7 +3252,7 @@ static unsigned long balance_tasks(struct lb_env *env)
 		    !can_migrate_task(p, env))
 			continue;
 
-		pull_task(env->busiest_rq, p, env->this_rq, env->this_cpu);
+		move_task(p, env);
 		pulled++;
 		rem_load_move -= p->se.load.weight;
 
@@ -3277,9 +3277,9 @@ static unsigned long balance_tasks(struct lb_env *env)
 	}
 out:
 	/*
-	 * Right now, this is one of only two places pull_task() is called,
-	 * so we can safely collect pull_task() stats here rather than
-	 * inside pull_task().
+	 * Right now, this is one of only two places move_task() is called,
+	 * so we can safely collect move_task() stats here rather than
+	 * inside move_task().
 	 */
 	schedstat_add(env->sd, lb_gained[env->idle], pulled);
 
@@ -3372,11 +3372,11 @@ static unsigned long load_balance_fair(struct lb_env *env)
 	long rem_load_move = env->max_load_move;
 
 	rcu_read_lock();
-	update_h_load(cpu_of(env->busiest_rq));
+	update_h_load(cpu_of(env->src_rq));
 
-	for_each_leaf_cfs_rq(env->busiest_rq, env->busiest_cfs_rq) {
-		unsigned long busiest_h_load = env->busiest_cfs_rq->h_load;
-		unsigned long busiest_weight = env->busiest_cfs_rq->load.weight;
+	for_each_leaf_cfs_rq(env->src_rq, env->src_cfs_rq) {
+		unsigned long busiest_h_load = env->src_cfs_rq->h_load;
+		unsigned long busiest_weight = env->src_cfs_rq->load.weight;
 		u64 rem_load, moved_load;
 
 		if (env->flags & (LBF_NEED_BREAK|LBF_ABORT))
@@ -3385,12 +3385,12 @@ static unsigned long load_balance_fair(struct lb_env *env)
 		/*
 		 * empty group or part of a throttled hierarchy
 		 */
-		if (!env->busiest_cfs_rq->task_weight)
+		if (!env->src_cfs_rq->task_weight)
 			continue;
 
-		if (throttled_lb_pair(env->busiest_cfs_rq->tg,
-				      cpu_of(env->busiest_rq),
-				      env->this_cpu))
+		if (throttled_lb_pair(env->src_cfs_rq->tg,
+				      cpu_of(env->src_rq),
+				      env->dst_cpu))
 			continue;
 
 		rem_load = (u64)rem_load_move * busiest_weight;
@@ -3420,7 +3420,7 @@ static inline void update_shares(int cpu)
 
 static unsigned long load_balance_fair(struct lb_env *env)
 {
-	env->busiest_cfs_rq = &env->busiest_rq->cfs;
+	env->src_cfs_rq = &env->src_rq->cfs;
 	return balance_tasks(env);
 }
 #endif
@@ -3451,7 +3451,7 @@ static int move_tasks(struct lb_env *env)
 		 * kernels will stop after the first task is pulled to minimize
 		 * the critical section.
 		 */
-		if (env->idle == CPU_NEWLY_IDLE && env->this_rq->nr_running) {
+		if (env->idle == CPU_NEWLY_IDLE && env->dst_rq->nr_running) {
 			env->flags |= LBF_ABORT;
 			break;
 		}
@@ -4461,8 +4461,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	struct lb_env env = {
 		.sd		= sd,
-		.this_cpu	= this_cpu,
-		.this_rq	= this_rq,
+		.dst_cpu	= this_cpu,
+		.dst_rq		= this_rq,
 		.idle		= idle,
 	};
 
@@ -4502,7 +4502,8 @@ redo:
 		 */
 		env.flags |= LBF_ALL_PINNED;
 		env.max_load_move = imbalance;
-		env.busiest_rq = busiest;
+		env.src_cpu = busiest->cpu;
+		env.src_rq = busiest;
 
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
@@ -4722,9 +4723,10 @@ static int active_load_balance_cpu_stop(void *data)
 	if (likely(sd)) {
 		struct lb_env env = {
 			.sd		= sd,
-			.this_cpu	= target_cpu,
-			.this_rq	= target_rq,
-			.busiest_rq	= busiest_rq,
+			.dst_cpu	= target_cpu,
+			.dst_rq		= target_rq,
+			.src_cpu	= busiest_rq->cpu,
+			.src_rq		= busiest_rq,
 			.idle		= CPU_IDLE,
 		};
 