author		Kirill Tkhai <ktkhai@parallels.com>	2014-08-20 05:48:01 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-08-20 08:53:03 -0400
commit		e5673f280501298dbb56efa46e333cf64ee5080a (patch)
tree		1de6b29733b2c8d65b2644e7517ac10bfb4a1a16 /kernel/sched
parent		a1e01829796aa7a993e28ffd7fee5c8d525be175 (diff)
sched/fair: Remove double_lock_balance() from active_load_balance_cpu_stop()
Avoid double_rq_lock() and use the TASK_ON_RQ_MIGRATING state for
active_load_balance_cpu_stop(). The advantage is (obviously) not holding
two 'rq->lock's at the same time and thereby increasing parallelism.

Further note that if there was no task to migrate we will not have
acquired the second rq->lock at all.

The important point to note is that because we acquire dst->lock
immediately after releasing src->lock the potential wait time of
task_rq_lock() callers on TASK_ON_RQ_MIGRATING is not longer than it
would have been in the double rq lock scenario.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1408528081.23412.92.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
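In outline, the stopper callback now takes each rq->lock in turn instead of both at once. A simplified sketch of the resulting sequence, condensed from the diff below (schedstats, the sched_domain lookup and the active_balance bookkeeping are omitted):

	raw_spin_lock_irq(&busiest_rq->lock);

	/* Under the src lock only: dequeue p, mark it TASK_ON_RQ_MIGRATING
	 * and point it at the destination CPU. */
	p = detach_one_task(&env);

	/* Drop the src lock; interrupts stay disabled. */
	raw_spin_unlock(&busiest_rq->lock);

	/* attach_one_task() takes the dst lock immediately, marks p
	 * TASK_ON_RQ_QUEUED, enqueues it there and drops the dst lock. */
	if (p)
		attach_one_task(target_rq, p);

	local_irq_enable();

Because only these few instructions separate releasing src->lock from acquiring dst->lock, and interrupts stay off across that window, a task_rq_lock() caller that observes TASK_ON_RQ_MIGRATING waits no longer than it would have waited for the doubly held locks.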
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	60
1 file changed, 44 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9e6ca0d88f51..7e5cf051c144 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5138,6 +5138,8 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 {
 	s64 delta;
 
+	lockdep_assert_held(&env->src_rq->lock);
+
 	if (p->sched_class != &fair_sched_class)
 		return 0;
 
@@ -5257,6 +5259,9 @@ static
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot = 0;
+
+	lockdep_assert_held(&env->src_rq->lock);
+
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
@@ -5341,30 +5346,49 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 }
 
 /*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
  * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
  *
- * Called with both runqueues locked.
+ * Returns a task if successful and NULL otherwise.
  */
-static int move_one_task(struct lb_env *env)
+static struct task_struct *detach_one_task(struct lb_env *env)
 {
 	struct task_struct *p, *n;
 
+	lockdep_assert_held(&env->src_rq->lock);
+
 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
 		if (!can_migrate_task(p, env))
 			continue;
 
-		move_task(p, env);
+		deactivate_task(env->src_rq, p, 0);
+		p->on_rq = TASK_ON_RQ_MIGRATING;
+		set_task_cpu(p, env->dst_cpu);
+
 		/*
-		 * Right now, this is only the second place move_task()
-		 * is called, so we can safely collect move_task()
-		 * stats here rather than inside move_task().
+		 * Right now, this is only the second place where
+		 * lb_gained[env->idle] is updated (other is move_tasks)
+		 * so we can safely collect stats here rather than
+		 * inside move_tasks().
 		 */
 		schedstat_inc(env->sd, lb_gained[env->idle]);
-		return 1;
+		return p;
 	}
-	return 0;
+	return NULL;
+}
+
+/*
+ * attach_one_task() -- attaches the task returned from detach_one_task() to
+ * its new rq.
+ */
+static void attach_one_task(struct rq *rq, struct task_struct *p)
+{
+	raw_spin_lock(&rq->lock);
+	BUG_ON(task_rq(p) != rq);
+	p->on_rq = TASK_ON_RQ_QUEUED;
+	activate_task(rq, p, 0);
+	check_preempt_curr(rq, p, 0);
+	raw_spin_unlock(&rq->lock);
 }
 
 static const unsigned int sched_nr_migrate_break = 32;
@@ -6943,6 +6967,7 @@ static int active_load_balance_cpu_stop(void *data)
 	int target_cpu = busiest_rq->push_cpu;
 	struct rq *target_rq = cpu_rq(target_cpu);
 	struct sched_domain *sd;
+	struct task_struct *p = NULL;
 
 	raw_spin_lock_irq(&busiest_rq->lock);
 
@@ -6962,9 +6987,6 @@ static int active_load_balance_cpu_stop(void *data)
 	 */
 	BUG_ON(busiest_rq == target_rq);
 
-	/* move a task from busiest_rq to target_rq */
-	double_lock_balance(busiest_rq, target_rq);
-
 	/* Search for an sd spanning us and the target CPU. */
 	rcu_read_lock();
 	for_each_domain(target_cpu, sd) {
@@ -6985,16 +7007,22 @@ static int active_load_balance_cpu_stop(void *data)
 
 		schedstat_inc(sd, alb_count);
 
-		if (move_one_task(&env))
+		p = detach_one_task(&env);
+		if (p)
 			schedstat_inc(sd, alb_pushed);
 		else
 			schedstat_inc(sd, alb_failed);
 	}
 	rcu_read_unlock();
-	double_unlock_balance(busiest_rq, target_rq);
 out_unlock:
 	busiest_rq->active_balance = 0;
-	raw_spin_unlock_irq(&busiest_rq->lock);
+	raw_spin_unlock(&busiest_rq->lock);
+
+	if (p)
+		attach_one_task(target_rq, p);
+
+	local_irq_enable();
+
 	return 0;
 }
 
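For reference, the wait-time argument leans on the companion change in this series that lets task_rq_lock()/__task_rq_lock() recognise TASK_ON_RQ_MIGRATING and retry. Roughly, and only as a hedged sketch of that waiter side (it is not part of this diff; the real loop lives in kernel/sched/core.c):

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;	/* p is settled on this rq */
		raw_spin_unlock(&rq->lock);

		/* p is mid-migration; its rq is about to change */
		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}

The spin ends once attach_one_task() sets p->on_rq = TASK_ON_RQ_QUEUED under the destination rq->lock, which is exactly the window the changelog bounds.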