author	Kirill Tkhai <ktkhai@parallels.com>	2014-09-12 07:03:34 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-09-19 06:35:21 -0400
commit	a15b12ac36ad4e7b856a4ae54937ae26a51aebad
tree	78a87ec22776757a000e214313b78fabe3b6fbb6 /kernel/sched
parent	1ba93d42727c44001aa8ccffd39c8ab5705379e2
sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
If a task is queued but not running on its rq, we can simply migrate
it without the migration thread and without a context switch.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1410519814.3569.7.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	47
1 file changed, 32 insertions(+), 15 deletions(-)
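
Condensed, for orientation: an illustrative sketch (not the kernel source; identifiers follow the diff below, and locking, the cpus_allowed update and error paths are elided) of the decision this patch adds to set_cpus_allowed_ptr(). A task that is currently running, or in the middle of waking, still needs the per-CPU stopper thread; a task that is merely queued on a runqueue can be moved directly by the new move_queued_task() helper; a blocked task needs no migration at all, since its next wakeup will place it.

/*
 * Illustrative sketch only: locking, cpus_allowed bookkeeping and error
 * paths are elided; identifiers follow the diff below.
 */
static struct rq *change_affinity_sketch(struct rq *rq, struct task_struct *p,
					 int dest_cpu)
{
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		/* Task is on a CPU (or waking): still need migration_cpu_stop(). */
		struct migration_arg arg = { p, dest_cpu };

		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
	} else if (task_on_rq_queued(p)) {
		/* Queued but not running: migrate it right here, no stopper. */
		rq = move_queued_task(p, dest_cpu);
	}
	/* Otherwise the task is blocked; the next wakeup will place it. */
	return rq;
}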
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5536397a0309..4b1ddebed54a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4629,6 +4629,33 @@ void init_idle(struct task_struct *idle, int cpu)
 }
 
 #ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+	struct rq *rq = task_rq(p);
+
+	lockdep_assert_held(&rq->lock);
+
+	dequeue_task(rq, p, 0);
+	p->on_rq = TASK_ON_RQ_MIGRATING;
+	set_task_cpu(p, new_cpu);
+	raw_spin_unlock(&rq->lock);
+
+	rq = cpu_rq(new_cpu);
+
+	raw_spin_lock(&rq->lock);
+	BUG_ON(task_cpu(p) != new_cpu);
+	p->on_rq = TASK_ON_RQ_QUEUED;
+	enqueue_task(rq, p, 0);
+	check_preempt_curr(rq, p, 0);
+
+	return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
 	if (p->sched_class && p->sched_class->set_cpus_allowed)
@@ -4685,14 +4712,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
+	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
-	}
+	} else if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 out:
 	task_rq_unlock(rq, p, &flags);
 
@@ -4735,19 +4763,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (task_on_rq_queued(p)) {
-		dequeue_task(rq, p, 0);
-		p->on_rq = TASK_ON_RQ_MIGRATING;
-		set_task_cpu(p, dest_cpu);
-		raw_spin_unlock(&rq->lock);
-
-		rq = cpu_rq(dest_cpu);
-		raw_spin_lock(&rq->lock);
-		BUG_ON(task_rq(p) != rq);
-		p->on_rq = TASK_ON_RQ_QUEUED;
-		enqueue_task(rq, p, 0);
-		check_preempt_curr(rq, p, 0);
-	}
+	if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 done:
 	ret = 1;
 fail:
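
From userspace this path is typically reached through sched_setaffinity(2), which ends up in set_cpus_allowed_ptr(). A purely illustrative, hypothetical demo (not part of the patch): the parent changes a busy child's affinity; whether the kernel takes the new direct move_queued_task() path or still goes through the stopper depends on whether the child happens to be running on a CPU at that instant.

/* Illustrative userspace demo: change another task's CPU affinity. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child < 0) {
		perror("fork");
		return 1;
	}
	if (child == 0) {
		/* Stay runnable for a while so we sit on some runqueue. */
		for (volatile unsigned long i = 0; i < 400000000UL; i++)
			;
		_exit(0);
	}

	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* restrict the child to CPU 0 */

	if (sched_setaffinity(child, sizeof(set), &set))
		perror("sched_setaffinity");

	waitpid(child, NULL, 0);
	return 0;
}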