Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 56 ++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 40 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5e43e9dc65d1..cbb3a0eee58e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2573,7 +2573,26 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 	if (!next)
 		smp_send_reschedule(cpu);
 }
-#endif
+
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
+{
+	struct rq *rq;
+	int ret = 0;
+
+	rq = __task_rq_lock(p);
+	if (p->on_cpu) {
+		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+		ttwu_do_wakeup(rq, p, wake_flags);
+		ret = 1;
+	}
+	__task_rq_unlock(rq);
+
+	return ret;
+
+}
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+#endif /* CONFIG_SMP */
 
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
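The new ttwu_activate_remote() follows the classic lock-and-recheck shape: take the lock that serializes the task's runqueue state, re-test p->on_cpu under that lock, and perform the activation only if the condition still holds. The userspace sketch below models that shape with a pthread mutex; struct task, its fields, and the activation step are hypothetical stand-ins, not kernel APIs.

	/*
	 * Userspace model of the lock-and-recheck pattern in
	 * ttwu_activate_remote() above.  All names are illustrative
	 * stand-ins for the kernel's rq lock and activation helpers.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct task {
		pthread_mutex_t lock;	/* stands in for the task's rq->lock */
		int on_cpu;		/* still running on its old CPU? */
		int queued;		/* re-activated by the waker? */
	};

	/* Returns 1 when the wakeup was carried out, like ttwu_activate_remote(). */
	static int activate_remote(struct task *p)
	{
		int ret = 0;

		pthread_mutex_lock(&p->lock);
		if (p->on_cpu) {	/* re-check under the lock */
			p->queued = 1;	/* stands in for ttwu_activate() */
			ret = 1;
		}
		pthread_mutex_unlock(&p->lock);

		return ret;
	}

	int main(void)
	{
		struct task t = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

		printf("wakeup done: %d\n", activate_remote(&t));
		return 0;
	}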
@@ -2631,17 +2650,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	while (p->on_cpu) {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 		/*
-		 * If called from interrupt context we could have landed in the
-		 * middle of schedule(), in this case we should take care not
-		 * to spin on ->on_cpu if p is current, since that would
-		 * deadlock.
+		 * In case the architecture enables interrupts in
+		 * context_switch(), we cannot busy wait, since that
+		 * would lead to deadlocks when an interrupt hits and
+		 * tries to wake up @prev. So bail and do a complete
+		 * remote wakeup.
 		 */
-		if (p == current) {
-			ttwu_queue(p, cpu);
+		if (ttwu_activate_remote(p, wake_flags))
 			goto stat;
-		}
-#endif
+#else
 		cpu_relax();
+#endif
 	}
 	/*
 	 * Pairs with the smp_wmb() in finish_lock_switch().
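The rewritten comment is easiest to see as a timeline. On an architecture that runs context_switch() with interrupts enabled, the wakeup may arrive from an IRQ that interrupted the switch on the very CPU doing it; an illustrative trace (not kernel output) of why spinning could then never finish:

	CPU0: context_switch(prev -> next)	/* prev->on_cpu is still 1 */
	CPU0:   <IRQ>				/* IRQs are enabled mid-switch */
	CPU0:     try_to_wake_up(prev)
	CPU0:       while (prev->on_cpu)
	CPU0:               cpu_relax();	/* spins forever: prev->on_cpu is
						   cleared only after the switch
						   resumes, which requires this
						   IRQ handler to return first */

ttwu_activate_remote() avoids the cycle: instead of waiting for ->on_cpu to clear, it takes the runqueue lock and completes the activation on the spot.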
@@ -5841,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	do_set_cpus_allowed(idle, cpumask_of(cpu));
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5929,6 +5948,16 @@ static inline void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	if (p->sched_class && p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, new_mask);
+	else {
+		cpumask_copy(&p->cpus_allowed, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
+}
+
 /*
  * This is how migration works:
  *
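do_set_cpus_allowed() centralizes logic that was previously open-coded in set_cpus_allowed_ptr() (see the following hunk): consult the scheduling class hook when one exists, otherwise apply the generic bookkeeping. A minimal userspace sketch of that dispatch-or-default shape, with toy stand-ins for struct task_struct, the cpumask, and the sched_class table:

	#include <stdio.h>

	typedef unsigned long cpumask_t;	/* toy mask: one bit per CPU */

	struct task;

	struct sched_class {
		/* optional per-class override, like .set_cpus_allowed */
		void (*set_cpus_allowed)(struct task *p, cpumask_t mask);
	};

	struct task {
		const struct sched_class *sched_class;
		cpumask_t cpus_allowed;
		int nr_cpus_allowed;
	};

	/* Dispatch to the class hook if present, else do the default update. */
	static void do_set_allowed(struct task *p, cpumask_t new_mask)
	{
		if (p->sched_class && p->sched_class->set_cpus_allowed) {
			p->sched_class->set_cpus_allowed(p, new_mask);
		} else {
			p->cpus_allowed = new_mask;
			p->nr_cpus_allowed = __builtin_popcountl(new_mask);
		}
	}

	int main(void)
	{
		struct task t = { .sched_class = NULL };

		do_set_allowed(&t, 0x5UL);	/* allow CPUs 0 and 2 */
		printf("nr_cpus_allowed = %d\n", t.nr_cpus_allowed);
		return 0;
	}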
@@ -5974,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}
 
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
-		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-	}
+	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
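With the helper in place, init_idle() and set_cpus_allowed_ptr() share a single definition of "update a task's allowed-CPU mask", so a scheduling class that provides .set_cpus_allowed is consulted on every path rather than only here. Both callers still run with the task's runqueue lock held: set_cpus_allowed_ptr() takes it via task_rq_lock() earlier in the function, and init_idle() holds rq->lock explicitly, as its chicken-and-egg comment notes.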