Diffstat (limited to 'kernel')

 kernel/cpuset.c | 15 ++++++++++++++-
 kernel/sched.c  | 12 +++++++++++-
 2 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index fa31cb9f9898..50f5dc463688 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1818,10 +1818,23 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
 	cpumask_t mask;
 
 	mutex_lock(&callback_mutex);
+	mask = cpuset_cpus_allowed_locked(tsk);
+	mutex_unlock(&callback_mutex);
+
+	return mask;
+}
+
+/**
+ * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
+ * Must be called with callback_mutex held.
+ **/
+cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
+{
+	cpumask_t mask;
+
 	task_lock(tsk);
 	guarantee_online_cpus(task_cs(tsk), &mask);
 	task_unlock(tsk);
-	mutex_unlock(&callback_mutex);
 
 	return mask;
 }
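The cpuset.c hunk splits the existing helper into the usual wrapper/_locked pair: cpuset_cpus_allowed() still takes callback_mutex itself, while the new cpuset_cpus_allowed_locked() assumes the caller already holds that mutex (via cpuset_lock()/cpuset_unlock()) and therefore never blocks. The sketch below is a minimal user-space analogy of that split, not kernel code; get_mask(), get_mask_locked() and callback_lock are hypothetical stand-ins for the real symbols.

/* Hypothetical user-space analogy of the cpuset_cpus_allowed() /
 * cpuset_cpus_allowed_locked() split; only the locking pattern is the point.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t callback_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long current_mask = 0x3;   /* stand-in for the task's cpumask_t */

/* _locked variant: caller must already hold callback_lock; does not block. */
static unsigned long get_mask_locked(void)
{
	return current_mask;
}

/* Blocking wrapper: takes the lock, then defers to the _locked variant,
 * the same shape cpuset_cpus_allowed() has after this patch. */
static unsigned long get_mask(void)
{
	unsigned long mask;

	pthread_mutex_lock(&callback_lock);
	mask = get_mask_locked();
	pthread_mutex_unlock(&callback_lock);

	return mask;
}

int main(void)
{
	printf("mask via blocking wrapper: %#lx\n", get_mask());
	return 0;
}

Keeping the lock scope in the caller is what lets the inner helper stay non-blocking, which is exactly what the sched.c side below relies on.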
diff --git a/kernel/sched.c b/kernel/sched.c
index a7e30462600f..4071306e1088 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5160,8 +5160,16 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu == NR_CPUS) {
+		cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
+		/*
+		 * Try to stay on the same cpuset, where the
+		 * current cpuset may be a subset of all cpus.
+		 * The cpuset_cpus_allowed_locked() variant of
+		 * cpuset_cpus_allowed() will not block.  It must be
+		 * called within calls to cpuset_lock/cpuset_unlock.
+		 */
 		rq = task_rq_lock(p, &flags);
-		cpus_setall(p->cpus_allowed);
+		p->cpus_allowed = cpus_allowed;
 		dest_cpu = any_online_cpu(p->cpus_allowed);
 		task_rq_unlock(rq, &flags);
 
@@ -5527,6 +5535,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
+		cpuset_lock(); /* around calls to cpuset_cpus_allowed_locked() */
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
@@ -5540,6 +5549,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
 		spin_unlock_irq(&rq->lock);
+		cpuset_unlock();
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 
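In sched.c, the CPU_DEAD/CPU_DEAD_FROZEN path is now bracketed by cpuset_lock()/cpuset_unlock(), so that move_task_off_dead_cpu(), reached from migrate_live_tasks() and migrate_dead_tasks(), can call the non-blocking cpuset_cpus_allowed_locked() for each task rather than taking callback_mutex again inside a section that already holds it. Below is a self-contained sketch of that bracketing pattern, under the same user-space assumptions as the earlier example; migrate_all() and place_task() are made-up names standing in for the migration path, not kernel functions.

/* Hypothetical user-space sketch of the bracketing added to migration_call():
 * take the "cpuset" lock once around the whole migration section, and let the
 * per-task helper rely on it instead of locking on its own.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t callback_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long current_mask = 0x3;	/* stand-in for a cpuset's cpumask */

/* Analogue of cpuset_cpus_allowed_locked(): caller holds callback_lock. */
static unsigned long get_mask_locked(void)
{
	return current_mask;
}

/* Analogue of move_task_off_dead_cpu(): runs with callback_lock already held,
 * so it may only use the _locked accessor. */
static void place_task(int task)
{
	unsigned long allowed = get_mask_locked();

	printf("task %d kept inside mask %#lx\n", task, allowed);
}

/* Analogue of the CPU_DEAD branch of migration_call(). */
static void migrate_all(void)
{
	pthread_mutex_lock(&callback_lock);	/* cpuset_lock() analogue */
	for (int task = 0; task < 3; task++)
		place_task(task);
	pthread_mutex_unlock(&callback_lock);	/* cpuset_unlock() analogue */
}

int main(void)
{
	migrate_all();
	return 0;
}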