Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0da2b2635c54..c4889abc00b6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5060,6 +5060,17 @@ wait_to_die:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
+{
+	int ret;
+
+	local_irq_disable();
+	ret = __migrate_task(p, src_cpu, dest_cpu);
+	local_irq_enable();
+	return ret;
+}
+
 /*
  * Figure out where task on dead CPU should go, use force if neccessary.
  * NOTE: interrupts should be disabled by the caller
@@ -5098,7 +5109,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5098 "longer affine to cpu%d\n", 5109 "longer affine to cpu%d\n",
5099 p->pid, p->comm, dead_cpu); 5110 p->pid, p->comm, dead_cpu);
5100 } 5111 }
5101 } while (!__migrate_task(p, dead_cpu, dest_cpu)); 5112 } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
5102} 5113}
5103 5114
5104/* 5115/*
@@ -5126,7 +5137,7 @@ static void migrate_live_tasks(int src_cpu)
 {
 	struct task_struct *p, *t;
 
-	write_lock_irq(&tasklist_lock);
+	read_lock(&tasklist_lock);
 
 	do_each_thread(t, p) {
 		if (p == current)
@@ -5136,7 +5147,7 @@ static void migrate_live_tasks(int src_cpu)
 		move_task_off_dead_cpu(src_cpu, p);
 	} while_each_thread(t, p);
 
-	write_unlock_irq(&tasklist_lock);
+	read_unlock(&tasklist_lock);
 }
 
 /*
@@ -5214,11 +5225,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 	 * Drop lock around migration; if someone else moves it,
 	 * that's OK. No task can be added to this CPU, so iteration is
 	 * fine.
-	 * NOTE: interrupts should be left disabled --dev@
 	 */
-	spin_unlock(&rq->lock);
+	spin_unlock_irq(&rq->lock);
 	move_task_off_dead_cpu(dead_cpu, p);
-	spin_lock(&rq->lock);
+	spin_lock_irq(&rq->lock);
 
 	put_task_struct(p);
 }
@@ -5447,14 +5457,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_stop(rq->migration_thread);
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
-		rq = task_rq_lock(rq->idle, &flags);
+		spin_lock_irq(&rq->lock);
 		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
 		rq->idle->static_prio = MAX_PRIO;
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
-		task_rq_unlock(rq, &flags);
+		spin_unlock_irq(&rq->lock);
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 
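Taken together, the hunks above move interrupt masking out of the callers and into the new __migrate_task_irq() helper: move_task_off_dead_cpu() retries the helper instead of bare __migrate_task(), migrate_live_tasks() drops from write_lock_irq(&tasklist_lock) to a plain read_lock(), migrate_dead() switches to the _irq spinlock variants across the lock drop (retiring the "interrupts should be left disabled" note), and the CPU_DEAD path in migration_call() takes rq->lock directly with spin_lock_irq(). For readers without a kernel tree handy, the masking idiom in __migrate_task_irq() has a rough userspace analogue: blocking signals around a call that must not be interrupted. A minimal sketch, assuming the hypothetical names critical_call() and critical_call_masked() (plain C with signals standing in for interrupts; not kernel code):

#include <signal.h>
#include <stdio.h>

/* Hypothetical stand-in for __migrate_task(): a call that must not be
 * interrupted by asynchronous handlers while it runs. */
static int critical_call(void)
{
	return 1;	/* pretend the work succeeded */
}

/* Analogue of __migrate_task_irq(): mask all signals around the call,
 * the way the helper brackets __migrate_task() with
 * local_irq_disable()/local_irq_enable(). */
static int critical_call_masked(void)
{
	sigset_t all, old;
	int ret;

	sigfillset(&all);
	sigprocmask(SIG_SETMASK, &all, &old);	/* ~ local_irq_disable() */
	ret = critical_call();
	sigprocmask(SIG_SETMASK, &old, NULL);	/* ~ local_irq_enable() */
	return ret;
}

int main(void)
{
	printf("critical_call_masked() -> %d\n", critical_call_masked());
	return 0;
}

The design choice is the same in both settings: bracket the masking inside the callee so every call site is correct by construction, rather than documenting an interrupt-state precondition and relying on each caller to honor it.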