path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a08387b5f7fa..f04add905bdf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1952,6 +1952,7 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	__acquires(rq1->lock)
 	__acquires(rq2->lock)
 {
+	BUG_ON(!irqs_disabled());
 	if (rq1 == rq2) {
 		spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
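
The BUG_ON(!irqs_disabled()) added above makes the locking contract explicit: runqueue locks are always taken with local interrupts already off. For the rq1 != rq2 case (just past this hunk), double_rq_lock() avoids ABBA deadlock by taking the lower-addressed lock first. Below is a minimal, compilable userspace sketch of that address-ordering idiom; struct rq, lock_pair() and unlock_pair() here are illustrative stand-ins built on pthread mutexes, not kernel API:

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for a runqueue; only the lock matters here. */
    struct rq { pthread_mutex_t lock; };

    /*
     * Address-ordered double lock, as double_rq_lock() does for
     * rq1 != rq2: whichever lock has the lower address is taken
     * first, so two threads locking the same pair in opposite
     * argument order can never deadlock against each other.
     */
    static void lock_pair(struct rq *rq1, struct rq *rq2)
    {
        if (rq1 == rq2) {
            pthread_mutex_lock(&rq1->lock);   /* one lock, taken once */
        } else if (rq1 < rq2) {
            pthread_mutex_lock(&rq1->lock);
            pthread_mutex_lock(&rq2->lock);
        } else {
            pthread_mutex_lock(&rq2->lock);
            pthread_mutex_lock(&rq1->lock);
        }
    }

    static void unlock_pair(struct rq *rq1, struct rq *rq2)
    {
        pthread_mutex_unlock(&rq1->lock);
        if (rq1 != rq2)
            pthread_mutex_unlock(&rq2->lock);
    }

    int main(void)
    {
        struct rq a = { PTHREAD_MUTEX_INITIALIZER };
        struct rq b = { PTHREAD_MUTEX_INITIALIZER };

        lock_pair(&a, &b);    /* same lock order either way around */
        unlock_pair(&a, &b);
        lock_pair(&b, &a);
        unlock_pair(&b, &a);
        puts("ordered double-lock ok");
        return 0;
    }
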
@@ -1991,6 +1992,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
 	if (unlikely(!spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
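
double_lock_balance() enters already holding this_rq->lock and opportunistically trylocks busiest->lock; only when the trylock fails and busiest has the lower address does it drop its own lock and retake both in address order, since blocking on a lower-addressed lock while holding a higher-addressed one would invert the ordering that double_rq_lock() relies on. The new check drops this_rq->lock before BUG_ON(1) so the resulting printk() output is not produced under a runqueue lock. A compilable userspace analogue of the trylock-then-reorder fallback (the names mirror the kernel's, but this is a sketch, not the kernel code):

    #include <pthread.h>

    struct rq { pthread_mutex_t lock; };

    /*
     * Caller already holds this_rq->lock. Returns 1 if this_rq->lock
     * was dropped and retaken, in which case the caller may need to
     * revalidate any state it read under the lock.
     */
    static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
    {
        if (pthread_mutex_trylock(&busiest->lock) != 0) {
            if (busiest < this_rq) {
                /* wrong order: back off and relock in address order */
                pthread_mutex_unlock(&this_rq->lock);
                pthread_mutex_lock(&busiest->lock);
                pthread_mutex_lock(&this_rq->lock);
                return 1;
            }
            /* we already hold the lower address: safe to block */
            pthread_mutex_lock(&busiest->lock);
        }
        return 0;
    }
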
@@ -5067,7 +5073,10 @@ wait_to_die:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/* Figure out where task on dead CPU should go, use force if neccessary. */
+/*
+ * Figure out where task on dead CPU should go, use force if neccessary.
+ * NOTE: interrupts should be disabled by the caller
+ */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	unsigned long flags;
@@ -5187,6 +5196,7 @@ void idle_task_exit(void)
 	mmdrop(mm);
 }
 
+/* called under rq->lock with disabled interrupts */
 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 {
 	struct rq *rq = cpu_rq(dead_cpu);
@@ -5203,10 +5213,11 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 	 * Drop lock around migration; if someone else moves it,
 	 * that's OK. No task can be added to this CPU, so iteration is
 	 * fine.
+	 * NOTE: interrupts should be left disabled  --dev@
 	 */
-	spin_unlock_irq(&rq->lock);
+	spin_unlock(&rq->lock);
 	move_task_off_dead_cpu(dead_cpu, p);
-	spin_lock_irq(&rq->lock);
+	spin_lock(&rq->lock);
 
 	put_task_struct(p);
 }
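
The only functional change is in this last hunk: spin_unlock_irq()/spin_lock_irq() unconditionally re-enable and re-disable local interrupts, whereas plain spin_unlock()/spin_lock() leave the interrupt state untouched. migrate_dead() therefore now keeps interrupts off across the call to move_task_off_dead_cpu(), satisfying both that function's new NOTE and the BUG_ON(!irqs_disabled()) asserted on the runqueue double-locking paths it eventually reaches. A small runnable model of why the old code would trip the new assertion, with interrupt state reduced to a global flag (every name here is an illustrative stand-in, not kernel API):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_off;   /* models the local CPU's interrupt state */

    /*
     * Analogue of the BUG_ON(!irqs_disabled()) in double_rq_lock(),
     * which move_task_off_dead_cpu() eventually reaches when it
     * takes both runqueue locks to migrate the task.
     */
    static void double_rq_lock_model(void)
    {
        assert(irqs_off && "runqueue double-lock needs interrupts off");
    }

    /* old migrate_dead(): spin_unlock_irq() re-enabled interrupts */
    static void migrate_dead_old(void)
    {
        irqs_off = false;          /* spin_unlock_irq(&rq->lock) */
        double_rq_lock_model();    /* fires: interrupts back on */
        irqs_off = true;           /* spin_lock_irq(&rq->lock) */
    }

    /* patched migrate_dead(): plain spin_unlock() leaves them off */
    static void migrate_dead_new(void)
    {
        /* spin_unlock(&rq->lock): interrupt state untouched */
        double_rq_lock_model();    /* holds: still disabled */
        /* spin_lock(&rq->lock) */
    }

    int main(void)
    {
        irqs_off = true;           /* caller disabled interrupts */
        migrate_dead_new();
        puts("patched path keeps interrupts off");
        (void)migrate_dead_old;    /* calling it would abort on the assert */
        return 0;
    }
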