Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 13
1 file changed, 9 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 6b6e0d70eb30..7ffaabd64f89 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -237,6 +237,7 @@ struct runqueue {
 
 	task_t *migration_thread;
 	struct list_head migration_queue;
+	int cpu;
 #endif
 
 #ifdef CONFIG_SCHEDSTATS
@@ -1654,6 +1655,9 @@ unsigned long nr_iowait(void)
 /*
  * double_rq_lock - safely lock two runqueues
  *
+ * We must take them in cpu order to match code in
+ * dependent_sleeper and wake_dependent_sleeper.
+ *
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
@@ -1665,7 +1669,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
 		spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
 	} else {
-		if (rq1 < rq2) {
+		if (rq1->cpu < rq2->cpu) {
 			spin_lock(&rq1->lock);
 			spin_lock(&rq2->lock);
 		} else {
@@ -1701,7 +1705,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
 	__acquires(this_rq->lock)
 {
 	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
+		if (busiest->cpu < this_rq->cpu) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
 			spin_lock(&this_rq->lock);
@@ -2869,7 +2873,7 @@ asmlinkage void __sched schedule(void)
 	 */
 	if (likely(!current->exit_state)) {
 		if (unlikely(in_atomic())) {
-			printk(KERN_ERR "scheduling while atomic: "
+			printk(KERN_ERR "BUG: scheduling while atomic: "
 				"%s/0x%08x/%d\n",
 				current->comm, preempt_count(), current->pid);
 			dump_stack();
@@ -6029,6 +6033,7 @@ void __init sched_init(void)
 		rq->push_cpu = 0;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
+		rq->cpu = i;
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
@@ -6069,7 +6074,7 @@ void __might_sleep(char *file, int line)
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
 	prev_jiffy = jiffies;
-	printk(KERN_ERR "Debug: sleeping function called from invalid"
+	printk(KERN_ERR "BUG: sleeping function called from invalid"
 		" context at %s:%d\n", file, line);
 	printk("in_atomic():%d, irqs_disabled():%d\n",
 		in_atomic(), irqs_disabled());