diff options
author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-02 14:53:37 -0400 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-09 02:43:04 -0400 |
commit | b46b6942b39e577fe3ef1af928cd927864011247 (patch) | |
tree | 8eea7316ac79fae34ee7fdbccc580fbac1589ff3 /arch | |
parent | 3035c8634f0538a0c6946e3191bb6c9284b63798 (diff) |
powerpc/mm: Fix an AB->BA deadlock scenario with nohash MMU context lock
The MMU context_lock can be taken from switch_mm() while the
rq->lock is held. The rq->lock can also be taken from interrupts,
thus if we get interrupted in destroy_context() with the context
lock held and the interrupt handler tries to take the rq->lock,
there's a possible deadlock scenario with another CPU holding the
rq->lock and calling switch_mm(), which takes our context lock.
The fix is to always ensure interrupts are off when taking our
context lock. The switch_mm() path is already good so this fixes
the destroy_context() path.
While at it, turn the context lock into a new style spinlock.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/mm/mmu_context_nohash.c | 7 |
1 file changed, 4 insertions, 3 deletions
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index c42858780cbd..8343986809c0 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c | |||
@@ -46,7 +46,7 @@ static unsigned int next_context, nr_free_contexts; | |||
46 | static unsigned long *context_map; | 46 | static unsigned long *context_map; |
47 | static unsigned long *stale_map[NR_CPUS]; | 47 | static unsigned long *stale_map[NR_CPUS]; |
48 | static struct mm_struct **context_mm; | 48 | static struct mm_struct **context_mm; |
49 | static spinlock_t context_lock = SPIN_LOCK_UNLOCKED; | 49 | static DEFINE_SPINLOCK(context_lock); |
50 | 50 | ||
51 | #define CTX_MAP_SIZE \ | 51 | #define CTX_MAP_SIZE \ |
52 | (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1)) | 52 | (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1)) |
@@ -276,6 +276,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) | |||
276 | */ | 276 | */ |
277 | void destroy_context(struct mm_struct *mm) | 277 | void destroy_context(struct mm_struct *mm) |
278 | { | 278 | { |
279 | unsigned long flags; | ||
279 | unsigned int id; | 280 | unsigned int id; |
280 | 281 | ||
281 | if (mm->context.id == MMU_NO_CONTEXT) | 282 | if (mm->context.id == MMU_NO_CONTEXT) |
@@ -283,7 +284,7 @@ void destroy_context(struct mm_struct *mm) | |||
283 | 284 | ||
284 | WARN_ON(mm->context.active != 0); | 285 | WARN_ON(mm->context.active != 0); |
285 | 286 | ||
286 | spin_lock(&context_lock); | 287 | spin_lock_irqsave(&context_lock, flags); |
287 | id = mm->context.id; | 288 | id = mm->context.id; |
288 | if (id != MMU_NO_CONTEXT) { | 289 | if (id != MMU_NO_CONTEXT) { |
289 | __clear_bit(id, context_map); | 290 | __clear_bit(id, context_map); |
@@ -294,7 +295,7 @@ void destroy_context(struct mm_struct *mm) | |||
294 | context_mm[id] = NULL; | 295 | context_mm[id] = NULL; |
295 | nr_free_contexts++; | 296 | nr_free_contexts++; |
296 | } | 297 | } |
297 | spin_unlock(&context_lock); | 298 | spin_unlock_irqrestore(&context_lock, flags); |
298 | } | 299 | } |
299 | 300 | ||
300 | #ifdef CONFIG_SMP | 301 | #ifdef CONFIG_SMP |