author		Paul Mundt <lethal@linux-sh.org>	2010-10-13 14:49:15 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-10-13 14:49:15 -0400
commit		f7fcec93b619337feb9da829b8a9ab6ba86393bc (patch)
tree		3598a9c02d14252150ee9b8c1cc7988dd4636f19 /arch/sh/mm
parent		47da88f36639b8de57f6cdd680f8c27528ccd67c (diff)
sh: Fix up PMB locking.
This first converts the PMB locking over to raw spinlocks, and second fixes
up a nested locking issue that was triggering lockdep early on:
swapper/0 is trying to acquire lock:
(&pmbe->lock){......}, at: [<806be9bc>] pmb_init+0xf4/0x4dc
but task is already holding lock:
(&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc
other info that might help us debug this:
1 lock held by swapper/0:
#0: (&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
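For context, every pmb_entry embeds a lock of the same lock class, so taking pmbp->lock while pmbe->lock is already held looks to lockdep like a possible recursive deadlock even though the two entries are distinct. The fix annotates the inner acquisition with raw_spin_lock_nested() and SINGLE_DEPTH_NESTING. Below is a minimal sketch of that pattern, not the actual pmb.c code: struct entry, entry_init() and link_entries() are hypothetical stand-ins for illustration only.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical stand-in for struct pmb_entry: only the fields the
 * locking pattern needs. */
struct entry {
	raw_spinlock_t lock;
	struct entry *link;
};

static void entry_init(struct entry *e)
{
	raw_spin_lock_init(&e->lock);
	e->link = NULL;
}

/*
 * Link @prev to @next while @next->lock is already held.  Both locks are
 * instances of the same lock class, so the inner acquisition must be
 * annotated or lockdep reports a false-positive recursive-locking splat.
 */
static void link_entries(struct entry *prev, struct entry *next)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&next->lock, flags);

	if (prev) {
		/* Subclass 1 (SINGLE_DEPTH_NESTING) tells lockdep this is
		 * a deliberate one-level nesting within the same class. */
		raw_spin_lock_nested(&prev->lock, SINGLE_DEPTH_NESTING);
		prev->link = next;
		raw_spin_unlock(&prev->lock);
	}

	raw_spin_unlock_irqrestore(&next->lock, flags);
}

On a mainline kernel of this era raw spinlocks behave the same as spinlock_t; the raw variant matters on preempt-rt, where spinlock_t can become sleeping, which is one reason early-boot MMU code such as the PMB setup is a candidate for it.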
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/pmb.c	31
1 file changed, 15 insertions, 16 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 233c011c4d22..b20b1b3eee4b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;
 
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	memset(pmbe, 0, sizeof(struct pmb_entry));
 
-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
 
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 				return PTR_ERR(pmbe);
 			}
 
-			spin_lock_irqsave(&pmbe->lock, flags);
+			raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 			pmbe->size = pmb_sizes[i].size;
 
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			 * entries for easier tear-down.
 			 */
 			if (likely(pmbp)) {
-				spin_lock(&pmbp->lock);
+				raw_spin_lock_nested(&pmbp->lock,
+						     SINGLE_DEPTH_NESTING);
 				pmbp->link = pmbe;
-				spin_unlock(&pmbp->lock);
+				raw_spin_unlock(&pmbp->lock);
 			}
 
 			pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			i--;
 			mapped++;
 
-			spin_unlock_irqrestore(&pmbe->lock, flags);
+			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 		}
 	} while (size >= SZ_16M);
 
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
 
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
 		if (pmbp) {
-			spin_lock(&pmbp->lock);
-
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
-
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
 
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 
 		__set_pmb_entry(pmbe);
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
 	read_unlock(&pmb_rwlock);