Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--	arch/sh/mm/pmb.c	35
1 file changed, 16 insertions, 19 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 6379091a1647..b20b1b3eee4b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;
 
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	memset(pmbe, 0, sizeof(struct pmb_entry));
 
-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
 
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
368 return PTR_ERR(pmbe); 368 return PTR_ERR(pmbe);
369 } 369 }
370 370
371 spin_lock_irqsave(&pmbe->lock, flags); 371 raw_spin_lock_irqsave(&pmbe->lock, flags);
372 372
373 pmbe->size = pmb_sizes[i].size; 373 pmbe->size = pmb_sizes[i].size;
374 374
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			 * entries for easier tear-down.
 			 */
 			if (likely(pmbp)) {
-				spin_lock(&pmbp->lock);
+				raw_spin_lock_nested(&pmbp->lock,
+						     SINGLE_DEPTH_NESTING);
 				pmbp->link = pmbe;
-				spin_unlock(&pmbp->lock);
+				raw_spin_unlock(&pmbp->lock);
 			}
 
 			pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			i--;
 			mapped++;
 
-			spin_unlock_irqrestore(&pmbe->lock, flags);
+			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 		}
 	} while (size >= SZ_16M);
 
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
 
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
 		if (pmbp) {
-			spin_lock(&pmbp->lock);
-
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
-
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
 
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 
 		__set_pmb_entry(pmbe);
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
 	read_unlock(&pmb_rwlock);
@@ -866,11 +865,9 @@ static int __init pmb_debugfs_init(void)
 	struct dentry *dentry;
 
 	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
-				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
+				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
 	if (!dentry)
 		return -ENOMEM;
-	if (IS_ERR(dentry))
-		return PTR_ERR(dentry);
 
 	return 0;
 }
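
The hunks above convert the per-entry spinlock_t to a raw_spinlock_t and, where two locks of the same class are held while chaining adjacent PMB entries, annotate the inner acquisition with raw_spin_lock_nested() and SINGLE_DEPTH_NESTING so lockdep does not report it as recursive locking. Below is a minimal, out-of-tree sketch of that pattern; struct demo_entry and demo_link() are illustrative names only and are not part of this patch.

/* Sketch of the nested raw-spinlock linking pattern used above. */
#include <linux/spinlock.h>

struct demo_entry {
	raw_spinlock_t lock;
	struct demo_entry *link;
};

static void demo_link(struct demo_entry *prev, struct demo_entry *cur)
{
	unsigned long flags;

	/* Outer lock: the entry currently being set up. */
	raw_spin_lock_irqsave(&cur->lock, flags);

	if (prev) {
		/*
		 * Inner lock: same lock class as cur->lock, so tell
		 * lockdep about the nesting depth explicitly.
		 */
		raw_spin_lock_nested(&prev->lock, SINGLE_DEPTH_NESTING);
		prev->link = cur;
		raw_spin_unlock(&prev->lock);
	}

	raw_spin_unlock_irqrestore(&cur->lock, flags);
}

A raw_spinlock_t also stays a true spinning lock on preemptible/RT configurations, which is the usual motivation for this kind of conversion in low-level arch/MM code.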