Diffstat (limited to 'arch/sh/mm')
 -rw-r--r--  arch/sh/mm/Kconfig          |  4
 -rw-r--r--  arch/sh/mm/Makefile         |  3
 -rw-r--r--  arch/sh/mm/asids-debugfs.c  |  2
 -rw-r--r--  arch/sh/mm/cache-debugfs.c  | 10
 -rw-r--r--  arch/sh/mm/consistent.c     |  3
 -rw-r--r--  arch/sh/mm/init.c           | 52
 -rw-r--r--  arch/sh/mm/nommu.c          |  4
 -rw-r--r--  arch/sh/mm/pmb.c            | 35
 -rw-r--r--  arch/sh/mm/sram.c           | 34
 -rw-r--r--  arch/sh/mm/tlb-debugfs.c    | 11
 -rw-r--r--  arch/sh/mm/tlbflush_32.c    | 16
 -rw-r--r--  arch/sh/mm/tlbflush_64.c    |  5
 12 files changed, 126 insertions(+), 53 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 1445ca6257df..09370392aff1 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -168,6 +168,10 @@ config IOREMAP_FIXED
 config UNCACHED_MAPPING
 	bool
 
+config HAVE_SRAM_POOL
+	bool
+	select GENERIC_ALLOCATOR
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 53f7c684afb2..ab89ea4f9414 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_PMB) += pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
 obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
+obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
@@ -66,4 +67,4 @@ CFLAGS_fault_64.o += -ffixed-r7 \
 			 -ffixed-r60 -ffixed-r61 -ffixed-r62 \
 			 -fomit-frame-pointer
 
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index cd8c3bf39b5a..74c03ecc4871 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -63,7 +63,7 @@ static int __init asids_debugfs_init(void)
 {
 	struct dentry *asids_dentry;
 
-	asids_dentry = debugfs_create_file("asids", S_IRUSR, sh_debugfs_root,
+	asids_dentry = debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir,
 					   NULL, &asids_debugfs_fops);
 	if (!asids_dentry)
 		return -ENOMEM;
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 690ed010d002..52411462c409 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -126,25 +126,19 @@ static int __init cache_debugfs_init(void)
 {
 	struct dentry *dcache_dentry, *icache_dentry;
 
-	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
+	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_DCACHE,
 					    &cache_debugfs_fops);
 	if (!dcache_dentry)
 		return -ENOMEM;
-	if (IS_ERR(dcache_dentry))
-		return PTR_ERR(dcache_dentry);
 
-	icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
+	icache_dentry = debugfs_create_file("icache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_ICACHE,
 					    &cache_debugfs_fops);
 	if (!icache_dentry) {
 		debugfs_remove(dcache_dentry);
 		return -ENOMEM;
 	}
-	if (IS_ERR(icache_dentry)) {
-		debugfs_remove(dcache_dentry);
-		return PTR_ERR(icache_dentry);
-	}
 
 	return 0;
 }
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index c86a08540258..038793286990 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -38,11 +38,12 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	void *ret, *ret_nocache;
 	int order = get_order(size);
 
+	gfp |= __GFP_ZERO;
+
 	ret = (void *)__get_free_pages(gfp, order);
 	if (!ret)
 		return NULL;
 
-	memset(ret, 0, size);
 	/*
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
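
Folding __GFP_ZERO into the gfp mask lets the page allocator hand back pre-zeroed pages, so the explicit memset() goes away. One behavioural nuance, offered as a reading of the change rather than anything the patch states: the old memset() cleared only the first size bytes, while __GFP_ZERO zeroes the entire 2^order allocation, rounding slack included. A minimal sketch with made-up numbers:

	/* Sketch only: a 6000-byte request rounds up to order 1, i.e. two
	 * 4 KiB pages. __GFP_ZERO zeroes all 8192 bytes up front, where
	 * memset(ret, 0, 6000) left the trailing 2192 bytes untouched.
	 */
	size_t size = 6000;
	int order = get_order(size);	/* -> 1 with 4 KiB pages */
	void *ret = (void *)__get_free_pages(gfp | __GFP_ZERO, order);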
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 552bea5113f5..3385b28acaac 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -47,7 +47,6 @@ static pte_t *__get_pte_phys(unsigned long addr)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
 
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
@@ -67,8 +66,7 @@ static pte_t *__get_pte_phys(unsigned long addr)
 		return NULL;
 	}
 
-	pte = pte_offset_kernel(pmd, addr);
-	return pte;
+	return pte_offset_kernel(pmd, addr);
 }
 
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -125,13 +123,45 @@ void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
 	clear_pte_phys(address, prot);
 }
 
+static pmd_t * __init one_md_table_init(pud_t *pud)
+{
+	if (pud_none(*pud)) {
+		pmd_t *pmd;
+
+		pmd = alloc_bootmem_pages(PAGE_SIZE);
+		pud_populate(&init_mm, pud, pmd);
+		BUG_ON(pmd != pmd_offset(pud, 0));
+	}
+
+	return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte;
+
+		pte = alloc_bootmem_pages(PAGE_SIZE);
+		pmd_populate_kernel(&init_mm, pmd, pte);
+		BUG_ON(pte != pte_offset_kernel(pmd, 0));
+	}
+
+	return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					    unsigned long vaddr, pte_t *lastpte)
+{
+	return pte;
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 				  pgd_t *pgd_base)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
 	int i, j, k;
 	unsigned long vaddr;
 
@@ -144,19 +174,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
-#ifdef __PAGETABLE_PMD_FOLDED
-			pmd = (pmd_t *)pud;
-#else
-			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			pud_populate(&init_mm, pud, pmd);
+			pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
 			pmd += k;
 #endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
-				if (pmd_none(*pmd)) {
-					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-					pmd_populate_kernel(&init_mm, pmd, pte);
-					BUG_ON(pte != pte_offset_kernel(pmd, 0));
-				}
+				pte = page_table_kmap_check(one_page_table_init(pmd),
+							    pmd, vaddr, pte);
 				vaddr += PMD_SIZE;
 			}
 			k = 0;
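
The rewrite pulls the open-coded bootmem allocations out into one_md_table_init() and one_page_table_init(), matching the x86 helpers of the same name, and keeps page_table_kmap_check() as a stub for now; note it also moves from alloc_bootmem_low_pages() to alloc_bootmem_pages(). A condensed sketch of how the helpers compose, assuming a folded-PMD configuration (where pud_none() can never be true, so no PMD page is ever allocated):

	/* Assumed folded-PMD configuration; illustrative, not verbatim. */
	pmd_t *pmd = one_md_table_init(pud);	/* == pmd_offset(pud, 0) */
	pte_t *pte = one_page_table_init(pmd);	/* one bootmem page per
						 * empty PMD entry */
	pte = page_table_kmap_check(pte, pmd, vaddr, pte);	/* no-op stub */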
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 7694f50c9034..36312d254faf 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -67,6 +67,10 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	BUG();
 }
 
+void __flush_tlb_global(void)
+{
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 6379091a1647..b20b1b3eee4b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;
 
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	memset(pmbe, 0, sizeof(struct pmb_entry));
 
-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
 
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 				return PTR_ERR(pmbe);
 			}
 
-			spin_lock_irqsave(&pmbe->lock, flags);
+			raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 			pmbe->size = pmb_sizes[i].size;
 
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			 * entries for easier tear-down.
 			 */
 			if (likely(pmbp)) {
-				spin_lock(&pmbp->lock);
+				raw_spin_lock_nested(&pmbp->lock,
+						     SINGLE_DEPTH_NESTING);
 				pmbp->link = pmbe;
-				spin_unlock(&pmbp->lock);
+				raw_spin_unlock(&pmbp->lock);
 			}
 
 			pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			i--;
 			mapped++;
 
-			spin_unlock_irqrestore(&pmbe->lock, flags);
+			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 		}
 	} while (size >= SZ_16M);
 
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
 
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
 		if (pmbp) {
-			spin_lock(&pmbp->lock);
-
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
-
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
 
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 
 		__set_pmb_entry(pmbe);
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
 	read_unlock(&pmb_rwlock);
@@ -866,11 +865,9 @@ static int __init pmb_debugfs_init(void)
 	struct dentry *dentry;
 
 	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
-				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
+				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
 	if (!dentry)
 		return -ENOMEM;
-	if (IS_ERR(dentry))
-		return PTR_ERR(dentry);
 
 	return 0;
 }
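
Converting pmbe->lock from spinlock_t to raw_spinlock_t keeps PMB manipulation on a true spinning lock even on kernels where ordinary spinlocks may sleep (the preempt-rt model), and it surfaces a lockdep detail: an entry's lock and its predecessor's lock belong to the same lock class, so taking the second one without raw_spin_lock_nested() would trigger a false recursive-deadlock report. The pattern, condensed (illustrative, not a verbatim excerpt):

	/* Two locks of one class held at once: annotate the inner one. */
	raw_spin_lock_irqsave(&pmbe->lock, flags);
	raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
	pmbp->link = pmbe;		/* chain contiguous mappings */
	raw_spin_unlock(&pmbp->lock);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);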
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
new file mode 100644
index 000000000000..bc156ec4545e
--- /dev/null
+++ b/arch/sh/mm/sram.c
@@ -0,0 +1,34 @@
+/*
+ * SRAM pool for tiny memories not otherwise managed.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/sram.h>
+
+/*
+ * This provides a standard SRAM pool for tiny memories that can be
+ * added either by the CPU or the platform code. Typical SRAM sizes
+ * to be inserted in to the pool will generally be less than the page
+ * size, with anything more reasonably sized handled as a NUMA memory
+ * node.
+ */
+struct gen_pool *sram_pool;
+
+static int __init sram_pool_init(void)
+{
+	/*
+	 * This is a global pool, we don't care about node locality.
+	 */
+	sram_pool = gen_pool_create(1, -1);
+	if (unlikely(!sram_pool))
+		return -ENOMEM;
+
+	return 0;
+}
+core_initcall(sram_pool_init);
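
With HAVE_SRAM_POOL selected, CPU or board code can seed the pool and carve allocations from it through the normal genalloc API. A hedged sketch of a hypothetical board hook (the address, size, and function name are invented for illustration):

	#include <linux/genalloc.h>
	#include <asm/sram.h>

	static int __init board_sram_seed(void)
	{
		unsigned long chunk;

		/* donate a made-up 2 KiB on-chip SRAM window; -1 = any node */
		if (gen_pool_add(sram_pool, 0xe55f0000, 2048, -1))
			return -ENOMEM;

		/* gen_pool_alloc() returns 0, not an ERR_PTR, on failure */
		chunk = gen_pool_alloc(sram_pool, 64);
		if (!chunk)
			return -ENOMEM;

		gen_pool_free(sram_pool, chunk, 64);
		return 0;
	}

Since the pool is created with a minimum allocation order of 1, allocations come back with 2-byte granularity.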
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
index 229bf75f28df..dea637a09246 100644
--- a/arch/sh/mm/tlb-debugfs.c
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -151,15 +151,13 @@ static int __init tlb_debugfs_init(void)
 {
 	struct dentry *itlb, *utlb;
 
-	itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+	itlb = debugfs_create_file("itlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_ITLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!itlb))
 		return -ENOMEM;
-	if (IS_ERR(itlb))
-		return PTR_ERR(itlb);
 
-	utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+	utlb = debugfs_create_file("utlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_UTLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!utlb)) {
@@ -167,11 +165,6 @@ static int __init tlb_debugfs_init(void)
 		return -ENOMEM;
 	}
 
-	if (IS_ERR(utlb)) {
-		debugfs_remove(itlb);
-		return PTR_ERR(utlb);
-	}
-
 	return 0;
 }
 module_init(tlb_debugfs_init);
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 3fbe03ce8fe3..a6a20d6de4c0 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,3 +119,19 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 		local_irq_restore(flags);
 	}
 }
+
+void __flush_tlb_global(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/*
+	 * This is the most destructive of the TLB flushing options,
+	 * and will tear down all of the UTLB/ITLB mappings, including
+	 * wired entries.
+	 */
+	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+
+	local_irq_restore(flags);
+}
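
Setting the TI bit in MMUCR invalidates every UTLB/ITLB entry at once, wired entries included, hence the comment and the irq-off window around the read-modify-write. A sketch of the sort of caller this primitive suits (the function name is invented; the patch only adds the primitive itself):

	/* Illustrative: a resume-style path that cannot trust any cached
	 * translations, so even wired entries are torn down and must be
	 * re-established afterwards.
	 */
	static void example_mmu_restore(void)
	{
		__flush_tlb_global();
		/* ... reload wired TLB/PMB entries here ... */
	}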
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 03db41cc1268..7f5810f5dfdc 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -455,6 +455,11 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	flush_tlb_all();
 }
 
+void __flush_tlb_global(void)
+{
+	flush_tlb_all();
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }