author		Paul Mundt <lethal@linux-sh.org>	2010-01-18 23:34:38 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-01-18 23:34:38 -0500
commit		d57d64080ddc0ff13fcffc898b6251074a482ba1 (patch)
tree		c38fd506a30d56de84a39285412ffc1b45cc8d33 /arch/sh/mm/ioremap.c
parent		af1415314a4190b8ea06e53808d392fcf91555af (diff)
sh: Prevent 64-bit pgprot clobbering across ioremap implementations.
Presently 'flags' gets passed around a lot between the various ioremap helpers and implementations, and it is only 32 bits wide. In the X2TLB case we use 64-bit pgprots, which presently results in the upper 32 bits being chopped off (and those handily include our read/write/exec permissions).

As such, we convert everything internally to using pgprot_t directly and simply convert over with pgprot_val() where needed. With this in place, transparent fixmap utilization for early ioremap works as expected.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
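For illustration, a minimal stand-alone sketch of the truncation problem follows. It is not part of the patch: pgprotval_t, pgprot_t, __pgprot() and pgprot_val() below are simplified userspace stand-ins for the kernel's X2TLB definitions, and the permission bit layout is hypothetical. It only demonstrates that forcing a 64-bit pgprot through a 32-bit 'flags' value drops the upper half, while carrying the pgprot_t through preserves it.

/*
 * Illustrative sketch only: simplified userspace stand-ins for the
 * kernel's X2TLB pgprot types; the bit positions are made up.
 */
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t pgprotval_t;                    /* X2TLB pgprots are 64-bit */
typedef struct { pgprotval_t pgprot; } pgprot_t;

#define __pgprot(x)    ((pgprot_t) { (x) })
#define pgprot_val(x)  ((x).pgprot)

int main(void)
{
        /* Hypothetical pgprot with permission bits in the upper 32 bits. */
        pgprot_t prot = __pgprot((0x7ULL << 32) | 0x1ULL);

        /* Old path: squeezing the value through 32-bit 'flags' chops the top half. */
        uint32_t flags = (uint32_t)pgprot_val(prot);

        /* New path: the pgprot_t is passed through unchanged. */
        pgprot_t kept = prot;

        printf("as 32-bit flags: 0x%08" PRIx32 "\n", flags);
        printf("as pgprot_t:     0x%016" PRIx64 "\n", pgprot_val(kept));
        return 0;
}

The patch fixes this by keeping the value as a pgprot_t through all of the ioremap paths and only collapsing it with pgprot_val() at the pmb_remap() boundary, as the diff below shows.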
Diffstat (limited to 'arch/sh/mm/ioremap.c')
-rw-r--r--	arch/sh/mm/ioremap.c	9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a130b2278e92..85b420d00622 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -35,11 +35,10 @@
  */
 void __iomem * __init_refok
 __ioremap_caller(unsigned long phys_addr, unsigned long size,
-                 unsigned long flags, void *caller)
+                 pgprot_t pgprot, void *caller)
 {
         struct vm_struct *area;
         unsigned long offset, last_addr, addr, orig_addr;
-        pgprot_t pgprot;
 
         /* Don't allow wraparound or zero size */
         last_addr = phys_addr + size - 1;
@@ -69,7 +68,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
          * If we can't yet use the regular approach, go the fixmap route.
          */
         if (!mem_init_done)
-                return ioremap_fixed(phys_addr, size, __pgprot(flags));
+                return ioremap_fixed(phys_addr, size, pgprot);
 
         /*
          * Ok, go for it..
@@ -91,8 +90,9 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
          * PMB entries are all pre-faulted.
          */
         if (unlikely(phys_addr >= P1SEG)) {
-                unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+                unsigned long mapped;
 
+                mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
                 if (likely(mapped)) {
                         addr += mapped;
                         phys_addr += mapped;
@@ -101,7 +101,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
         }
 #endif
 
-        pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
         if (likely(size))
                 if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
                         vunmap((void *)orig_addr);