aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/include/asm
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2010-01-18 23:34:38 -0500
committerPaul Mundt <lethal@linux-sh.org>2010-01-18 23:34:38 -0500
commitd57d64080ddc0ff13fcffc898b6251074a482ba1 (patch)
treec38fd506a30d56de84a39285412ffc1b45cc8d33 /arch/sh/include/asm
parentaf1415314a4190b8ea06e53808d392fcf91555af (diff)
sh: Prevent 64-bit pgprot clobbering across ioremap implementations.
Presently 'flags' gets passed around a lot between the various ioremap helpers and implementations, which is only 32 bits wide. In the X2TLB case we use 64-bit pgprots, which presently results in the upper 32 bits being chopped off (which handily include our read/write/exec permissions). As such, we convert everything internally to using pgprot_t directly and simply convert over with pgprot_val() where needed. With this in place, transparent fixmap utilization for early ioremap works as expected. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--arch/sh/include/asm/io.h53
1 file changed, 31 insertions, 22 deletions
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 6a0dd8c1e0a9..13696dfccc16 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -235,7 +235,7 @@ unsigned long long poke_real_address_q(unsigned long long addr,
235 */ 235 */
236#ifdef CONFIG_MMU 236#ifdef CONFIG_MMU
237void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, 237void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
238 unsigned long flags, void *caller); 238 pgprot_t prot, void *caller);
239void __iounmap(void __iomem *addr); 239void __iounmap(void __iomem *addr);
240 240
241#ifdef CONFIG_IOREMAP_FIXED 241#ifdef CONFIG_IOREMAP_FIXED
@@ -254,13 +254,13 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
254#endif 254#endif
255 255
256static inline void __iomem * 256static inline void __iomem *
257__ioremap(unsigned long offset, unsigned long size, unsigned long flags) 257__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
258{ 258{
259 return __ioremap_caller(offset, size, flags, __builtin_return_address(0)); 259 return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
260} 260}
261 261
262static inline void __iomem * 262static inline void __iomem *
263__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags) 263__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
264{ 264{
265#ifdef CONFIG_29BIT 265#ifdef CONFIG_29BIT
266 unsigned long last_addr = offset + size - 1; 266 unsigned long last_addr = offset + size - 1;
@@ -272,7 +272,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
272 * mapping must be done by the PMB or by using page tables. 272 * mapping must be done by the PMB or by using page tables.
273 */ 273 */
274 if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { 274 if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
275 if (unlikely(flags & _PAGE_CACHABLE)) 275 if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
276 return (void __iomem *)P1SEGADDR(offset); 276 return (void __iomem *)P1SEGADDR(offset);
277 277
278 return (void __iomem *)P2SEGADDR(offset); 278 return (void __iomem *)P2SEGADDR(offset);
@@ -287,7 +287,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
287} 287}
288 288
289static inline void __iomem * 289static inline void __iomem *
290__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) 290__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
291{ 291{
292 void __iomem *ret; 292 void __iomem *ret;
293 293
@@ -295,30 +295,39 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
295 if (ret) 295 if (ret)
296 return ret; 296 return ret;
297 297
298 ret = __ioremap_29bit(offset, size, flags); 298 ret = __ioremap_29bit(offset, size, prot);
299 if (ret) 299 if (ret)
300 return ret; 300 return ret;
301 301
302 return __ioremap(offset, size, flags); 302 return __ioremap(offset, size, prot);
303} 303}
304#else 304#else
305#define __ioremap(offset, size, flags) ((void __iomem *)(offset)) 305#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
306#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset)) 306#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
307#define __iounmap(addr) do { } while (0) 307#define __iounmap(addr) do { } while (0)
308#endif /* CONFIG_MMU */ 308#endif /* CONFIG_MMU */
309 309
310#define ioremap(offset, size) \ 310static inline void __iomem *
311 __ioremap_mode((offset), (size), 0) 311ioremap(unsigned long offset, unsigned long size)
312#define ioremap_nocache(offset, size) \ 312{
313 __ioremap_mode((offset), (size), 0) 313 return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
314#define ioremap_cache(offset, size) \ 314}
315 __ioremap_mode((offset), (size), _PAGE_CACHABLE) 315
316#define p3_ioremap(offset, size, flags) \ 316static inline void __iomem *
317 __ioremap((offset), (size), (flags)) 317ioremap_cache(unsigned long offset, unsigned long size)
318#define ioremap_prot(offset, size, flags) \ 318{
319 __ioremap_mode((offset), (size), (flags)) 319 return __ioremap_mode(offset, size, PAGE_KERNEL);
320#define iounmap(addr) \ 320}
321 __iounmap((addr)) 321
322static inline void __iomem *
323ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
324{
325 return __ioremap_mode(offset, size, __pgprot(flags));
326}
327
328#define ioremap_nocache ioremap
329#define p3_ioremap __ioremap
330#define iounmap __iounmap
322 331
323#define maybebadio(port) \ 332#define maybebadio(port) \
324 printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ 333 printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \