author		Magnus Damm <damm@igel.co.jp>	2008-11-25 07:57:29 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2008-12-22 04:43:48 -0500
commit		716777db7270255f1f7210fd87a7188b08c9a267 (patch)
tree		01e65e44140231a2b16d7ebd273e7e7b280decb7 /arch/sh
parent		0c9122323acb0c3410dfbd219cb47f4c2e9305e3 (diff)
sh: P4 ioremap pass-through
This patch adds a pass-through case when ioremapping P4 addresses.

Addresses passed to ioremap() should be physical addresses, so the best
option is usually to convert the virtual address to a physical address
before calling ioremap(). This will give you a virtual address in P2
which matches the physical address, and this works well for most
internal hardware blocks on the SuperH architecture.

However, some hardware blocks must be accessed through P4. Converting
the P4 address to a physical address and then back to a P2 address does
not work. One example of this is the sh7722 TMU block; it must be
accessed through P4.

Without this patch, P4 addresses will be mapped using PTEs, which
requires the page allocator to be up and running.

Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
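[ Illustration only, not part of the patch: a minimal sketch of the two
  ioremap() cases described above, assuming 29-bit mode. The block base
  0xa4d80000, the mapping sizes, and the function name are hypothetical,
  and 0xffd80000 is used as an illustrative sh7722 TMU base; PHYSADDR(),
  P4SEGADDR() and P3_ADDR_MAX are from arch/sh/include/asm/addrspace.h. ]

#include <linux/errno.h>
#include <linux/io.h>
#include <asm/addrspace.h>

static void __iomem *blk_regs, *tmu_regs;

static int map_example(void)
{
	/*
	 * Typical on-chip block: convert the known P2 virtual address to
	 * a physical address first.  ioremap() then returns the identical
	 * uncached P2 mapping without touching the page allocator.
	 */
	blk_regs = ioremap(PHYSADDR(0xa4d80000), 0x100);

	/*
	 * P4-only block such as the sh7722 TMU: the register window lies
	 * in P4 above the store queues, so with this patch ioremap()
	 * passes the address straight through via P4SEGADDR() instead of
	 * mapping it with PTEs.
	 */
	tmu_regs = ioremap(0xffd80000, 0x30);

	return (blk_regs && tmu_regs) ? 0 : -ENOMEM;
}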
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/include/asm/addrspace.h	11
-rw-r--r--	arch/sh/include/asm/io.h	4
-rw-r--r--	arch/sh/mm/fault_32.c	11
-rw-r--r--	arch/sh/mm/ioremap_32.c	3
4 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 2702d81bfc0d..36736c7e93db 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -49,5 +49,16 @@
 /* Check if an address can be reached in 29 bits */
 #define IS_29BIT(a)	(((unsigned long)(a)) < 0x20000000)
 
+#ifdef CONFIG_SH_STORE_QUEUES
+/*
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 65eaae34e753..61f6dae40534 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -260,6 +260,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 
 		return (void __iomem *)P2SEGADDR(offset);
 	}
+
+	/* P4 above the store queues are always mapped. */
+	if (unlikely(offset >= P3_ADDR_MAX))
+		return (void __iomem *)P4SEGADDR(offset);
 #endif
 
 	return __ioremap(offset, size, flags);
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 898d477e47c1..e58726892b5f 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -265,17 +265,6 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
 	return ret;
 }
 
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX		P4SEG
-#endif
-
 /*
  * Called with interrupts disabled.
  */
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 882a32ebc6b7..32946fba123e 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -116,9 +116,10 @@ EXPORT_SYMBOL(__ioremap);
 void __iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
+	unsigned long seg = PXSEG(vaddr);
 	struct vm_struct *p;
 
-	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
+	if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
 		return;
 
 #ifdef CONFIG_32BIT