-rw-r--r--  arch/sh/Kconfig              |   3
-rw-r--r--  arch/sh/kernel/Makefile_32   |   1
-rw-r--r--  arch/sh/kernel/Makefile_64   |   1
-rw-r--r--  arch/sh/kernel/io.c          |   8
-rw-r--r--  arch/sh/kernel/io_generic.c  |  24
-rw-r--r--  arch/sh/kernel/io_trapped.c  | 269
-rw-r--r--  arch/sh/kernel/traps_32.c    |  59
-rw-r--r--  arch/sh/mm/fault_32.c        |   3
-rw-r--r--  include/asm-sh/io.h          |  10
-rw-r--r--  include/asm-sh/io_trapped.h  |  58
-rw-r--r--  include/asm-sh/system.h      |   5
-rw-r--r--  include/asm-sh/system_32.h   |   3
12 files changed, 406 insertions(+), 38 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8398cf105a0..f61bf17db39 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -93,6 +93,9 @@ config ARCH_NO_VIRT_TO_BUS
 config ARCH_SUPPORTS_AOUT
 	def_bool y
 
+config IO_TRAPPED
+	bool
+
 source "init/Kconfig"
 
 menu "System type"
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index c8928983105..62bf373266f 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -22,5 +22,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_PM) += pm.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 1ef21cc087f..e01283d49cb 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -18,5 +18,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_PM) += pm.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 71c9fde2fd9..2b899122990 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -63,7 +63,13 @@ EXPORT_SYMBOL(memset_io);
 
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
-	return sh_mv.mv_ioport_map(port, nr);
+	void __iomem *ret;
+
+	ret = __ioport_map_trapped(port, nr);
+	if (ret)
+		return ret;
+
+	return __ioport_map(port, nr);
 }
 EXPORT_SYMBOL(ioport_map);
 
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index 771ea423044..db769449f5a 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -33,17 +33,17 @@ static inline void delay(void)
 
 u8 generic_inb(unsigned long port)
 {
-	return ctrl_inb((unsigned long __force)ioport_map(port, 1));
+	return ctrl_inb((unsigned long __force)__ioport_map(port, 1));
 }
 
 u16 generic_inw(unsigned long port)
 {
-	return ctrl_inw((unsigned long __force)ioport_map(port, 2));
+	return ctrl_inw((unsigned long __force)__ioport_map(port, 2));
 }
 
 u32 generic_inl(unsigned long port)
 {
-	return ctrl_inl((unsigned long __force)ioport_map(port, 4));
+	return ctrl_inl((unsigned long __force)__ioport_map(port, 4));
 }
 
 u8 generic_inb_p(unsigned long port)
@@ -81,7 +81,7 @@ void generic_insb(unsigned long port, void *dst, unsigned long count)
 	volatile u8 *port_addr;
 	u8 *buf = dst;
 
-	port_addr = (volatile u8 *)ioport_map(port, 1);
+	port_addr = (volatile u8 *)__ioport_map(port, 1);
 	while (count--)
 		*buf++ = *port_addr;
 }
@@ -91,7 +91,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count)
 	volatile u16 *port_addr;
 	u16 *buf = dst;
 
-	port_addr = (volatile u16 *)ioport_map(port, 2);
+	port_addr = (volatile u16 *)__ioport_map(port, 2);
 	while (count--)
 		*buf++ = *port_addr;
 
@@ -103,7 +103,7 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
 	volatile u32 *port_addr;
 	u32 *buf = dst;
 
-	port_addr = (volatile u32 *)ioport_map(port, 4);
+	port_addr = (volatile u32 *)__ioport_map(port, 4);
 	while (count--)
 		*buf++ = *port_addr;
 
@@ -112,17 +112,17 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
 
 void generic_outb(u8 b, unsigned long port)
 {
-	ctrl_outb(b, (unsigned long __force)ioport_map(port, 1));
+	ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1));
 }
 
 void generic_outw(u16 b, unsigned long port)
 {
-	ctrl_outw(b, (unsigned long __force)ioport_map(port, 2));
+	ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2));
 }
 
 void generic_outl(u32 b, unsigned long port)
 {
-	ctrl_outl(b, (unsigned long __force)ioport_map(port, 4));
+	ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4));
 }
 
 void generic_outb_p(u8 b, unsigned long port)
@@ -153,7 +153,7 @@ void generic_outsb(unsigned long port, const void *src, unsigned long count)
 	volatile u8 *port_addr;
 	const u8 *buf = src;
 
-	port_addr = (volatile u8 __force *)ioport_map(port, 1);
+	port_addr = (volatile u8 __force *)__ioport_map(port, 1);
 
 	while (count--)
 		*port_addr = *buf++;
@@ -164,7 +164,7 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count)
 	volatile u16 *port_addr;
 	const u16 *buf = src;
 
-	port_addr = (volatile u16 __force *)ioport_map(port, 2);
+	port_addr = (volatile u16 __force *)__ioport_map(port, 2);
 
 	while (count--)
 		*port_addr = *buf++;
@@ -177,7 +177,7 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count)
 	volatile u32 *port_addr;
 	const u32 *buf = src;
 
-	port_addr = (volatile u32 __force *)ioport_map(port, 4);
+	port_addr = (volatile u32 __force *)__ioport_map(port, 4);
 	while (count--)
 		*port_addr = *buf++;
 
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
new file mode 100644
index 00000000000..0bfdc9a34e1
--- /dev/null
+++ b/arch/sh/kernel/io_trapped.c
@@ -0,0 +1,269 @@
+/*
+ * Trapped io support
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Intercept io operations by trapping.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/io_trapped.h>
+
+#define TRAPPED_PAGES_MAX 16
+#define MAX(a, b) (((a) >= (b)) ? (a) : (b))
+
+#ifdef CONFIG_HAS_IOPORT
+LIST_HEAD(trapped_io);
+#endif
+#ifdef CONFIG_HAS_IOMEM
+LIST_HEAD(trapped_mem);
+#endif
+static DEFINE_SPINLOCK(trapped_lock);
+
+int __init register_trapped_io(struct trapped_io *tiop)
+{
+	struct resource *res;
+	unsigned long len = 0, flags = 0;
+	struct page *pages[TRAPPED_PAGES_MAX];
+	int k, n;
+
+	/* structure must be page aligned */
+	if ((unsigned long)tiop & (PAGE_SIZE - 1))
+		goto bad;
+
+	for (k = 0; k < tiop->num_resources; k++) {
+		res = tiop->resource + k;
+		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+		flags |= res->flags;
+	}
+
+	/* support IORESOURCE_IO _or_ MEM, not both */
+	if (hweight_long(flags) != 1)
+		goto bad;
+
+	n = len >> PAGE_SHIFT;
+
+	if (n >= TRAPPED_PAGES_MAX)
+		goto bad;
+
+	for (k = 0; k < n; k++)
+		pages[k] = virt_to_page(tiop);
+
+	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
+	if (!tiop->virt_base)
+		goto bad;
+
+	len = 0;
+	for (k = 0; k < tiop->num_resources; k++) {
+		res = tiop->resource + k;
+		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
+			(unsigned long)(tiop->virt_base + len),
+			res->flags & IORESOURCE_IO ? "io" : "mmio",
+			(unsigned long)res->start);
+		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+	}
+
+	tiop->magic = IO_TRAPPED_MAGIC;
+	INIT_LIST_HEAD(&tiop->list);
+	spin_lock_irq(&trapped_lock);
+	if (flags & IORESOURCE_IO)
+		list_add(&tiop->list, &trapped_io);
+	if (flags & IORESOURCE_MEM)
+		list_add(&tiop->list, &trapped_mem);
+	spin_unlock_irq(&trapped_lock);
+
+	return 0;
+ bad:
+	pr_warning("unable to install trapped io filter\n");
+	return -1;
+}
+
+void __iomem *match_trapped_io_handler(struct list_head *list,
+				       unsigned long offset,
+				       unsigned long size)
+{
+	unsigned long voffs;
+	struct trapped_io *tiop;
+	struct resource *res;
+	int k, len;
+
+	spin_lock_irq(&trapped_lock);
+	list_for_each_entry(tiop, list, list) {
+		voffs = 0;
+		for (k = 0; k < tiop->num_resources; k++) {
+			res = tiop->resource + k;
+			if (res->start == offset) {
+				spin_unlock_irq(&trapped_lock);
+				return tiop->virt_base + voffs;
+			}
+
+			len = (res->end - res->start) + 1;
+			voffs += roundup(len, PAGE_SIZE);
+		}
+	}
+	spin_unlock_irq(&trapped_lock);
+	return NULL;
+}
+
+static struct trapped_io *lookup_tiop(unsigned long address)
+{
+	pgd_t *pgd_k;
+	pud_t *pud_k;
+	pmd_t *pmd_k;
+	pte_t *pte_k;
+	pte_t entry;
+
+	pgd_k = swapper_pg_dir + pgd_index(address);
+	if (!pgd_present(*pgd_k))
+		return NULL;
+
+	pud_k = pud_offset(pgd_k, address);
+	if (!pud_present(*pud_k))
+		return NULL;
+
+	pmd_k = pmd_offset(pud_k, address);
+	if (!pmd_present(*pmd_k))
+		return NULL;
+
+	pte_k = pte_offset_kernel(pmd_k, address);
+	entry = *pte_k;
+
+	return pfn_to_kaddr(pte_pfn(entry));
+}
+
+static unsigned long lookup_address(struct trapped_io *tiop,
+				    unsigned long address)
+{
+	struct resource *res;
+	unsigned long vaddr = (unsigned long)tiop->virt_base;
+	unsigned long len;
+	int k;
+
+	for (k = 0; k < tiop->num_resources; k++) {
+		res = tiop->resource + k;
+		len = roundup((res->end - res->start) + 1, PAGE_SIZE);
+		if (address < (vaddr + len))
+			return res->start + (address - vaddr);
+		vaddr += len;
+	}
+	return 0;
+}
+
+static unsigned long long copy_word(unsigned long src_addr, int src_len,
+				    unsigned long dst_addr, int dst_len)
+{
+	unsigned long long tmp = 0;
+
+	switch (src_len) {
+	case 1:
+		tmp = ctrl_inb(src_addr);
+		break;
+	case 2:
+		tmp = ctrl_inw(src_addr);
+		break;
+	case 4:
+		tmp = ctrl_inl(src_addr);
+		break;
+	case 8:
+		tmp = ctrl_inq(src_addr);
+		break;
+	}
+
+	switch (dst_len) {
+	case 1:
+		ctrl_outb(tmp, dst_addr);
+		break;
+	case 2:
+		ctrl_outw(tmp, dst_addr);
+		break;
+	case 4:
+		ctrl_outl(tmp, dst_addr);
+		break;
+	case 8:
+		ctrl_outq(tmp, dst_addr);
+		break;
+	}
+
+	return tmp;
+}
+
+static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
+{
+	struct trapped_io *tiop;
+	unsigned long src_addr = (unsigned long)src;
+	unsigned long long tmp;
+
+	pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
+	tiop = lookup_tiop(src_addr);
+	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+	src_addr = lookup_address(tiop, src_addr);
+	if (!src_addr)
+		return cnt;
+
+	tmp = copy_word(src_addr, MAX(cnt, (tiop->minimum_bus_width / 8)),
+			(unsigned long)dst, cnt);
+
+	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
+	return 0;
+}
+
+static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
+{
+	struct trapped_io *tiop;
+	unsigned long dst_addr = (unsigned long)dst;
+	unsigned long long tmp;
+
+	pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
+	tiop = lookup_tiop(dst_addr);
+	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+	dst_addr = lookup_address(tiop, dst_addr);
+	if (!dst_addr)
+		return cnt;
+
+	tmp = copy_word((unsigned long)src, cnt,
+			dst_addr, MAX(cnt, (tiop->minimum_bus_width / 8)));
+
+	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
+	return 0;
+}
+
+static struct mem_access trapped_io_access = {
+	from_device,
+	to_device,
+};
+
+int handle_trapped_io(struct pt_regs *regs, unsigned long address)
+{
+	mm_segment_t oldfs;
+	opcode_t instruction;
+	int tmp;
+
+	if (!lookup_tiop(address))
+		return 0;
+
+	WARN_ON(user_mode(regs));
+
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	if (copy_from_user(&instruction, (void *)(regs->pc),
+			   sizeof(instruction))) {
+		set_fs(oldfs);
+		return 0;
+	}
+
+	tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
+	set_fs(oldfs);
+	return tmp == 0;
+}
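
The file above only adds the registration and fault-handling machinery; a board still has to describe its device and call register_trapped_io(). The sketch below is a minimal, hypothetical example of that board-side step: the device name, bus address and bus width are invented, and only struct trapped_io, register_trapped_io() and its documented constraints (page-aligned structure, resources of a single type, total size below TRAPPED_PAGES_MAX pages) come from this patch.

/*
 * Hypothetical board setup code, for illustration only: register a
 * 16-bit peripheral window so that accesses to it are routed through
 * the trapped io filter added by this patch.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/io_trapped.h>

static struct resource example_trapped_resources[] = {
	[0] = {
		/* invented bus address; all resources must share one type */
		.start	= 0xb8000000,
		.end	= 0xb8000000 + PAGE_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
};

/* struct trapped_io is __aligned(PAGE_SIZE), which register_trapped_io() checks */
static struct trapped_io example_trapped_io = {
	.resource		= example_trapped_resources,
	.num_resources		= ARRAY_SIZE(example_trapped_resources),
	.minimum_bus_width	= 16,	/* replay accesses as at least 16-bit cycles */
};

static int __init example_trapped_setup(void)
{
	/* returns -1 and logs "unable to install trapped io filter" on failure */
	return register_trapped_io(&example_trapped_io);
}
arch_initcall(example_trapped_setup);
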
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 25b1b8672cf..baa4fa368dc 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -172,6 +172,11 @@ static inline void sign_extend(unsigned int count, unsigned char *dst)
 #endif
 }
 
+static struct mem_access user_mem_access = {
+	copy_from_user,
+	copy_to_user,
+};
+
 /*
  * handle an instruction that does an unaligned memory access by emulating the
  * desired behaviour
@@ -179,7 +184,8 @@ static inline void sign_extend(unsigned int count, unsigned char *dst)
  * (if that instruction is in a branch delay slot)
  * - return 0 if emulation okay, -EFAULT on existential error
  */
-static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
+static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
+				struct mem_access *ma)
 {
 	int ret, index, count;
 	unsigned long *rm, *rn;
@@ -206,7 +212,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 			dst += 4-count;
 #endif
-			if (copy_from_user(dst, src, count))
+			if (ma->from(dst, src, count))
 				goto fetch_fault;
 
 			sign_extend(count, dst);
@@ -219,7 +225,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 			dst = (unsigned char*) *rn;
 			dst += regs->regs[0];
 
-			if (copy_to_user(dst, src, count))
+			if (ma->to(dst, src, count))
 				goto fetch_fault;
 		}
 		ret = 0;
@@ -230,7 +236,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 		dst = (unsigned char*) *rn;
 		dst += (instruction&0x000F)<<2;
 
-		if (copy_to_user(dst,src,4))
+		if (ma->to(dst, src, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -243,7 +249,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 		src += 4-count;
 #endif
-		if (copy_to_user(dst, src, count))
+		if (ma->to(dst, src, count))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -254,7 +260,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 		dst = (unsigned char*) rn;
 		*(unsigned long*)dst = 0;
 
-		if (copy_from_user(dst,src,4))
+		if (ma->from(dst, src, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -269,7 +275,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 		dst += 4-count;
 #endif
-		if (copy_from_user(dst, src, count))
+		if (ma->from(dst, src, count))
 			goto fetch_fault;
 		sign_extend(count, dst);
 		ret = 0;
@@ -285,7 +291,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 			dst = (unsigned char*) *rm; /* called Rn in the spec */
 			dst += (instruction&0x000F)<<1;
 
-			if (copy_to_user(dst, src, 2))
+			if (ma->to(dst, src, 2))
 				goto fetch_fault;
 			ret = 0;
 			break;
@@ -299,7 +305,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 			dst += 2;
 #endif
-			if (copy_from_user(dst, src, 2))
+			if (ma->from(dst, src, 2))
 				goto fetch_fault;
 			sign_extend(2, dst);
 			ret = 0;
@@ -320,8 +326,9 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
  * emulate the instruction in the delay slot
  * - fetches the instruction from PC+2
  */
-static inline int handle_unaligned_delayslot(struct pt_regs *regs,
-					     opcode_t old_instruction)
+static inline int handle_delayslot(struct pt_regs *regs,
+				   opcode_t old_instruction,
+				   struct mem_access *ma)
 {
 	opcode_t instruction;
 	void *addr = (void *)(regs->pc + instruction_size(old_instruction));
@@ -336,7 +343,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs,
 		    regs, 0);
 	}
 
-	return handle_unaligned_ins(instruction, regs);
+	return handle_unaligned_ins(instruction, regs, ma);
 }
 
 /*
@@ -362,7 +369,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs,
 
 static int handle_unaligned_notify_count = 10;
 
-static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
+int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
+			    struct mem_access *ma)
 {
 	u_int rm;
 	int ret, index;
@@ -385,19 +393,19 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 	case 0x0000:
 		if (instruction==0x000B) {
 			/* rts */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc = regs->pr;
 		}
 		else if ((instruction&0x00FF)==0x0023) {
 			/* braf @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc += rm + 4;
 		}
 		else if ((instruction&0x00FF)==0x0003) {
 			/* bsrf @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 				regs->pr = regs->pc + 4;
 				regs->pc += rm + 4;
@@ -418,13 +426,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 	case 0x4000:
 		if ((instruction&0x00FF)==0x002B) {
 			/* jmp @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc = rm;
 		}
 		else if ((instruction&0x00FF)==0x000B) {
 			/* jsr @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 				regs->pr = regs->pc + 4;
 				regs->pc = rm;
@@ -451,7 +459,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 		case 0x0B00: /* bf lab - no delayslot*/
 			break;
 		case 0x0F00: /* bf/s lab */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
 				if ((regs->sr & 0x00000001) != 0)
@@ -464,7 +472,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 		case 0x0900: /* bt lab - no delayslot */
 			break;
 		case 0x0D00: /* bt/s lab */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
 				if ((regs->sr & 0x00000001) == 0)
@@ -478,13 +486,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 		break;
 
 	case 0xA000: /* bra label */
-		ret = handle_unaligned_delayslot(regs, instruction);
+		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0)
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 		break;
 
 	case 0xB000: /* bsr label */
-		ret = handle_unaligned_delayslot(regs, instruction);
+		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0) {
 			regs->pr = regs->pc + 4;
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
@@ -495,7 +503,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 
 	/* handle non-delay-slot instruction */
  simple:
-	ret = handle_unaligned_ins(instruction, regs);
+	ret = handle_unaligned_ins(instruction, regs, ma);
 	if (ret==0)
 		regs->pc += instruction_size(instruction);
 	return ret;
@@ -558,7 +566,8 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 			goto uspace_segv;
 		}
 
-		tmp = handle_unaligned_access(instruction, regs);
+		tmp = handle_unaligned_access(instruction, regs,
+					      &user_mem_access);
 		set_fs(oldfs);
 
 		if (tmp==0)
@@ -587,7 +596,7 @@ uspace_segv:
 			die("insn faulting in do_address_error", regs, 0);
 		}
 
-		handle_unaligned_access(instruction, regs);
+		handle_unaligned_access(instruction, regs, &user_mem_access);
 		set_fs(oldfs);
 	}
 }
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 33b43d20e9f..4ef0a1f1a9a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -163,6 +164,8 @@ no_context:
 	if (fixup_exception(regs))
 		return;
 
+	if (handle_trapped_io(regs, address))
+		return;
 /*
  * Oops. The kernel tried to access some bad page. We'll have to
  * terminate things with extreme prejudice.
diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h
index 94900c08951..3d2b114f9d5 100644
--- a/include/asm-sh/io.h
+++ b/include/asm-sh/io.h
@@ -38,6 +38,7 @@
  */
 #define __IO_PREFIX generic
 #include <asm/io_generic.h>
+#include <asm/io_trapped.h>
 
 #define maybebadio(port) \
 	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
@@ -207,6 +208,8 @@ static inline void __set_io_port_base(unsigned long pbase)
 	generic_io_base = pbase;
 }
 
+#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))
+
 /* We really want to try and get these to memcpy etc */
 extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long);
 extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
@@ -309,7 +312,14 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
 #ifdef CONFIG_SUPERH32
 	unsigned long last_addr = offset + size - 1;
+#endif
+	void __iomem *ret;
 
+	ret = __ioremap_trapped(offset, size);
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_SUPERH32
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
 	 * mapped. Uncached access for P1 addresses are done through P2.
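
As a driver-side sketch of the fall-through path added here (nothing in this patch requires driver changes; the physical address below is hypothetical and assumed to have been registered with register_trapped_io() beforehand): ioremap() of a registered start address returns the trapped window, and each access through it faults into handle_trapped_io(), which replays it honouring the registered minimum_bus_width.

#include <asm/io.h>
#include <asm/page.h>

static u16 example_read_device_id(void)
{
	/* assumes 0xb8000000 was registered as a trapped IORESOURCE_MEM range */
	void __iomem *regs = ioremap(0xb8000000, PAGE_SIZE);
	u16 id;

	if (!regs)
		return 0;

	/*
	 * This readw() hits the PAGE_NONE mapping set up by
	 * register_trapped_io(), faults, and is replayed by
	 * handle_trapped_io() via trapped_io_access.
	 */
	id = readw(regs);

	/* the trapped window belongs to the board code, so it is not unmapped here */
	return id;
}
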
diff --git a/include/asm-sh/io_trapped.h b/include/asm-sh/io_trapped.h
new file mode 100644
index 00000000000..f1251d4f0ba
--- /dev/null
+++ b/include/asm-sh/io_trapped.h
@@ -0,0 +1,58 @@
+#ifndef __ASM_SH_IO_TRAPPED_H
+#define __ASM_SH_IO_TRAPPED_H
+
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <asm/page.h>
+
+#define IO_TRAPPED_MAGIC 0xfeedbeef
+
+struct trapped_io {
+	unsigned int magic;
+	struct resource *resource;
+	unsigned int num_resources;
+	unsigned int minimum_bus_width;
+	struct list_head list;
+	void __iomem *virt_base;
+} __aligned(PAGE_SIZE);
+
+#ifdef CONFIG_IO_TRAPPED
+int register_trapped_io(struct trapped_io *tiop);
+int handle_trapped_io(struct pt_regs *regs, unsigned long address);
+
+void __iomem *match_trapped_io_handler(struct list_head *list,
+				       unsigned long offset,
+				       unsigned long size);
+
+#ifdef CONFIG_HAS_IOMEM
+extern struct list_head trapped_mem;
+
+static inline void __iomem *
+__ioremap_trapped(unsigned long offset, unsigned long size)
+{
+	return match_trapped_io_handler(&trapped_mem, offset, size);
+}
+#else
+#define __ioremap_trapped(offset, size) NULL
+#endif
+
+#ifdef CONFIG_HAS_IOPORT
+extern struct list_head trapped_io;
+
+static inline void __iomem *
+__ioport_map_trapped(unsigned long offset, unsigned long size)
+{
+	return match_trapped_io_handler(&trapped_io, offset, size);
+}
+#else
+#define __ioport_map_trapped(offset, size) NULL
+#endif
+
+#else
+#define register_trapped_io(tiop) (-1)
+#define handle_trapped_io(tiop, address) 0
+#define __ioremap_trapped(offset, size) NULL
+#define __ioport_map_trapped(offset, size) NULL
+#endif
+
+#endif /* __ASM_SH_IO_TRAPPED_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 772cd1a0a67..5145aa2a0ce 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -182,6 +182,11 @@ BUILD_TRAP_HANDLER(fpu_state_restore);
 
 #define arch_align_stack(x) (x)
 
+struct mem_access {
+	unsigned long (*from)(void *dst, const void *src, unsigned long cnt);
+	unsigned long (*to)(void *dst, const void *src, unsigned long cnt);
+};
+
 #ifdef CONFIG_SUPERH32
 # include "system_32.h"
 #else
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
index 7ff08d956ba..f11bcf0855e 100644
--- a/include/asm-sh/system_32.h
+++ b/include/asm-sh/system_32.h
@@ -96,4 +96,7 @@ do { \
 		     : "=&r" (__dummy));			\
 } while (0)
 
+int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
+			    struct mem_access *ma);
+
 #endif /* __ASM_SH_SYSTEM_32_H */