path: root/arch/sh/kernel
author     Magnus Damm <magnus.damm@gmail.com>  2008-02-07 06:18:21 -0500
committer  Paul Mundt <lethal@linux-sh.org>     2008-02-14 00:22:09 -0500
commit     e7cc9a7340b8ec018caa9eb1d035fdaef1f2fc51 (patch)
tree       a797888f8d3f95734288978351c33af3c965494c /arch/sh/kernel
parent     2ade1a9b425c24037327197ea97db054395b536b (diff)
sh: trapped io support V2
The idea is that we want to get rid of the in/out/readb/writeb callbacks
from the machvec and replace that with simple inline read and write
operations to memory. Fast and simple for most hardware devices (think
pci). Some devices require special treatment though - like 16-bit only CF
devices - so we need to have some method to hook in callbacks.

This patch makes it possible to add a per-device trap generating filter.
This way we can get maximum performance of sane hardware - which doesn't
need this filter - and crappy hardware works but gets punished by a
performance hit.

V2 changes things around a bit and replaces io access callbacks with a
simple minimum_bus_width value. In the future we can add stride as well.

Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
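For orientation, board code would register a trapped region roughly as in
the sketch below. This is illustrative only, inferred from the fields that
register_trapped_io() touches in io_trapped.c further down (resource,
num_resources, minimum_bus_width); the real struct trapped_io definition
lives in asm/io_trapped.h outside this diff, and the CF window address is
hypothetical:

	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <asm/io_trapped.h>

	/* hypothetical CF memory window on a board with a 16-bit-only CF slot */
	static struct resource cf_resources[] = {
		[0] = {
			.start	= 0x14000000,
			.end	= 0x14000000 + 0x1000 - 1,
			.flags	= IORESOURCE_MEM,
		},
	};

	/* register_trapped_io() rejects structures that are not page aligned,
	 * since the trap window is built from the pages backing this struct */
	static struct trapped_io cf_trapped_io __attribute__((aligned(PAGE_SIZE))) = {
		.resource		= cf_resources,
		.num_resources		= ARRAY_SIZE(cf_resources),
		.minimum_bus_width	= 16,	/* device only tolerates 16-bit cycles */
	};

	static int __init cf_io_setup(void)
	{
		/* remaps the window through no-access PTEs so every load/store
		 * traps and gets widened to 16 bits in software */
		return register_trapped_io(&cf_trapped_io);
	}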
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile_32     1
-rw-r--r--  arch/sh/kernel/Makefile_64     1
-rw-r--r--  arch/sh/kernel/io.c            8
-rw-r--r--  arch/sh/kernel/io_generic.c   24
-rw-r--r--  arch/sh/kernel/io_trapped.c  269
-rw-r--r--  arch/sh/kernel/traps_32.c     59
6 files changed, 324 insertions(+), 38 deletions(-)
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index c89289831053..62bf373266f7 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -22,5 +22,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
+obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 1ef21cc087f3..e01283d49cbf 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -18,5 +18,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
+obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 71c9fde2fd90..2b8991229900 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -63,7 +63,13 @@ EXPORT_SYMBOL(memset_io);
 
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
-	return sh_mv.mv_ioport_map(port, nr);
+	void __iomem *ret;
+
+	ret = __ioport_map_trapped(port, nr);
+	if (ret)
+		return ret;
+
+	return __ioport_map(port, nr);
 }
 EXPORT_SYMBOL(ioport_map);
 
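Note the fallback order above: ioport_map() first gives the trapped-io
layer a chance to claim the port, and only then falls back to the plain
machvec mapping via __ioport_map(). The __ioport_map_trapped() helper
comes from a header outside this diff; presumably it is glue of roughly
the following shape, collapsing to NULL when CONFIG_IO_TRAPPED is
disabled (an assumption consistent with the conditional Makefile rules
above):

	/* sketch of the assumed header-side glue, not shown in this diff */
	#ifdef CONFIG_IO_TRAPPED
	#define __ioport_map_trapped(offset, size) \
		match_trapped_io_handler(&trapped_io, offset, size)
	#else
	#define __ioport_map_trapped(offset, size) NULL
	#endif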
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index 771ea4230441..db769449f5a7 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -33,17 +33,17 @@ static inline void delay(void)
 
 u8 generic_inb(unsigned long port)
 {
-	return ctrl_inb((unsigned long __force)ioport_map(port, 1));
+	return ctrl_inb((unsigned long __force)__ioport_map(port, 1));
 }
 
 u16 generic_inw(unsigned long port)
 {
-	return ctrl_inw((unsigned long __force)ioport_map(port, 2));
+	return ctrl_inw((unsigned long __force)__ioport_map(port, 2));
 }
 
 u32 generic_inl(unsigned long port)
 {
-	return ctrl_inl((unsigned long __force)ioport_map(port, 4));
+	return ctrl_inl((unsigned long __force)__ioport_map(port, 4));
 }
 
 u8 generic_inb_p(unsigned long port)
@@ -81,7 +81,7 @@ void generic_insb(unsigned long port, void *dst, unsigned long count)
 	volatile u8 *port_addr;
 	u8 *buf = dst;
 
-	port_addr = (volatile u8 *)ioport_map(port, 1);
+	port_addr = (volatile u8 *)__ioport_map(port, 1);
 	while (count--)
 		*buf++ = *port_addr;
 }
@@ -91,7 +91,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count)
 	volatile u16 *port_addr;
 	u16 *buf = dst;
 
-	port_addr = (volatile u16 *)ioport_map(port, 2);
+	port_addr = (volatile u16 *)__ioport_map(port, 2);
 	while (count--)
 		*buf++ = *port_addr;
 
@@ -103,7 +103,7 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
 	volatile u32 *port_addr;
 	u32 *buf = dst;
 
-	port_addr = (volatile u32 *)ioport_map(port, 4);
+	port_addr = (volatile u32 *)__ioport_map(port, 4);
 	while (count--)
 		*buf++ = *port_addr;
 
@@ -112,17 +112,17 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
 
 void generic_outb(u8 b, unsigned long port)
 {
-	ctrl_outb(b, (unsigned long __force)ioport_map(port, 1));
+	ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1));
 }
 
 void generic_outw(u16 b, unsigned long port)
 {
-	ctrl_outw(b, (unsigned long __force)ioport_map(port, 2));
+	ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2));
 }
 
 void generic_outl(u32 b, unsigned long port)
 {
-	ctrl_outl(b, (unsigned long __force)ioport_map(port, 4));
+	ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4));
 }
 
 void generic_outb_p(u8 b, unsigned long port)
@@ -153,7 +153,7 @@ void generic_outsb(unsigned long port, const void *src, unsigned long count)
 	volatile u8 *port_addr;
 	const u8 *buf = src;
 
-	port_addr = (volatile u8 __force *)ioport_map(port, 1);
+	port_addr = (volatile u8 __force *)__ioport_map(port, 1);
 
 	while (count--)
 		*port_addr = *buf++;
@@ -164,7 +164,7 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count)
 	volatile u16 *port_addr;
 	const u16 *buf = src;
 
-	port_addr = (volatile u16 __force *)ioport_map(port, 2);
+	port_addr = (volatile u16 __force *)__ioport_map(port, 2);
 
 	while (count--)
 		*port_addr = *buf++;
@@ -177,7 +177,7 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count)
 	volatile u32 *port_addr;
 	const u32 *buf = src;
 
-	port_addr = (volatile u32 __force *)ioport_map(port, 4);
+	port_addr = (volatile u32 __force *)__ioport_map(port, 4);
 	while (count--)
 		*port_addr = *buf++;
 
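The pattern through this whole file: the generic machvec accessors now map
ports with __ioport_map() instead of ioport_map(), so a port access that
reaches these routines goes straight to the machvec mapping rather than
being routed back through the trapped-io lookup that ioport_map() now
performs first.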
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
new file mode 100644
index 000000000000..0bfdc9a34e1a
--- /dev/null
+++ b/arch/sh/kernel/io_trapped.c
@@ -0,0 +1,269 @@
+/*
+ * Trapped io support
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Intercept io operations by trapping.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/io_trapped.h>
+
+#define TRAPPED_PAGES_MAX 16
+#define MAX(a, b) (((a) >= (b)) ? (a) : (b))
+
+#ifdef CONFIG_HAS_IOPORT
+LIST_HEAD(trapped_io);
+#endif
+#ifdef CONFIG_HAS_IOMEM
+LIST_HEAD(trapped_mem);
+#endif
+static DEFINE_SPINLOCK(trapped_lock);
+
+int __init register_trapped_io(struct trapped_io *tiop)
+{
+	struct resource *res;
+	unsigned long len = 0, flags = 0;
+	struct page *pages[TRAPPED_PAGES_MAX];
+	int k, n;
+
+	/* structure must be page aligned */
+	if ((unsigned long)tiop & (PAGE_SIZE - 1))
+		goto bad;
+
+	for (k = 0; k < tiop->num_resources; k++) {
+		res = tiop->resource + k;
+		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+		flags |= res->flags;
+	}
+
+	/* support IORESOURCE_IO _or_ MEM, not both */
+	if (hweight_long(flags) != 1)
+		goto bad;
+
+	n = len >> PAGE_SHIFT;
+
+	if (n >= TRAPPED_PAGES_MAX)
+		goto bad;
+
+	for (k = 0; k < n; k++)
+		pages[k] = virt_to_page(tiop);
+
+	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
+	if (!tiop->virt_base)
+		goto bad;
+
+	len = 0;
+	for (k = 0; k < tiop->num_resources; k++) {
+		res = tiop->resource + k;
+		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
+			(unsigned long)(tiop->virt_base + len),
+			res->flags & IORESOURCE_IO ? "io" : "mmio",
+			(unsigned long)res->start);
+		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+	}
+
+	tiop->magic = IO_TRAPPED_MAGIC;
+	INIT_LIST_HEAD(&tiop->list);
+	spin_lock_irq(&trapped_lock);
+	if (flags & IORESOURCE_IO)
+		list_add(&tiop->list, &trapped_io);
+	if (flags & IORESOURCE_MEM)
+		list_add(&tiop->list, &trapped_mem);
+	spin_unlock_irq(&trapped_lock);
+
+	return 0;
+ bad:
+	pr_warning("unable to install trapped io filter\n");
+	return -1;
+}
+
+void __iomem *match_trapped_io_handler(struct list_head *list,
+				       unsigned long offset,
+				       unsigned long size)
+{
+	unsigned long voffs;
+	struct trapped_io *tiop;
+	struct resource *res;
+	int k, len;
+
+	spin_lock_irq(&trapped_lock);
+	list_for_each_entry(tiop, list, list) {
+		voffs = 0;
+		for (k = 0; k < tiop->num_resources; k++) {
+			res = tiop->resource + k;
+			if (res->start == offset) {
+				spin_unlock_irq(&trapped_lock);
+				return tiop->virt_base + voffs;
+			}
+
+			len = (res->end - res->start) + 1;
+			voffs += roundup(len, PAGE_SIZE);
+		}
+	}
+	spin_unlock_irq(&trapped_lock);
+	return NULL;
+}
+
+static struct trapped_io *lookup_tiop(unsigned long address)
+{
+	pgd_t *pgd_k;
+	pud_t *pud_k;
+	pmd_t *pmd_k;
+	pte_t *pte_k;
+	pte_t entry;
+
+	pgd_k = swapper_pg_dir + pgd_index(address);
+	if (!pgd_present(*pgd_k))
+		return NULL;
+
+	pud_k = pud_offset(pgd_k, address);
+	if (!pud_present(*pud_k))
+		return NULL;
+
+	pmd_k = pmd_offset(pud_k, address);
+	if (!pmd_present(*pmd_k))
+		return NULL;
+
+	pte_k = pte_offset_kernel(pmd_k, address);
+	entry = *pte_k;
+
+	return pfn_to_kaddr(pte_pfn(entry));
+}
+
+static unsigned long lookup_address(struct trapped_io *tiop,
+				    unsigned long address)
+{
+	struct resource *res;
+	unsigned long vaddr = (unsigned long)tiop->virt_base;
+	unsigned long len;
+	int k;
+
+	for (k = 0; k < tiop->num_resources; k++) {
+		res = tiop->resource + k;
+		len = roundup((res->end - res->start) + 1, PAGE_SIZE);
+		if (address < (vaddr + len))
+			return res->start + (address - vaddr);
+		vaddr += len;
+	}
+	return 0;
+}
+
+static unsigned long long copy_word(unsigned long src_addr, int src_len,
+				    unsigned long dst_addr, int dst_len)
+{
+	unsigned long long tmp = 0;
+
+	switch (src_len) {
+	case 1:
+		tmp = ctrl_inb(src_addr);
+		break;
+	case 2:
+		tmp = ctrl_inw(src_addr);
+		break;
+	case 4:
+		tmp = ctrl_inl(src_addr);
+		break;
+	case 8:
+		tmp = ctrl_inq(src_addr);
+		break;
+	}
+
+	switch (dst_len) {
+	case 1:
+		ctrl_outb(tmp, dst_addr);
+		break;
+	case 2:
+		ctrl_outw(tmp, dst_addr);
+		break;
+	case 4:
+		ctrl_outl(tmp, dst_addr);
+		break;
+	case 8:
+		ctrl_outq(tmp, dst_addr);
+		break;
+	}
+
+	return tmp;
+}
+
+static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
+{
+	struct trapped_io *tiop;
+	unsigned long src_addr = (unsigned long)src;
+	unsigned long long tmp;
+
+	pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
+	tiop = lookup_tiop(src_addr);
+	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+	src_addr = lookup_address(tiop, src_addr);
+	if (!src_addr)
+		return cnt;
+
+	tmp = copy_word(src_addr, MAX(cnt, (tiop->minimum_bus_width / 8)),
+			(unsigned long)dst, cnt);
+
+	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
+	return 0;
+}
+
+static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
+{
+	struct trapped_io *tiop;
+	unsigned long dst_addr = (unsigned long)dst;
+	unsigned long long tmp;
+
+	pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
+	tiop = lookup_tiop(dst_addr);
+	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+	dst_addr = lookup_address(tiop, dst_addr);
+	if (!dst_addr)
+		return cnt;
+
+	tmp = copy_word((unsigned long)src, cnt,
+			dst_addr, MAX(cnt, (tiop->minimum_bus_width / 8)));
+
+	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
+	return 0;
+}
+
+static struct mem_access trapped_io_access = {
+	from_device,
+	to_device,
+};
+
+int handle_trapped_io(struct pt_regs *regs, unsigned long address)
+{
+	mm_segment_t oldfs;
+	opcode_t instruction;
+	int tmp;
+
+	if (!lookup_tiop(address))
+		return 0;
+
+	WARN_ON(user_mode(regs));
+
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	if (copy_from_user(&instruction, (void *)(regs->pc),
+			   sizeof(instruction))) {
+		set_fs(oldfs);
+		return 0;
+	}
+
+	tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
+	set_fs(oldfs);
+	return tmp == 0;
+}
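To see what minimum_bus_width buys, consider a byte read from a device
registered with minimum_bus_width = 16. The fault lands in from_device()
with cnt = 1, so copy_word() runs with src_len = MAX(1, 16 / 8) = 2 and
dst_len = 1: the device sees a single 16-bit ctrl_inw() cycle, while only
the low byte of the result is stored back for the faulting instruction.
Schematically:

	/* byte read, 16-bit-only device: one inw on the bus, one byte to the CPU */
	tmp = copy_word(src_addr, 2 /* MAX(1, 16/8) */, (unsigned long)dst, 1);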
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 25b1b8672cf0..baa4fa368dce 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -172,6 +172,11 @@ static inline void sign_extend(unsigned int count, unsigned char *dst)
 #endif
 }
 
+static struct mem_access user_mem_access = {
+	copy_from_user,
+	copy_to_user,
+};
+
 /*
  * handle an instruction that does an unaligned memory access by emulating the
  * desired behaviour
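This user_mem_access pair is half of the new indirection; trapped_io_access
in io_trapped.c is the other. Judging from the initializers here and the
ma->from()/ma->to() call sites below, struct mem_access (defined in a
header not shown in this diff) is presumably a pair of
copy_from_user()-style hooks:

	/* assumed shape, based on the call sites in this patch */
	struct mem_access {
		unsigned long (*from)(void *dst, const void *src, unsigned long cnt);
		unsigned long (*to)(void *dst, const void *src, unsigned long cnt);
	};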
@@ -179,7 +184,8 @@ static inline void sign_extend(unsigned int count, unsigned char *dst)
  * (if that instruction is in a branch delay slot)
  * - return 0 if emulation okay, -EFAULT on existential error
  */
-static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
+static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
+				struct mem_access *ma)
 {
 	int ret, index, count;
 	unsigned long *rm, *rn;
@@ -206,7 +212,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 		dst += 4-count;
 #endif
-		if (copy_from_user(dst, src, count))
+		if (ma->from(dst, src, count))
 			goto fetch_fault;
 
 		sign_extend(count, dst);
@@ -219,7 +225,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 			dst = (unsigned char*) *rn;
 			dst += regs->regs[0];
 
-			if (copy_to_user(dst, src, count))
+			if (ma->to(dst, src, count))
 				goto fetch_fault;
 		}
 		ret = 0;
@@ -230,7 +236,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 		dst = (unsigned char*) *rn;
 		dst += (instruction&0x000F)<<2;
 
-		if (copy_to_user(dst,src,4))
+		if (ma->to(dst, src, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -243,7 +249,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 		src += 4-count;
 #endif
-		if (copy_to_user(dst, src, count))
+		if (ma->to(dst, src, count))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -254,7 +260,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 		dst = (unsigned char*) rn;
 		*(unsigned long*)dst = 0;
 
-		if (copy_from_user(dst,src,4))
+		if (ma->from(dst, src, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -269,7 +275,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 		dst += 4-count;
 #endif
-		if (copy_from_user(dst, src, count))
+		if (ma->from(dst, src, count))
 			goto fetch_fault;
 		sign_extend(count, dst);
 		ret = 0;
@@ -285,7 +291,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 			dst = (unsigned char*) *rm; /* called Rn in the spec */
 			dst += (instruction&0x000F)<<1;
 
-			if (copy_to_user(dst, src, 2))
+			if (ma->to(dst, src, 2))
 				goto fetch_fault;
 			ret = 0;
 			break;
@@ -299,7 +305,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 			dst += 2;
 #endif
-			if (copy_from_user(dst, src, 2))
+			if (ma->from(dst, src, 2))
 				goto fetch_fault;
 			sign_extend(2, dst);
 			ret = 0;
@@ -320,8 +326,9 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs)
  * emulate the instruction in the delay slot
  * - fetches the instruction from PC+2
  */
-static inline int handle_unaligned_delayslot(struct pt_regs *regs,
-					     opcode_t old_instruction)
+static inline int handle_delayslot(struct pt_regs *regs,
+				   opcode_t old_instruction,
+				   struct mem_access *ma)
 {
 	opcode_t instruction;
 	void *addr = (void *)(regs->pc + instruction_size(old_instruction));
@@ -336,7 +343,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs,
 			   regs, 0);
 	}
 
-	return handle_unaligned_ins(instruction, regs);
+	return handle_unaligned_ins(instruction, regs, ma);
 }
 
 /*
@@ -362,7 +369,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs,
 
 static int handle_unaligned_notify_count = 10;
 
-static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
+int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
+			    struct mem_access *ma)
 {
 	u_int rm;
 	int ret, index;
@@ -385,19 +393,19 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 	case 0x0000:
 		if (instruction==0x000B) {
 			/* rts */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc = regs->pr;
 		}
 		else if ((instruction&0x00FF)==0x0023) {
 			/* braf @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc += rm + 4;
 		}
 		else if ((instruction&0x00FF)==0x0003) {
 			/* bsrf @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 				regs->pr = regs->pc + 4;
 				regs->pc += rm + 4;
@@ -418,13 +426,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 	case 0x4000:
 		if ((instruction&0x00FF)==0x002B) {
 			/* jmp @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc = rm;
 		}
 		else if ((instruction&0x00FF)==0x000B) {
 			/* jsr @Rm */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 				regs->pr = regs->pc + 4;
 				regs->pc = rm;
@@ -451,7 +459,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 		case 0x0B00: /* bf   lab - no delayslot*/
 			break;
 		case 0x0F00: /* bf/s lab */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
 				if ((regs->sr & 0x00000001) != 0)
@@ -464,7 +472,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 		case 0x0900: /* bt   lab - no delayslot */
 			break;
 		case 0x0D00: /* bt/s lab */
-			ret = handle_unaligned_delayslot(regs, instruction);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
 				if ((regs->sr & 0x00000001) == 0)
@@ -478,13 +486,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 		break;
 
 	case 0xA000: /* bra label */
-		ret = handle_unaligned_delayslot(regs, instruction);
+		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0)
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 		break;
 
 	case 0xB000: /* bsr label */
-		ret = handle_unaligned_delayslot(regs, instruction);
+		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0) {
 			regs->pr = regs->pc + 4;
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
@@ -495,7 +503,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs)
 
 	/* handle non-delay-slot instruction */
  simple:
-	ret = handle_unaligned_ins(instruction, regs);
+	ret = handle_unaligned_ins(instruction, regs, ma);
 	if (ret==0)
 		regs->pc += instruction_size(instruction);
 	return ret;
@@ -558,7 +566,8 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 			goto uspace_segv;
 		}
 
-		tmp = handle_unaligned_access(instruction, regs);
+		tmp = handle_unaligned_access(instruction, regs,
+					      &user_mem_access);
 		set_fs(oldfs);
 
 		if (tmp==0)
@@ -587,7 +596,7 @@ uspace_segv:
587 die("insn faulting in do_address_error", regs, 0); 596 die("insn faulting in do_address_error", regs, 0);
588 } 597 }
589 598
590 handle_unaligned_access(instruction, regs); 599 handle_unaligned_access(instruction, regs, &user_mem_access);
591 set_fs(oldfs); 600 set_fs(oldfs);
592 } 601 }
593} 602}