diff options
author | Magnus Damm <magnus.damm@gmail.com> | 2008-02-07 06:18:21 -0500 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2008-02-14 00:22:09 -0500 |
commit | e7cc9a7340b8ec018caa9eb1d035fdaef1f2fc51 (patch) | |
tree | a797888f8d3f95734288978351c33af3c965494c /arch/sh/kernel/traps_32.c | |
parent | 2ade1a9b425c24037327197ea97db054395b536b (diff) |
sh: trapped io support V2
The idea is that we want to get rid of the in/out/readb/writeb callbacks from
the machvec and replace that with simple inline read and write operations to
memory. Fast and simple for most hardware devices (think PCI).
Some devices require special treatment though - like 16-bit only CF devices -
so we need to have some method to hook in callbacks.
This patch makes it possible to add a per-device trap generating filter. This
way we can get maximum performance of sane hardware - which doesn't need this
filter - and crappy hardware works but gets punished by a performance hit.
V2 changes things around a bit and replaces io access callbacks with a
simple minimum_bus_width value. In the future we can add stride as well.
Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/traps_32.c')
-rw-r--r-- | arch/sh/kernel/traps_32.c | 59 |
1 file changed, 34 insertions, 25 deletions
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 25b1b8672cf0..baa4fa368dce 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
@@ -172,6 +172,11 @@ static inline void sign_extend(unsigned int count, unsigned char *dst) | |||
172 | #endif | 172 | #endif |
173 | } | 173 | } |
174 | 174 | ||
175 | static struct mem_access user_mem_access = { | ||
176 | copy_from_user, | ||
177 | copy_to_user, | ||
178 | }; | ||
179 | |||
175 | /* | 180 | /* |
176 | * handle an instruction that does an unaligned memory access by emulating the | 181 | * handle an instruction that does an unaligned memory access by emulating the |
177 | * desired behaviour | 182 | * desired behaviour |
@@ -179,7 +184,8 @@ static inline void sign_extend(unsigned int count, unsigned char *dst) | |||
179 | * (if that instruction is in a branch delay slot) | 184 | * (if that instruction is in a branch delay slot) |
180 | * - return 0 if emulation okay, -EFAULT on existential error | 185 | * - return 0 if emulation okay, -EFAULT on existential error |
181 | */ | 186 | */ |
182 | static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | 187 | static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, |
188 | struct mem_access *ma) | ||
183 | { | 189 | { |
184 | int ret, index, count; | 190 | int ret, index, count; |
185 | unsigned long *rm, *rn; | 191 | unsigned long *rm, *rn; |
@@ -206,7 +212,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
206 | #if !defined(__LITTLE_ENDIAN__) | 212 | #if !defined(__LITTLE_ENDIAN__) |
207 | dst += 4-count; | 213 | dst += 4-count; |
208 | #endif | 214 | #endif |
209 | if (copy_from_user(dst, src, count)) | 215 | if (ma->from(dst, src, count)) |
210 | goto fetch_fault; | 216 | goto fetch_fault; |
211 | 217 | ||
212 | sign_extend(count, dst); | 218 | sign_extend(count, dst); |
@@ -219,7 +225,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
219 | dst = (unsigned char*) *rn; | 225 | dst = (unsigned char*) *rn; |
220 | dst += regs->regs[0]; | 226 | dst += regs->regs[0]; |
221 | 227 | ||
222 | if (copy_to_user(dst, src, count)) | 228 | if (ma->to(dst, src, count)) |
223 | goto fetch_fault; | 229 | goto fetch_fault; |
224 | } | 230 | } |
225 | ret = 0; | 231 | ret = 0; |
@@ -230,7 +236,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
230 | dst = (unsigned char*) *rn; | 236 | dst = (unsigned char*) *rn; |
231 | dst += (instruction&0x000F)<<2; | 237 | dst += (instruction&0x000F)<<2; |
232 | 238 | ||
233 | if (copy_to_user(dst,src,4)) | 239 | if (ma->to(dst, src, 4)) |
234 | goto fetch_fault; | 240 | goto fetch_fault; |
235 | ret = 0; | 241 | ret = 0; |
236 | break; | 242 | break; |
@@ -243,7 +249,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
243 | #if !defined(__LITTLE_ENDIAN__) | 249 | #if !defined(__LITTLE_ENDIAN__) |
244 | src += 4-count; | 250 | src += 4-count; |
245 | #endif | 251 | #endif |
246 | if (copy_to_user(dst, src, count)) | 252 | if (ma->to(dst, src, count)) |
247 | goto fetch_fault; | 253 | goto fetch_fault; |
248 | ret = 0; | 254 | ret = 0; |
249 | break; | 255 | break; |
@@ -254,7 +260,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
254 | dst = (unsigned char*) rn; | 260 | dst = (unsigned char*) rn; |
255 | *(unsigned long*)dst = 0; | 261 | *(unsigned long*)dst = 0; |
256 | 262 | ||
257 | if (copy_from_user(dst,src,4)) | 263 | if (ma->from(dst, src, 4)) |
258 | goto fetch_fault; | 264 | goto fetch_fault; |
259 | ret = 0; | 265 | ret = 0; |
260 | break; | 266 | break; |
@@ -269,7 +275,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
269 | #if !defined(__LITTLE_ENDIAN__) | 275 | #if !defined(__LITTLE_ENDIAN__) |
270 | dst += 4-count; | 276 | dst += 4-count; |
271 | #endif | 277 | #endif |
272 | if (copy_from_user(dst, src, count)) | 278 | if (ma->from(dst, src, count)) |
273 | goto fetch_fault; | 279 | goto fetch_fault; |
274 | sign_extend(count, dst); | 280 | sign_extend(count, dst); |
275 | ret = 0; | 281 | ret = 0; |
@@ -285,7 +291,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
285 | dst = (unsigned char*) *rm; /* called Rn in the spec */ | 291 | dst = (unsigned char*) *rm; /* called Rn in the spec */ |
286 | dst += (instruction&0x000F)<<1; | 292 | dst += (instruction&0x000F)<<1; |
287 | 293 | ||
288 | if (copy_to_user(dst, src, 2)) | 294 | if (ma->to(dst, src, 2)) |
289 | goto fetch_fault; | 295 | goto fetch_fault; |
290 | ret = 0; | 296 | ret = 0; |
291 | break; | 297 | break; |
@@ -299,7 +305,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
299 | #if !defined(__LITTLE_ENDIAN__) | 305 | #if !defined(__LITTLE_ENDIAN__) |
300 | dst += 2; | 306 | dst += 2; |
301 | #endif | 307 | #endif |
302 | if (copy_from_user(dst, src, 2)) | 308 | if (ma->from(dst, src, 2)) |
303 | goto fetch_fault; | 309 | goto fetch_fault; |
304 | sign_extend(2, dst); | 310 | sign_extend(2, dst); |
305 | ret = 0; | 311 | ret = 0; |
@@ -320,8 +326,9 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) | |||
320 | * emulate the instruction in the delay slot | 326 | * emulate the instruction in the delay slot |
321 | * - fetches the instruction from PC+2 | 327 | * - fetches the instruction from PC+2 |
322 | */ | 328 | */ |
323 | static inline int handle_unaligned_delayslot(struct pt_regs *regs, | 329 | static inline int handle_delayslot(struct pt_regs *regs, |
324 | opcode_t old_instruction) | 330 | opcode_t old_instruction, |
331 | struct mem_access *ma) | ||
325 | { | 332 | { |
326 | opcode_t instruction; | 333 | opcode_t instruction; |
327 | void *addr = (void *)(regs->pc + instruction_size(old_instruction)); | 334 | void *addr = (void *)(regs->pc + instruction_size(old_instruction)); |
@@ -336,7 +343,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs, | |||
336 | regs, 0); | 343 | regs, 0); |
337 | } | 344 | } |
338 | 345 | ||
339 | return handle_unaligned_ins(instruction, regs); | 346 | return handle_unaligned_ins(instruction, regs, ma); |
340 | } | 347 | } |
341 | 348 | ||
342 | /* | 349 | /* |
@@ -362,7 +369,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs, | |||
362 | 369 | ||
363 | static int handle_unaligned_notify_count = 10; | 370 | static int handle_unaligned_notify_count = 10; |
364 | 371 | ||
365 | static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) | 372 | int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, |
373 | struct mem_access *ma) | ||
366 | { | 374 | { |
367 | u_int rm; | 375 | u_int rm; |
368 | int ret, index; | 376 | int ret, index; |
@@ -385,19 +393,19 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) | |||
385 | case 0x0000: | 393 | case 0x0000: |
386 | if (instruction==0x000B) { | 394 | if (instruction==0x000B) { |
387 | /* rts */ | 395 | /* rts */ |
388 | ret = handle_unaligned_delayslot(regs, instruction); | 396 | ret = handle_delayslot(regs, instruction, ma); |
389 | if (ret==0) | 397 | if (ret==0) |
390 | regs->pc = regs->pr; | 398 | regs->pc = regs->pr; |
391 | } | 399 | } |
392 | else if ((instruction&0x00FF)==0x0023) { | 400 | else if ((instruction&0x00FF)==0x0023) { |
393 | /* braf @Rm */ | 401 | /* braf @Rm */ |
394 | ret = handle_unaligned_delayslot(regs, instruction); | 402 | ret = handle_delayslot(regs, instruction, ma); |
395 | if (ret==0) | 403 | if (ret==0) |
396 | regs->pc += rm + 4; | 404 | regs->pc += rm + 4; |
397 | } | 405 | } |
398 | else if ((instruction&0x00FF)==0x0003) { | 406 | else if ((instruction&0x00FF)==0x0003) { |
399 | /* bsrf @Rm */ | 407 | /* bsrf @Rm */ |
400 | ret = handle_unaligned_delayslot(regs, instruction); | 408 | ret = handle_delayslot(regs, instruction, ma); |
401 | if (ret==0) { | 409 | if (ret==0) { |
402 | regs->pr = regs->pc + 4; | 410 | regs->pr = regs->pc + 4; |
403 | regs->pc += rm + 4; | 411 | regs->pc += rm + 4; |
@@ -418,13 +426,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) | |||
418 | case 0x4000: | 426 | case 0x4000: |
419 | if ((instruction&0x00FF)==0x002B) { | 427 | if ((instruction&0x00FF)==0x002B) { |
420 | /* jmp @Rm */ | 428 | /* jmp @Rm */ |
421 | ret = handle_unaligned_delayslot(regs, instruction); | 429 | ret = handle_delayslot(regs, instruction, ma); |
422 | if (ret==0) | 430 | if (ret==0) |
423 | regs->pc = rm; | 431 | regs->pc = rm; |
424 | } | 432 | } |
425 | else if ((instruction&0x00FF)==0x000B) { | 433 | else if ((instruction&0x00FF)==0x000B) { |
426 | /* jsr @Rm */ | 434 | /* jsr @Rm */ |
427 | ret = handle_unaligned_delayslot(regs, instruction); | 435 | ret = handle_delayslot(regs, instruction, ma); |
428 | if (ret==0) { | 436 | if (ret==0) { |
429 | regs->pr = regs->pc + 4; | 437 | regs->pr = regs->pc + 4; |
430 | regs->pc = rm; | 438 | regs->pc = rm; |
@@ -451,7 +459,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) | |||
451 | case 0x0B00: /* bf lab - no delayslot*/ | 459 | case 0x0B00: /* bf lab - no delayslot*/ |
452 | break; | 460 | break; |
453 | case 0x0F00: /* bf/s lab */ | 461 | case 0x0F00: /* bf/s lab */ |
454 | ret = handle_unaligned_delayslot(regs, instruction); | 462 | ret = handle_delayslot(regs, instruction, ma); |
455 | if (ret==0) { | 463 | if (ret==0) { |
456 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) | 464 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) |
457 | if ((regs->sr & 0x00000001) != 0) | 465 | if ((regs->sr & 0x00000001) != 0) |
@@ -464,7 +472,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) | |||
464 | case 0x0900: /* bt lab - no delayslot */ | 472 | case 0x0900: /* bt lab - no delayslot */ |
465 | break; | 473 | break; |
466 | case 0x0D00: /* bt/s lab */ | 474 | case 0x0D00: /* bt/s lab */ |
467 | ret = handle_unaligned_delayslot(regs, instruction); | 475 | ret = handle_delayslot(regs, instruction, ma); |
468 | if (ret==0) { | 476 | if (ret==0) { |
469 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) | 477 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) |
470 | if ((regs->sr & 0x00000001) == 0) | 478 | if ((regs->sr & 0x00000001) == 0) |
@@ -478,13 +486,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) | |||
478 | break; | 486 | break; |
479 | 487 | ||
480 | case 0xA000: /* bra label */ | 488 | case 0xA000: /* bra label */ |
481 | ret = handle_unaligned_delayslot(regs, instruction); | 489 | ret = handle_delayslot(regs, instruction, ma); |
482 | if (ret==0) | 490 | if (ret==0) |
483 | regs->pc += SH_PC_12BIT_OFFSET(instruction); | 491 | regs->pc += SH_PC_12BIT_OFFSET(instruction); |
484 | break; | 492 | break; |
485 | 493 | ||
486 | case 0xB000: /* bsr label */ | 494 | case 0xB000: /* bsr label */ |
487 | ret = handle_unaligned_delayslot(regs, instruction); | 495 | ret = handle_delayslot(regs, instruction, ma); |
488 | if (ret==0) { | 496 | if (ret==0) { |
489 | regs->pr = regs->pc + 4; | 497 | regs->pr = regs->pc + 4; |
490 | regs->pc += SH_PC_12BIT_OFFSET(instruction); | 498 | regs->pc += SH_PC_12BIT_OFFSET(instruction); |
@@ -495,7 +503,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) | |||
495 | 503 | ||
496 | /* handle non-delay-slot instruction */ | 504 | /* handle non-delay-slot instruction */ |
497 | simple: | 505 | simple: |
498 | ret = handle_unaligned_ins(instruction, regs); | 506 | ret = handle_unaligned_ins(instruction, regs, ma); |
499 | if (ret==0) | 507 | if (ret==0) |
500 | regs->pc += instruction_size(instruction); | 508 | regs->pc += instruction_size(instruction); |
501 | return ret; | 509 | return ret; |
@@ -558,7 +566,8 @@ asmlinkage void do_address_error(struct pt_regs *regs, | |||
558 | goto uspace_segv; | 566 | goto uspace_segv; |
559 | } | 567 | } |
560 | 568 | ||
561 | tmp = handle_unaligned_access(instruction, regs); | 569 | tmp = handle_unaligned_access(instruction, regs, |
570 | &user_mem_access); | ||
562 | set_fs(oldfs); | 571 | set_fs(oldfs); |
563 | 572 | ||
564 | if (tmp==0) | 573 | if (tmp==0) |
@@ -587,7 +596,7 @@ uspace_segv: | |||
587 | die("insn faulting in do_address_error", regs, 0); | 596 | die("insn faulting in do_address_error", regs, 0); |
588 | } | 597 | } |
589 | 598 | ||
590 | handle_unaligned_access(instruction, regs); | 599 | handle_unaligned_access(instruction, regs, &user_mem_access); |
591 | set_fs(oldfs); | 600 | set_fs(oldfs); |
592 | } | 601 | } |
593 | } | 602 | } |