diff options
author | Paul Mundt <lethal@linux-sh.org> | 2012-06-14 01:05:24 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2012-06-14 01:05:24 -0400 |
commit | db218b3d55627ef515d3b32888b269ed319c4c8d (patch) | |
tree | 76eb331a1f3dd8e101f3c8f368adda00347a593b | |
parent | af68d8f06d7a4f62f647820bf3c6b390f9aef954 (diff) |
sh64: Use generic unaligned access control/counters.
This switches over from the special-casing that sh64 had to the model
that's being employed for sh32.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r-- | arch/sh/kernel/traps_64.c | 150 |
1 files changed, 37 insertions, 113 deletions
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index ba95d63e623d..488c1cd557f1 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/perf_event.h> | 27 | #include <linux/perf_event.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
30 | #include <linux/atomic.h> | 30 | #include <asm/alignment.h> |
31 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
32 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
33 | #include <asm/fpu.h> | 33 | #include <asm/fpu.h> |
@@ -264,20 +264,20 @@ static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_na | |||
264 | die_if_no_fixup(str, regs, error_code); | 264 | die_if_no_fixup(str, regs, error_code); |
265 | } | 265 | } |
266 | 266 | ||
267 | static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode) | 267 | static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode) |
268 | { | 268 | { |
269 | int get_user_error; | 269 | int get_user_error; |
270 | unsigned long aligned_pc; | 270 | unsigned long aligned_pc; |
271 | unsigned long opcode; | 271 | insn_size_t opcode; |
272 | 272 | ||
273 | if ((pc & 3) == 1) { | 273 | if ((pc & 3) == 1) { |
274 | /* SHmedia */ | 274 | /* SHmedia */ |
275 | aligned_pc = pc & ~3; | 275 | aligned_pc = pc & ~3; |
276 | if (from_user_mode) { | 276 | if (from_user_mode) { |
277 | if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { | 277 | if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) { |
278 | get_user_error = -EFAULT; | 278 | get_user_error = -EFAULT; |
279 | } else { | 279 | } else { |
280 | get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); | 280 | get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc); |
281 | *result_opcode = opcode; | 281 | *result_opcode = opcode; |
282 | } | 282 | } |
283 | return get_user_error; | 283 | return get_user_error; |
@@ -285,7 +285,7 @@ static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int | |||
285 | /* If the fault was in the kernel, we can either read | 285 | /* If the fault was in the kernel, we can either read |
286 | * this directly, or if not, we fault. | 286 | * this directly, or if not, we fault. |
287 | */ | 287 | */ |
288 | *result_opcode = *(unsigned long *) aligned_pc; | 288 | *result_opcode = *(insn_size_t *)aligned_pc; |
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | } else if ((pc & 1) == 0) { | 291 | } else if ((pc & 1) == 0) { |
@@ -311,17 +311,23 @@ static int address_is_sign_extended(__u64 a) | |||
311 | #endif | 311 | #endif |
312 | } | 312 | } |
313 | 313 | ||
314 | /* return -1 for fault, 0 for OK */ | ||
314 | static int generate_and_check_address(struct pt_regs *regs, | 315 | static int generate_and_check_address(struct pt_regs *regs, |
315 | __u32 opcode, | 316 | insn_size_t opcode, |
316 | int displacement_not_indexed, | 317 | int displacement_not_indexed, |
317 | int width_shift, | 318 | int width_shift, |
318 | __u64 *address) | 319 | __u64 *address) |
319 | { | 320 | { |
320 | /* return -1 for fault, 0 for OK */ | ||
321 | |||
322 | __u64 base_address, addr; | 321 | __u64 base_address, addr; |
323 | int basereg; | 322 | int basereg; |
324 | 323 | ||
324 | switch (1 << width_shift) { | ||
325 | case 1: inc_unaligned_byte_access(); break; | ||
326 | case 2: inc_unaligned_word_access(); break; | ||
327 | case 4: inc_unaligned_dword_access(); break; | ||
328 | case 8: inc_unaligned_multi_access(); break; | ||
329 | } | ||
330 | |||
325 | basereg = (opcode >> 20) & 0x3f; | 331 | basereg = (opcode >> 20) & 0x3f; |
326 | base_address = regs->regs[basereg]; | 332 | base_address = regs->regs[basereg]; |
327 | if (displacement_not_indexed) { | 333 | if (displacement_not_indexed) { |
@@ -338,28 +344,28 @@ static int generate_and_check_address(struct pt_regs *regs, | |||
338 | } | 344 | } |
339 | 345 | ||
340 | /* Check sign extended */ | 346 | /* Check sign extended */ |
341 | if (!address_is_sign_extended(addr)) { | 347 | if (!address_is_sign_extended(addr)) |
342 | return -1; | 348 | return -1; |
343 | } | ||
344 | 349 | ||
345 | /* Check accessible. For misaligned access in the kernel, assume the | 350 | /* Check accessible. For misaligned access in the kernel, assume the |
346 | address is always accessible (and if not, just fault when the | 351 | address is always accessible (and if not, just fault when the |
347 | load/store gets done.) */ | 352 | load/store gets done.) */ |
348 | if (user_mode(regs)) { | 353 | if (user_mode(regs)) { |
349 | if (addr >= TASK_SIZE) { | 354 | inc_unaligned_user_access(); |
355 | |||
356 | if (addr >= TASK_SIZE) | ||
350 | return -1; | 357 | return -1; |
351 | } | 358 | } else |
352 | /* Do access_ok check later - it depends on whether it's a load or a store. */ | 359 | inc_unaligned_kernel_access(); |
353 | } | ||
354 | 360 | ||
355 | *address = addr; | 361 | *address = addr; |
362 | |||
363 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr); | ||
364 | unaligned_fixups_notify(current, opcode, regs); | ||
365 | |||
356 | return 0; | 366 | return 0; |
357 | } | 367 | } |
358 | 368 | ||
359 | static int user_mode_unaligned_fixup_count = 10; | ||
360 | static int user_mode_unaligned_fixup_enable = 1; | ||
361 | static int kernel_mode_unaligned_fixup_count = 32; | ||
362 | |||
363 | static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) | 369 | static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) |
364 | { | 370 | { |
365 | unsigned short x; | 371 | unsigned short x; |
@@ -389,7 +395,7 @@ static void misaligned_kernel_word_store(__u64 address, __u64 value) | |||
389 | } | 395 | } |
390 | 396 | ||
391 | static int misaligned_load(struct pt_regs *regs, | 397 | static int misaligned_load(struct pt_regs *regs, |
392 | __u32 opcode, | 398 | insn_size_t opcode, |
393 | int displacement_not_indexed, | 399 | int displacement_not_indexed, |
394 | int width_shift, | 400 | int width_shift, |
395 | int do_sign_extend) | 401 | int do_sign_extend) |
@@ -401,11 +407,8 @@ static int misaligned_load(struct pt_regs *regs, | |||
401 | 407 | ||
402 | error = generate_and_check_address(regs, opcode, | 408 | error = generate_and_check_address(regs, opcode, |
403 | displacement_not_indexed, width_shift, &address); | 409 | displacement_not_indexed, width_shift, &address); |
404 | if (error < 0) { | 410 | if (error < 0) |
405 | return error; | 411 | return error; |
406 | } | ||
407 | |||
408 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); | ||
409 | 412 | ||
410 | destreg = (opcode >> 4) & 0x3f; | 413 | destreg = (opcode >> 4) & 0x3f; |
411 | if (user_mode(regs)) { | 414 | if (user_mode(regs)) { |
@@ -464,11 +467,10 @@ static int misaligned_load(struct pt_regs *regs, | |||
464 | } | 467 | } |
465 | 468 | ||
466 | return 0; | 469 | return 0; |
467 | |||
468 | } | 470 | } |
469 | 471 | ||
470 | static int misaligned_store(struct pt_regs *regs, | 472 | static int misaligned_store(struct pt_regs *regs, |
471 | __u32 opcode, | 473 | insn_size_t opcode, |
472 | int displacement_not_indexed, | 474 | int displacement_not_indexed, |
473 | int width_shift) | 475 | int width_shift) |
474 | { | 476 | { |
@@ -479,11 +481,8 @@ static int misaligned_store(struct pt_regs *regs, | |||
479 | 481 | ||
480 | error = generate_and_check_address(regs, opcode, | 482 | error = generate_and_check_address(regs, opcode, |
481 | displacement_not_indexed, width_shift, &address); | 483 | displacement_not_indexed, width_shift, &address); |
482 | if (error < 0) { | 484 | if (error < 0) |
483 | return error; | 485 | return error; |
484 | } | ||
485 | |||
486 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); | ||
487 | 486 | ||
488 | srcreg = (opcode >> 4) & 0x3f; | 487 | srcreg = (opcode >> 4) & 0x3f; |
489 | if (user_mode(regs)) { | 488 | if (user_mode(regs)) { |
@@ -537,13 +536,12 @@ static int misaligned_store(struct pt_regs *regs, | |||
537 | } | 536 | } |
538 | 537 | ||
539 | return 0; | 538 | return 0; |
540 | |||
541 | } | 539 | } |
542 | 540 | ||
543 | /* Never need to fix up misaligned FPU accesses within the kernel since that's a real | 541 | /* Never need to fix up misaligned FPU accesses within the kernel since that's a real |
544 | error. */ | 542 | error. */ |
545 | static int misaligned_fpu_load(struct pt_regs *regs, | 543 | static int misaligned_fpu_load(struct pt_regs *regs, |
546 | __u32 opcode, | 544 | insn_size_t opcode, |
547 | int displacement_not_indexed, | 545 | int displacement_not_indexed, |
548 | int width_shift, | 546 | int width_shift, |
549 | int do_paired_load) | 547 | int do_paired_load) |
@@ -555,11 +553,8 @@ static int misaligned_fpu_load(struct pt_regs *regs, | |||
555 | 553 | ||
556 | error = generate_and_check_address(regs, opcode, | 554 | error = generate_and_check_address(regs, opcode, |
557 | displacement_not_indexed, width_shift, &address); | 555 | displacement_not_indexed, width_shift, &address); |
558 | if (error < 0) { | 556 | if (error < 0) |
559 | return error; | 557 | return error; |
560 | } | ||
561 | |||
562 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); | ||
563 | 558 | ||
564 | destreg = (opcode >> 4) & 0x3f; | 559 | destreg = (opcode >> 4) & 0x3f; |
565 | if (user_mode(regs)) { | 560 | if (user_mode(regs)) { |
@@ -615,12 +610,10 @@ static int misaligned_fpu_load(struct pt_regs *regs, | |||
615 | die ("Misaligned FPU load inside kernel", regs, 0); | 610 | die ("Misaligned FPU load inside kernel", regs, 0); |
616 | return -1; | 611 | return -1; |
617 | } | 612 | } |
618 | |||
619 | |||
620 | } | 613 | } |
621 | 614 | ||
622 | static int misaligned_fpu_store(struct pt_regs *regs, | 615 | static int misaligned_fpu_store(struct pt_regs *regs, |
623 | __u32 opcode, | 616 | insn_size_t opcode, |
624 | int displacement_not_indexed, | 617 | int displacement_not_indexed, |
625 | int width_shift, | 618 | int width_shift, |
626 | int do_paired_load) | 619 | int do_paired_load) |
@@ -632,11 +625,8 @@ static int misaligned_fpu_store(struct pt_regs *regs, | |||
632 | 625 | ||
633 | error = generate_and_check_address(regs, opcode, | 626 | error = generate_and_check_address(regs, opcode, |
634 | displacement_not_indexed, width_shift, &address); | 627 | displacement_not_indexed, width_shift, &address); |
635 | if (error < 0) { | 628 | if (error < 0) |
636 | return error; | 629 | return error; |
637 | } | ||
638 | |||
639 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); | ||
640 | 630 | ||
641 | srcreg = (opcode >> 4) & 0x3f; | 631 | srcreg = (opcode >> 4) & 0x3f; |
642 | if (user_mode(regs)) { | 632 | if (user_mode(regs)) { |
@@ -697,11 +687,13 @@ static int misaligned_fpu_store(struct pt_regs *regs, | |||
697 | 687 | ||
698 | static int misaligned_fixup(struct pt_regs *regs) | 688 | static int misaligned_fixup(struct pt_regs *regs) |
699 | { | 689 | { |
700 | unsigned long opcode; | 690 | insn_size_t opcode; |
701 | int error; | 691 | int error; |
702 | int major, minor; | 692 | int major, minor; |
693 | unsigned int user_action; | ||
703 | 694 | ||
704 | if (!user_mode_unaligned_fixup_enable) | 695 | user_action = unaligned_user_action(); |
696 | if (!(user_action & UM_FIXUP)) | ||
705 | return -1; | 697 | return -1; |
706 | 698 | ||
707 | error = read_opcode(regs->pc, &opcode, user_mode(regs)); | 699 | error = read_opcode(regs->pc, &opcode, user_mode(regs)); |
@@ -711,23 +703,6 @@ static int misaligned_fixup(struct pt_regs *regs) | |||
711 | major = (opcode >> 26) & 0x3f; | 703 | major = (opcode >> 26) & 0x3f; |
712 | minor = (opcode >> 16) & 0xf; | 704 | minor = (opcode >> 16) & 0xf; |
713 | 705 | ||
714 | if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) { | ||
715 | --user_mode_unaligned_fixup_count; | ||
716 | /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */ | ||
717 | printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", | ||
718 | current->comm, task_pid_nr(current), (__u32)regs->pc, opcode); | ||
719 | } else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) { | ||
720 | --kernel_mode_unaligned_fixup_count; | ||
721 | if (in_interrupt()) { | ||
722 | printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n", | ||
723 | (__u32)regs->pc, opcode); | ||
724 | } else { | ||
725 | printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", | ||
726 | current->comm, task_pid_nr(current), (__u32)regs->pc, opcode); | ||
727 | } | ||
728 | } | ||
729 | |||
730 | |||
731 | switch (major) { | 706 | switch (major) { |
732 | case (0x84>>2): /* LD.W */ | 707 | case (0x84>>2): /* LD.W */ |
733 | error = misaligned_load(regs, opcode, 1, 1, 1); | 708 | error = misaligned_load(regs, opcode, 1, 1, 1); |
@@ -855,57 +830,6 @@ static int misaligned_fixup(struct pt_regs *regs) | |||
855 | 830 | ||
856 | } | 831 | } |
857 | 832 | ||
858 | static ctl_table unaligned_table[] = { | ||
859 | { | ||
860 | .procname = "kernel_reports", | ||
861 | .data = &kernel_mode_unaligned_fixup_count, | ||
862 | .maxlen = sizeof(int), | ||
863 | .mode = 0644, | ||
864 | .proc_handler = proc_dointvec | ||
865 | }, | ||
866 | { | ||
867 | .procname = "user_reports", | ||
868 | .data = &user_mode_unaligned_fixup_count, | ||
869 | .maxlen = sizeof(int), | ||
870 | .mode = 0644, | ||
871 | .proc_handler = proc_dointvec | ||
872 | }, | ||
873 | { | ||
874 | .procname = "user_enable", | ||
875 | .data = &user_mode_unaligned_fixup_enable, | ||
876 | .maxlen = sizeof(int), | ||
877 | .mode = 0644, | ||
878 | .proc_handler = proc_dointvec}, | ||
879 | {} | ||
880 | }; | ||
881 | |||
882 | static ctl_table unaligned_root[] = { | ||
883 | { | ||
884 | .procname = "unaligned_fixup", | ||
885 | .mode = 0555, | ||
886 | .child = unaligned_table | ||
887 | }, | ||
888 | {} | ||
889 | }; | ||
890 | |||
891 | static ctl_table sh64_root[] = { | ||
892 | { | ||
893 | .procname = "sh64", | ||
894 | .mode = 0555, | ||
895 | .child = unaligned_root | ||
896 | }, | ||
897 | {} | ||
898 | }; | ||
899 | static struct ctl_table_header *sysctl_header; | ||
900 | static int __init init_sysctl(void) | ||
901 | { | ||
902 | sysctl_header = register_sysctl_table(sh64_root); | ||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | __initcall(init_sysctl); | ||
907 | |||
908 | |||
909 | asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs) | 833 | asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs) |
910 | { | 834 | { |
911 | u64 peek_real_address_q(u64 addr); | 835 | u64 peek_real_address_q(u64 addr); |