author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-12-07 04:44:55 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-12-07 04:44:55 -0500
commit		baa745a3378046ca1c5477495df6ccbec7690428 (patch)
tree		bc3a1339a45d70b6810f2ca88a7f46e89b33b2d2 /arch
parent		794baba637999b81aa40e60fae1fa91978e08808 (diff)
[ARM] Fix alignment fault handling for ARMv6 and later CPUs
On ARMv6 and later CPUs, it is possible for userspace processes to
get stuck on a misaligned load or store due to the "ignore fault"
setting; unlike previous CPUs, retrying the instruction without the
'A' bit set does not always cause the load to succeed.

We have no real option but to default to fixing up alignment faults
on these CPUs, and having the CPU fix up those misaligned accesses
which it can.

Reported-by: Wolfgang Grandegger <wg@grandegger.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/alignment.c	26
1 file changed, 23 insertions(+), 3 deletions(-)
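For illustration only (not part of the patch), here is a minimal userspace
sketch of the kind of access the commit message describes: a doubleword load
from an address that is not naturally aligned. On ARMv6 and later, the CPU can
handle most misaligned loads and stores up to word size itself once the 'A'
bit is clear, but an LDRD/LDM-class access still faults, and with the old
"ignore" default the instruction was simply retried without ever making
progress. Whether the compiler actually emits LDRD for this code depends on
the toolchain and flags, so treat it purely as a sketch.

/*
 * Illustrative userspace sketch, not part of the patch: a 64-bit
 * load from a deliberately misaligned address.  The pointer cast
 * knowingly violates C's alignment rules in order to provoke a raw
 * doubleword load; on ARMv6+ such an access cannot be fixed up by
 * the CPU and relies on the kernel's alignment handler, which this
 * commit defaults to UM_FIXUP.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[16];
	uint64_t v;

	memset(buf, 0xa5, sizeof(buf));

	/* buf + 1 is not 8-byte aligned; whether this becomes an LDRD
	 * (and hence an alignment fault) depends on the compiler. */
	v = *(const uint64_t *)(buf + 1);

	printf("misaligned doubleword: 0x%016llx\n",
	       (unsigned long long)v);
	return 0;
}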
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 133e65d166b3..2d5884ce0435 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -70,6 +70,10 @@ static unsigned long ai_dword;
 static unsigned long ai_multi;
 static int ai_usermode;
 
+#define UM_WARN		(1 << 0)
+#define UM_FIXUP	(1 << 1)
+#define UM_SIGNAL	(1 << 2)
+
 #ifdef CONFIG_PROC_FS
 static const char *usermode_action[] = {
 	"ignored",
@@ -754,7 +758,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  user:
 	ai_user += 1;
 
-	if (ai_usermode & 1)
+	if (ai_usermode & UM_WARN)
 		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
 		       "Address=0x%08lx FSR 0x%03x\n", current->comm,
 		       task_pid_nr(current), instrptr,
@@ -762,10 +766,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		       thumb_mode(regs) ? tinstr : instr,
 		       addr, fsr);
 
-	if (ai_usermode & 2)
+	if (ai_usermode & UM_FIXUP)
 		goto fixup;
 
-	if (ai_usermode & 4)
+	if (ai_usermode & UM_SIGNAL)
 		force_sig(SIGBUS, current);
 	else
 		set_cr(cr_no_alignment);
@@ -796,6 +800,22 @@ static int __init alignment_init(void)
 	res->write_proc = proc_alignment_write;
 #endif
 
+	/*
+	 * ARMv6 and later CPUs can perform unaligned accesses for
+	 * most single load and store instructions up to word size.
+	 * LDM, STM, LDRD and STRD still need to be handled.
+	 *
+	 * Ignoring the alignment fault is not an option on these
+	 * CPUs since we spin re-faulting the instruction without
+	 * making any progress.
+	 */
+	if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+		cr_alignment &= ~CR_A;
+		cr_no_alignment &= ~CR_A;
+		set_cr(cr_alignment);
+		ai_usermode = UM_FIXUP;
+	}
+
 	hook_fault_code(1, do_alignment, SIGILL, "alignment exception");
 	hook_fault_code(3, do_alignment, SIGILL, "alignment exception");
 
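As a usage note, the ai_usermode value selected above is also reachable from
userspace through /proc/cpu/alignment, the proc entry that alignment_init()
registers. The sketch below (not part of the patch) reads the current counters
and then requests warn + fixup by writing "3", on the assumption that the
written value maps directly onto the UM_WARN/UM_FIXUP/UM_SIGNAL bits; the
exact accepted range is whatever proc_alignment_write() enforces, and writing
requires root.

/*
 * Hedged usage sketch, not part of the patch: inspect and set the
 * alignment handling mode via /proc/cpu/alignment.  "3" is assumed
 * to mean UM_WARN | UM_FIXUP, matching the bit values introduced by
 * this commit.
 */
#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[256];

	/* Dump the current fault counters and user mode. */
	f = fopen("/proc/cpu/alignment", "r");
	if (!f) {
		perror("open /proc/cpu/alignment");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* Request warn + fixup for userspace faults (needs root). */
	f = fopen("/proc/cpu/alignment", "w");
	if (!f) {
		perror("open /proc/cpu/alignment for writing");
		return 1;
	}
	fputs("3\n", f);
	fclose(f);
	return 0;
}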