Diffstat (limited to 'arch/arm/mm/alignment.c')
 arch/arm/mm/alignment.c | 56 +++++++++++++++++++++++++++++++++++++++++---------------
 1 files changed, 41 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index be7c638b648b..cfbcf8b95599 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/system.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
@@ -95,6 +96,33 @@ static const char *usermode_action[] = {
95 "signal+warn" 96 "signal+warn"
96}; 97};
97 98
99/* Return true if and only if the ARMv6 unaligned access model is in use. */
100static bool cpu_is_v6_unaligned(void)
101{
102 return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
103}
104
105static int safe_usermode(int new_usermode, bool warn)
106{
107 /*
108 * ARMv6 and later CPUs can perform unaligned accesses for
109 * most single load and store instructions up to word size.
110 * LDM, STM, LDRD and STRD still need to be handled.
111 *
112 * Ignoring the alignment fault is not an option on these
113 * CPUs since we spin re-faulting the instruction without
114 * making any progress.
115 */
116 if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
117 new_usermode |= UM_FIXUP;
118
119 if (warn)
120 printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
121 }
122
123 return new_usermode;
124}
125
98static int alignment_proc_show(struct seq_file *m, void *v) 126static int alignment_proc_show(struct seq_file *m, void *v)
99{ 127{
100 seq_printf(m, "User:\t\t%lu\n", ai_user); 128 seq_printf(m, "User:\t\t%lu\n", ai_user);
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
 		if (get_user(mode, buffer))
 			return -EFAULT;
 		if (mode >= '0' && mode <= '5')
-			ai_usermode = mode - '0';
+			ai_usermode = safe_usermode(mode - '0', true);
 	}
 	return count;
 }
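The write handler above accepts a single digit, '0' through '5', which is treated as a bitmask of the UM_* flags, and safe_usermode() now refuses to leave a v6+ CPU in plain "ignore" mode. The stand-alone sketch below (not part of the patch) simply prints that mapping; the UM_WARN/UM_FIXUP/UM_SIGNAL values are assumed here to match the #defines near the top of alignment.c (1, 2 and 4 respectively).

#include <stdio.h>

#define UM_WARN   (1 << 0)  /* log each user-space alignment fault     */
#define UM_FIXUP  (1 << 1)  /* emulate the access inside the kernel    */
#define UM_SIGNAL (1 << 2)  /* deliver SIGBUS instead of fixing up     */

int main(void)
{
	int mode;

	for (mode = 0; mode <= 5; mode++)
		printf("echo %d > /proc/cpu/alignment  ->%s%s%s%s\n", mode,
		       mode              ? ""        : " ignore",
		       mode & UM_WARN    ? " warn"   : "",
		       mode & UM_FIXUP   ? " fixup"  : "",
		       mode & UM_SIGNAL  ? " signal" : "");
	return 0;
}

With this patch applied, writing 0 on a v6+ kernel is promoted to fixup mode and the printk in safe_usermode() is emitted once.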
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (ai_usermode & UM_FIXUP)
 		goto fixup;
 
-	if (ai_usermode & UM_SIGNAL)
-		force_sig(SIGBUS, current);
-	else {
+	if (ai_usermode & UM_SIGNAL) {
+		siginfo_t si;
+
+		si.si_signo = SIGBUS;
+		si.si_errno = 0;
+		si.si_code = BUS_ADRALN;
+		si.si_addr = (void __user *)addr;
+
+		force_sig_info(si.si_signo, &si, current);
+	} else {
 		/*
 		 * We're about to disable the alignment trap and return to
 		 * user space. But if an interrupt occurs before actually
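With UM_SIGNAL set, user space now receives a fully populated siginfo, si_code set to BUS_ADRALN and si_addr pointing at the offending address, instead of the bare SIGBUS that force_sig() used to deliver. A minimal user-space sketch of observing this follows (not part of the patch); it assumes /proc/cpu/alignment has been switched to signal mode (e.g. echo 4) and that the compiler actually emits a word-sized load for the misaligned pointer, neither of which is guaranteed.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void bus_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* stdio is not async-signal-safe; acceptable for a throwaway test */
	printf("SIGBUS: si_code=%d (BUS_ADRALN=%d), si_addr=%p\n",
	       si->si_code, BUS_ADRALN, si->si_addr);
	exit(0);
}

int main(void)
{
	struct sigaction sa;
	char buf[8] = { 0 };
	volatile int *p = (volatile int *)(buf + 1);	/* deliberately misaligned */

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	return *p;	/* may trap into do_alignment() and raise SIGBUS */
}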
@@ -926,20 +961,11 @@ static int __init alignment_init(void)
 		return -ENOMEM;
 #endif
 
-	/*
-	 * ARMv6 and later CPUs can perform unaligned accesses for
-	 * most single load and store instructions up to word size.
-	 * LDM, STM, LDRD and STRD still need to be handled.
-	 *
-	 * Ignoring the alignment fault is not an option on these
-	 * CPUs since we spin re-faulting the instruction without
-	 * making any progress.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+	if (cpu_is_v6_unaligned()) {
 		cr_alignment &= ~CR_A;
 		cr_no_alignment &= ~CR_A;
 		set_cr(cr_alignment);
-		ai_usermode = UM_FIXUP;
+		ai_usermode = safe_usermode(ai_usermode, false);
 	}
 
 	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,