aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64/kernel/kuser32.S
diff options
context:
space:
mode:
authorMatthew Leach <matthew.leach@arm.com>2013-10-11 09:52:14 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2013-10-25 10:59:37 -0400
commita1d5ebaf8ccdd100f45042ce32c591867de04ac3 (patch)
tree8f47a40eabb63ca19ec96936fd808537824c0b25 /arch/arm64/kernel/kuser32.S
parent55b89540b0d8d031f90e3d711ec0df3f797ecc61 (diff)
arm64: big-endian: don't treat code as data when copying sigret code
Currently the sigreturn compat code is copied to an offset in the vectors table. When using a BE kernel this data will be stored in the wrong endianness, so when returning from a signal on a 32-bit BE system, arbitrary code will be executed. Instead of declaring the code inside a struct and copying that, use the assembler's .byte directives to store the code in the correct endianness regardless of platform endianness. Acked-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Matthew Leach <matthew.leach@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/kernel/kuser32.S')
-rw-r--r--arch/arm64/kernel/kuser32.S42
1 file changed, 42 insertions, 0 deletions
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 8b69ecb1d8bc..1e4905d52d30 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,6 +27,9 @@
27 *
28 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
29 */
30
31#include <asm/unistd32.h>
32
33	.align	5
34	.globl	__kuser_helper_start
35__kuser_helper_start:
@@ -75,3 +78,42 @@ __kuser_helper_version: // 0xffff0ffc
78	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
79	.globl	__kuser_helper_end
80__kuser_helper_end:
81
82/*
83 * AArch32 sigreturn code
84 *
85 * For ARM syscalls, the syscall number has to be loaded into r7.
86 * We do not support an OABI userspace.
87 *
88 * For Thumb syscalls, we also pass the syscall number via r7. We therefore
89 * need two 16-bit instructions.
90 */
91 .globl __aarch32_sigret_code_start
92__aarch32_sigret_code_start:
93
94 /*
95 * ARM Code
96 */
97 .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
98 .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
99
100 /*
101 * Thumb code
102 */
103 .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
104 .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
105
106 /*
107 * ARM code
108 */
109 .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
110 .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
111
112 /*
113 * Thumb code
114 */
115 .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
116 .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
117
118 .globl __aarch32_sigret_code_end
119__aarch32_sigret_code_end: