author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-09-07 12:16:54 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-01 11:41:10 -0400
commit		8ec53663d2698076468b3e1edc4e1b418bd54de3 (patch)
tree		d98f0ac21ec96be15aab1b05d3d6e2f60a657815 /arch
parent		5ec9407dd1196daaf12b427b351e2cd62d2a16a7 (diff)
[ARM] Improve non-executable support
Add support for detecting non-executable stack binaries, and adjust
permissions to prevent execution from data and stack areas. Also,
ensure that READ_IMPLIES_EXEC is enabled for older CPUs where that
is true, and for any executable-stack binary.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
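
The stack marking that the ELF loader feeds into the new arm_elf_read_implies_exec() hook comes from a binary's PT_GNU_STACK program header. A minimal userspace sketch, not part of this commit, written against the standard <elf.h> header and assuming a 32-bit ELF file (the file handling and messages are illustrative only), shows how that marking can be inspected:

/*
 * Userspace sketch, not part of this commit: report the PT_GNU_STACK
 * marking of a 32-bit ELF binary.
 */
#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	Elf32_Ehdr ehdr;
	Elf32_Phdr phdr;
	FILE *f;
	int i, found = 0;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
		perror(argv[1]);
		return 1;
	}
	/* Walk the program headers looking for PT_GNU_STACK */
	for (i = 0; i < ehdr.e_phnum; i++) {
		if (fseek(f, ehdr.e_phoff + (long)i * ehdr.e_phentsize, SEEK_SET) ||
		    fread(&phdr, sizeof(phdr), 1, f) != 1)
			break;
		if (phdr.p_type == PT_GNU_STACK) {
			found = 1;
			printf("GNU_STACK: %s\n", (phdr.p_flags & PF_X) ?
			       "executable stack requested" :
			       "non-executable stack requested");
		}
	}
	if (!found)
		printf("no GNU_STACK header (old toolchain default)\n");
	fclose(f);
	return 0;
}

The same information is visible with readelf -lW; binaries whose GNU_STACK entry is marked executable are the executable-stack case referred to in the commit message above.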
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/elf.h	68
-rw-r--r--	arch/arm/include/asm/page.h	5
-rw-r--r--	arch/arm/include/asm/pgtable.h	47
-rw-r--r--	arch/arm/kernel/Makefile	2
-rw-r--r--	arch/arm/kernel/elf.c	79
-rw-r--r--	arch/arm/kernel/module.c	2
6 files changed, 145 insertions, 58 deletions
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 7ea302c14a59..5be016980c19 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -18,9 +18,32 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_fp elf_fpregset_t;
 
 #define EM_ARM	40
-#define EF_ARM_APCS26		0x08
-#define EF_ARM_SOFT_FLOAT	0x200
-#define EF_ARM_EABI_MASK	0xFF000000
+
+#define EF_ARM_EABI_MASK	0xff000000
+#define EF_ARM_EABI_UNKNOWN	0x00000000
+#define EF_ARM_EABI_VER1	0x01000000
+#define EF_ARM_EABI_VER2	0x02000000
+#define EF_ARM_EABI_VER3	0x03000000
+#define EF_ARM_EABI_VER4	0x04000000
+#define EF_ARM_EABI_VER5	0x05000000
+
+#define EF_ARM_BE8		0x00800000	/* ABI 4,5 */
+#define EF_ARM_LE8		0x00400000	/* ABI 4,5 */
+#define EF_ARM_MAVERICK_FLOAT	0x00000800	/* ABI 0 */
+#define EF_ARM_VFP_FLOAT	0x00000400	/* ABI 0 */
+#define EF_ARM_SOFT_FLOAT	0x00000200	/* ABI 0 */
+#define EF_ARM_OLD_ABI		0x00000100	/* ABI 0 */
+#define EF_ARM_NEW_ABI		0x00000080	/* ABI 0 */
+#define EF_ARM_ALIGN8		0x00000040	/* ABI 0 */
+#define EF_ARM_PIC		0x00000020	/* ABI 0 */
+#define EF_ARM_MAPSYMSFIRST	0x00000010	/* ABI 2 */
+#define EF_ARM_APCS_FLOAT	0x00000010	/* ABI 0, floats in fp regs */
+#define EF_ARM_DYNSYMSUSESEGIDX	0x00000008	/* ABI 2 */
+#define EF_ARM_APCS_26		0x00000008	/* ABI 0 */
+#define EF_ARM_SYMSARESORTED	0x00000004	/* ABI 1,2 */
+#define EF_ARM_INTERWORK	0x00000004	/* ABI 0 */
+#define EF_ARM_HASENTRY		0x00000002	/* All */
+#define EF_ARM_RELEXEC		0x00000001	/* All */
 
 #define R_ARM_NONE	0
 #define R_ARM_PC24	1
@@ -57,23 +80,16 @@ typedef struct user_fp elf_fpregset_t;
 
 extern char elf_platform[];
 
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x))
+struct elf32_hdr;
 
 /*
- * 32-bit code is always OK.  Some cpus can do 26-bit, some can't.
+ * This is used to ensure we don't load something for the wrong architecture.
  */
-#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
-
-#define ELF_THUMB_OK(x) \
-	((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \
-	 ((x)->e_entry & 3) == 0)
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
 
-#define ELF_26BIT_OK(x) \
-	((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \
-	 ((x)->e_flags & EF_ARM_APCS26) == 0)
+extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
+#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
 
 #define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	4096
@@ -90,23 +106,7 @@ extern char elf_platform[];
    have no such handler. */
 #define ELF_PLAT_INIT(_r, load_addr)	(_r)->ARM_r0 = 0
 
-/*
- * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
- * and CP1, we only enable access to the iWMMXt coprocessor if the
- * binary is EABI or softfloat (and thus, guaranteed not to use
- * FPA instructions.)
- */
-#define SET_PERSONALITY(ex, ibcs2) \
-	do { \
-		if ((ex).e_flags & EF_ARM_APCS26) { \
-			set_personality(PER_LINUX); \
-		} else { \
-			set_personality(PER_LINUX_32BIT); \
-			if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
-				set_thread_flag(TIF_USING_IWMMXT); \
-			else \
-				clear_thread_flag(TIF_USING_IWMMXT); \
-		} \
-	} while (0)
+extern void elf_set_personality(const struct elf32_hdr *);
+#define SET_PERSONALITY(ex, ibcs2) elf_set_personality(&(ex))
 
 #endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index cf2e2680daaa..bed1c0a00368 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -184,8 +184,9 @@ typedef struct page *pgtable_t;
 
 #endif /* !__ASSEMBLY__ */
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 /*
  * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index e5054b026c24..b02be6c55aef 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -197,22 +197,29 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * shared mapping bits.
  */
 #define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
-#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC
 
 extern pgprot_t		pgprot_user;
 extern pgprot_t		pgprot_kernel;
 
-#define PAGE_NONE	pgprot_user
-#define PAGE_COPY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
-#define PAGE_SHARED	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
-				 L_PTE_WRITE)
-#define PAGE_READONLY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
-#define PAGE_KERNEL	pgprot_kernel
+#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define __PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
-#define __PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
-#define __PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
-#define __PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+#define PAGE_NONE		pgprot_user
+#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
+#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
+#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
+#define PAGE_KERNEL		pgprot_kernel
+#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)
+
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
+#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
+#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
+#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
+#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
 
 #endif /* __ASSEMBLY__ */
 
@@ -228,19 +235,19 @@ extern pgprot_t pgprot_kernel;
 #define __P001  __PAGE_READONLY
 #define __P010  __PAGE_COPY
 #define __P011  __PAGE_COPY
-#define __P100  __PAGE_READONLY
-#define __P101  __PAGE_READONLY
-#define __P110  __PAGE_COPY
-#define __P111  __PAGE_COPY
+#define __P100  __PAGE_READONLY_EXEC
+#define __P101  __PAGE_READONLY_EXEC
+#define __P110  __PAGE_COPY_EXEC
+#define __P111  __PAGE_COPY_EXEC
 
 #define __S000  __PAGE_NONE
 #define __S001  __PAGE_READONLY
 #define __S010  __PAGE_SHARED
 #define __S011  __PAGE_SHARED
-#define __S100  __PAGE_READONLY
-#define __S101  __PAGE_READONLY
-#define __S110  __PAGE_SHARED
-#define __S111  __PAGE_SHARED
+#define __S100  __PAGE_READONLY_EXEC
+#define __S101  __PAGE_READONLY_EXEC
+#define __S110  __PAGE_SHARED_EXEC
+#define __S111  __PAGE_SHARED_EXEC
 
 #ifndef __ASSEMBLY__
 /*
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 1d296fc8494e..4305345987d3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -10,7 +10,7 @@ endif
 
 # Object file lists.
 
-obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
+obj-y		:= compat.o elf.o entry-armv.o entry-common.o irq.o \
 		   process.o ptrace.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
new file mode 100644
index 000000000000..513f332f040d
--- /dev/null
+++ b/arch/arm/kernel/elf.c
@@ -0,0 +1,79 @@
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+	unsigned int eflags;
+
+	/* Make sure it's an ARM executable */
+	if (x->e_machine != EM_ARM)
+		return 0;
+
+	/* Make sure the entry address is reasonable */
+	if (x->e_entry & 1) {
+		if (!(elf_hwcap & HWCAP_THUMB))
+			return 0;
+	} else if (x->e_entry & 3)
+		return 0;
+
+	eflags = x->e_flags;
+	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+		/* APCS26 is only allowed if the CPU supports it */
+		if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
+			return 0;
+
+		/* VFP requires the supporting code */
+		if ((eflags & EF_ARM_VFP_FLOAT) && !(elf_hwcap & HWCAP_VFP))
+			return 0;
+	}
+	return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
+
+void elf_set_personality(const struct elf32_hdr *x)
+{
+	unsigned int eflags = x->e_flags;
+	unsigned int personality = PER_LINUX_32BIT;
+
+	/*
+	 * APCS-26 is only valid for OABI executables
+	 */
+	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+		if (eflags & EF_ARM_APCS_26)
+			personality = PER_LINUX;
+	}
+
+	set_personality(personality);
+
+	/*
+	 * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
+	 * and CP1, we only enable access to the iWMMXt coprocessor if the
+	 * binary is EABI or softfloat (and thus, guaranteed not to use
+	 * FPA instructions.)
+	 */
+	if (elf_hwcap & HWCAP_IWMMXT &&
+	    eflags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) {
+		set_thread_flag(TIF_USING_IWMMXT);
+	} else {
+		clear_thread_flag(TIF_USING_IWMMXT);
+	}
+}
+EXPORT_SYMBOL(elf_set_personality);
+
+/*
+ * Set READ_IMPLIES_EXEC if:
+ *  - the binary requires an executable stack
+ *  - we're running on a CPU which doesn't support NX.
+ */
+int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
+{
+	if (executable_stack != EXSTACK_ENABLE_X)
+		return 1;
+	if (cpu_architecture() <= CPU_ARCH_ARMv6)
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(arm_elf_read_implies_exec);
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index a68259a0cccd..9203ba7d58ee 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -47,7 +47,7 @@ void *module_alloc(unsigned long size)
 	if (!area)
 		return NULL;
 
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
 }
 #else /* CONFIG_MMU */
 void *module_alloc(unsigned long size)