diff options
| -rw-r--r-- | arch/arm/Kconfig | 3 | ||||
| -rw-r--r-- | arch/arm/include/asm/elf.h | 4 | ||||
| -rw-r--r-- | arch/arm/include/asm/mmu.h | 1 | ||||
| -rw-r--r-- | arch/arm/include/asm/page.h | 2 | ||||
| -rw-r--r-- | arch/arm/kernel/entry-armv.S | 103 | ||||
| -rw-r--r-- | arch/arm/kernel/fiq.c | 19 | ||||
| -rw-r--r-- | arch/arm/kernel/process.c | 45 | ||||
| -rw-r--r-- | arch/arm/kernel/signal.c | 52 | ||||
| -rw-r--r-- | arch/arm/kernel/signal.h | 12 | ||||
| -rw-r--r-- | arch/arm/kernel/traps.c | 46 | ||||
| -rw-r--r-- | arch/arm/kernel/vmlinux.lds.S | 17 | ||||
| -rw-r--r-- | arch/arm/mm/Kconfig | 34 | ||||
| -rw-r--r-- | arch/arm/mm/mmu.c | 14 |
13 files changed, 253 insertions, 99 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 82f069829ac0..995b55311c55 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -217,7 +217,8 @@ config VECTORS_BASE | |||
| 217 | default DRAM_BASE if REMAP_VECTORS_TO_RAM | 217 | default DRAM_BASE if REMAP_VECTORS_TO_RAM |
| 218 | default 0x00000000 | 218 | default 0x00000000 |
| 219 | help | 219 | help |
| 220 | The base address of exception vectors. | 220 | The base address of exception vectors. This must be two pages |
| 221 | in size. | ||
| 221 | 222 | ||
| 222 | config ARM_PATCH_PHYS_VIRT | 223 | config ARM_PATCH_PHYS_VIRT |
| 223 | bool "Patch physical to virtual translations at runtime" if EMBEDDED | 224 | bool "Patch physical to virtual translations at runtime" if EMBEDDED |
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 38050b1c4800..9c9b30717fda 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h | |||
| @@ -130,4 +130,8 @@ struct mm_struct; | |||
| 130 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); | 130 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); |
| 131 | #define arch_randomize_brk arch_randomize_brk | 131 | #define arch_randomize_brk arch_randomize_brk |
| 132 | 132 | ||
| 133 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | ||
| 134 | struct linux_binprm; | ||
| 135 | int arch_setup_additional_pages(struct linux_binprm *, int); | ||
| 136 | |||
| 133 | #endif | 137 | #endif |
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index d1b4998e4f43..6f18da09668b 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h | |||
| @@ -10,6 +10,7 @@ typedef struct { | |||
| 10 | int switch_pending; | 10 | int switch_pending; |
| 11 | #endif | 11 | #endif |
| 12 | unsigned int vmalloc_seq; | 12 | unsigned int vmalloc_seq; |
| 13 | unsigned long sigpage; | ||
| 13 | } mm_context_t; | 14 | } mm_context_t; |
| 14 | 15 | ||
| 15 | #ifdef CONFIG_CPU_HAS_ASID | 16 | #ifdef CONFIG_CPU_HAS_ASID |
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 6363f3d1d505..4355f0ec44d6 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h | |||
| @@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, | |||
| 142 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 142 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
| 143 | extern void copy_page(void *to, const void *from); | 143 | extern void copy_page(void *to, const void *from); |
| 144 | 144 | ||
| 145 | #ifdef CONFIG_KUSER_HELPERS | ||
| 145 | #define __HAVE_ARCH_GATE_AREA 1 | 146 | #define __HAVE_ARCH_GATE_AREA 1 |
| 147 | #endif | ||
| 146 | 148 | ||
| 147 | #ifdef CONFIG_ARM_LPAE | 149 | #ifdef CONFIG_ARM_LPAE |
| 148 | #include <asm/pgtable-3level-types.h> | 150 | #include <asm/pgtable-3level-types.h> |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index a39cfc2a1f90..d40d0ef389db 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
| @@ -742,6 +742,18 @@ ENDPROC(__switch_to) | |||
| 742 | #endif | 742 | #endif |
| 743 | .endm | 743 | .endm |
| 744 | 744 | ||
| 745 | .macro kuser_pad, sym, size | ||
| 746 | .if (. - \sym) & 3 | ||
| 747 | .rept 4 - (. - \sym) & 3 | ||
| 748 | .byte 0 | ||
| 749 | .endr | ||
| 750 | .endif | ||
| 751 | .rept (\size - (. - \sym)) / 4 | ||
| 752 | .word 0xe7fddef1 | ||
| 753 | .endr | ||
| 754 | .endm | ||
| 755 | |||
| 756 | #ifdef CONFIG_KUSER_HELPERS | ||
| 745 | .align 5 | 757 | .align 5 |
| 746 | .globl __kuser_helper_start | 758 | .globl __kuser_helper_start |
| 747 | __kuser_helper_start: | 759 | __kuser_helper_start: |
| @@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup: | |||
| 832 | #error "incoherent kernel configuration" | 844 | #error "incoherent kernel configuration" |
| 833 | #endif | 845 | #endif |
| 834 | 846 | ||
| 835 | /* pad to next slot */ | 847 | kuser_pad __kuser_cmpxchg64, 64 |
| 836 | .rept (16 - (. - __kuser_cmpxchg64)/4) | ||
| 837 | .word 0 | ||
| 838 | .endr | ||
| 839 | |||
| 840 | .align 5 | ||
| 841 | 848 | ||
| 842 | __kuser_memory_barrier: @ 0xffff0fa0 | 849 | __kuser_memory_barrier: @ 0xffff0fa0 |
| 843 | smp_dmb arm | 850 | smp_dmb arm |
| 844 | usr_ret lr | 851 | usr_ret lr |
| 845 | 852 | ||
| 846 | .align 5 | 853 | kuser_pad __kuser_memory_barrier, 32 |
| 847 | 854 | ||
| 848 | __kuser_cmpxchg: @ 0xffff0fc0 | 855 | __kuser_cmpxchg: @ 0xffff0fc0 |
| 849 | 856 | ||
| @@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup: | |||
| 916 | 923 | ||
| 917 | #endif | 924 | #endif |
| 918 | 925 | ||
| 919 | .align 5 | 926 | kuser_pad __kuser_cmpxchg, 32 |
| 920 | 927 | ||
| 921 | __kuser_get_tls: @ 0xffff0fe0 | 928 | __kuser_get_tls: @ 0xffff0fe0 |
| 922 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init | 929 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init |
| 923 | usr_ret lr | 930 | usr_ret lr |
| 924 | mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code | 931 | mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code |
| 925 | .rep 4 | 932 | kuser_pad __kuser_get_tls, 16 |
| 933 | .rep 3 | ||
| 926 | .word 0 @ 0xffff0ff0 software TLS value, then | 934 | .word 0 @ 0xffff0ff0 software TLS value, then |
| 927 | .endr @ pad up to __kuser_helper_version | 935 | .endr @ pad up to __kuser_helper_version |
| 928 | 936 | ||
| @@ -932,14 +940,16 @@ __kuser_helper_version: @ 0xffff0ffc | |||
| 932 | .globl __kuser_helper_end | 940 | .globl __kuser_helper_end |
| 933 | __kuser_helper_end: | 941 | __kuser_helper_end: |
| 934 | 942 | ||
| 943 | #endif | ||
| 944 | |||
| 935 | THUMB( .thumb ) | 945 | THUMB( .thumb ) |
| 936 | 946 | ||
| 937 | /* | 947 | /* |
| 938 | * Vector stubs. | 948 | * Vector stubs. |
| 939 | * | 949 | * |
| 940 | * This code is copied to 0xffff0200 so we can use branches in the | 950 | * This code is copied to 0xffff1000 so we can use branches in the |
| 941 | * vectors, rather than ldr's. Note that this code must not | 951 | * vectors, rather than ldr's. Note that this code must not exceed |
| 942 | * exceed 0x300 bytes. | 952 | * a page size. |
| 943 | * | 953 | * |
| 944 | * Common stub entry macro: | 954 | * Common stub entry macro: |
| 945 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC | 955 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC |
| @@ -986,8 +996,17 @@ ENDPROC(vector_\name) | |||
| 986 | 1: | 996 | 1: |
| 987 | .endm | 997 | .endm |
| 988 | 998 | ||
| 989 | .globl __stubs_start | 999 | .section .stubs, "ax", %progbits |
| 990 | __stubs_start: | 1000 | __stubs_start: |
| 1001 | @ This must be the first word | ||
| 1002 | .word vector_swi | ||
| 1003 | |||
| 1004 | vector_rst: | ||
| 1005 | ARM( swi SYS_ERROR0 ) | ||
| 1006 | THUMB( svc #0 ) | ||
| 1007 | THUMB( nop ) | ||
| 1008 | b vector_und | ||
| 1009 | |||
| 991 | /* | 1010 | /* |
| 992 | * Interrupt dispatcher | 1011 | * Interrupt dispatcher |
| 993 | */ | 1012 | */ |
| @@ -1082,6 +1101,16 @@ __stubs_start: | |||
| 1082 | .align 5 | 1101 | .align 5 |
| 1083 | 1102 | ||
| 1084 | /*============================================================================= | 1103 | /*============================================================================= |
| 1104 | * Address exception handler | ||
| 1105 | *----------------------------------------------------------------------------- | ||
| 1106 | * These aren't too critical. | ||
| 1107 | * (they're not supposed to happen, and won't happen in 32-bit data mode). | ||
| 1108 | */ | ||
| 1109 | |||
| 1110 | vector_addrexcptn: | ||
| 1111 | b vector_addrexcptn | ||
| 1112 | |||
| 1113 | /*============================================================================= | ||
| 1085 | * Undefined FIQs | 1114 | * Undefined FIQs |
| 1086 | *----------------------------------------------------------------------------- | 1115 | *----------------------------------------------------------------------------- |
| 1087 | * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC | 1116 | * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC |
| @@ -1094,45 +1123,19 @@ __stubs_start: | |||
| 1094 | vector_fiq: | 1123 | vector_fiq: |
| 1095 | subs pc, lr, #4 | 1124 | subs pc, lr, #4 |
| 1096 | 1125 | ||
| 1097 | /*============================================================================= | 1126 | .globl vector_fiq_offset |
| 1098 | * Address exception handler | 1127 | .equ vector_fiq_offset, vector_fiq |
| 1099 | *----------------------------------------------------------------------------- | ||
| 1100 | * These aren't too critical. | ||
| 1101 | * (they're not supposed to happen, and won't happen in 32-bit data mode). | ||
| 1102 | */ | ||
| 1103 | |||
| 1104 | vector_addrexcptn: | ||
| 1105 | b vector_addrexcptn | ||
| 1106 | |||
| 1107 | /* | ||
| 1108 | * We group all the following data together to optimise | ||
| 1109 | * for CPUs with separate I & D caches. | ||
| 1110 | */ | ||
| 1111 | .align 5 | ||
| 1112 | |||
| 1113 | .LCvswi: | ||
| 1114 | .word vector_swi | ||
| 1115 | |||
| 1116 | .globl __stubs_end | ||
| 1117 | __stubs_end: | ||
| 1118 | |||
| 1119 | .equ stubs_offset, __vectors_start + 0x200 - __stubs_start | ||
| 1120 | 1128 | ||
| 1121 | .globl __vectors_start | 1129 | .section .vectors, "ax", %progbits |
| 1122 | __vectors_start: | 1130 | __vectors_start: |
| 1123 | ARM( swi SYS_ERROR0 ) | 1131 | W(b) vector_rst |
| 1124 | THUMB( svc #0 ) | 1132 | W(b) vector_und |
| 1125 | THUMB( nop ) | 1133 | W(ldr) pc, __vectors_start + 0x1000 |
| 1126 | W(b) vector_und + stubs_offset | 1134 | W(b) vector_pabt |
| 1127 | W(ldr) pc, .LCvswi + stubs_offset | 1135 | W(b) vector_dabt |
| 1128 | W(b) vector_pabt + stubs_offset | 1136 | W(b) vector_addrexcptn |
| 1129 | W(b) vector_dabt + stubs_offset | 1137 | W(b) vector_irq |
| 1130 | W(b) vector_addrexcptn + stubs_offset | 1138 | W(b) vector_fiq |
| 1131 | W(b) vector_irq + stubs_offset | ||
| 1132 | W(b) vector_fiq + stubs_offset | ||
| 1133 | |||
| 1134 | .globl __vectors_end | ||
| 1135 | __vectors_end: | ||
| 1136 | 1139 | ||
| 1137 | .data | 1140 | .data |
| 1138 | 1141 | ||
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 2adda11f712f..25442f451148 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c | |||
| @@ -47,6 +47,11 @@ | |||
| 47 | #include <asm/irq.h> | 47 | #include <asm/irq.h> |
| 48 | #include <asm/traps.h> | 48 | #include <asm/traps.h> |
| 49 | 49 | ||
| 50 | #define FIQ_OFFSET ({ \ | ||
| 51 | extern void *vector_fiq_offset; \ | ||
| 52 | (unsigned)&vector_fiq_offset; \ | ||
| 53 | }) | ||
| 54 | |||
| 50 | static unsigned long no_fiq_insn; | 55 | static unsigned long no_fiq_insn; |
| 51 | 56 | ||
| 52 | /* Default reacquire function | 57 | /* Default reacquire function |
| @@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec) | |||
| 80 | void set_fiq_handler(void *start, unsigned int length) | 85 | void set_fiq_handler(void *start, unsigned int length) |
| 81 | { | 86 | { |
| 82 | #if defined(CONFIG_CPU_USE_DOMAINS) | 87 | #if defined(CONFIG_CPU_USE_DOMAINS) |
| 83 | memcpy((void *)0xffff001c, start, length); | 88 | void *base = (void *)0xffff0000; |
| 84 | #else | 89 | #else |
| 85 | memcpy(vectors_page + 0x1c, start, length); | 90 | void *base = vectors_page; |
| 86 | #endif | 91 | #endif |
| 87 | flush_icache_range(0xffff001c, 0xffff001c + length); | 92 | unsigned offset = FIQ_OFFSET; |
| 93 | |||
| 94 | memcpy(base + offset, start, length); | ||
| 95 | flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); | ||
| 88 | if (!vectors_high()) | 96 | if (!vectors_high()) |
| 89 | flush_icache_range(0x1c, 0x1c + length); | 97 | flush_icache_range(offset, offset + length); |
| 90 | } | 98 | } |
| 91 | 99 | ||
| 92 | int claim_fiq(struct fiq_handler *f) | 100 | int claim_fiq(struct fiq_handler *f) |
| @@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq); | |||
| 144 | 152 | ||
| 145 | void __init init_FIQ(int start) | 153 | void __init init_FIQ(int start) |
| 146 | { | 154 | { |
| 147 | no_fiq_insn = *(unsigned long *)0xffff001c; | 155 | unsigned offset = FIQ_OFFSET; |
| 156 | no_fiq_insn = *(unsigned long *)(0xffff0000 + offset); | ||
| 148 | fiq_start = start; | 157 | fiq_start = start; |
| 149 | } | 158 | } |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 08b47ebd3144..16ed3f7c4980 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
| @@ -429,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
| 429 | } | 429 | } |
| 430 | 430 | ||
| 431 | #ifdef CONFIG_MMU | 431 | #ifdef CONFIG_MMU |
| 432 | #ifdef CONFIG_KUSER_HELPERS | ||
| 432 | /* | 433 | /* |
| 433 | * The vectors page is always readable from user space for the | 434 | * The vectors page is always readable from user space for the |
| 434 | * atomic helpers and the signal restart code. Insert it into the | 435 | * atomic helpers. Insert it into the gate_vma so that it is visible |
| 435 | * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. | 436 | * through ptrace and /proc/<pid>/mem. |
| 436 | */ | 437 | */ |
| 437 | static struct vm_area_struct gate_vma = { | 438 | static struct vm_area_struct gate_vma = { |
| 438 | .vm_start = 0xffff0000, | 439 | .vm_start = 0xffff0000, |
| @@ -461,9 +462,47 @@ int in_gate_area_no_mm(unsigned long addr) | |||
| 461 | { | 462 | { |
| 462 | return in_gate_area(NULL, addr); | 463 | return in_gate_area(NULL, addr); |
| 463 | } | 464 | } |
| 465 | #define is_gate_vma(vma) ((vma) == &gate_vma) | ||
| 466 | #else | ||
| 467 | #define is_gate_vma(vma) 0 | ||
| 468 | #endif | ||
| 464 | 469 | ||
| 465 | const char *arch_vma_name(struct vm_area_struct *vma) | 470 | const char *arch_vma_name(struct vm_area_struct *vma) |
| 466 | { | 471 | { |
| 467 | return (vma == &gate_vma) ? "[vectors]" : NULL; | 472 | return is_gate_vma(vma) ? "[vectors]" : |
| 473 | (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? | ||
| 474 | "[sigpage]" : NULL; | ||
| 475 | } | ||
| 476 | |||
| 477 | extern struct page *get_signal_page(void); | ||
| 478 | |||
| 479 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
| 480 | { | ||
| 481 | struct mm_struct *mm = current->mm; | ||
| 482 | struct page *page; | ||
| 483 | unsigned long addr; | ||
| 484 | int ret; | ||
| 485 | |||
| 486 | page = get_signal_page(); | ||
| 487 | if (!page) | ||
| 488 | return -ENOMEM; | ||
| 489 | |||
| 490 | down_write(&mm->mmap_sem); | ||
| 491 | addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); | ||
| 492 | if (IS_ERR_VALUE(addr)) { | ||
| 493 | ret = addr; | ||
| 494 | goto up_fail; | ||
| 495 | } | ||
| 496 | |||
| 497 | ret = install_special_mapping(mm, addr, PAGE_SIZE, | ||
| 498 | VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, | ||
| 499 | &page); | ||
| 500 | |||
| 501 | if (ret == 0) | ||
| 502 | mm->context.sigpage = addr; | ||
| 503 | |||
| 504 | up_fail: | ||
| 505 | up_write(&mm->mmap_sem); | ||
| 506 | return ret; | ||
| 468 | } | 507 | } |
| 469 | #endif | 508 | #endif |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 1c16c35c271a..0f17e06d51e6 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
| 9 | */ | 9 | */ |
| 10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
| 11 | #include <linux/random.h> | ||
| 11 | #include <linux/signal.h> | 12 | #include <linux/signal.h> |
| 12 | #include <linux/personality.h> | 13 | #include <linux/personality.h> |
| 13 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
| @@ -15,12 +16,11 @@ | |||
| 15 | 16 | ||
| 16 | #include <asm/elf.h> | 17 | #include <asm/elf.h> |
| 17 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
| 19 | #include <asm/traps.h> | ||
| 18 | #include <asm/ucontext.h> | 20 | #include <asm/ucontext.h> |
| 19 | #include <asm/unistd.h> | 21 | #include <asm/unistd.h> |
| 20 | #include <asm/vfp.h> | 22 | #include <asm/vfp.h> |
| 21 | 23 | ||
| 22 | #include "signal.h" | ||
| 23 | |||
| 24 | /* | 24 | /* |
| 25 | * For ARM syscalls, we encode the syscall number into the instruction. | 25 | * For ARM syscalls, we encode the syscall number into the instruction. |
| 26 | */ | 26 | */ |
| @@ -40,11 +40,13 @@ | |||
| 40 | #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) | 40 | #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) |
| 41 | #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) | 41 | #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) |
| 42 | 42 | ||
| 43 | const unsigned long sigreturn_codes[7] = { | 43 | static const unsigned long sigreturn_codes[7] = { |
| 44 | MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, | 44 | MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, |
| 45 | MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, | 45 | MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | static unsigned long signal_return_offset; | ||
| 49 | |||
| 48 | #ifdef CONFIG_CRUNCH | 50 | #ifdef CONFIG_CRUNCH |
| 49 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) | 51 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) |
| 50 | { | 52 | { |
| @@ -401,12 +403,15 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
| 401 | return 1; | 403 | return 1; |
| 402 | 404 | ||
| 403 | if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { | 405 | if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { |
| 406 | struct mm_struct *mm = current->mm; | ||
| 407 | |||
| 404 | /* | 408 | /* |
| 405 | * 32-bit code can use the new high-page | 409 | * 32-bit code can use the signal return page |
| 406 | * signal return code support except when the MPU has | 410 | * except when the MPU has protected the vectors |
| 407 | * protected the vectors page from PL0 | 411 | * page from PL0 |
| 408 | */ | 412 | */ |
| 409 | retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; | 413 | retcode = mm->context.sigpage + signal_return_offset + |
| 414 | (idx << 2) + thumb; | ||
| 410 | } else { | 415 | } else { |
| 411 | /* | 416 | /* |
| 412 | * Ensure that the instruction cache sees | 417 | * Ensure that the instruction cache sees |
| @@ -608,3 +613,36 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |||
| 608 | } while (thread_flags & _TIF_WORK_MASK); | 613 | } while (thread_flags & _TIF_WORK_MASK); |
| 609 | return 0; | 614 | return 0; |
| 610 | } | 615 | } |
| 616 | |||
| 617 | static struct page *signal_page; | ||
| 618 | |||
| 619 | struct page *get_signal_page(void) | ||
| 620 | { | ||
| 621 | if (!signal_page) { | ||
| 622 | unsigned long ptr; | ||
| 623 | unsigned offset; | ||
| 624 | void *addr; | ||
| 625 | |||
| 626 | signal_page = alloc_pages(GFP_KERNEL, 0); | ||
| 627 | |||
| 628 | if (!signal_page) | ||
| 629 | return NULL; | ||
| 630 | |||
| 631 | addr = page_address(signal_page); | ||
| 632 | |||
| 633 | /* Give the signal return code some randomness */ | ||
| 634 | offset = 0x200 + (get_random_int() & 0x7fc); | ||
| 635 | signal_return_offset = offset; | ||
| 636 | |||
| 637 | /* | ||
| 638 | * Copy signal return handlers into the vector page, and | ||
| 639 | * set sigreturn to be a pointer to these. | ||
| 640 | */ | ||
| 641 | memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); | ||
| 642 | |||
| 643 | ptr = (unsigned long)addr + offset; | ||
| 644 | flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); | ||
| 645 | } | ||
| 646 | |||
| 647 | return signal_page; | ||
| 648 | } | ||
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h deleted file mode 100644 index 5ff067b7c752..000000000000 --- a/arch/arm/kernel/signal.h +++ /dev/null | |||
| @@ -1,12 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * linux/arch/arm/kernel/signal.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2005-2009 Russell King. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | #define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) | ||
| 11 | |||
| 12 | extern const unsigned long sigreturn_codes[7]; | ||
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index cab094c234ee..ab517fcce21b 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
| @@ -35,8 +35,6 @@ | |||
| 35 | #include <asm/tls.h> | 35 | #include <asm/tls.h> |
| 36 | #include <asm/system_misc.h> | 36 | #include <asm/system_misc.h> |
| 37 | 37 | ||
| 38 | #include "signal.h" | ||
| 39 | |||
| 40 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; | 38 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; |
| 41 | 39 | ||
| 42 | void *vectors_page; | 40 | void *vectors_page; |
| @@ -800,15 +798,26 @@ void __init trap_init(void) | |||
| 800 | return; | 798 | return; |
| 801 | } | 799 | } |
| 802 | 800 | ||
| 803 | static void __init kuser_get_tls_init(unsigned long vectors) | 801 | #ifdef CONFIG_KUSER_HELPERS |
| 802 | static void __init kuser_init(void *vectors) | ||
| 804 | { | 803 | { |
| 804 | extern char __kuser_helper_start[], __kuser_helper_end[]; | ||
| 805 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | ||
| 806 | |||
| 807 | memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | ||
| 808 | |||
| 805 | /* | 809 | /* |
| 806 | * vectors + 0xfe0 = __kuser_get_tls | 810 | * vectors + 0xfe0 = __kuser_get_tls |
| 807 | * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 | 811 | * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 |
| 808 | */ | 812 | */ |
| 809 | if (tls_emu || has_tls_reg) | 813 | if (tls_emu || has_tls_reg) |
| 810 | memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); | 814 | memcpy(vectors + 0xfe0, vectors + 0xfe8, 4); |
| 811 | } | 815 | } |
| 816 | #else | ||
| 817 | static void __init kuser_init(void *vectors) | ||
| 818 | { | ||
| 819 | } | ||
| 820 | #endif | ||
| 812 | 821 | ||
| 813 | void __init early_trap_init(void *vectors_base) | 822 | void __init early_trap_init(void *vectors_base) |
| 814 | { | 823 | { |
| @@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base) | |||
| 816 | unsigned long vectors = (unsigned long)vectors_base; | 825 | unsigned long vectors = (unsigned long)vectors_base; |
| 817 | extern char __stubs_start[], __stubs_end[]; | 826 | extern char __stubs_start[], __stubs_end[]; |
| 818 | extern char __vectors_start[], __vectors_end[]; | 827 | extern char __vectors_start[], __vectors_end[]; |
| 819 | extern char __kuser_helper_start[], __kuser_helper_end[]; | 828 | unsigned i; |
| 820 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | ||
| 821 | 829 | ||
| 822 | vectors_page = vectors_base; | 830 | vectors_page = vectors_base; |
| 823 | 831 | ||
| 824 | /* | 832 | /* |
| 833 | * Poison the vectors page with an undefined instruction. This | ||
| 834 | * instruction is chosen to be undefined for both ARM and Thumb | ||
| 835 | * ISAs. The Thumb version is an undefined instruction with a | ||
| 836 | * branch back to the undefined instruction. | ||
| 837 | */ | ||
| 838 | for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) | ||
| 839 | ((u32 *)vectors_base)[i] = 0xe7fddef1; | ||
| 840 | |||
| 841 | /* | ||
| 825 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) | 842 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) |
| 826 | * into the vector page, mapped at 0xffff0000, and ensure these | 843 | * into the vector page, mapped at 0xffff0000, and ensure these |
| 827 | * are visible to the instruction stream. | 844 | * are visible to the instruction stream. |
| 828 | */ | 845 | */ |
| 829 | memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); | 846 | memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); |
| 830 | memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); | 847 | memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); |
| 831 | memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | ||
| 832 | 848 | ||
| 833 | /* | 849 | kuser_init(vectors_base); |
| 834 | * Do processor specific fixups for the kuser helpers | ||
| 835 | */ | ||
| 836 | kuser_get_tls_init(vectors); | ||
| 837 | |||
| 838 | /* | ||
| 839 | * Copy signal return handlers into the vector page, and | ||
| 840 | * set sigreturn to be a pointer to these. | ||
| 841 | */ | ||
| 842 | memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), | ||
| 843 | sigreturn_codes, sizeof(sigreturn_codes)); | ||
| 844 | 850 | ||
| 845 | flush_icache_range(vectors, vectors + PAGE_SIZE); | 851 | flush_icache_range(vectors, vectors + PAGE_SIZE * 2); |
| 846 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); | 852 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); |
| 847 | #else /* ifndef CONFIG_CPU_V7M */ | 853 | #else /* ifndef CONFIG_CPU_V7M */ |
| 848 | /* | 854 | /* |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index fa25e4e425f6..7bcee5c9b604 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
| @@ -148,6 +148,23 @@ SECTIONS | |||
| 148 | . = ALIGN(PAGE_SIZE); | 148 | . = ALIGN(PAGE_SIZE); |
| 149 | __init_begin = .; | 149 | __init_begin = .; |
| 150 | #endif | 150 | #endif |
| 151 | /* | ||
| 152 | * The vectors and stubs are relocatable code, and the | ||
| 153 | * only thing that matters is their relative offsets | ||
| 154 | */ | ||
| 155 | __vectors_start = .; | ||
| 156 | .vectors 0 : AT(__vectors_start) { | ||
| 157 | *(.vectors) | ||
| 158 | } | ||
| 159 | . = __vectors_start + SIZEOF(.vectors); | ||
| 160 | __vectors_end = .; | ||
| 161 | |||
| 162 | __stubs_start = .; | ||
| 163 | .stubs 0x1000 : AT(__stubs_start) { | ||
| 164 | *(.stubs) | ||
| 165 | } | ||
| 166 | . = __stubs_start + SIZEOF(.stubs); | ||
| 167 | __stubs_end = .; | ||
| 151 | 168 | ||
| 152 | INIT_TEXT_SECTION(8) | 169 | INIT_TEXT_SECTION(8) |
| 153 | .exit.text : { | 170 | .exit.text : { |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 6cacdc8dd654..db5c2cab8fda 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
| @@ -421,24 +421,28 @@ config CPU_32v3 | |||
| 421 | select CPU_USE_DOMAINS if MMU | 421 | select CPU_USE_DOMAINS if MMU |
| 422 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 422 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
| 423 | select TLS_REG_EMUL if SMP || !MMU | 423 | select TLS_REG_EMUL if SMP || !MMU |
| 424 | select NEED_KUSER_HELPERS | ||
| 424 | 425 | ||
| 425 | config CPU_32v4 | 426 | config CPU_32v4 |
| 426 | bool | 427 | bool |
| 427 | select CPU_USE_DOMAINS if MMU | 428 | select CPU_USE_DOMAINS if MMU |
| 428 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 429 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
| 429 | select TLS_REG_EMUL if SMP || !MMU | 430 | select TLS_REG_EMUL if SMP || !MMU |
| 431 | select NEED_KUSER_HELPERS | ||
| 430 | 432 | ||
| 431 | config CPU_32v4T | 433 | config CPU_32v4T |
| 432 | bool | 434 | bool |
| 433 | select CPU_USE_DOMAINS if MMU | 435 | select CPU_USE_DOMAINS if MMU |
| 434 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 436 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
| 435 | select TLS_REG_EMUL if SMP || !MMU | 437 | select TLS_REG_EMUL if SMP || !MMU |
| 438 | select NEED_KUSER_HELPERS | ||
| 436 | 439 | ||
| 437 | config CPU_32v5 | 440 | config CPU_32v5 |
| 438 | bool | 441 | bool |
| 439 | select CPU_USE_DOMAINS if MMU | 442 | select CPU_USE_DOMAINS if MMU |
| 440 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 443 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
| 441 | select TLS_REG_EMUL if SMP || !MMU | 444 | select TLS_REG_EMUL if SMP || !MMU |
| 445 | select NEED_KUSER_HELPERS | ||
| 442 | 446 | ||
| 443 | config CPU_32v6 | 447 | config CPU_32v6 |
| 444 | bool | 448 | bool |
| @@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE | |||
| 776 | 780 | ||
| 777 | config TLS_REG_EMUL | 781 | config TLS_REG_EMUL |
| 778 | bool | 782 | bool |
| 783 | select NEED_KUSER_HELPERS | ||
| 779 | help | 784 | help |
| 780 | An SMP system using a pre-ARMv6 processor (there are apparently | 785 | An SMP system using a pre-ARMv6 processor (there are apparently |
| 781 | a few prototypes like that in existence) and therefore access to | 786 | a few prototypes like that in existence) and therefore access to |
| @@ -783,11 +788,40 @@ config TLS_REG_EMUL | |||
| 783 | 788 | ||
| 784 | config NEEDS_SYSCALL_FOR_CMPXCHG | 789 | config NEEDS_SYSCALL_FOR_CMPXCHG |
| 785 | bool | 790 | bool |
| 791 | select NEED_KUSER_HELPERS | ||
| 786 | help | 792 | help |
| 787 | SMP on a pre-ARMv6 processor? Well OK then. | 793 | SMP on a pre-ARMv6 processor? Well OK then. |
| 788 | Forget about fast user space cmpxchg support. | 794 | Forget about fast user space cmpxchg support. |
| 789 | It is just not possible. | 795 | It is just not possible. |
| 790 | 796 | ||
| 797 | config NEED_KUSER_HELPERS | ||
| 798 | bool | ||
| 799 | |||
| 800 | config KUSER_HELPERS | ||
| 801 | bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS | ||
| 802 | default y | ||
| 803 | help | ||
| 804 | Warning: disabling this option may break user programs. | ||
| 805 | |||
| 806 | Provide kuser helpers in the vector page. The kernel provides | ||
| 807 | helper code to userspace in read only form at a fixed location | ||
| 808 | in the high vector page to allow userspace to be independent of | ||
| 809 | the CPU type fitted to the system. This permits binaries to be | ||
| 810 | run on ARMv4 through to ARMv7 without modification. | ||
| 811 | |||
| 812 | However, the fixed address nature of these helpers can be used | ||
| 813 | by ROP (return orientated programming) authors when creating | ||
| 814 | exploits. | ||
| 815 | |||
| 816 | If all of the binaries and libraries which run on your platform | ||
| 817 | are built specifically for your platform, and make no use of | ||
| 818 | these helpers, then you can turn this option off. However, | ||
| 819 | when such a binary or library is run, it will receive a SIGILL | ||
| 820 | signal, which will terminate the program. | ||
| 821 | |||
| 822 | Say N here only if you are absolutely certain that you do not | ||
| 823 | need these helpers; otherwise, the safe option is to say Y. | ||
| 824 | |||
| 791 | config DMA_CACHE_RWFO | 825 | config DMA_CACHE_RWFO |
| 792 | bool "Enable read/write for ownership DMA cache maintenance" | 826 | bool "Enable read/write for ownership DMA cache maintenance" |
| 793 | depends on CPU_V6K && SMP | 827 | depends on CPU_V6K && SMP |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b3fdb63783e3..53cdbd39ec8e 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
| @@ -1195,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
| 1195 | /* | 1195 | /* |
| 1196 | * Allocate the vector page early. | 1196 | * Allocate the vector page early. |
| 1197 | */ | 1197 | */ |
| 1198 | vectors = early_alloc(PAGE_SIZE); | 1198 | vectors = early_alloc(PAGE_SIZE * 2); |
| 1199 | 1199 | ||
| 1200 | early_trap_init(vectors); | 1200 | early_trap_init(vectors); |
| 1201 | 1201 | ||
| @@ -1240,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
| 1240 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); | 1240 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); |
| 1241 | map.virtual = 0xffff0000; | 1241 | map.virtual = 0xffff0000; |
| 1242 | map.length = PAGE_SIZE; | 1242 | map.length = PAGE_SIZE; |
| 1243 | #ifdef CONFIG_KUSER_HELPERS | ||
| 1243 | map.type = MT_HIGH_VECTORS; | 1244 | map.type = MT_HIGH_VECTORS; |
| 1245 | #else | ||
| 1246 | map.type = MT_LOW_VECTORS; | ||
| 1247 | #endif | ||
| 1244 | create_mapping(&map); | 1248 | create_mapping(&map); |
| 1245 | 1249 | ||
| 1246 | if (!vectors_high()) { | 1250 | if (!vectors_high()) { |
| 1247 | map.virtual = 0; | 1251 | map.virtual = 0; |
| 1252 | map.length = PAGE_SIZE * 2; | ||
| 1248 | map.type = MT_LOW_VECTORS; | 1253 | map.type = MT_LOW_VECTORS; |
| 1249 | create_mapping(&map); | 1254 | create_mapping(&map); |
| 1250 | } | 1255 | } |
| 1251 | 1256 | ||
| 1257 | /* Now create a kernel read-only mapping */ | ||
| 1258 | map.pfn += 1; | ||
| 1259 | map.virtual = 0xffff0000 + PAGE_SIZE; | ||
| 1260 | map.length = PAGE_SIZE; | ||
| 1261 | map.type = MT_LOW_VECTORS; | ||
| 1262 | create_mapping(&map); | ||
| 1263 | |||
| 1252 | /* | 1264 | /* |
| 1253 | * Ask the machine support to map in the statically mapped devices. | 1265 | * Ask the machine support to map in the statically mapped devices. |
| 1254 | */ | 1266 | */ |
