Diffstat (limited to 'arch/arm/kernel')

 arch/arm/kernel/entry-armv.S  | 103
 arch/arm/kernel/entry-v7m.S   |   2
 arch/arm/kernel/fiq.c         |  19
 arch/arm/kernel/head-nommu.S  |   1
 arch/arm/kernel/head.S        |   1
 arch/arm/kernel/hyp-stub.S    |   4
 arch/arm/kernel/process.c     |  49
 arch/arm/kernel/setup.c       |   3
 arch/arm/kernel/signal.c      |  56
 arch/arm/kernel/signal.h      |  12
 arch/arm/kernel/smp_tlb.c     |  17
 arch/arm/kernel/traps.c       |  46
 arch/arm/kernel/vmlinux.lds.S |  17

 13 files changed, 211 insertions(+), 119 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f90..d40d0ef389db 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -742,6 +742,18 @@ ENDPROC(__switch_to)
 #endif
 .endm
 
+.macro kuser_pad, sym, size
+.if (. - \sym) & 3
+.rept 4 - (. - \sym) & 3
+.byte 0
+.endr
+.endif
+.rept (\size - (. - \sym)) / 4
+.word 0xe7fddef1
+.endr
+.endm
+
+#ifdef CONFIG_KUSER_HELPERS
 .align 5
 .globl __kuser_helper_start
 __kuser_helper_start:
@@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup:
 #error "incoherent kernel configuration"
 #endif
 
-/* pad to next slot */
-.rept (16 - (. - __kuser_cmpxchg64)/4)
-.word 0
-.endr
-
-.align 5
+kuser_pad __kuser_cmpxchg64, 64
 
 __kuser_memory_barrier: @ 0xffff0fa0
 smp_dmb arm
 usr_ret lr
 
-.align 5
+kuser_pad __kuser_memory_barrier, 32
 
 __kuser_cmpxchg: @ 0xffff0fc0
 
@@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup:
 
 #endif
 
-.align 5
+kuser_pad __kuser_cmpxchg, 32
 
 __kuser_get_tls: @ 0xffff0fe0
 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
 usr_ret lr
 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
-.rep 4
+kuser_pad __kuser_get_tls, 16
+.rep 3
 .word 0 @ 0xffff0ff0 software TLS value, then
 .endr @ pad up to __kuser_helper_version
 
@@ -932,14 +940,16 @@ __kuser_helper_version: @ 0xffff0ffc
 .globl __kuser_helper_end
 __kuser_helper_end:
 
+#endif
+
 THUMB( .thumb )
 
 /*
  * Vector stubs.
  *
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
  *
  * Common stub entry macro:
  * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +996,17 @@ ENDPROC(vector_\name)
 1:
 .endm
 
-.globl __stubs_start
+.section .stubs, "ax", %progbits
 __stubs_start:
+@ This must be the first word
+.word vector_swi
+
+vector_rst:
+ARM( swi SYS_ERROR0 )
+THUMB( svc #0 )
+THUMB( nop )
+b vector_und
+
 /*
  * Interrupt dispatcher
  */
@@ -1082,6 +1101,16 @@ __stubs_start:
 .align 5
 
 /*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+b vector_addrexcptn
+
+/*=============================================================================
  * Undefined FIQs
  *-----------------------------------------------------------------------------
  * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1123,19 @@ __stubs_start:
 vector_fiq:
 subs pc, lr, #4
 
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
-b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
-.align 5
-
-.LCvswi:
-.word vector_swi
-
-.globl __stubs_end
-__stubs_end:
-
-.equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+.globl vector_fiq_offset
+.equ vector_fiq_offset, vector_fiq
 
-.globl __vectors_start
+.section .vectors, "ax", %progbits
 __vectors_start:
-ARM( swi SYS_ERROR0 )
-THUMB( svc #0 )
-THUMB( nop )
-W(b) vector_und + stubs_offset
-W(ldr) pc, .LCvswi + stubs_offset
-W(b) vector_pabt + stubs_offset
-W(b) vector_dabt + stubs_offset
-W(b) vector_addrexcptn + stubs_offset
-W(b) vector_irq + stubs_offset
-W(b) vector_fiq + stubs_offset
-
-.globl __vectors_end
-__vectors_end:
+W(b) vector_rst
+W(b) vector_und
+W(ldr) pc, __vectors_start + 0x1000
+W(b) vector_pabt
+W(b) vector_dabt
+W(b) vector_addrexcptn
+W(b) vector_irq
+W(b) vector_fiq
 
 .data
 
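
A quick illustration of the padding arithmetic behind the new kuser_pad macro (a standalone C sketch, not part of the patch; the helper size used below is an assumption for the example): the macro first pads with zero bytes up to a word boundary, then fills with the 0xe7fddef1 poison word until the slot reaches its fixed size.

/* Standalone sketch of kuser_pad's arithmetic (illustration only, not kernel code). */
#include <stdio.h>

static unsigned pad_words(unsigned used_bytes, unsigned slot_size)
{
	unsigned aligned = (used_bytes + 3) & ~3u;	/* ".byte 0" padding up to a word boundary */
	return (slot_size - aligned) / 4;		/* number of 0xe7fddef1 words emitted */
}

int main(void)
{
	/* e.g. a helper of two 4-byte instructions in a 32-byte slot */
	printf("%u poison words\n", pad_words(8, 32));	/* prints 6 */
	return 0;
}
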
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index e00621f1403f..52b26432c9a9 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -49,7 +49,7 @@ __irq_entry:
 mov r1, sp
 stmdb sp!, {lr}
 @ routine called with r0 = irq number, r1 = struct pt_regs *
-bl nvic_do_IRQ
+bl nvic_handle_irq
 
 pop {lr}
 @
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
 #include <asm/irq.h>
 #include <asm/traps.h>
 
+#define FIQ_OFFSET ({			\
+		extern void *vector_fiq_offset;	\
+		(unsigned)&vector_fiq_offset;	\
+	})
+
 static unsigned long no_fiq_insn;
 
 /* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
 void set_fiq_handler(void *start, unsigned int length)
 {
 #if defined(CONFIG_CPU_USE_DOMAINS)
-	memcpy((void *)0xffff001c, start, length);
+	void *base = (void *)0xffff0000;
 #else
-	memcpy(vectors_page + 0x1c, start, length);
+	void *base = vectors_page;
 #endif
-	flush_icache_range(0xffff001c, 0xffff001c + length);
+	unsigned offset = FIQ_OFFSET;
+
+	memcpy(base + offset, start, length);
+	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 	if (!vectors_high())
-		flush_icache_range(0x1c, 0x1c + length);
+		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
 
 void __init init_FIQ(int start)
 {
-	no_fiq_insn = *(unsigned long *)0xffff001c;
+	unsigned offset = FIQ_OFFSET;
+	no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
 	fiq_start = start;
 }
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index b361de143756..14235ba64a90 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,6 +87,7 @@ ENTRY(stext)
 ENDPROC(stext)
 
 #ifdef CONFIG_SMP
+.text
 ENTRY(secondary_startup)
 /*
  * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9cf6063020ae..2c7cc1e03473 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,6 +343,7 @@ __turn_mmu_on_loc:
 .long __turn_mmu_on_end
 
 #if defined(CONFIG_SMP)
+.text
 ENTRY(secondary_startup)
 /*
  * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 4910232c4833..797b1a6a4906 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
 ldr \reg3, [\reg2]
 ldr \reg1, [\reg2, \reg3]
 cmp \mode, \reg1 @ matches primary CPU boot mode?
-orrne r7, r7, #BOOT_CPU_MODE_MISMATCH
-strne r7, [r5, r6] @ record what happened and give up
+orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+strne \reg1, [\reg2, \reg3] @ record what happened and give up
 .endm
 
 #else /* ZIMAGE */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d3ca4f6915af..536c85fe72a8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -197,6 +197,7 @@ void machine_shutdown(void)
  */
 void machine_halt(void)
 {
+	local_irq_disable();
 	smp_send_stop();
 
 	local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
  */
 void machine_power_off(void)
 {
+	local_irq_disable();
 	smp_send_stop();
 
 	if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
  */
 void machine_restart(char *cmd)
 {
+	local_irq_disable();
 	smp_send_stop();
 
 	arm_pm_restart(reboot_mode, cmd);
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 }
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
 /*
  * The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
  */
 static struct vm_area_struct gate_vma = {
 	.vm_start = 0xffff0000,
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 	return in_gate_area(NULL, addr);
 }
+#define is_gate_vma(vma)	((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma)	0
+#endif
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	return (vma == &gate_vma) ? "[vectors]" : NULL;
+	return is_gate_vma(vma) ? "[vectors]" :
+		(vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+		"[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret;
+
+	if (!signal_page)
+		signal_page = get_signal_page();
+	if (!signal_page)
+		return -ENOMEM;
+
+	down_write(&mm->mmap_sem);
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	ret = install_special_mapping(mm, addr, PAGE_SIZE,
+		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+		&signal_page);
+
+	if (ret == 0)
+		mm->context.sigpage = addr;
+
+up_fail:
+	up_write(&mm->mmap_sem);
+	return ret;
 }
 #endif
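
For context, the mapping names returned by the new arch_vma_name() are visible from userspace once a kernel with these changes is running; the sketch below is an illustrative userspace program (not part of the patch) that simply greps /proc/self/maps for the two entries.

/* Userspace sketch: list the [vectors] and [sigpage] mappings of this process. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vectors]") || strstr(line, "[sigpage]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
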
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 63af9a7ae512..afc2489ee13b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 void __init hyp_mode_check(void)
 {
 #ifdef CONFIG_ARM_VIRT_EXT
+	sync_boot_mode();
+
 	if (is_hyp_mode_available()) {
 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
 		pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
 	"vfpv4",
 	"idiva",
 	"idivt",
+	"vfpd32",
 	"lpae",
 	NULL
 };
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..ab3304225272 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/errno.h>
+#include <linux/random.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/uaccess.h>
@@ -15,12 +16,11 @@
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
 #include <asm/vfp.h>
 
-#include "signal.h"
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
@@ -40,11 +40,13 @@
 #define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
 #define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
 
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
 	MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
 	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
+static unsigned long signal_return_offset;
+
 #ifdef CONFIG_CRUNCH
 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
 {
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	    __put_user(sigreturn_codes[idx+1], rc+1))
 		return 1;
 
-	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+#ifdef CONFIG_MMU
+	if (cpsr & MODE32_BIT) {
+		struct mm_struct *mm = current->mm;
+
 		/*
-		 * 32-bit code can use the new high-page
-		 * signal return code support except when the MPU has
-		 * protected the vectors page from PL0
+		 * 32-bit code can use the signal return page
+		 * except when the MPU has protected the vectors
+		 * page from PL0
 		 */
-		retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
-	} else {
+		retcode = mm->context.sigpage + signal_return_offset +
+			  (idx << 2) + thumb;
+	} else
+#endif
+	{
 		/*
 		 * Ensure that the instruction cache sees
 		 * the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 	} while (thread_flags & _TIF_WORK_MASK);
 	return 0;
 }
+
+struct page *get_signal_page(void)
+{
+	unsigned long ptr;
+	unsigned offset;
+	struct page *page;
+	void *addr;
+
+	page = alloc_pages(GFP_KERNEL, 0);
+
+	if (!page)
+		return NULL;
+
+	addr = page_address(page);
+
+	/* Give the signal return code some randomness */
+	offset = 0x200 + (get_random_int() & 0x7fc);
+	signal_return_offset = offset;
+
+	/*
+	 * Copy signal return handlers into the vector page, and
+	 * set sigreturn to be a pointer to these.
+	 */
+	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+	ptr = (unsigned long)addr + offset;
+	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+	return page;
+}
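
The offset chosen in get_signal_page() above is 0x200 + (get_random_int() & 0x7fc), i.e. a word-aligned value between 0x200 and 0x9fc, which always leaves room for the seven-word sigreturn_codes[] inside the page. A small userspace check of that arithmetic (illustrative sketch only, using rand() in place of get_random_int()):

/* Userspace sketch: sanity-check the sigpage offset formula. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		unsigned offset = 0x200 + (rand() & 0x7fc);

		assert((offset & 3) == 0);		/* word aligned */
		assert(offset + 7 * 4 <= 4096);		/* codes fit in one page */
	}
	printf("offset range: 0x200..0x%x\n", 0x200 + 0x7fc);
	return 0;
}
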
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * linux/arch/arm/kernel/signal.h
- *
- * Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index a98b62dca2fa..c2edfff573c2 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
 	local_flush_bp_all();
 }
 
-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
-	unsigned int midr = read_cpuid_id();
-
-	/* Cortex-A15 r0p0..r3p2 affected */
-	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-		return 0;
-	return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
-	return 0;
-}
-#endif
-
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
 	dmb();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..ab517fcce21b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
 #include <asm/tls.h>
 #include <asm/system_misc.h>
 
-#include "signal.h"
-
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
 
 void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
 	return;
 }
 
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
 {
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+	memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
 	/*
 	 * vectors + 0xfe0 = __kuser_get_tls
 	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
 	 */
 	if (tls_emu || has_tls_reg)
-		memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+		memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 }
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
 
 void __init early_trap_init(void *vectors_base)
 {
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
 	unsigned long vectors = (unsigned long)vectors_base;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
-	extern char __kuser_helper_start[], __kuser_helper_end[];
-	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+	unsigned i;
 
 	vectors_page = vectors_base;
 
 	/*
+	 * Poison the vectors page with an undefined instruction. This
+	 * instruction is chosen to be undefined for both ARM and Thumb
+	 * ISAs. The Thumb version is an undefined instruction with a
+	 * branch back to the undefined instruction.
+	 */
+	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+		((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
 	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
-	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+	memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
 
-	/*
-	 * Do processor specific fixups for the kuser helpers
-	 */
-	kuser_get_tls_init(vectors);
-
-	/*
-	 * Copy signal return handlers into the vector page, and
-	 * set sigreturn to be a pointer to these.
-	 */
-	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
-	       sigreturn_codes, sizeof(sigreturn_codes));
+	kuser_init(vectors_base);
 
-	flush_icache_range(vectors, vectors + PAGE_SIZE);
+	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f6..7bcee5c9b604 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 #endif
+	/*
+	 * The vectors and stubs are relocatable code, and the
+	 * only thing that matters is their relative offsets
+	 */
+	__vectors_start = .;
+	.vectors 0 : AT(__vectors_start) {
+		*(.vectors)
+	}
+	. = __vectors_start + SIZEOF(.vectors);
+	__vectors_end = .;
+
+	__stubs_start = .;
+	.stubs 0x1000 : AT(__stubs_start) {
+		*(.stubs)
+	}
+	. = __stubs_start + SIZEOF(.stubs);
+	__stubs_end = .;
 
 	INIT_TEXT_SECTION(8)
 	.exit.text : {