Diffstat (limited to 'arch/arm')
35 files changed, 391 insertions, 237 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 37c0f4e978d4..43594d5116ef 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -20,7 +20,6 @@ config ARM | |||
20 | select GENERIC_STRNCPY_FROM_USER | 20 | select GENERIC_STRNCPY_FROM_USER |
21 | select GENERIC_STRNLEN_USER | 21 | select GENERIC_STRNLEN_USER |
22 | select HARDIRQS_SW_RESEND | 22 | select HARDIRQS_SW_RESEND |
23 | select HAVE_AOUT | ||
24 | select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL | 23 | select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL |
25 | select HAVE_ARCH_KGDB | 24 | select HAVE_ARCH_KGDB |
26 | select HAVE_ARCH_SECCOMP_FILTER | 25 | select HAVE_ARCH_SECCOMP_FILTER |
@@ -218,7 +217,8 @@ config VECTORS_BASE | |||
218 | default DRAM_BASE if REMAP_VECTORS_TO_RAM | 217 | default DRAM_BASE if REMAP_VECTORS_TO_RAM |
219 | default 0x00000000 | 218 | default 0x00000000 |
220 | help | 219 | help |
221 | The base address of exception vectors. | 220 | The base address of exception vectors. This must be two pages |
221 | in size. | ||
222 | 222 | ||
223 | config ARM_PATCH_PHYS_VIRT | 223 | config ARM_PATCH_PHYS_VIRT |
224 | bool "Patch physical to virtual translations at runtime" if EMBEDDED | 224 | bool "Patch physical to virtual translations at runtime" if EMBEDDED |
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index e401a766c0bd..583f4a00ec32 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug | |||
@@ -804,9 +804,19 @@ config DEBUG_LL_INCLUDE | |||
804 | 804 | ||
805 | config DEBUG_UNCOMPRESS | 805 | config DEBUG_UNCOMPRESS |
806 | bool | 806 | bool |
807 | default y if ARCH_MULTIPLATFORM && DEBUG_LL && \ | 807 | depends on ARCH_MULTIPLATFORM |
808 | !DEBUG_OMAP2PLUS_UART && \ | 808 | default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \ |
809 | !DEBUG_TEGRA_UART | 809 | !DEBUG_TEGRA_UART |
810 | help | ||
811 | This option influences the normal decompressor output for | ||
812 | multiplatform kernels. Normally, multiplatform kernels disable | ||
813 | decompressor output because it is not possible to know where to | ||
814 | send the decompressor output. | ||
815 | |||
816 | When this option is set, the selected DEBUG_LL output method | ||
817 | will be re-used for normal decompressor output on multiplatform | ||
818 | kernels. | ||
819 | |||
810 | 820 | ||
811 | config UNCOMPRESS_INCLUDE | 821 | config UNCOMPRESS_INCLUDE |
812 | string | 822 | string |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c0ac0f5e5e5c..6fd2ceae305a 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI) += davinci | |||
153 | machine-$(CONFIG_ARCH_DOVE) += dove | 153 | machine-$(CONFIG_ARCH_DOVE) += dove |
154 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 | 154 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 |
155 | machine-$(CONFIG_ARCH_EP93XX) += ep93xx | 155 | machine-$(CONFIG_ARCH_EP93XX) += ep93xx |
156 | machine-$(CONFIG_ARCH_EXYNOS) += exynos | ||
156 | machine-$(CONFIG_ARCH_GEMINI) += gemini | 157 | machine-$(CONFIG_ARCH_GEMINI) += gemini |
157 | machine-$(CONFIG_ARCH_HIGHBANK) += highbank | 158 | machine-$(CONFIG_ARCH_HIGHBANK) += highbank |
158 | machine-$(CONFIG_ARCH_INTEGRATOR) += integrator | 159 | machine-$(CONFIG_ARCH_INTEGRATOR) += integrator |
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX) += iop13xx | |||
160 | machine-$(CONFIG_ARCH_IOP32X) += iop32x | 161 | machine-$(CONFIG_ARCH_IOP32X) += iop32x |
161 | machine-$(CONFIG_ARCH_IOP33X) += iop33x | 162 | machine-$(CONFIG_ARCH_IOP33X) += iop33x |
162 | machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx | 163 | machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx |
164 | machine-$(CONFIG_ARCH_KEYSTONE) += keystone | ||
163 | machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood | 165 | machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood |
164 | machine-$(CONFIG_ARCH_KS8695) += ks8695 | 166 | machine-$(CONFIG_ARCH_KS8695) += ks8695 |
165 | machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx | 167 | machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx |
166 | machine-$(CONFIG_ARCH_MMP) += mmp | 168 | machine-$(CONFIG_ARCH_MMP) += mmp |
167 | machine-$(CONFIG_ARCH_MSM) += msm | 169 | machine-$(CONFIG_ARCH_MSM) += msm |
168 | machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0 | 170 | machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0 |
171 | machine-$(CONFIG_ARCH_MVEBU) += mvebu | ||
169 | machine-$(CONFIG_ARCH_MXC) += imx | 172 | machine-$(CONFIG_ARCH_MXC) += imx |
170 | machine-$(CONFIG_ARCH_MXS) += mxs | 173 | machine-$(CONFIG_ARCH_MXS) += mxs |
171 | machine-$(CONFIG_ARCH_MVEBU) += mvebu | ||
172 | machine-$(CONFIG_ARCH_NETX) += netx | 174 | machine-$(CONFIG_ARCH_NETX) += netx |
173 | machine-$(CONFIG_ARCH_NOMADIK) += nomadik | 175 | machine-$(CONFIG_ARCH_NOMADIK) += nomadik |
174 | machine-$(CONFIG_ARCH_NSPIRE) += nspire | 176 | machine-$(CONFIG_ARCH_NSPIRE) += nspire |
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1) += omap1 | |||
176 | machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2 | 178 | machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2 |
177 | machine-$(CONFIG_ARCH_ORION5X) += orion5x | 179 | machine-$(CONFIG_ARCH_ORION5X) += orion5x |
178 | machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell | 180 | machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell |
179 | machine-$(CONFIG_ARCH_SIRF) += prima2 | ||
180 | machine-$(CONFIG_ARCH_PXA) += pxa | 181 | machine-$(CONFIG_ARCH_PXA) += pxa |
181 | machine-$(CONFIG_ARCH_REALVIEW) += realview | 182 | machine-$(CONFIG_ARCH_REALVIEW) += realview |
182 | machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip | 183 | machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip |
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx | |||
186 | machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0 | 187 | machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0 |
187 | machine-$(CONFIG_ARCH_S5PC100) += s5pc100 | 188 | machine-$(CONFIG_ARCH_S5PC100) += s5pc100 |
188 | machine-$(CONFIG_ARCH_S5PV210) += s5pv210 | 189 | machine-$(CONFIG_ARCH_S5PV210) += s5pv210 |
189 | machine-$(CONFIG_ARCH_EXYNOS) += exynos | ||
190 | machine-$(CONFIG_ARCH_SA1100) += sa1100 | 190 | machine-$(CONFIG_ARCH_SA1100) += sa1100 |
191 | machine-$(CONFIG_ARCH_SHARK) += shark | 191 | machine-$(CONFIG_ARCH_SHARK) += shark |
192 | machine-$(CONFIG_ARCH_SHMOBILE) += shmobile | 192 | machine-$(CONFIG_ARCH_SHMOBILE) += shmobile |
193 | machine-$(CONFIG_ARCH_SIRF) += prima2 | ||
194 | machine-$(CONFIG_ARCH_SOCFPGA) += socfpga | ||
195 | machine-$(CONFIG_ARCH_STI) += sti | ||
196 | machine-$(CONFIG_ARCH_SUNXI) += sunxi | ||
193 | machine-$(CONFIG_ARCH_TEGRA) += tegra | 197 | machine-$(CONFIG_ARCH_TEGRA) += tegra |
194 | machine-$(CONFIG_ARCH_U300) += u300 | 198 | machine-$(CONFIG_ARCH_U300) += u300 |
195 | machine-$(CONFIG_ARCH_U8500) += ux500 | 199 | machine-$(CONFIG_ARCH_U8500) += ux500 |
196 | machine-$(CONFIG_ARCH_VERSATILE) += versatile | 200 | machine-$(CONFIG_ARCH_VERSATILE) += versatile |
197 | machine-$(CONFIG_ARCH_VEXPRESS) += vexpress | 201 | machine-$(CONFIG_ARCH_VEXPRESS) += vexpress |
202 | machine-$(CONFIG_ARCH_VIRT) += virt | ||
198 | machine-$(CONFIG_ARCH_VT8500) += vt8500 | 203 | machine-$(CONFIG_ARCH_VT8500) += vt8500 |
199 | machine-$(CONFIG_ARCH_W90X900) += w90x900 | 204 | machine-$(CONFIG_ARCH_W90X900) += w90x900 |
205 | machine-$(CONFIG_ARCH_ZYNQ) += zynq | ||
200 | machine-$(CONFIG_FOOTBRIDGE) += footbridge | 206 | machine-$(CONFIG_FOOTBRIDGE) += footbridge |
201 | machine-$(CONFIG_ARCH_SOCFPGA) += socfpga | ||
202 | machine-$(CONFIG_PLAT_SPEAR) += spear | 207 | machine-$(CONFIG_PLAT_SPEAR) += spear |
203 | machine-$(CONFIG_ARCH_STI) += sti | ||
204 | machine-$(CONFIG_ARCH_VIRT) += virt | ||
205 | machine-$(CONFIG_ARCH_ZYNQ) += zynq | ||
206 | machine-$(CONFIG_ARCH_SUNXI) += sunxi | ||
207 | machine-$(CONFIG_ARCH_KEYSTONE) += keystone | ||
208 | 208 | ||
209 | # Platform directory name. This list is sorted alphanumerically | 209 | # Platform directory name. This list is sorted alphanumerically |
210 | # by CONFIG_* macro name. | 210 | # by CONFIG_* macro name. |
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h deleted file mode 100644 index 92f10cb5c70c..000000000000 --- a/arch/arm/include/asm/a.out-core.h +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* a.out coredump register dumper | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_A_OUT_CORE_H | ||
13 | #define _ASM_A_OUT_CORE_H | ||
14 | |||
15 | #ifdef __KERNEL__ | ||
16 | |||
17 | #include <linux/user.h> | ||
18 | #include <linux/elfcore.h> | ||
19 | |||
20 | /* | ||
21 | * fill in the user structure for an a.out core dump | ||
22 | */ | ||
23 | static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | ||
24 | { | ||
25 | struct task_struct *tsk = current; | ||
26 | |||
27 | dump->magic = CMAGIC; | ||
28 | dump->start_code = tsk->mm->start_code; | ||
29 | dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); | ||
30 | |||
31 | dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; | ||
32 | dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
33 | dump->u_ssize = 0; | ||
34 | |||
35 | memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg)); | ||
36 | |||
37 | if (dump->start_stack < 0x04000000) | ||
38 | dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; | ||
39 | |||
40 | dump->regs = *regs; | ||
41 | dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); | ||
42 | } | ||
43 | |||
44 | #endif /* __KERNEL__ */ | ||
45 | #endif /* _ASM_A_OUT_CORE_H */ | ||
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 8c25dc4e9851..9672e978d50d 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h | |||
@@ -89,13 +89,18 @@ extern unsigned int processor_id; | |||
89 | __val; \ | 89 | __val; \ |
90 | }) | 90 | }) |
91 | 91 | ||
92 | /* | ||
93 | * The memory clobber prevents gcc 4.5 from reordering the mrc before | ||
94 | * any is_smp() tests, which can cause undefined instruction aborts on | ||
95 | * ARM1136 r0 due to the missing extended CP15 registers. | ||
96 | */ | ||
92 | #define read_cpuid_ext(ext_reg) \ | 97 | #define read_cpuid_ext(ext_reg) \ |
93 | ({ \ | 98 | ({ \ |
94 | unsigned int __val; \ | 99 | unsigned int __val; \ |
95 | asm("mrc p15, 0, %0, c0, " ext_reg \ | 100 | asm("mrc p15, 0, %0, c0, " ext_reg \ |
96 | : "=r" (__val) \ | 101 | : "=r" (__val) \ |
97 | : \ | 102 | : \ |
98 | : "cc"); \ | 103 | : "memory"); \ |
99 | __val; \ | 104 | __val; \ |
100 | }) | 105 | }) |
101 | 106 | ||
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 38050b1c4800..56211f2084ef 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h | |||
@@ -130,4 +130,10 @@ struct mm_struct; | |||
130 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); | 130 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); |
131 | #define arch_randomize_brk arch_randomize_brk | 131 | #define arch_randomize_brk arch_randomize_brk |
132 | 132 | ||
133 | #ifdef CONFIG_MMU | ||
134 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | ||
135 | struct linux_binprm; | ||
136 | int arch_setup_additional_pages(struct linux_binprm *, int); | ||
137 | #endif | ||
138 | |||
133 | #endif | 139 | #endif |
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index e3d55547e755..6f18da09668b 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h | |||
@@ -6,8 +6,11 @@ | |||
6 | typedef struct { | 6 | typedef struct { |
7 | #ifdef CONFIG_CPU_HAS_ASID | 7 | #ifdef CONFIG_CPU_HAS_ASID |
8 | atomic64_t id; | 8 | atomic64_t id; |
9 | #else | ||
10 | int switch_pending; | ||
9 | #endif | 11 | #endif |
10 | unsigned int vmalloc_seq; | 12 | unsigned int vmalloc_seq; |
13 | unsigned long sigpage; | ||
11 | } mm_context_t; | 14 | } mm_context_t; |
12 | 15 | ||
13 | #ifdef CONFIG_CPU_HAS_ASID | 16 | #ifdef CONFIG_CPU_HAS_ASID |
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index b5792b7fd8d3..9b32f76bb0dd 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h | |||
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm, | |||
56 | * on non-ASID CPUs, the old mm will remain valid until the | 56 | * on non-ASID CPUs, the old mm will remain valid until the |
57 | * finish_arch_post_lock_switch() call. | 57 | * finish_arch_post_lock_switch() call. |
58 | */ | 58 | */ |
59 | set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); | 59 | mm->context.switch_pending = 1; |
60 | else | 60 | else |
61 | cpu_switch_mm(mm->pgd, mm); | 61 | cpu_switch_mm(mm->pgd, mm); |
62 | } | 62 | } |
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm, | |||
65 | finish_arch_post_lock_switch | 65 | finish_arch_post_lock_switch |
66 | static inline void finish_arch_post_lock_switch(void) | 66 | static inline void finish_arch_post_lock_switch(void) |
67 | { | 67 | { |
68 | if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { | 68 | struct mm_struct *mm = current->mm; |
69 | struct mm_struct *mm = current->mm; | 69 | |
70 | cpu_switch_mm(mm->pgd, mm); | 70 | if (mm && mm->context.switch_pending) { |
71 | /* | ||
72 | * Preemption must be disabled during cpu_switch_mm() as we | ||
73 | * have some stateful cache flush implementations. Check | ||
74 | * switch_pending again in case we were preempted and the | ||
75 | * switch to this mm was already done. | ||
76 | */ | ||
77 | preempt_disable(); | ||
78 | if (mm->context.switch_pending) { | ||
79 | mm->context.switch_pending = 0; | ||
80 | cpu_switch_mm(mm->pgd, mm); | ||
81 | } | ||
82 | preempt_enable_no_resched(); | ||
71 | } | 83 | } |
72 | } | 84 | } |
73 | 85 | ||
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 6363f3d1d505..4355f0ec44d6 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h | |||
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, | |||
142 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 142 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
143 | extern void copy_page(void *to, const void *from); | 143 | extern void copy_page(void *to, const void *from); |
144 | 144 | ||
145 | #ifdef CONFIG_KUSER_HELPERS | ||
145 | #define __HAVE_ARCH_GATE_AREA 1 | 146 | #define __HAVE_ARCH_GATE_AREA 1 |
147 | #endif | ||
146 | 148 | ||
147 | #ifdef CONFIG_ARM_LPAE | 149 | #ifdef CONFIG_ARM_LPAE |
148 | #include <asm/pgtable-3level-types.h> | 150 | #include <asm/pgtable-3level-types.h> |
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 06e7d509eaac..413f3876341c 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h | |||
@@ -54,7 +54,6 @@ struct thread_struct { | |||
54 | 54 | ||
55 | #define start_thread(regs,pc,sp) \ | 55 | #define start_thread(regs,pc,sp) \ |
56 | ({ \ | 56 | ({ \ |
57 | unsigned long *stack = (unsigned long *)sp; \ | ||
58 | memset(regs->uregs, 0, sizeof(regs->uregs)); \ | 57 | memset(regs->uregs, 0, sizeof(regs->uregs)); \ |
59 | if (current->personality & ADDR_LIMIT_32BIT) \ | 58 | if (current->personality & ADDR_LIMIT_32BIT) \ |
60 | regs->ARM_cpsr = USR_MODE; \ | 59 | regs->ARM_cpsr = USR_MODE; \ |
@@ -65,9 +64,6 @@ struct thread_struct { | |||
65 | regs->ARM_cpsr |= PSR_ENDSTATE; \ | 64 | regs->ARM_cpsr |= PSR_ENDSTATE; \ |
66 | regs->ARM_pc = pc & ~1; /* pc */ \ | 65 | regs->ARM_pc = pc & ~1; /* pc */ \ |
67 | regs->ARM_sp = sp; /* sp */ \ | 66 | regs->ARM_sp = sp; /* sp */ \ |
68 | regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ | ||
69 | regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ | ||
70 | regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ | ||
71 | nommu_start_thread(regs); \ | 67 | nommu_start_thread(regs); \ |
72 | }) | 68 | }) |
73 | 69 | ||
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 214d4158089a..2b8114fcba09 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h | |||
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |||
156 | #define TIF_USING_IWMMXT 17 | 156 | #define TIF_USING_IWMMXT 17 |
157 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ | 157 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ |
158 | #define TIF_RESTORE_SIGMASK 20 | 158 | #define TIF_RESTORE_SIGMASK 20 |
159 | #define TIF_SWITCH_MM 22 /* deferred switch_mm */ | ||
160 | 159 | ||
161 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 160 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
162 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 161 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index fdbb9e369745..f467e9b3f8d5 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void) | |||
443 | isb(); | 443 | isb(); |
444 | } | 444 | } |
445 | 445 | ||
446 | #include <asm/cputype.h> | ||
446 | #ifdef CONFIG_ARM_ERRATA_798181 | 447 | #ifdef CONFIG_ARM_ERRATA_798181 |
448 | static inline int erratum_a15_798181(void) | ||
449 | { | ||
450 | unsigned int midr = read_cpuid_id(); | ||
451 | |||
452 | /* Cortex-A15 r0p0..r3p2 affected */ | ||
453 | if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) | ||
454 | return 0; | ||
455 | return 1; | ||
456 | } | ||
457 | |||
447 | static inline void dummy_flush_tlb_a15_erratum(void) | 458 | static inline void dummy_flush_tlb_a15_erratum(void) |
448 | { | 459 | { |
449 | /* | 460 | /* |
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void) | |||
453 | dsb(); | 464 | dsb(); |
454 | } | 465 | } |
455 | #else | 466 | #else |
467 | static inline int erratum_a15_798181(void) | ||
468 | { | ||
469 | return 0; | ||
470 | } | ||
471 | |||
456 | static inline void dummy_flush_tlb_a15_erratum(void) | 472 | static inline void dummy_flush_tlb_a15_erratum(void) |
457 | { | 473 | { |
458 | } | 474 | } |
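
The erratum_a15_798181() predicate moved into tlbflush.h above keys off the MIDR: the mask 0xff0ffff0 matches implementer 0x41 (ARM) and part number 0xC0F (Cortex-A15) while ignoring the variant/revision fields, and the upper bound 0x413fc0f2 corresponds to r3p2. A minimal standalone sketch of the same decode, for illustration only (the helper and sample MIDR value here are hypothetical):

    /* Illustrative decode of an ARMv7 MIDR value, mirroring the check above.
     * Field layout: [31:24] implementer, [23:20] variant (rX),
     * [19:16] architecture, [15:4] part number, [3:0] revision (pY). */
    #include <stdio.h>

    static int a15_798181_affected(unsigned int midr)
    {
        /* Cortex-A15 r0p0..r3p2: implementer 0x41, part 0xC0F */
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
            return 0;
        return 1;
    }

    int main(void)
    {
        unsigned int midr = 0x412fc0f2;   /* hypothetical Cortex-A15 r2p2 */

        printf("implementer=0x%02x part=0x%03x r%up%u affected=%d\n",
               midr >> 24, (midr >> 4) & 0xfff,
               (midr >> 20) & 0xf, midr & 0xf,
               a15_798181_affected(midr));
        return 0;
    }
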
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 50af92bac737..4371f45c5784 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define BOOT_CPU_MODE_MISMATCH PSR_N_BIT | 29 | #define BOOT_CPU_MODE_MISMATCH PSR_N_BIT |
30 | 30 | ||
31 | #ifndef __ASSEMBLY__ | 31 | #ifndef __ASSEMBLY__ |
32 | #include <asm/cacheflush.h> | ||
32 | 33 | ||
33 | #ifdef CONFIG_ARM_VIRT_EXT | 34 | #ifdef CONFIG_ARM_VIRT_EXT |
34 | /* | 35 | /* |
@@ -41,10 +42,21 @@ | |||
41 | */ | 42 | */ |
42 | extern int __boot_cpu_mode; | 43 | extern int __boot_cpu_mode; |
43 | 44 | ||
45 | static inline void sync_boot_mode(void) | ||
46 | { | ||
47 | /* | ||
48 | * As secondaries write to __boot_cpu_mode with caches disabled, we | ||
49 | * must flush the corresponding cache entries to ensure the visibility | ||
50 | * of their writes. | ||
51 | */ | ||
52 | sync_cache_r(&__boot_cpu_mode); | ||
53 | } | ||
54 | |||
44 | void __hyp_set_vectors(unsigned long phys_vector_base); | 55 | void __hyp_set_vectors(unsigned long phys_vector_base); |
45 | unsigned long __hyp_get_vectors(void); | 56 | unsigned long __hyp_get_vectors(void); |
46 | #else | 57 | #else |
47 | #define __boot_cpu_mode (SVC_MODE) | 58 | #define __boot_cpu_mode (SVC_MODE) |
59 | #define sync_boot_mode() | ||
48 | #endif | 60 | #endif |
49 | 61 | ||
50 | #ifndef ZIMAGE | 62 | #ifndef ZIMAGE |
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index 47bcb2d254af..18d76fd5a2af 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild | |||
@@ -1,7 +1,6 @@ | |||
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | include include/uapi/asm-generic/Kbuild.asm | 2 | include include/uapi/asm-generic/Kbuild.asm |
3 | 3 | ||
4 | header-y += a.out.h | ||
5 | header-y += byteorder.h | 4 | header-y += byteorder.h |
6 | header-y += fcntl.h | 5 | header-y += fcntl.h |
7 | header-y += hwcap.h | 6 | header-y += hwcap.h |
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h deleted file mode 100644 index 083894b2e3bc..000000000000 --- a/arch/arm/include/uapi/asm/a.out.h +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | #ifndef __ARM_A_OUT_H__ | ||
2 | #define __ARM_A_OUT_H__ | ||
3 | |||
4 | #include <linux/personality.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | struct exec | ||
8 | { | ||
9 | __u32 a_info; /* Use macros N_MAGIC, etc for access */ | ||
10 | __u32 a_text; /* length of text, in bytes */ | ||
11 | __u32 a_data; /* length of data, in bytes */ | ||
12 | __u32 a_bss; /* length of uninitialized data area for file, in bytes */ | ||
13 | __u32 a_syms; /* length of symbol table data in file, in bytes */ | ||
14 | __u32 a_entry; /* start address */ | ||
15 | __u32 a_trsize; /* length of relocation info for text, in bytes */ | ||
16 | __u32 a_drsize; /* length of relocation info for data, in bytes */ | ||
17 | }; | ||
18 | |||
19 | /* | ||
20 | * This is always the same | ||
21 | */ | ||
22 | #define N_TXTADDR(a) (0x00008000) | ||
23 | |||
24 | #define N_TRSIZE(a) ((a).a_trsize) | ||
25 | #define N_DRSIZE(a) ((a).a_drsize) | ||
26 | #define N_SYMSIZE(a) ((a).a_syms) | ||
27 | |||
28 | #define M_ARM 103 | ||
29 | |||
30 | #ifndef LIBRARY_START_TEXT | ||
31 | #define LIBRARY_START_TEXT (0x00c00000) | ||
32 | #endif | ||
33 | |||
34 | #endif /* __A_OUT_GNU_H__ */ | ||
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index a39cfc2a1f90..d40d0ef389db 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -742,6 +742,18 @@ ENDPROC(__switch_to) | |||
742 | #endif | 742 | #endif |
743 | .endm | 743 | .endm |
744 | 744 | ||
745 | .macro kuser_pad, sym, size | ||
746 | .if (. - \sym) & 3 | ||
747 | .rept 4 - (. - \sym) & 3 | ||
748 | .byte 0 | ||
749 | .endr | ||
750 | .endif | ||
751 | .rept (\size - (. - \sym)) / 4 | ||
752 | .word 0xe7fddef1 | ||
753 | .endr | ||
754 | .endm | ||
755 | |||
756 | #ifdef CONFIG_KUSER_HELPERS | ||
745 | .align 5 | 757 | .align 5 |
746 | .globl __kuser_helper_start | 758 | .globl __kuser_helper_start |
747 | __kuser_helper_start: | 759 | __kuser_helper_start: |
@@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup: | |||
832 | #error "incoherent kernel configuration" | 844 | #error "incoherent kernel configuration" |
833 | #endif | 845 | #endif |
834 | 846 | ||
835 | /* pad to next slot */ | 847 | kuser_pad __kuser_cmpxchg64, 64 |
836 | .rept (16 - (. - __kuser_cmpxchg64)/4) | ||
837 | .word 0 | ||
838 | .endr | ||
839 | |||
840 | .align 5 | ||
841 | 848 | ||
842 | __kuser_memory_barrier: @ 0xffff0fa0 | 849 | __kuser_memory_barrier: @ 0xffff0fa0 |
843 | smp_dmb arm | 850 | smp_dmb arm |
844 | usr_ret lr | 851 | usr_ret lr |
845 | 852 | ||
846 | .align 5 | 853 | kuser_pad __kuser_memory_barrier, 32 |
847 | 854 | ||
848 | __kuser_cmpxchg: @ 0xffff0fc0 | 855 | __kuser_cmpxchg: @ 0xffff0fc0 |
849 | 856 | ||
@@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup: | |||
916 | 923 | ||
917 | #endif | 924 | #endif |
918 | 925 | ||
919 | .align 5 | 926 | kuser_pad __kuser_cmpxchg, 32 |
920 | 927 | ||
921 | __kuser_get_tls: @ 0xffff0fe0 | 928 | __kuser_get_tls: @ 0xffff0fe0 |
922 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init | 929 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init |
923 | usr_ret lr | 930 | usr_ret lr |
924 | mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code | 931 | mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code |
925 | .rep 4 | 932 | kuser_pad __kuser_get_tls, 16 |
933 | .rep 3 | ||
926 | .word 0 @ 0xffff0ff0 software TLS value, then | 934 | .word 0 @ 0xffff0ff0 software TLS value, then |
927 | .endr @ pad up to __kuser_helper_version | 935 | .endr @ pad up to __kuser_helper_version |
928 | 936 | ||
@@ -932,14 +940,16 @@ __kuser_helper_version: @ 0xffff0ffc | |||
932 | .globl __kuser_helper_end | 940 | .globl __kuser_helper_end |
933 | __kuser_helper_end: | 941 | __kuser_helper_end: |
934 | 942 | ||
943 | #endif | ||
944 | |||
935 | THUMB( .thumb ) | 945 | THUMB( .thumb ) |
936 | 946 | ||
937 | /* | 947 | /* |
938 | * Vector stubs. | 948 | * Vector stubs. |
939 | * | 949 | * |
940 | * This code is copied to 0xffff0200 so we can use branches in the | 950 | * This code is copied to 0xffff1000 so we can use branches in the |
941 | * vectors, rather than ldr's. Note that this code must not | 951 | * vectors, rather than ldr's. Note that this code must not exceed |
942 | * exceed 0x300 bytes. | 952 | * a page size. |
943 | * | 953 | * |
944 | * Common stub entry macro: | 954 | * Common stub entry macro: |
945 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC | 955 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC |
@@ -986,8 +996,17 @@ ENDPROC(vector_\name) | |||
986 | 1: | 996 | 1: |
987 | .endm | 997 | .endm |
988 | 998 | ||
989 | .globl __stubs_start | 999 | .section .stubs, "ax", %progbits |
990 | __stubs_start: | 1000 | __stubs_start: |
1001 | @ This must be the first word | ||
1002 | .word vector_swi | ||
1003 | |||
1004 | vector_rst: | ||
1005 | ARM( swi SYS_ERROR0 ) | ||
1006 | THUMB( svc #0 ) | ||
1007 | THUMB( nop ) | ||
1008 | b vector_und | ||
1009 | |||
991 | /* | 1010 | /* |
992 | * Interrupt dispatcher | 1011 | * Interrupt dispatcher |
993 | */ | 1012 | */ |
@@ -1082,6 +1101,16 @@ __stubs_start: | |||
1082 | .align 5 | 1101 | .align 5 |
1083 | 1102 | ||
1084 | /*============================================================================= | 1103 | /*============================================================================= |
1104 | * Address exception handler | ||
1105 | *----------------------------------------------------------------------------- | ||
1106 | * These aren't too critical. | ||
1107 | * (they're not supposed to happen, and won't happen in 32-bit data mode). | ||
1108 | */ | ||
1109 | |||
1110 | vector_addrexcptn: | ||
1111 | b vector_addrexcptn | ||
1112 | |||
1113 | /*============================================================================= | ||
1085 | * Undefined FIQs | 1114 | * Undefined FIQs |
1086 | *----------------------------------------------------------------------------- | 1115 | *----------------------------------------------------------------------------- |
1087 | * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC | 1116 | * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC |
@@ -1094,45 +1123,19 @@ __stubs_start: | |||
1094 | vector_fiq: | 1123 | vector_fiq: |
1095 | subs pc, lr, #4 | 1124 | subs pc, lr, #4 |
1096 | 1125 | ||
1097 | /*============================================================================= | 1126 | .globl vector_fiq_offset |
1098 | * Address exception handler | 1127 | .equ vector_fiq_offset, vector_fiq |
1099 | *----------------------------------------------------------------------------- | ||
1100 | * These aren't too critical. | ||
1101 | * (they're not supposed to happen, and won't happen in 32-bit data mode). | ||
1102 | */ | ||
1103 | |||
1104 | vector_addrexcptn: | ||
1105 | b vector_addrexcptn | ||
1106 | |||
1107 | /* | ||
1108 | * We group all the following data together to optimise | ||
1109 | * for CPUs with separate I & D caches. | ||
1110 | */ | ||
1111 | .align 5 | ||
1112 | |||
1113 | .LCvswi: | ||
1114 | .word vector_swi | ||
1115 | |||
1116 | .globl __stubs_end | ||
1117 | __stubs_end: | ||
1118 | |||
1119 | .equ stubs_offset, __vectors_start + 0x200 - __stubs_start | ||
1120 | 1128 | ||
1121 | .globl __vectors_start | 1129 | .section .vectors, "ax", %progbits |
1122 | __vectors_start: | 1130 | __vectors_start: |
1123 | ARM( swi SYS_ERROR0 ) | 1131 | W(b) vector_rst |
1124 | THUMB( svc #0 ) | 1132 | W(b) vector_und |
1125 | THUMB( nop ) | 1133 | W(ldr) pc, __vectors_start + 0x1000 |
1126 | W(b) vector_und + stubs_offset | 1134 | W(b) vector_pabt |
1127 | W(ldr) pc, .LCvswi + stubs_offset | 1135 | W(b) vector_dabt |
1128 | W(b) vector_pabt + stubs_offset | 1136 | W(b) vector_addrexcptn |
1129 | W(b) vector_dabt + stubs_offset | 1137 | W(b) vector_irq |
1130 | W(b) vector_addrexcptn + stubs_offset | 1138 | W(b) vector_fiq |
1131 | W(b) vector_irq + stubs_offset | ||
1132 | W(b) vector_fiq + stubs_offset | ||
1133 | |||
1134 | .globl __vectors_end | ||
1135 | __vectors_end: | ||
1136 | 1139 | ||
1137 | .data | 1140 | .data |
1138 | 1141 | ||
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index e00621f1403f..52b26432c9a9 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S | |||
@@ -49,7 +49,7 @@ __irq_entry: | |||
49 | mov r1, sp | 49 | mov r1, sp |
50 | stmdb sp!, {lr} | 50 | stmdb sp!, {lr} |
51 | @ routine called with r0 = irq number, r1 = struct pt_regs * | 51 | @ routine called with r0 = irq number, r1 = struct pt_regs * |
52 | bl nvic_do_IRQ | 52 | bl nvic_handle_irq |
53 | 53 | ||
54 | pop {lr} | 54 | pop {lr} |
55 | @ | 55 | @ |
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 2adda11f712f..25442f451148 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c | |||
@@ -47,6 +47,11 @@ | |||
47 | #include <asm/irq.h> | 47 | #include <asm/irq.h> |
48 | #include <asm/traps.h> | 48 | #include <asm/traps.h> |
49 | 49 | ||
50 | #define FIQ_OFFSET ({ \ | ||
51 | extern void *vector_fiq_offset; \ | ||
52 | (unsigned)&vector_fiq_offset; \ | ||
53 | }) | ||
54 | |||
50 | static unsigned long no_fiq_insn; | 55 | static unsigned long no_fiq_insn; |
51 | 56 | ||
52 | /* Default reacquire function | 57 | /* Default reacquire function |
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec) | |||
80 | void set_fiq_handler(void *start, unsigned int length) | 85 | void set_fiq_handler(void *start, unsigned int length) |
81 | { | 86 | { |
82 | #if defined(CONFIG_CPU_USE_DOMAINS) | 87 | #if defined(CONFIG_CPU_USE_DOMAINS) |
83 | memcpy((void *)0xffff001c, start, length); | 88 | void *base = (void *)0xffff0000; |
84 | #else | 89 | #else |
85 | memcpy(vectors_page + 0x1c, start, length); | 90 | void *base = vectors_page; |
86 | #endif | 91 | #endif |
87 | flush_icache_range(0xffff001c, 0xffff001c + length); | 92 | unsigned offset = FIQ_OFFSET; |
93 | |||
94 | memcpy(base + offset, start, length); | ||
95 | flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); | ||
88 | if (!vectors_high()) | 96 | if (!vectors_high()) |
89 | flush_icache_range(0x1c, 0x1c + length); | 97 | flush_icache_range(offset, offset + length); |
90 | } | 98 | } |
91 | 99 | ||
92 | int claim_fiq(struct fiq_handler *f) | 100 | int claim_fiq(struct fiq_handler *f) |
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq); | |||
144 | 152 | ||
145 | void __init init_FIQ(int start) | 153 | void __init init_FIQ(int start) |
146 | { | 154 | { |
147 | no_fiq_insn = *(unsigned long *)0xffff001c; | 155 | unsigned offset = FIQ_OFFSET; |
156 | no_fiq_insn = *(unsigned long *)(0xffff0000 + offset); | ||
148 | fiq_start = start; | 157 | fiq_start = start; |
149 | } | 158 | } |
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index b361de143756..14235ba64a90 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S | |||
@@ -87,6 +87,7 @@ ENTRY(stext) | |||
87 | ENDPROC(stext) | 87 | ENDPROC(stext) |
88 | 88 | ||
89 | #ifdef CONFIG_SMP | 89 | #ifdef CONFIG_SMP |
90 | .text | ||
90 | ENTRY(secondary_startup) | 91 | ENTRY(secondary_startup) |
91 | /* | 92 | /* |
92 | * Common entry point for secondary CPUs. | 93 | * Common entry point for secondary CPUs. |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 9cf6063020ae..2c7cc1e03473 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -343,6 +343,7 @@ __turn_mmu_on_loc: | |||
343 | .long __turn_mmu_on_end | 343 | .long __turn_mmu_on_end |
344 | 344 | ||
345 | #if defined(CONFIG_SMP) | 345 | #if defined(CONFIG_SMP) |
346 | .text | ||
346 | ENTRY(secondary_startup) | 347 | ENTRY(secondary_startup) |
347 | /* | 348 | /* |
348 | * Common entry point for secondary CPUs. | 349 | * Common entry point for secondary CPUs. |
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 4910232c4833..797b1a6a4906 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S | |||
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode) | |||
56 | ldr \reg3, [\reg2] | 56 | ldr \reg3, [\reg2] |
57 | ldr \reg1, [\reg2, \reg3] | 57 | ldr \reg1, [\reg2, \reg3] |
58 | cmp \mode, \reg1 @ matches primary CPU boot mode? | 58 | cmp \mode, \reg1 @ matches primary CPU boot mode? |
59 | orrne r7, r7, #BOOT_CPU_MODE_MISMATCH | 59 | orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH |
60 | strne r7, [r5, r6] @ record what happened and give up | 60 | strne \reg1, [\reg2, \reg3] @ record what happened and give up |
61 | .endm | 61 | .endm |
62 | 62 | ||
63 | #else /* ZIMAGE */ | 63 | #else /* ZIMAGE */ |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index d3ca4f6915af..536c85fe72a8 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -197,6 +197,7 @@ void machine_shutdown(void) | |||
197 | */ | 197 | */ |
198 | void machine_halt(void) | 198 | void machine_halt(void) |
199 | { | 199 | { |
200 | local_irq_disable(); | ||
200 | smp_send_stop(); | 201 | smp_send_stop(); |
201 | 202 | ||
202 | local_irq_disable(); | 203 | local_irq_disable(); |
@@ -211,6 +212,7 @@ void machine_halt(void) | |||
211 | */ | 212 | */ |
212 | void machine_power_off(void) | 213 | void machine_power_off(void) |
213 | { | 214 | { |
215 | local_irq_disable(); | ||
214 | smp_send_stop(); | 216 | smp_send_stop(); |
215 | 217 | ||
216 | if (pm_power_off) | 218 | if (pm_power_off) |
@@ -230,6 +232,7 @@ void machine_power_off(void) | |||
230 | */ | 232 | */ |
231 | void machine_restart(char *cmd) | 233 | void machine_restart(char *cmd) |
232 | { | 234 | { |
235 | local_irq_disable(); | ||
233 | smp_send_stop(); | 236 | smp_send_stop(); |
234 | 237 | ||
235 | arm_pm_restart(reboot_mode, cmd); | 238 | arm_pm_restart(reboot_mode, cmd); |
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
426 | } | 429 | } |
427 | 430 | ||
428 | #ifdef CONFIG_MMU | 431 | #ifdef CONFIG_MMU |
432 | #ifdef CONFIG_KUSER_HELPERS | ||
429 | /* | 433 | /* |
430 | * The vectors page is always readable from user space for the | 434 | * The vectors page is always readable from user space for the |
431 | * atomic helpers and the signal restart code. Insert it into the | 435 | * atomic helpers. Insert it into the gate_vma so that it is visible |
432 | * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. | 436 | * through ptrace and /proc/<pid>/mem. |
433 | */ | 437 | */ |
434 | static struct vm_area_struct gate_vma = { | 438 | static struct vm_area_struct gate_vma = { |
435 | .vm_start = 0xffff0000, | 439 | .vm_start = 0xffff0000, |
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr) | |||
458 | { | 462 | { |
459 | return in_gate_area(NULL, addr); | 463 | return in_gate_area(NULL, addr); |
460 | } | 464 | } |
465 | #define is_gate_vma(vma) ((vma) == &gate_vma) | ||
466 | #else | ||
467 | #define is_gate_vma(vma) 0 | ||
468 | #endif | ||
461 | 469 | ||
462 | const char *arch_vma_name(struct vm_area_struct *vma) | 470 | const char *arch_vma_name(struct vm_area_struct *vma) |
463 | { | 471 | { |
464 | return (vma == &gate_vma) ? "[vectors]" : NULL; | 472 | return is_gate_vma(vma) ? "[vectors]" : |
473 | (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? | ||
474 | "[sigpage]" : NULL; | ||
475 | } | ||
476 | |||
477 | static struct page *signal_page; | ||
478 | extern struct page *get_signal_page(void); | ||
479 | |||
480 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
481 | { | ||
482 | struct mm_struct *mm = current->mm; | ||
483 | unsigned long addr; | ||
484 | int ret; | ||
485 | |||
486 | if (!signal_page) | ||
487 | signal_page = get_signal_page(); | ||
488 | if (!signal_page) | ||
489 | return -ENOMEM; | ||
490 | |||
491 | down_write(&mm->mmap_sem); | ||
492 | addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); | ||
493 | if (IS_ERR_VALUE(addr)) { | ||
494 | ret = addr; | ||
495 | goto up_fail; | ||
496 | } | ||
497 | |||
498 | ret = install_special_mapping(mm, addr, PAGE_SIZE, | ||
499 | VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, | ||
500 | &signal_page); | ||
501 | |||
502 | if (ret == 0) | ||
503 | mm->context.sigpage = addr; | ||
504 | |||
505 | up_fail: | ||
506 | up_write(&mm->mmap_sem); | ||
507 | return ret; | ||
465 | } | 508 | } |
466 | #endif | 509 | #endif |
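
With the process.c hunk above, the signal return trampoline moves out of the vector page into a per-process page that arch_vma_name() reports as "[sigpage]", while the kuser vector page (when configured in) still appears as "[vectors]" via the gate VMA. A small userspace check, assuming a kernel carrying this change:

    /* Scan /proc/self/maps for the mapping names reported above. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (strstr(line, "[vectors]") || strstr(line, "[sigpage]"))
                fputs(line, stdout);
        fclose(f);
        return 0;
    }
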
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 63af9a7ae512..afc2489ee13b 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b) | |||
836 | void __init hyp_mode_check(void) | 836 | void __init hyp_mode_check(void) |
837 | { | 837 | { |
838 | #ifdef CONFIG_ARM_VIRT_EXT | 838 | #ifdef CONFIG_ARM_VIRT_EXT |
839 | sync_boot_mode(); | ||
840 | |||
839 | if (is_hyp_mode_available()) { | 841 | if (is_hyp_mode_available()) { |
840 | pr_info("CPU: All CPU(s) started in HYP mode.\n"); | 842 | pr_info("CPU: All CPU(s) started in HYP mode.\n"); |
841 | pr_info("CPU: Virtualization extensions available.\n"); | 843 | pr_info("CPU: Virtualization extensions available.\n"); |
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = { | |||
971 | "vfpv4", | 973 | "vfpv4", |
972 | "idiva", | 974 | "idiva", |
973 | "idivt", | 975 | "idivt", |
976 | "vfpd32", | ||
974 | "lpae", | 977 | "lpae", |
975 | NULL | 978 | NULL |
976 | }; | 979 | }; |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 1c16c35c271a..ab3304225272 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/random.h> | ||
11 | #include <linux/signal.h> | 12 | #include <linux/signal.h> |
12 | #include <linux/personality.h> | 13 | #include <linux/personality.h> |
13 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
@@ -15,12 +16,11 @@ | |||
15 | 16 | ||
16 | #include <asm/elf.h> | 17 | #include <asm/elf.h> |
17 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/traps.h> | ||
18 | #include <asm/ucontext.h> | 20 | #include <asm/ucontext.h> |
19 | #include <asm/unistd.h> | 21 | #include <asm/unistd.h> |
20 | #include <asm/vfp.h> | 22 | #include <asm/vfp.h> |
21 | 23 | ||
22 | #include "signal.h" | ||
23 | |||
24 | /* | 24 | /* |
25 | * For ARM syscalls, we encode the syscall number into the instruction. | 25 | * For ARM syscalls, we encode the syscall number into the instruction. |
26 | */ | 26 | */ |
@@ -40,11 +40,13 @@ | |||
40 | #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) | 40 | #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) |
41 | #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) | 41 | #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) |
42 | 42 | ||
43 | const unsigned long sigreturn_codes[7] = { | 43 | static const unsigned long sigreturn_codes[7] = { |
44 | MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, | 44 | MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, |
45 | MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, | 45 | MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static unsigned long signal_return_offset; | ||
49 | |||
48 | #ifdef CONFIG_CRUNCH | 50 | #ifdef CONFIG_CRUNCH |
49 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) | 51 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) |
50 | { | 52 | { |
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
400 | __put_user(sigreturn_codes[idx+1], rc+1)) | 402 | __put_user(sigreturn_codes[idx+1], rc+1)) |
401 | return 1; | 403 | return 1; |
402 | 404 | ||
403 | if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { | 405 | #ifdef CONFIG_MMU |
406 | if (cpsr & MODE32_BIT) { | ||
407 | struct mm_struct *mm = current->mm; | ||
408 | |||
404 | /* | 409 | /* |
405 | * 32-bit code can use the new high-page | 410 | * 32-bit code can use the signal return page |
406 | * signal return code support except when the MPU has | 411 | * except when the MPU has protected the vectors |
407 | * protected the vectors page from PL0 | 412 | * page from PL0 |
408 | */ | 413 | */ |
409 | retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; | 414 | retcode = mm->context.sigpage + signal_return_offset + |
410 | } else { | 415 | (idx << 2) + thumb; |
416 | } else | ||
417 | #endif | ||
418 | { | ||
411 | /* | 419 | /* |
412 | * Ensure that the instruction cache sees | 420 | * Ensure that the instruction cache sees |
413 | * the return code written onto the stack. | 421 | * the return code written onto the stack. |
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |||
608 | } while (thread_flags & _TIF_WORK_MASK); | 616 | } while (thread_flags & _TIF_WORK_MASK); |
609 | return 0; | 617 | return 0; |
610 | } | 618 | } |
619 | |||
620 | struct page *get_signal_page(void) | ||
621 | { | ||
622 | unsigned long ptr; | ||
623 | unsigned offset; | ||
624 | struct page *page; | ||
625 | void *addr; | ||
626 | |||
627 | page = alloc_pages(GFP_KERNEL, 0); | ||
628 | |||
629 | if (!page) | ||
630 | return NULL; | ||
631 | |||
632 | addr = page_address(page); | ||
633 | |||
634 | /* Give the signal return code some randomness */ | ||
635 | offset = 0x200 + (get_random_int() & 0x7fc); | ||
636 | signal_return_offset = offset; | ||
637 | |||
638 | /* | ||
639 | * Copy signal return handlers into the vector page, and | ||
640 | * set sigreturn to be a pointer to these. | ||
641 | */ | ||
642 | memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); | ||
643 | |||
644 | ptr = (unsigned long)addr + offset; | ||
645 | flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); | ||
646 | |||
647 | return page; | ||
648 | } | ||
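
The offset chosen in get_signal_page() above, 0x200 + (get_random_int() & 0x7fc), is always word aligned and falls in the range 0x200..0x9fc, so the seven-word sigreturn_codes array (28 bytes on 32-bit ARM) always lands well inside the page. A quick standalone check of that arithmetic, for illustration only:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int r, min = ~0u, max = 0;

        /* get_random_int() & 0x7fc only keeps bits 2..10, so walk them all */
        for (r = 0; r <= 0x7fc; r += 4) {
            unsigned int off = 0x200 + r;

            assert((off & 3) == 0);          /* word aligned */
            assert(off + 7 * 4 <= 0x1000);   /* sigreturn codes fit in the page */
            if (off < min) min = off;
            if (off > max) max = off;
        }
        printf("sigpage offset range: 0x%x..0x%x\n", min, max);
        return 0;
    }
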
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h deleted file mode 100644 index 5ff067b7c752..000000000000 --- a/arch/arm/kernel/signal.h +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/signal.h | ||
3 | * | ||
4 | * Copyright (C) 2005-2009 Russell King. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) | ||
11 | |||
12 | extern const unsigned long sigreturn_codes[7]; | ||
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index a98b62dca2fa..c2edfff573c2 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c | |||
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored) | |||
70 | local_flush_bp_all(); | 70 | local_flush_bp_all(); |
71 | } | 71 | } |
72 | 72 | ||
73 | #ifdef CONFIG_ARM_ERRATA_798181 | ||
74 | static int erratum_a15_798181(void) | ||
75 | { | ||
76 | unsigned int midr = read_cpuid_id(); | ||
77 | |||
78 | /* Cortex-A15 r0p0..r3p2 affected */ | ||
79 | if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) | ||
80 | return 0; | ||
81 | return 1; | ||
82 | } | ||
83 | #else | ||
84 | static int erratum_a15_798181(void) | ||
85 | { | ||
86 | return 0; | ||
87 | } | ||
88 | #endif | ||
89 | |||
90 | static void ipi_flush_tlb_a15_erratum(void *arg) | 73 | static void ipi_flush_tlb_a15_erratum(void *arg) |
91 | { | 74 | { |
92 | dmb(); | 75 | dmb(); |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index cab094c234ee..ab517fcce21b 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -35,8 +35,6 @@ | |||
35 | #include <asm/tls.h> | 35 | #include <asm/tls.h> |
36 | #include <asm/system_misc.h> | 36 | #include <asm/system_misc.h> |
37 | 37 | ||
38 | #include "signal.h" | ||
39 | |||
40 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; | 38 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; |
41 | 39 | ||
42 | void *vectors_page; | 40 | void *vectors_page; |
@@ -800,15 +798,26 @@ void __init trap_init(void) | |||
800 | return; | 798 | return; |
801 | } | 799 | } |
802 | 800 | ||
803 | static void __init kuser_get_tls_init(unsigned long vectors) | 801 | #ifdef CONFIG_KUSER_HELPERS |
802 | static void __init kuser_init(void *vectors) | ||
804 | { | 803 | { |
804 | extern char __kuser_helper_start[], __kuser_helper_end[]; | ||
805 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | ||
806 | |||
807 | memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | ||
808 | |||
805 | /* | 809 | /* |
806 | * vectors + 0xfe0 = __kuser_get_tls | 810 | * vectors + 0xfe0 = __kuser_get_tls |
807 | * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 | 811 | * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 |
808 | */ | 812 | */ |
809 | if (tls_emu || has_tls_reg) | 813 | if (tls_emu || has_tls_reg) |
810 | memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); | 814 | memcpy(vectors + 0xfe0, vectors + 0xfe8, 4); |
811 | } | 815 | } |
816 | #else | ||
817 | static void __init kuser_init(void *vectors) | ||
818 | { | ||
819 | } | ||
820 | #endif | ||
812 | 821 | ||
813 | void __init early_trap_init(void *vectors_base) | 822 | void __init early_trap_init(void *vectors_base) |
814 | { | 823 | { |
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base) | |||
816 | unsigned long vectors = (unsigned long)vectors_base; | 825 | unsigned long vectors = (unsigned long)vectors_base; |
817 | extern char __stubs_start[], __stubs_end[]; | 826 | extern char __stubs_start[], __stubs_end[]; |
818 | extern char __vectors_start[], __vectors_end[]; | 827 | extern char __vectors_start[], __vectors_end[]; |
819 | extern char __kuser_helper_start[], __kuser_helper_end[]; | 828 | unsigned i; |
820 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | ||
821 | 829 | ||
822 | vectors_page = vectors_base; | 830 | vectors_page = vectors_base; |
823 | 831 | ||
824 | /* | 832 | /* |
833 | * Poison the vectors page with an undefined instruction. This | ||
834 | * instruction is chosen to be undefined for both ARM and Thumb | ||
835 | * ISAs. The Thumb version is an undefined instruction with a | ||
836 | * branch back to the undefined instruction. | ||
837 | */ | ||
838 | for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) | ||
839 | ((u32 *)vectors_base)[i] = 0xe7fddef1; | ||
840 | |||
841 | /* | ||
825 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) | 842 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) |
826 | * into the vector page, mapped at 0xffff0000, and ensure these | 843 | * into the vector page, mapped at 0xffff0000, and ensure these |
827 | * are visible to the instruction stream. | 844 | * are visible to the instruction stream. |
828 | */ | 845 | */ |
829 | memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); | 846 | memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); |
830 | memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); | 847 | memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); |
831 | memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | ||
832 | 848 | ||
833 | /* | 849 | kuser_init(vectors_base); |
834 | * Do processor specific fixups for the kuser helpers | ||
835 | */ | ||
836 | kuser_get_tls_init(vectors); | ||
837 | |||
838 | /* | ||
839 | * Copy signal return handlers into the vector page, and | ||
840 | * set sigreturn to be a pointer to these. | ||
841 | */ | ||
842 | memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), | ||
843 | sigreturn_codes, sizeof(sigreturn_codes)); | ||
844 | 850 | ||
845 | flush_icache_range(vectors, vectors + PAGE_SIZE); | 851 | flush_icache_range(vectors, vectors + PAGE_SIZE * 2); |
846 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); | 852 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); |
847 | #else /* ifndef CONFIG_CPU_V7M */ | 853 | #else /* ifndef CONFIG_CPU_V7M */ |
848 | /* | 854 | /* |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index fa25e4e425f6..7bcee5c9b604 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -148,6 +148,23 @@ SECTIONS | |||
148 | . = ALIGN(PAGE_SIZE); | 148 | . = ALIGN(PAGE_SIZE); |
149 | __init_begin = .; | 149 | __init_begin = .; |
150 | #endif | 150 | #endif |
151 | /* | ||
152 | * The vectors and stubs are relocatable code, and the | ||
153 | * only thing that matters is their relative offsets | ||
154 | */ | ||
155 | __vectors_start = .; | ||
156 | .vectors 0 : AT(__vectors_start) { | ||
157 | *(.vectors) | ||
158 | } | ||
159 | . = __vectors_start + SIZEOF(.vectors); | ||
160 | __vectors_end = .; | ||
161 | |||
162 | __stubs_start = .; | ||
163 | .stubs 0x1000 : AT(__stubs_start) { | ||
164 | *(.stubs) | ||
165 | } | ||
166 | . = __stubs_start + SIZEOF(.stubs); | ||
167 | __stubs_end = .; | ||
151 | 168 | ||
152 | INIT_TEXT_SECTION(8) | 169 | INIT_TEXT_SECTION(8) |
153 | .exit.text : { | 170 | .exit.text : { |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 6cacdc8dd654..db5c2cab8fda 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -421,24 +421,28 @@ config CPU_32v3 | |||
421 | select CPU_USE_DOMAINS if MMU | 421 | select CPU_USE_DOMAINS if MMU |
422 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 422 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
423 | select TLS_REG_EMUL if SMP || !MMU | 423 | select TLS_REG_EMUL if SMP || !MMU |
424 | select NEED_KUSER_HELPERS | ||
424 | 425 | ||
425 | config CPU_32v4 | 426 | config CPU_32v4 |
426 | bool | 427 | bool |
427 | select CPU_USE_DOMAINS if MMU | 428 | select CPU_USE_DOMAINS if MMU |
428 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 429 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
429 | select TLS_REG_EMUL if SMP || !MMU | 430 | select TLS_REG_EMUL if SMP || !MMU |
431 | select NEED_KUSER_HELPERS | ||
430 | 432 | ||
431 | config CPU_32v4T | 433 | config CPU_32v4T |
432 | bool | 434 | bool |
433 | select CPU_USE_DOMAINS if MMU | 435 | select CPU_USE_DOMAINS if MMU |
434 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 436 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
435 | select TLS_REG_EMUL if SMP || !MMU | 437 | select TLS_REG_EMUL if SMP || !MMU |
438 | select NEED_KUSER_HELPERS | ||
436 | 439 | ||
437 | config CPU_32v5 | 440 | config CPU_32v5 |
438 | bool | 441 | bool |
439 | select CPU_USE_DOMAINS if MMU | 442 | select CPU_USE_DOMAINS if MMU |
440 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 443 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
441 | select TLS_REG_EMUL if SMP || !MMU | 444 | select TLS_REG_EMUL if SMP || !MMU |
445 | select NEED_KUSER_HELPERS | ||
442 | 446 | ||
443 | config CPU_32v6 | 447 | config CPU_32v6 |
444 | bool | 448 | bool |
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE | |||
776 | 780 | ||
777 | config TLS_REG_EMUL | 781 | config TLS_REG_EMUL |
778 | bool | 782 | bool |
783 | select NEED_KUSER_HELPERS | ||
779 | help | 784 | help |
780 | An SMP system using a pre-ARMv6 processor (there are apparently | 785 | An SMP system using a pre-ARMv6 processor (there are apparently |
781 | a few prototypes like that in existence) and therefore access to | 786 | a few prototypes like that in existence) and therefore access to |
@@ -783,11 +788,40 @@ config TLS_REG_EMUL | |||
783 | 788 | ||
784 | config NEEDS_SYSCALL_FOR_CMPXCHG | 789 | config NEEDS_SYSCALL_FOR_CMPXCHG |
785 | bool | 790 | bool |
791 | select NEED_KUSER_HELPERS | ||
786 | help | 792 | help |
787 | SMP on a pre-ARMv6 processor? Well OK then. | 793 | SMP on a pre-ARMv6 processor? Well OK then. |
788 | Forget about fast user space cmpxchg support. | 794 | Forget about fast user space cmpxchg support. |
789 | It is just not possible. | 795 | It is just not possible. |
790 | 796 | ||
797 | config NEED_KUSER_HELPERS | ||
798 | bool | ||
799 | |||
800 | config KUSER_HELPERS | ||
801 | bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS | ||
802 | default y | ||
803 | help | ||
804 | Warning: disabling this option may break user programs. | ||
805 | |||
806 | Provide kuser helpers in the vector page. The kernel provides | ||
807 | helper code to userspace in read only form at a fixed location | ||
808 | in the high vector page to allow userspace to be independent of | ||
809 | the CPU type fitted to the system. This permits binaries to be | ||
810 | run on ARMv4 through to ARMv7 without modification. | ||
811 | |||
812 | However, the fixed address nature of these helpers can be used | ||
813 | by ROP (return orientated programming) authors when creating | ||
814 | exploits. | ||
815 | |||
816 | If all of the binaries and libraries which run on your platform | ||
817 | are built specifically for your platform, and make no use of | ||
818 | these helpers, then you can turn this option off. However, | ||
819 | when such a binary or library is run, it will receive a SIGILL | ||
820 | signal, which will terminate the program. | ||
821 | |||
822 | Say N here only if you are absolutely certain that you do not | ||
823 | need these helpers; otherwise, the safe option is to say Y. | ||
824 | |||
791 | config DMA_CACHE_RWFO | 825 | config DMA_CACHE_RWFO |
792 | bool "Enable read/write for ownership DMA cache maintenance" | 826 | bool "Enable read/write for ownership DMA cache maintenance" |
793 | depends on CPU_V6K && SMP | 827 | depends on CPU_V6K && SMP |
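
The KUSER_HELPERS help text above refers to the fixed-address helpers documented in Documentation/arm/kernel_user_helpers.txt: __kuser_memory_barrier, __kuser_cmpxchg and __kuser_get_tls live at 0xffff0fa0, 0xffff0fc0 and 0xffff0fe0 respectively, with a version word at 0xffff0ffc (the same addresses annotated in the entry-armv.S hunk earlier). A sketch of how userspace traditionally reaches them; this is 32-bit ARM only, and on a kernel built with KUSER_HELPERS=n the vector page is poisoned and the call raises SIGILL instead:

    /* Fixed kuser ABI addresses in the high vector page. */
    #include <stdio.h>

    typedef void *(*kuser_get_tls_t)(void);
    #define __kuser_get_tls        ((kuser_get_tls_t)0xffff0fe0)
    #define __kuser_helper_version (*(const int *)0xffff0ffc)

    int main(void)
    {
        printf("kuser helper version: %d\n", __kuser_helper_version);
        printf("TLS pointer: %p\n", __kuser_get_tls());
        return 0;
    }
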
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index b55b1015724b..4a0544492f10 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | |||
245 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { | 245 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { |
246 | local_flush_bp_all(); | 246 | local_flush_bp_all(); |
247 | local_flush_tlb_all(); | 247 | local_flush_tlb_all(); |
248 | dummy_flush_tlb_a15_erratum(); | 248 | if (erratum_a15_798181()) |
249 | dummy_flush_tlb_a15_erratum(); | ||
249 | } | 250 | } |
250 | 251 | ||
251 | atomic64_set(&per_cpu(active_asids, cpu), asid); | 252 | atomic64_set(&per_cpu(active_asids, cpu), asid); |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4f56617a2392..53cdbd39ec8e 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0; | |||
989 | 989 | ||
990 | void __init sanity_check_meminfo(void) | 990 | void __init sanity_check_meminfo(void) |
991 | { | 991 | { |
992 | phys_addr_t memblock_limit = 0; | ||
992 | int i, j, highmem = 0; | 993 | int i, j, highmem = 0; |
993 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; | 994 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; |
994 | 995 | ||
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void) | |||
1052 | bank->size = size_limit; | 1053 | bank->size = size_limit; |
1053 | } | 1054 | } |
1054 | #endif | 1055 | #endif |
1055 | if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) | 1056 | if (!bank->highmem) { |
1056 | arm_lowmem_limit = bank->start + bank->size; | 1057 | phys_addr_t bank_end = bank->start + bank->size; |
1057 | 1058 | ||
1059 | if (bank_end > arm_lowmem_limit) | ||
1060 | arm_lowmem_limit = bank_end; | ||
1061 | |||
1062 | /* | ||
1063 | * Find the first non-section-aligned page, and point | ||
1064 | * memblock_limit at it. This relies on rounding the | ||
1065 | * limit down to be section-aligned, which happens at | ||
1066 | * the end of this function. | ||
1067 | * | ||
1068 | * With this algorithm, the start or end of almost any | ||
1069 | * bank can be non-section-aligned. The only exception | ||
1070 | * is that the start of the bank 0 must be section- | ||
1071 | * aligned, since otherwise memory would need to be | ||
1072 | * allocated when mapping the start of bank 0, which | ||
1073 | * occurs before any free memory is mapped. | ||
1074 | */ | ||
1075 | if (!memblock_limit) { | ||
1076 | if (!IS_ALIGNED(bank->start, SECTION_SIZE)) | ||
1077 | memblock_limit = bank->start; | ||
1078 | else if (!IS_ALIGNED(bank_end, SECTION_SIZE)) | ||
1079 | memblock_limit = bank_end; | ||
1080 | } | ||
1081 | } | ||
1058 | j++; | 1082 | j++; |
1059 | } | 1083 | } |
1060 | #ifdef CONFIG_HIGHMEM | 1084 | #ifdef CONFIG_HIGHMEM |
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void) | |||
1079 | #endif | 1103 | #endif |
1080 | meminfo.nr_banks = j; | 1104 | meminfo.nr_banks = j; |
1081 | high_memory = __va(arm_lowmem_limit - 1) + 1; | 1105 | high_memory = __va(arm_lowmem_limit - 1) + 1; |
1082 | memblock_set_current_limit(arm_lowmem_limit); | 1106 | |
1107 | /* | ||
1108 | * Round the memblock limit down to a section size. This | ||
1109 | * helps to ensure that we will allocate memory from the | ||
1110 | * last full section, which should be mapped. | ||
1111 | */ | ||
1112 | if (memblock_limit) | ||
1113 | memblock_limit = round_down(memblock_limit, SECTION_SIZE); | ||
1114 | if (!memblock_limit) | ||
1115 | memblock_limit = arm_lowmem_limit; | ||
1116 | |||
1117 | memblock_set_current_limit(memblock_limit); | ||
1083 | } | 1118 | } |
1084 | 1119 | ||
1085 | static inline void prepare_page_table(void) | 1120 | static inline void prepare_page_table(void) |
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1160 | /* | 1195 | /* |
1161 | * Allocate the vector page early. | 1196 | * Allocate the vector page early. |
1162 | */ | 1197 | */ |
1163 | vectors = early_alloc(PAGE_SIZE); | 1198 | vectors = early_alloc(PAGE_SIZE * 2); |
1164 | 1199 | ||
1165 | early_trap_init(vectors); | 1200 | early_trap_init(vectors); |
1166 | 1201 | ||
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1205 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); | 1240 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); |
1206 | map.virtual = 0xffff0000; | 1241 | map.virtual = 0xffff0000; |
1207 | map.length = PAGE_SIZE; | 1242 | map.length = PAGE_SIZE; |
1243 | #ifdef CONFIG_KUSER_HELPERS | ||
1208 | map.type = MT_HIGH_VECTORS; | 1244 | map.type = MT_HIGH_VECTORS; |
1245 | #else | ||
1246 | map.type = MT_LOW_VECTORS; | ||
1247 | #endif | ||
1209 | create_mapping(&map); | 1248 | create_mapping(&map); |
1210 | 1249 | ||
1211 | if (!vectors_high()) { | 1250 | if (!vectors_high()) { |
1212 | map.virtual = 0; | 1251 | map.virtual = 0; |
1252 | map.length = PAGE_SIZE * 2; | ||
1213 | map.type = MT_LOW_VECTORS; | 1253 | map.type = MT_LOW_VECTORS; |
1214 | create_mapping(&map); | 1254 | create_mapping(&map); |
1215 | } | 1255 | } |
1216 | 1256 | ||
1257 | /* Now create a kernel read-only mapping */ | ||
1258 | map.pfn += 1; | ||
1259 | map.virtual = 0xffff0000 + PAGE_SIZE; | ||
1260 | map.length = PAGE_SIZE; | ||
1261 | map.type = MT_LOW_VECTORS; | ||
1262 | create_mapping(&map); | ||
1263 | |||
1217 | /* | 1264 | /* |
1218 | * Ask the machine support to map in the statically mapped devices. | 1265 | * Ask the machine support to map in the statically mapped devices. |
1219 | */ | 1266 | */ |
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1276 | { | 1323 | { |
1277 | void *zero_page; | 1324 | void *zero_page; |
1278 | 1325 | ||
1279 | memblock_set_current_limit(arm_lowmem_limit); | ||
1280 | |||
1281 | build_mem_type_table(); | 1326 | build_mem_type_table(); |
1282 | prepare_page_table(); | 1327 | prepare_page_table(); |
1283 | map_lowmem(); | 1328 | map_lowmem(); |
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index f64afb9f1bd5..bdd3be4be77a 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S | |||
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext) | |||
110 | ARM( str r3, [r0, #2048]! ) | 110 | ARM( str r3, [r0, #2048]! ) |
111 | THUMB( add r0, r0, #2048 ) | 111 | THUMB( add r0, r0, #2048 ) |
112 | THUMB( str r3, [r0] ) | 112 | THUMB( str r3, [r0] ) |
113 | ALT_SMP(mov pc,lr) | 113 | ALT_SMP(W(nop)) |
114 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte | 114 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte |
115 | #endif | 115 | #endif |
116 | mov pc, lr | 116 | mov pc, lr |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index c36ac69488c8..01a719e18bb0 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext) | |||
81 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY | 81 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY |
82 | orreq r2, #L_PTE_RDONLY | 82 | orreq r2, #L_PTE_RDONLY |
83 | 1: strd r2, r3, [r0] | 83 | 1: strd r2, r3, [r0] |
84 | ALT_SMP(mov pc, lr) | 84 | ALT_SMP(W(nop)) |
85 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte | 85 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte |
86 | #endif | 86 | #endif |
87 | mov pc, lr | 87 | mov pc, lr |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 5c6d5a3050ea..73398bcf9bd8 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle) | |||
75 | ENDPROC(cpu_v7_do_idle) | 75 | ENDPROC(cpu_v7_do_idle) |
76 | 76 | ||
77 | ENTRY(cpu_v7_dcache_clean_area) | 77 | ENTRY(cpu_v7_dcache_clean_area) |
78 | ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW | 78 | ALT_SMP(W(nop)) @ MP extensions imply L1 PTW |
79 | ALT_UP(W(nop)) | 79 | ALT_UP_B(1f) |
80 | dcache_line_size r2, r3 | 80 | mov pc, lr |
81 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 81 | 1: dcache_line_size r2, r3 |
82 | 2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | ||
82 | add r0, r0, r2 | 83 | add r0, r0, r2 |
83 | subs r1, r1, r2 | 84 | subs r1, r1, r2 |
84 | bhi 1b | 85 | bhi 2b |
85 | dsb | 86 | dsb |
86 | mov pc, lr | 87 | mov pc, lr |
87 | ENDPROC(cpu_v7_dcache_clean_area) | 88 | ENDPROC(cpu_v7_dcache_clean_area) |
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index f71c37edca26..c9770ba5c7df 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c | |||
@@ -172,7 +172,7 @@ static void __init xen_percpu_init(void *unused) | |||
172 | enable_percpu_irq(xen_events_irq, 0); | 172 | enable_percpu_irq(xen_events_irq, 0); |
173 | } | 173 | } |
174 | 174 | ||
175 | static void xen_restart(char str, const char *cmd) | 175 | static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) |
176 | { | 176 | { |
177 | struct sched_shutdown r = { .reason = SHUTDOWN_reboot }; | 177 | struct sched_shutdown r = { .reason = SHUTDOWN_reboot }; |
178 | int rc; | 178 | int rc; |