author     Russell King <rmk+kernel@armlinux.org.uk>   2017-11-08 14:42:47 -0500
committer  Russell King <rmk+kernel@armlinux.org.uk>   2017-11-08 14:42:47 -0500
commit     02196144a0a0ad71fb75bd4a5a4785ab36c3b78e (patch)
tree       b87fe75626250a456604a331faf3594b4f3925a8
parent     7f3d1f984336377074ebf804ff53869ef1906fbe (diff)
parent     fe9c0589eeef4b3edbaad9f7500679a2eeafe951 (diff)
Merge branch 'devel-stable' into for-next
33 files changed, 1137 insertions, 455 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 979aac3e2fbf..631e61bc7ac4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -240,15 +240,6 @@ config NEED_RET_TO_USER | |||
240 | config ARCH_MTD_XIP | 240 | config ARCH_MTD_XIP |
241 | bool | 241 | bool |
242 | 242 | ||
243 | config VECTORS_BASE | ||
244 | hex | ||
245 | default 0xffff0000 if MMU || CPU_HIGH_VECTOR | ||
246 | default DRAM_BASE if REMAP_VECTORS_TO_RAM | ||
247 | default 0x00000000 | ||
248 | help | ||
249 | The base address of exception vectors. This must be two pages | ||
250 | in size. | ||
251 | |||
252 | config ARM_PATCH_PHYS_VIRT | 243 | config ARM_PATCH_PHYS_VIRT |
253 | bool "Patch physical to virtual translations at runtime" if EMBEDDED | 244 | bool "Patch physical to virtual translations at runtime" if EMBEDDED |
254 | default y | 245 | default y |
@@ -2006,6 +1997,17 @@ config XIP_PHYS_ADDR | |||
2006 | be linked for and stored to. This address is dependent on your | 1997 | be linked for and stored to. This address is dependent on your |
2007 | own flash usage. | 1998 | own flash usage. |
2008 | 1999 | ||
2000 | config XIP_DEFLATED_DATA | ||
2001 | bool "Store kernel .data section compressed in ROM" | ||
2002 | depends on XIP_KERNEL | ||
2003 | select ZLIB_INFLATE | ||
2004 | help | ||
2005 | Before the kernel is actually executed, its .data section has to be | ||
2006 | copied to RAM from ROM. This option allows for storing that data | ||
2007 | in compressed form and decompressed to RAM rather than merely being | ||
2008 | copied, saving some precious ROM space. A possible drawback is a | ||
2009 | slightly longer boot delay. | ||
2010 | |||
2009 | config KEXEC | 2011 | config KEXEC |
2010 | bool "Kexec system call (EXPERIMENTAL)" | 2012 | bool "Kexec system call (EXPERIMENTAL)" |
2011 | depends on (!SMP || PM_SLEEP_SMP) | 2013 | depends on (!SMP || PM_SLEEP_SMP) |
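
The new XIP_DEFLATED_DATA option above is a pure build-time choice. As a minimal sketch of exercising it on top of an existing XIP configuration (the cross-toolchain prefix and the use of scripts/config are assumptions, not part of this patch):

# enable the new option on an existing ARM XIP .config (illustrative)
./scripts/config --file .config --enable XIP_KERNEL --enable XIP_DEFLATED_DATA
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- olddefconfig
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- xipImage

The resulting xipImage then carries its .data section gzip-compressed, and the early boot code inflates it into RAM instead of merely copying it.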
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index b7576349528c..0fad7d943630 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -52,8 +52,8 @@ config REMAP_VECTORS_TO_RAM | |||
52 | 52 | ||
53 | config ARM_MPU | 53 | config ARM_MPU |
54 | bool 'Use the ARM v7 PMSA Compliant MPU' | 54 | bool 'Use the ARM v7 PMSA Compliant MPU' |
55 | depends on CPU_V7 | 55 | depends on CPU_V7 || CPU_V7M |
56 | default y | 56 | default y if CPU_V7 |
57 | help | 57 | help |
58 | Some ARM systems without an MMU have instead a Memory Protection | 58 | Some ARM systems without an MMU have instead a Memory Protection |
59 | Unit (MPU) that defines the type and permissions for regions of | 59 | Unit (MPU) that defines the type and permissions for regions of |
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 50f8d1be7fcb..a3af4dc08c3e 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -31,8 +31,19 @@ targets := Image zImage xipImage bootpImage uImage | |||
31 | 31 | ||
32 | ifeq ($(CONFIG_XIP_KERNEL),y) | 32 | ifeq ($(CONFIG_XIP_KERNEL),y) |
33 | 33 | ||
34 | cmd_deflate_xip_data = $(CONFIG_SHELL) -c \ | ||
35 | '$(srctree)/$(src)/deflate_xip_data.sh $< $@ || { rm -f $@; false; }' | ||
36 | |||
37 | ifeq ($(CONFIG_XIP_DEFLATED_DATA),y) | ||
38 | quiet_cmd_mkxip = XIPZ $@ | ||
39 | cmd_mkxip = $(cmd_objcopy) && $(cmd_deflate_xip_data) | ||
40 | else | ||
41 | quiet_cmd_mkxip = $(quiet_cmd_objcopy) | ||
42 | cmd_mkxip = $(cmd_objcopy) | ||
43 | endif | ||
44 | |||
34 | $(obj)/xipImage: vmlinux FORCE | 45 | $(obj)/xipImage: vmlinux FORCE |
35 | $(call if_changed,objcopy) | 46 | $(call if_changed,mkxip) |
36 | @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)' | 47 | @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)' |
37 | 48 | ||
38 | $(obj)/Image $(obj)/zImage: FORCE | 49 | $(obj)/Image $(obj)/zImage: FORCE |
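
With CONFIG_XIP_DEFLATED_DATA=y the rule above runs objcopy followed by deflate_xip_data.sh and logs a single XIPZ line. A hedged way to watch it in full (toolchain prefix assumed):

# V=1 prints the complete objcopy + deflate_xip_data.sh command line and,
# via KBUILD_VERBOSE, also enables the script's own shell tracing
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- V=1 xipImage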
diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh
new file mode 100755
index 000000000000..1189598a25eb
--- /dev/null
+++ b/arch/arm/boot/deflate_xip_data.sh
@@ -0,0 +1,64 @@ | |||
1 | #!/bin/sh | ||
2 | |||
3 | # XIP kernel .data segment compressor | ||
4 | # | ||
5 | # Created by: Nicolas Pitre, August 2017 | ||
6 | # Copyright: (C) 2017 Linaro Limited | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | |||
12 | # This script locates the start of the .data section in xipImage and | ||
13 | # substitutes it with a compressed version. The needed offsets are obtained | ||
14 | # from symbol addresses in vmlinux. It is expected that .data extends to | ||
15 | # the end of xipImage. | ||
16 | |||
17 | set -e | ||
18 | |||
19 | VMLINUX="$1" | ||
20 | XIPIMAGE="$2" | ||
21 | |||
22 | DD="dd status=none" | ||
23 | |||
24 | # Use "make V=1" to debug this script. | ||
25 | case "$KBUILD_VERBOSE" in | ||
26 | *1*) | ||
27 | set -x | ||
28 | ;; | ||
29 | esac | ||
30 | |||
31 | sym_val() { | ||
32 | # extract hex value for symbol in $1 | ||
33 | local val=$($NM "$VMLINUX" | sed -n "/ $1$/{s/ .*$//p;q}") | ||
34 | [ "$val" ] || { echo "can't find $1 in $VMLINUX" 1>&2; exit 1; } | ||
35 | # convert from hex to decimal | ||
36 | echo $((0x$val)) | ||
37 | } | ||
38 | |||
39 | __data_loc=$(sym_val __data_loc) | ||
40 | _edata_loc=$(sym_val _edata_loc) | ||
41 | base_offset=$(sym_val _xiprom) | ||
42 | |||
43 | # convert to file based offsets | ||
44 | data_start=$(($__data_loc - $base_offset)) | ||
45 | data_end=$(($_edata_loc - $base_offset)) | ||
46 | |||
47 | # Make sure data occupies the last part of the file. | ||
48 | file_end=$(stat -c "%s" "$XIPIMAGE") | ||
49 | if [ "$file_end" != "$data_end" ]; then | ||
50 | printf "end of xipImage doesn't match with _edata_loc (%#x vs %#x)\n" \ | ||
51 | $(($file_end + $base_offset)) $_edata_loc 1>&2 | ||
52 | exit 1; | ||
53 | fi | ||
54 | |||
55 | # be ready to clean up | ||
56 | trap 'rm -f "$XIPIMAGE.tmp"' 0 1 2 3 | ||
57 | |||
58 | # substitute the data section by a compressed version | ||
59 | $DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp" | ||
60 | $DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes | | ||
61 | gzip -9 >> "$XIPIMAGE.tmp" | ||
62 | |||
63 | # replace kernel binary | ||
64 | mv -f "$XIPIMAGE.tmp" "$XIPIMAGE" | ||
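
The script is normally driven by the Makefile rule above, but it can also be run by hand once objcopy has produced xipImage. A sketch (the cross nm name is an assumption; NM must be set because the script reads symbol values from vmlinux):

# replace the .data tail of an existing xipImage with its gzip'ed form, in place
NM=arm-linux-gnueabi-nm \
    sh arch/arm/boot/deflate_xip_data.sh vmlinux arch/arm/boot/xipImage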
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b62eaeb147aa..abaac5e07b80 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -173,6 +173,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) | |||
173 | return read_cpuid(CPUID_CACHETYPE); | 173 | return read_cpuid(CPUID_CACHETYPE); |
174 | } | 174 | } |
175 | 175 | ||
176 | static inline unsigned int __attribute_const__ read_cpuid_mputype(void) | ||
177 | { | ||
178 | return read_cpuid(CPUID_MPUIR); | ||
179 | } | ||
180 | |||
176 | #elif defined(CONFIG_CPU_V7M) | 181 | #elif defined(CONFIG_CPU_V7M) |
177 | 182 | ||
178 | static inline unsigned int __attribute_const__ read_cpuid_id(void) | 183 | static inline unsigned int __attribute_const__ read_cpuid_id(void) |
@@ -185,6 +190,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) | |||
185 | return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR); | 190 | return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR); |
186 | } | 191 | } |
187 | 192 | ||
193 | static inline unsigned int __attribute_const__ read_cpuid_mputype(void) | ||
194 | { | ||
195 | return readl(BASEADDR_V7M_SCB + MPU_TYPE); | ||
196 | } | ||
197 | |||
188 | #else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */ | 198 | #else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */ |
189 | 199 | ||
190 | static inline unsigned int __attribute_const__ read_cpuid_id(void) | 200 | static inline unsigned int __attribute_const__ read_cpuid_id(void) |
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f13ae153fb24..ad0ca4f2ba13 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -100,10 +100,15 @@ struct elf32_hdr; | |||
100 | extern int elf_check_arch(const struct elf32_hdr *); | 100 | extern int elf_check_arch(const struct elf32_hdr *); |
101 | #define elf_check_arch elf_check_arch | 101 | #define elf_check_arch elf_check_arch |
102 | 102 | ||
103 | #define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC platform */ | ||
104 | #define elf_check_fdpic(x) ((x)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC) | ||
105 | #define elf_check_const_displacement(x) ((x)->e_flags & EF_ARM_PIC) | ||
106 | #define ELF_FDPIC_CORE_EFLAGS 0 | ||
107 | |||
103 | #define vmcore_elf64_check_arch(x) (0) | 108 | #define vmcore_elf64_check_arch(x) (0) |
104 | 109 | ||
105 | extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); | 110 | extern int arm_elf_read_implies_exec(int); |
106 | #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk) | 111 | #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(stk) |
107 | 112 | ||
108 | struct task_struct; | 113 | struct task_struct; |
109 | int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); | 114 | int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); |
@@ -120,6 +125,13 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); | |||
120 | have no such handler. */ | 125 | have no such handler. */ |
121 | #define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0 | 126 | #define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0 |
122 | 127 | ||
128 | #define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \ | ||
129 | do { \ | ||
130 | (_r)->ARM_r7 = _exec_map_addr; \ | ||
131 | (_r)->ARM_r8 = _interp_map_addr; \ | ||
132 | (_r)->ARM_r9 = dynamic_addr; \ | ||
133 | } while(0) | ||
134 | |||
123 | extern void elf_set_personality(const struct elf32_hdr *); | 135 | extern void elf_set_personality(const struct elf32_hdr *); |
124 | #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) | 136 | #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) |
125 | 137 | ||
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index a5b47421059d..bdec37c6ac35 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -14,6 +14,10 @@ typedef struct { | |||
14 | #ifdef CONFIG_VDSO | 14 | #ifdef CONFIG_VDSO |
15 | unsigned long vdso; | 15 | unsigned long vdso; |
16 | #endif | 16 | #endif |
17 | #ifdef CONFIG_BINFMT_ELF_FDPIC | ||
18 | unsigned long exec_fdpic_loadmap; | ||
19 | unsigned long interp_fdpic_loadmap; | ||
20 | #endif | ||
17 | } mm_context_t; | 21 | } mm_context_t; |
18 | 22 | ||
19 | #ifdef CONFIG_CPU_HAS_ASID | 23 | #ifdef CONFIG_CPU_HAS_ASID |
@@ -33,6 +37,10 @@ typedef struct { | |||
33 | */ | 37 | */ |
34 | typedef struct { | 38 | typedef struct { |
35 | unsigned long end_brk; | 39 | unsigned long end_brk; |
40 | #ifdef CONFIG_BINFMT_ELF_FDPIC | ||
41 | unsigned long exec_fdpic_loadmap; | ||
42 | unsigned long interp_fdpic_loadmap; | ||
43 | #endif | ||
36 | } mm_context_t; | 44 | } mm_context_t; |
37 | 45 | ||
38 | #endif | 46 | #endif |
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index c3247cc2fe08..56ec02617f58 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -1,8 +1,6 @@ | |||
1 | #ifndef __ARM_MPU_H | 1 | #ifndef __ARM_MPU_H |
2 | #define __ARM_MPU_H | 2 | #define __ARM_MPU_H |
3 | 3 | ||
4 | #ifdef CONFIG_ARM_MPU | ||
5 | |||
6 | /* MPUIR layout */ | 4 | /* MPUIR layout */ |
7 | #define MPUIR_nU 1 | 5 | #define MPUIR_nU 1 |
8 | #define MPUIR_DREGION 8 | 6 | #define MPUIR_DREGION 8 |
@@ -17,6 +15,11 @@ | |||
17 | /* MPU D/I Size Register fields */ | 15 | /* MPU D/I Size Register fields */ |
18 | #define MPU_RSR_SZ 1 | 16 | #define MPU_RSR_SZ 1 |
19 | #define MPU_RSR_EN 0 | 17 | #define MPU_RSR_EN 0 |
18 | #define MPU_RSR_SD 8 | ||
19 | |||
20 | /* Number of subregions (SD) */ | ||
21 | #define MPU_NR_SUBREGS 8 | ||
22 | #define MPU_MIN_SUBREG_SIZE 256 | ||
20 | 23 | ||
21 | /* The D/I RSR value for an enabled region spanning the whole of memory */ | 24 | /* The D/I RSR value for an enabled region spanning the whole of memory */ |
22 | #define MPU_RSR_ALL_MEM 63 | 25 | #define MPU_RSR_ALL_MEM 63 |
@@ -38,6 +41,7 @@ | |||
38 | #endif | 41 | #endif |
39 | 42 | ||
40 | /* Access permission bits of ACR (only define those that we use)*/ | 43 | /* Access permission bits of ACR (only define those that we use)*/ |
44 | #define MPU_AP_PL1RO_PL0NA (0x5 << 8) | ||
41 | #define MPU_AP_PL1RW_PL0RW (0x3 << 8) | 45 | #define MPU_AP_PL1RW_PL0RW (0x3 << 8) |
42 | #define MPU_AP_PL1RW_PL0R0 (0x2 << 8) | 46 | #define MPU_AP_PL1RW_PL0R0 (0x2 << 8) |
43 | #define MPU_AP_PL1RW_PL0NA (0x1 << 8) | 47 | #define MPU_AP_PL1RW_PL0NA (0x1 << 8) |
@@ -46,7 +50,7 @@ | |||
46 | #define MPU_PROBE_REGION 0 | 50 | #define MPU_PROBE_REGION 0 |
47 | #define MPU_BG_REGION 1 | 51 | #define MPU_BG_REGION 1 |
48 | #define MPU_RAM_REGION 2 | 52 | #define MPU_RAM_REGION 2 |
49 | #define MPU_VECTORS_REGION 3 | 53 | #define MPU_ROM_REGION 3 |
50 | 54 | ||
51 | /* Maximum number of regions Linux is interested in */ | 55 | /* Maximum number of regions Linux is interested in */ |
52 | #define MPU_MAX_REGIONS 16 | 56 | #define MPU_MAX_REGIONS 16 |
@@ -64,13 +68,23 @@ struct mpu_rgn { | |||
64 | }; | 68 | }; |
65 | 69 | ||
66 | struct mpu_rgn_info { | 70 | struct mpu_rgn_info { |
67 | u32 mpuir; | 71 | unsigned int used; |
68 | struct mpu_rgn rgns[MPU_MAX_REGIONS]; | 72 | struct mpu_rgn rgns[MPU_MAX_REGIONS]; |
69 | }; | 73 | }; |
70 | extern struct mpu_rgn_info mpu_rgn_info; | 74 | extern struct mpu_rgn_info mpu_rgn_info; |
71 | 75 | ||
72 | #endif /* __ASSEMBLY__ */ | 76 | #ifdef CONFIG_ARM_MPU |
77 | |||
78 | extern void __init adjust_lowmem_bounds_mpu(void); | ||
79 | extern void __init mpu_setup(void); | ||
73 | 80 | ||
74 | #endif /* CONFIG_ARM_MPU */ | 81 | #else |
82 | |||
83 | static inline void adjust_lowmem_bounds_mpu(void) {} | ||
84 | static inline void mpu_setup(void) {} | ||
85 | |||
86 | #endif /* !CONFIG_ARM_MPU */ | ||
87 | |||
88 | #endif /* __ASSEMBLY__ */ | ||
75 | 89 | ||
76 | #endif | 90 | #endif |
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index c3d5fc124a05..338cbe0a18ef 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -47,15 +47,24 @@ struct thread_struct { | |||
47 | 47 | ||
48 | #define INIT_THREAD { } | 48 | #define INIT_THREAD { } |
49 | 49 | ||
50 | #ifdef CONFIG_MMU | ||
51 | #define nommu_start_thread(regs) do { } while (0) | ||
52 | #else | ||
53 | #define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data | ||
54 | #endif | ||
55 | |||
56 | #define start_thread(regs,pc,sp) \ | 50 | #define start_thread(regs,pc,sp) \ |
57 | ({ \ | 51 | ({ \ |
52 | unsigned long r7, r8, r9; \ | ||
53 | \ | ||
54 | if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) { \ | ||
55 | r7 = regs->ARM_r7; \ | ||
56 | r8 = regs->ARM_r8; \ | ||
57 | r9 = regs->ARM_r9; \ | ||
58 | } \ | ||
58 | memset(regs->uregs, 0, sizeof(regs->uregs)); \ | 59 | memset(regs->uregs, 0, sizeof(regs->uregs)); \ |
60 | if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && \ | ||
61 | current->personality & FDPIC_FUNCPTRS) { \ | ||
62 | regs->ARM_r7 = r7; \ | ||
63 | regs->ARM_r8 = r8; \ | ||
64 | regs->ARM_r9 = r9; \ | ||
65 | regs->ARM_r10 = current->mm->start_data; \ | ||
66 | } else if (!IS_ENABLED(CONFIG_MMU)) \ | ||
67 | regs->ARM_r10 = current->mm->start_data; \ | ||
59 | if (current->personality & ADDR_LIMIT_32BIT) \ | 68 | if (current->personality & ADDR_LIMIT_32BIT) \ |
60 | regs->ARM_cpsr = USR_MODE; \ | 69 | regs->ARM_cpsr = USR_MODE; \ |
61 | else \ | 70 | else \ |
@@ -65,7 +74,6 @@ struct thread_struct { | |||
65 | regs->ARM_cpsr |= PSR_ENDSTATE; \ | 74 | regs->ARM_cpsr |= PSR_ENDSTATE; \ |
66 | regs->ARM_pc = pc & ~1; /* pc */ \ | 75 | regs->ARM_pc = pc & ~1; /* pc */ \ |
67 | regs->ARM_sp = sp; /* sp */ \ | 76 | regs->ARM_sp = sp; /* sp */ \ |
68 | nommu_start_thread(regs); \ | ||
69 | }) | 77 | }) |
70 | 78 | ||
71 | /* Forward declaration, a strange C thing */ | 79 | /* Forward declaration, a strange C thing */ |
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 3d6dc8b460e4..709a55989cb0 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -60,7 +60,7 @@ asmlinkage void secondary_start_kernel(void); | |||
60 | */ | 60 | */ |
61 | struct secondary_data { | 61 | struct secondary_data { |
62 | union { | 62 | union { |
63 | unsigned long mpu_rgn_szr; | 63 | struct mpu_rgn_info *mpu_rgn_info; |
64 | u64 pgdir; | 64 | u64 pgdir; |
65 | }; | 65 | }; |
66 | unsigned long swapper_pg_dir; | 66 | unsigned long swapper_pg_dir; |
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 921d8274855c..b42c75ae0d19 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -2,6 +2,7 @@ | |||
2 | #define _ASMARM_UCONTEXT_H | 2 | #define _ASMARM_UCONTEXT_H |
3 | 3 | ||
4 | #include <asm/fpstate.h> | 4 | #include <asm/fpstate.h> |
5 | #include <asm/user.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * struct sigcontext only has room for the basic registers, but struct | 8 | * struct sigcontext only has room for the basic registers, but struct |
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 1fd775c1bc5d..5de776c81382 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -57,6 +57,16 @@ | |||
57 | #define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */ | 57 | #define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */ |
58 | #define V7M_SCB_CSSELR 0x84 /* Cache size selection register */ | 58 | #define V7M_SCB_CSSELR 0x84 /* Cache size selection register */ |
59 | 59 | ||
60 | /* Memory-mapped MPU registers for M-class */ | ||
61 | #define MPU_TYPE 0x90 | ||
62 | #define MPU_CTRL 0x94 | ||
63 | #define MPU_CTRL_ENABLE 1 | ||
64 | #define MPU_CTRL_PRIVDEFENA (1 << 2) | ||
65 | |||
66 | #define MPU_RNR 0x98 | ||
67 | #define MPU_RBAR 0x9c | ||
68 | #define MPU_RASR 0xa0 | ||
69 | |||
60 | /* Cache opeartions */ | 70 | /* Cache opeartions */ |
61 | #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */ | 71 | #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */ |
62 | #define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */ | 72 | #define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */ |
diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
index 5af0ed1b825a..3173eb9751fd 100644
--- a/arch/arm/include/uapi/asm/ptrace.h
+++ b/arch/arm/include/uapi/asm/ptrace.h
@@ -31,6 +31,10 @@ | |||
31 | #define PTRACE_SETVFPREGS 28 | 31 | #define PTRACE_SETVFPREGS 28 |
32 | #define PTRACE_GETHBPREGS 29 | 32 | #define PTRACE_GETHBPREGS 29 |
33 | #define PTRACE_SETHBPREGS 30 | 33 | #define PTRACE_SETHBPREGS 30 |
34 | #define PTRACE_GETFDPIC 31 | ||
35 | |||
36 | #define PTRACE_GETFDPIC_EXEC 0 | ||
37 | #define PTRACE_GETFDPIC_INTERP 1 | ||
34 | 38 | ||
35 | /* | 39 | /* |
36 | * PSR bits | 40 | * PSR bits |
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 28bd456494a3..575b25fc29c6 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -35,5 +35,6 @@ | |||
35 | #define __ARM_NR_usr26 (__ARM_NR_BASE+3) | 35 | #define __ARM_NR_usr26 (__ARM_NR_BASE+3) |
36 | #define __ARM_NR_usr32 (__ARM_NR_BASE+4) | 36 | #define __ARM_NR_usr32 (__ARM_NR_BASE+4) |
37 | #define __ARM_NR_set_tls (__ARM_NR_BASE+5) | 37 | #define __ARM_NR_set_tls (__ARM_NR_BASE+5) |
38 | #define __ARM_NR_get_tls (__ARM_NR_BASE+6) | ||
38 | 39 | ||
39 | #endif /* _UAPI__ASM_ARM_UNISTD_H */ | 40 | #endif /* _UAPI__ASM_ARM_UNISTD_H */ |
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ad325a8c7e1e..52f437997cc6 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -87,6 +87,11 @@ head-y := head$(MMUEXT).o | |||
87 | obj-$(CONFIG_DEBUG_LL) += debug.o | 87 | obj-$(CONFIG_DEBUG_LL) += debug.o |
88 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 88 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
89 | 89 | ||
90 | # This is executed very early using a temporary stack when no memory allocator | ||
91 | # nor global data is available. Everything has to be allocated on the stack. | ||
92 | CFLAGS_head-inflate-data.o := $(call cc-option,-Wframe-larger-than=10240) | ||
93 | obj-$(CONFIG_XIP_DEFLATED_DATA) += head-inflate-data.o | ||
94 | |||
90 | obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o | 95 | obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o |
91 | AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a | 96 | AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a |
92 | ifeq ($(CONFIG_ARM_PSCI),y) | 97 | ifeq ($(CONFIG_ARM_PSCI),y) |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 608008229c7d..f369ece99958 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -23,11 +23,13 @@ | |||
23 | #include <asm/mach/arch.h> | 23 | #include <asm/mach/arch.h> |
24 | #include <asm/thread_info.h> | 24 | #include <asm/thread_info.h> |
25 | #include <asm/memory.h> | 25 | #include <asm/memory.h> |
26 | #include <asm/mpu.h> | ||
26 | #include <asm/procinfo.h> | 27 | #include <asm/procinfo.h> |
27 | #include <asm/suspend.h> | 28 | #include <asm/suspend.h> |
28 | #include <asm/vdso_datapage.h> | 29 | #include <asm/vdso_datapage.h> |
29 | #include <asm/hardware/cache-l2x0.h> | 30 | #include <asm/hardware/cache-l2x0.h> |
30 | #include <linux/kbuild.h> | 31 | #include <linux/kbuild.h> |
32 | #include "signal.h" | ||
31 | 33 | ||
32 | /* | 34 | /* |
33 | * Make sure that the compiler and target are compatible. | 35 | * Make sure that the compiler and target are compatible. |
@@ -112,6 +114,9 @@ int main(void) | |||
112 | DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit)); | 114 | DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit)); |
113 | DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); | 115 | DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); |
114 | BLANK(); | 116 | BLANK(); |
117 | DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3])); | ||
118 | DEFINE(RT_SIGFRAME_RC3_OFFSET, offsetof(struct rt_sigframe, sig.retcode[3])); | ||
119 | BLANK(); | ||
115 | #ifdef CONFIG_CACHE_L2X0 | 120 | #ifdef CONFIG_CACHE_L2X0 |
116 | DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); | 121 | DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); |
117 | DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl)); | 122 | DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl)); |
@@ -183,5 +188,15 @@ int main(void) | |||
183 | #ifdef CONFIG_VDSO | 188 | #ifdef CONFIG_VDSO |
184 | DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store)); | 189 | DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store)); |
185 | #endif | 190 | #endif |
191 | BLANK(); | ||
192 | #ifdef CONFIG_ARM_MPU | ||
193 | DEFINE(MPU_RNG_INFO_RNGS, offsetof(struct mpu_rgn_info, rgns)); | ||
194 | DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used)); | ||
195 | |||
196 | DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn)); | ||
197 | DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar)); | ||
198 | DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr)); | ||
199 | DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr)); | ||
200 | #endif | ||
186 | return 0; | 201 | return 0; |
187 | } | 202 | } |
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d0d1e83150c9..569e69ece5ca 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/personality.h> | 3 | #include <linux/personality.h> |
4 | #include <linux/binfmts.h> | 4 | #include <linux/binfmts.h> |
5 | #include <linux/elf.h> | 5 | #include <linux/elf.h> |
6 | #include <linux/elf-fdpic.h> | ||
6 | #include <asm/system_info.h> | 7 | #include <asm/system_info.h> |
7 | 8 | ||
8 | int elf_check_arch(const struct elf32_hdr *x) | 9 | int elf_check_arch(const struct elf32_hdr *x) |
@@ -80,7 +81,7 @@ EXPORT_SYMBOL(elf_set_personality); | |||
80 | * - the binary requires an executable stack | 81 | * - the binary requires an executable stack |
81 | * - we're running on a CPU which doesn't support NX. | 82 | * - we're running on a CPU which doesn't support NX. |
82 | */ | 83 | */ |
83 | int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) | 84 | int arm_elf_read_implies_exec(int executable_stack) |
84 | { | 85 | { |
85 | if (executable_stack != EXSTACK_DISABLE_X) | 86 | if (executable_stack != EXSTACK_DISABLE_X) |
86 | return 1; | 87 | return 1; |
@@ -89,3 +90,24 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) | |||
89 | return 0; | 90 | return 0; |
90 | } | 91 | } |
91 | EXPORT_SYMBOL(arm_elf_read_implies_exec); | 92 | EXPORT_SYMBOL(arm_elf_read_implies_exec); |
93 | |||
94 | #if defined(CONFIG_MMU) && defined(CONFIG_BINFMT_ELF_FDPIC) | ||
95 | |||
96 | void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, | ||
97 | struct elf_fdpic_params *interp_params, | ||
98 | unsigned long *start_stack, | ||
99 | unsigned long *start_brk) | ||
100 | { | ||
101 | elf_set_personality(&exec_params->hdr); | ||
102 | |||
103 | exec_params->load_addr = 0x8000; | ||
104 | interp_params->load_addr = ELF_ET_DYN_BASE; | ||
105 | *start_stack = TASK_SIZE - SZ_16M; | ||
106 | |||
107 | if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == ELF_FDPIC_FLAG_INDEPENDENT) { | ||
108 | exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT; | ||
109 | exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | #endif | ||
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 8733012d231f..21dde771a7dd 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -79,47 +79,69 @@ ENDPROC(__vet_atags) | |||
79 | */ | 79 | */ |
80 | __INIT | 80 | __INIT |
81 | __mmap_switched: | 81 | __mmap_switched: |
82 | adr r3, __mmap_switched_data | 82 | |
83 | 83 | mov r7, r1 | |
84 | ldmia r3!, {r4, r5, r6, r7} | 84 | mov r8, r2 |
85 | cmp r4, r5 @ Copy data segment if needed | 85 | mov r10, r0 |
86 | 1: cmpne r5, r6 | 86 | |
87 | ldrne fp, [r4], #4 | 87 | adr r4, __mmap_switched_data |
88 | strne fp, [r5], #4 | 88 | mov fp, #0 |
89 | bne 1b | 89 | |
90 | 90 | #if defined(CONFIG_XIP_DEFLATED_DATA) | |
91 | mov fp, #0 @ Clear BSS (and zero fp) | 91 | ARM( ldr sp, [r4], #4 ) |
92 | 1: cmp r6, r7 | 92 | THUMB( ldr sp, [r4] ) |
93 | strcc fp, [r6],#4 | 93 | THUMB( add r4, #4 ) |
94 | bcc 1b | 94 | bl __inflate_kernel_data @ decompress .data to RAM |
95 | 95 | teq r0, #0 | |
96 | ARM( ldmia r3, {r4, r5, r6, r7, sp}) | 96 | bne __error |
97 | THUMB( ldmia r3, {r4, r5, r6, r7} ) | 97 | #elif defined(CONFIG_XIP_KERNEL) |
98 | THUMB( ldr sp, [r3, #16] ) | 98 | ARM( ldmia r4!, {r0, r1, r2, sp} ) |
99 | str r9, [r4] @ Save processor ID | 99 | THUMB( ldmia r4!, {r0, r1, r2, r3} ) |
100 | str r1, [r5] @ Save machine type | 100 | THUMB( mov sp, r3 ) |
101 | str r2, [r6] @ Save atags pointer | 101 | sub r2, r2, r1 |
102 | cmp r7, #0 | 102 | bl memcpy @ copy .data to RAM |
103 | strne r0, [r7] @ Save control register values | 103 | #endif |
104 | |||
105 | ARM( ldmia r4!, {r0, r1, sp} ) | ||
106 | THUMB( ldmia r4!, {r0, r1, r3} ) | ||
107 | THUMB( mov sp, r3 ) | ||
108 | sub r1, r1, r0 | ||
109 | bl __memzero @ clear .bss | ||
110 | |||
111 | ldmia r4, {r0, r1, r2, r3} | ||
112 | str r9, [r0] @ Save processor ID | ||
113 | str r7, [r1] @ Save machine type | ||
114 | str r8, [r2] @ Save atags pointer | ||
115 | cmp r3, #0 | ||
116 | strne r10, [r3] @ Save control register values | ||
117 | mov lr, #0 | ||
104 | b start_kernel | 118 | b start_kernel |
105 | ENDPROC(__mmap_switched) | 119 | ENDPROC(__mmap_switched) |
106 | 120 | ||
107 | .align 2 | 121 | .align 2 |
108 | .type __mmap_switched_data, %object | 122 | .type __mmap_switched_data, %object |
109 | __mmap_switched_data: | 123 | __mmap_switched_data: |
110 | .long __data_loc @ r4 | 124 | #ifdef CONFIG_XIP_KERNEL |
111 | .long _sdata @ r5 | 125 | #ifndef CONFIG_XIP_DEFLATED_DATA |
112 | .long __bss_start @ r6 | 126 | .long _sdata @ r0 |
113 | .long _end @ r7 | 127 | .long __data_loc @ r1 |
114 | .long processor_id @ r4 | 128 | .long _edata_loc @ r2 |
115 | .long __machine_arch_type @ r5 | 129 | #endif |
116 | .long __atags_pointer @ r6 | 130 | .long __bss_stop @ sp (temporary stack in .bss) |
131 | #endif | ||
132 | |||
133 | .long __bss_start @ r0 | ||
134 | .long __bss_stop @ r1 | ||
135 | .long init_thread_union + THREAD_START_SP @ sp | ||
136 | |||
137 | .long processor_id @ r0 | ||
138 | .long __machine_arch_type @ r1 | ||
139 | .long __atags_pointer @ r2 | ||
117 | #ifdef CONFIG_CPU_CP15 | 140 | #ifdef CONFIG_CPU_CP15 |
118 | .long cr_alignment @ r7 | 141 | .long cr_alignment @ r3 |
119 | #else | 142 | #else |
120 | .long 0 @ r7 | 143 | .long 0 @ r3 |
121 | #endif | 144 | #endif |
122 | .long init_thread_union + THREAD_START_SP @ sp | ||
123 | .size __mmap_switched_data, . - __mmap_switched_data | 145 | .size __mmap_switched_data, . - __mmap_switched_data |
124 | 146 | ||
125 | /* | 147 | /* |
diff --git a/arch/arm/kernel/head-inflate-data.c b/arch/arm/kernel/head-inflate-data.c
new file mode 100644
index 000000000000..6dd0ce5e6058
--- /dev/null
+++ b/arch/arm/kernel/head-inflate-data.c
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * XIP kernel .data segment decompressor | ||
3 | * | ||
4 | * Created by: Nicolas Pitre, August 2017 | ||
5 | * Copyright: (C) 2017 Linaro Limited | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/zutil.h> | ||
14 | |||
15 | /* for struct inflate_state */ | ||
16 | #include "../../../lib/zlib_inflate/inftrees.h" | ||
17 | #include "../../../lib/zlib_inflate/inflate.h" | ||
18 | #include "../../../lib/zlib_inflate/infutil.h" | ||
19 | |||
20 | extern char __data_loc[]; | ||
21 | extern char _edata_loc[]; | ||
22 | extern char _sdata[]; | ||
23 | |||
24 | /* | ||
25 | * This code is called very early during the boot process to decompress | ||
26 | * the .data segment stored compressed in ROM. Therefore none of the global | ||
27 | * variables are valid yet, hence no kernel services such as memory | ||
28 | * allocation is available. Everything must be allocated on the stack and | ||
29 | * we must avoid any global data access. We use a temporary stack located | ||
30 | * in the .bss area. The linker script makes sure the .bss is big enough | ||
31 | * to hold our stack frame plus some room for called functions. | ||
32 | * | ||
33 | * We mimic the code in lib/decompress_inflate.c to use the smallest work | ||
34 | * area possible. And because everything is statically allocated on the | ||
35 | * stack then there is no need to clean up before returning. | ||
36 | */ | ||
37 | |||
38 | int __init __inflate_kernel_data(void) | ||
39 | { | ||
40 | struct z_stream_s stream, *strm = &stream; | ||
41 | struct inflate_state state; | ||
42 | char *in = __data_loc; | ||
43 | int rc; | ||
44 | |||
45 | /* Check and skip gzip header (assume no filename) */ | ||
46 | if (in[0] != 0x1f || in[1] != 0x8b || in[2] != 0x08 || in[3] & ~3) | ||
47 | return -1; | ||
48 | in += 10; | ||
49 | |||
50 | strm->workspace = &state; | ||
51 | strm->next_in = in; | ||
52 | strm->avail_in = _edata_loc - __data_loc; /* upper bound */ | ||
53 | strm->next_out = _sdata; | ||
54 | strm->avail_out = _edata_loc - __data_loc; | ||
55 | zlib_inflateInit2(strm, -MAX_WBITS); | ||
56 | WS(strm)->inflate_state.wsize = 0; | ||
57 | WS(strm)->inflate_state.window = NULL; | ||
58 | rc = zlib_inflate(strm, Z_FINISH); | ||
59 | if (rc == Z_OK || rc == Z_STREAM_END) | ||
60 | rc = strm->avail_out; /* should be 0 */ | ||
61 | return rc; | ||
62 | } | ||
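
Since __inflate_kernel_data() above only accepts a gzip stream with a plain 10-byte header and no embedded file name, a quick host-side sanity check of a built image could look like the following sketch (the symbol-lookup commands and toolchain prefix are assumptions; the offsets mirror the ones computed in deflate_xip_data.sh):

# the tail of xipImage, starting at the file offset of __data_loc,
# should be a well-formed gzip stream
NM=arm-linux-gnueabi-nm
data_loc=0x$($NM vmlinux | awk '$3 == "__data_loc" {print $1}')
xiprom=0x$($NM vmlinux | awk '$3 == "_xiprom" {print $1}')
dd if=arch/arm/boot/xipImage skip=$((data_loc - xiprom)) iflag=skip_bytes \
    2>/dev/null | gzip -t && echo "compressed .data looks valid"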
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2e21e08de747..2e38f85b757a 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | #include <linux/linkage.h> | 14 | #include <linux/linkage.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/errno.h> | ||
16 | 17 | ||
17 | #include <asm/assembler.h> | 18 | #include <asm/assembler.h> |
18 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
@@ -110,8 +111,8 @@ ENTRY(secondary_startup) | |||
110 | 111 | ||
111 | #ifdef CONFIG_ARM_MPU | 112 | #ifdef CONFIG_ARM_MPU |
112 | /* Use MPU region info supplied by __cpu_up */ | 113 | /* Use MPU region info supplied by __cpu_up */ |
113 | ldr r6, [r7] @ get secondary_data.mpu_szr | 114 | ldr r6, [r7] @ get secondary_data.mpu_rgn_info |
114 | bl __setup_mpu @ Initialize the MPU | 115 | bl __secondary_setup_mpu @ Initialize the MPU |
115 | #endif | 116 | #endif |
116 | 117 | ||
117 | badr lr, 1f @ return (PIC) address | 118 | badr lr, 1f @ return (PIC) address |
@@ -175,19 +176,33 @@ ENDPROC(__after_proc_init) | |||
175 | #ifdef CONFIG_ARM_MPU | 176 | #ifdef CONFIG_ARM_MPU |
176 | 177 | ||
177 | 178 | ||
179 | #ifndef CONFIG_CPU_V7M | ||
178 | /* Set which MPU region should be programmed */ | 180 | /* Set which MPU region should be programmed */ |
179 | .macro set_region_nr tmp, rgnr | 181 | .macro set_region_nr tmp, rgnr, unused |
180 | mov \tmp, \rgnr @ Use static region numbers | 182 | mov \tmp, \rgnr @ Use static region numbers |
181 | mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR | 183 | mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR |
182 | .endm | 184 | .endm |
183 | 185 | ||
184 | /* Setup a single MPU region, either D or I side (D-side for unified) */ | 186 | /* Setup a single MPU region, either D or I side (D-side for unified) */ |
185 | .macro setup_region bar, acr, sr, side = MPU_DATA_SIDE | 187 | .macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused |
186 | mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR | 188 | mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR |
187 | mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR | 189 | mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR |
188 | mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR | 190 | mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR |
189 | .endm | 191 | .endm |
192 | #else | ||
193 | .macro set_region_nr tmp, rgnr, base | ||
194 | mov \tmp, \rgnr | ||
195 | str \tmp, [\base, #MPU_RNR] | ||
196 | .endm | ||
197 | |||
198 | .macro setup_region bar, acr, sr, unused, base | ||
199 | lsl \acr, \acr, #16 | ||
200 | orr \acr, \acr, \sr | ||
201 | str \bar, [\base, #MPU_RBAR] | ||
202 | str \acr, [\base, #MPU_RASR] | ||
203 | .endm | ||
190 | 204 | ||
205 | #endif | ||
191 | /* | 206 | /* |
192 | * Setup the MPU and initial MPU Regions. We create the following regions: | 207 | * Setup the MPU and initial MPU Regions. We create the following regions: |
193 | * Region 0: Use this for probing the MPU details, so leave disabled. | 208 | * Region 0: Use this for probing the MPU details, so leave disabled. |
@@ -201,64 +216,137 @@ ENDPROC(__after_proc_init) | |||
201 | ENTRY(__setup_mpu) | 216 | ENTRY(__setup_mpu) |
202 | 217 | ||
203 | /* Probe for v7 PMSA compliance */ | 218 | /* Probe for v7 PMSA compliance */ |
204 | mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 | 219 | M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB) |
220 | M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB) | ||
221 | |||
222 | AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0 | ||
223 | M_CLASS(ldr r0, [r12, 0x50]) | ||
205 | and r0, r0, #(MMFR0_PMSA) @ PMSA field | 224 | and r0, r0, #(MMFR0_PMSA) @ PMSA field |
206 | teq r0, #(MMFR0_PMSAv7) @ PMSA v7 | 225 | teq r0, #(MMFR0_PMSAv7) @ PMSA v7 |
207 | bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA | 226 | bxne lr |
208 | 227 | ||
209 | /* Determine whether the D/I-side memory map is unified. We set the | 228 | /* Determine whether the D/I-side memory map is unified. We set the |
210 | * flags here and continue to use them for the rest of this function */ | 229 | * flags here and continue to use them for the rest of this function */ |
211 | mrc p15, 0, r0, c0, c0, 4 @ MPUIR | 230 | AR_CLASS(mrc p15, 0, r0, c0, c0, 4) @ MPUIR |
231 | M_CLASS(ldr r0, [r12, #MPU_TYPE]) | ||
212 | ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU | 232 | ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU |
213 | beq __error_p @ Fail: ARM_MPU and no MPU | 233 | bxeq lr |
214 | tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified | 234 | tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified |
215 | 235 | ||
216 | /* Setup second region first to free up r6 */ | 236 | /* Setup second region first to free up r6 */ |
217 | set_region_nr r0, #MPU_RAM_REGION | 237 | set_region_nr r0, #MPU_RAM_REGION, r12 |
218 | isb | 238 | isb |
219 | /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ | 239 | /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ |
220 | ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET | 240 | ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET |
221 | ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) | 241 | ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) |
222 | 242 | ||
223 | setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled | 243 | setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled |
224 | beq 1f @ Memory-map not unified | 244 | beq 1f @ Memory-map not unified |
225 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled | 245 | setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled |
226 | 1: isb | 246 | 1: isb |
227 | 247 | ||
228 | /* First/background region */ | 248 | /* First/background region */ |
229 | set_region_nr r0, #MPU_BG_REGION | 249 | set_region_nr r0, #MPU_BG_REGION, r12 |
230 | isb | 250 | isb |
231 | /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ | 251 | /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ |
232 | mov r0, #0 @ BG region starts at 0x0 | 252 | mov r0, #0 @ BG region starts at 0x0 |
233 | ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) | 253 | ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) |
234 | mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled | 254 | mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled |
235 | 255 | ||
236 | setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled | 256 | setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ 0x0, BG region, enabled |
237 | beq 2f @ Memory-map not unified | 257 | beq 2f @ Memory-map not unified |
238 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled | 258 | setup_region r0, r5, r6, MPU_INSTR_SIDE r12 @ 0x0, BG region, enabled |
239 | 2: isb | 259 | 2: isb |
240 | 260 | ||
241 | /* Vectors region */ | 261 | #ifdef CONFIG_XIP_KERNEL |
242 | set_region_nr r0, #MPU_VECTORS_REGION | 262 | set_region_nr r0, #MPU_ROM_REGION, r12 |
243 | isb | 263 | isb |
244 | /* Shared, inaccessible to PL0, rw PL1 */ | 264 | |
245 | mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE | 265 | ldr r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL) |
246 | ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL) | 266 | |
247 | /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */ | 267 | ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start |
248 | mov r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) | 268 | ldr r6, =(_exiprom) @ ROM end |
249 | 269 | sub r6, r6, r0 @ Minimum size of region to map | |
250 | setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled | 270 | clz r6, r6 @ Region size must be 2^N... |
251 | beq 3f @ Memory-map not unified | 271 | rsb r6, r6, #31 @ ...so round up region size |
252 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled | 272 | lsl r6, r6, #MPU_RSR_SZ @ Put size in right field |
273 | orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit | ||
274 | |||
275 | setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled | ||
276 | beq 3f @ Memory-map not unified | ||
277 | setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled | ||
253 | 3: isb | 278 | 3: isb |
279 | #endif | ||
280 | |||
281 | /* Enable the MPU */ | ||
282 | AR_CLASS(mrc p15, 0, r0, c1, c0, 0) @ Read SCTLR | ||
283 | AR_CLASS(bic r0, r0, #CR_BR) @ Disable the 'default mem-map' | ||
284 | AR_CLASS(orr r0, r0, #CR_M) @ Set SCTRL.M (MPU on) | ||
285 | AR_CLASS(mcr p15, 0, r0, c1, c0, 0) @ Enable MPU | ||
286 | |||
287 | M_CLASS(ldr r0, [r12, #MPU_CTRL]) | ||
288 | M_CLASS(bic r0, #MPU_CTRL_PRIVDEFENA) | ||
289 | M_CLASS(orr r0, #MPU_CTRL_ENABLE) | ||
290 | M_CLASS(str r0, [r12, #MPU_CTRL]) | ||
291 | isb | ||
292 | |||
293 | ret lr | ||
294 | ENDPROC(__setup_mpu) | ||
295 | |||
296 | #ifdef CONFIG_SMP | ||
297 | /* | ||
298 | * r6: pointer at mpu_rgn_info | ||
299 | */ | ||
300 | |||
301 | ENTRY(__secondary_setup_mpu) | ||
302 | /* Probe for v7 PMSA compliance */ | ||
303 | mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 | ||
304 | and r0, r0, #(MMFR0_PMSA) @ PMSA field | ||
305 | teq r0, #(MMFR0_PMSAv7) @ PMSA v7 | ||
306 | bne __error_p | ||
307 | |||
308 | /* Determine whether the D/I-side memory map is unified. We set the | ||
309 | * flags here and continue to use them for the rest of this function */ | ||
310 | mrc p15, 0, r0, c0, c0, 4 @ MPUIR | ||
311 | ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU | ||
312 | beq __error_p | ||
313 | |||
314 | ldr r4, [r6, #MPU_RNG_INFO_USED] | ||
315 | mov r5, #MPU_RNG_SIZE | ||
316 | add r3, r6, #MPU_RNG_INFO_RNGS | ||
317 | mla r3, r4, r5, r3 | ||
318 | |||
319 | 1: | ||
320 | tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified | ||
321 | sub r3, r3, #MPU_RNG_SIZE | ||
322 | sub r4, r4, #1 | ||
323 | |||
324 | set_region_nr r0, r4 | ||
325 | isb | ||
326 | |||
327 | ldr r0, [r3, #MPU_RGN_DRBAR] | ||
328 | ldr r6, [r3, #MPU_RGN_DRSR] | ||
329 | ldr r5, [r3, #MPU_RGN_DRACR] | ||
330 | |||
331 | setup_region r0, r5, r6, MPU_DATA_SIDE | ||
332 | beq 2f | ||
333 | setup_region r0, r5, r6, MPU_INSTR_SIDE | ||
334 | 2: isb | ||
335 | |||
336 | mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR | ||
337 | cmp r4, #0 | ||
338 | bgt 1b | ||
254 | 339 | ||
255 | /* Enable the MPU */ | 340 | /* Enable the MPU */ |
256 | mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR | 341 | mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR |
257 | bic r0, r0, #CR_BR @ Disable the 'default mem-map' | 342 | bic r0, r0, #CR_BR @ Disable the 'default mem-map' |
258 | orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) | 343 | orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) |
259 | mcr p15, 0, r0, c1, c0, 0 @ Enable MPU | 344 | mcr p15, 0, r0, c1, c0, 0 @ Enable MPU |
260 | isb | 345 | isb |
346 | |||
261 | ret lr | 347 | ret lr |
262 | ENDPROC(__setup_mpu) | 348 | ENDPROC(__secondary_setup_mpu) |
263 | #endif | 349 | |
350 | #endif /* CONFIG_SMP */ | ||
351 | #endif /* CONFIG_ARM_MPU */ | ||
264 | #include "head-common.S" | 352 | #include "head-common.S" |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e2de50bf8742..237973492479 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -19,11 +19,12 @@ | |||
19 | #include <asm/elf.h> | 19 | #include <asm/elf.h> |
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <asm/traps.h> | 21 | #include <asm/traps.h> |
22 | #include <asm/ucontext.h> | ||
23 | #include <asm/unistd.h> | 22 | #include <asm/unistd.h> |
24 | #include <asm/vfp.h> | 23 | #include <asm/vfp.h> |
25 | 24 | ||
26 | extern const unsigned long sigreturn_codes[7]; | 25 | #include "signal.h" |
26 | |||
27 | extern const unsigned long sigreturn_codes[17]; | ||
27 | 28 | ||
28 | static unsigned long signal_return_offset; | 29 | static unsigned long signal_return_offset; |
29 | 30 | ||
@@ -172,15 +173,6 @@ static int restore_vfp_context(char __user **auxp) | |||
172 | /* | 173 | /* |
173 | * Do a signal return; undo the signal stack. These are aligned to 64-bit. | 174 | * Do a signal return; undo the signal stack. These are aligned to 64-bit. |
174 | */ | 175 | */ |
175 | struct sigframe { | ||
176 | struct ucontext uc; | ||
177 | unsigned long retcode[2]; | ||
178 | }; | ||
179 | |||
180 | struct rt_sigframe { | ||
181 | struct siginfo info; | ||
182 | struct sigframe sig; | ||
183 | }; | ||
184 | 176 | ||
185 | static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) | 177 | static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) |
186 | { | 178 | { |
@@ -366,9 +358,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
366 | unsigned long __user *rc, void __user *frame) | 358 | unsigned long __user *rc, void __user *frame) |
367 | { | 359 | { |
368 | unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; | 360 | unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; |
361 | unsigned long handler_fdpic_GOT = 0; | ||
369 | unsigned long retcode; | 362 | unsigned long retcode; |
370 | int thumb = 0; | 363 | unsigned int idx, thumb = 0; |
371 | unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); | 364 | unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); |
365 | bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && | ||
366 | (current->personality & FDPIC_FUNCPTRS); | ||
367 | |||
368 | if (fdpic) { | ||
369 | unsigned long __user *fdpic_func_desc = | ||
370 | (unsigned long __user *)handler; | ||
371 | if (__get_user(handler, &fdpic_func_desc[0]) || | ||
372 | __get_user(handler_fdpic_GOT, &fdpic_func_desc[1])) | ||
373 | return 1; | ||
374 | } | ||
372 | 375 | ||
373 | cpsr |= PSR_ENDSTATE; | 376 | cpsr |= PSR_ENDSTATE; |
374 | 377 | ||
@@ -408,9 +411,26 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
408 | 411 | ||
409 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { | 412 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
410 | retcode = (unsigned long)ksig->ka.sa.sa_restorer; | 413 | retcode = (unsigned long)ksig->ka.sa.sa_restorer; |
414 | if (fdpic) { | ||
415 | /* | ||
416 | * We need code to load the function descriptor. | ||
417 | * That code follows the standard sigreturn code | ||
418 | * (6 words), and is made of 3 + 2 words for each | ||
419 | * variant. The 4th copied word is the actual FD | ||
420 | * address that the assembly code expects. | ||
421 | */ | ||
422 | idx = 6 + thumb * 3; | ||
423 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) | ||
424 | idx += 5; | ||
425 | if (__put_user(sigreturn_codes[idx], rc ) || | ||
426 | __put_user(sigreturn_codes[idx+1], rc+1) || | ||
427 | __put_user(sigreturn_codes[idx+2], rc+2) || | ||
428 | __put_user(retcode, rc+3)) | ||
429 | return 1; | ||
430 | goto rc_finish; | ||
431 | } | ||
411 | } else { | 432 | } else { |
412 | unsigned int idx = thumb << 1; | 433 | idx = thumb << 1; |
413 | |||
414 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) | 434 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
415 | idx += 3; | 435 | idx += 3; |
416 | 436 | ||
@@ -422,6 +442,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
422 | __put_user(sigreturn_codes[idx+1], rc+1)) | 442 | __put_user(sigreturn_codes[idx+1], rc+1)) |
423 | return 1; | 443 | return 1; |
424 | 444 | ||
445 | rc_finish: | ||
425 | #ifdef CONFIG_MMU | 446 | #ifdef CONFIG_MMU |
426 | if (cpsr & MODE32_BIT) { | 447 | if (cpsr & MODE32_BIT) { |
427 | struct mm_struct *mm = current->mm; | 448 | struct mm_struct *mm = current->mm; |
@@ -441,7 +462,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
441 | * the return code written onto the stack. | 462 | * the return code written onto the stack. |
442 | */ | 463 | */ |
443 | flush_icache_range((unsigned long)rc, | 464 | flush_icache_range((unsigned long)rc, |
444 | (unsigned long)(rc + 2)); | 465 | (unsigned long)(rc + 3)); |
445 | 466 | ||
446 | retcode = ((unsigned long)rc) + thumb; | 467 | retcode = ((unsigned long)rc) + thumb; |
447 | } | 468 | } |
@@ -451,6 +472,8 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
451 | regs->ARM_sp = (unsigned long)frame; | 472 | regs->ARM_sp = (unsigned long)frame; |
452 | regs->ARM_lr = retcode; | 473 | regs->ARM_lr = retcode; |
453 | regs->ARM_pc = handler; | 474 | regs->ARM_pc = handler; |
475 | if (fdpic) | ||
476 | regs->ARM_r9 = handler_fdpic_GOT; | ||
454 | regs->ARM_cpsr = cpsr; | 477 | regs->ARM_cpsr = cpsr; |
455 | 478 | ||
456 | return 0; | 479 | return 0; |
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
new file mode 100644
index 000000000000..b7b838b05229
--- /dev/null
+++ b/arch/arm/kernel/signal.h
@@ -0,0 +1,11 @@ | |||
1 | #include <asm/ucontext.h> | ||
2 | |||
3 | struct sigframe { | ||
4 | struct ucontext uc; | ||
5 | unsigned long retcode[4]; | ||
6 | }; | ||
7 | |||
8 | struct rt_sigframe { | ||
9 | struct siginfo info; | ||
10 | struct sigframe sig; | ||
11 | }; | ||
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index b84d0cb13682..2c7b22e32152 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -14,6 +14,8 @@ | |||
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <asm/assembler.h> | ||
18 | #include <asm/asm-offsets.h> | ||
17 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
18 | 20 | ||
19 | /* | 21 | /* |
@@ -51,6 +53,17 @@ ARM_OK( .arm ) | |||
51 | .thumb | 53 | .thumb |
52 | .endm | 54 | .endm |
53 | 55 | ||
56 | .macro arm_fdpic_slot n | ||
57 | .org sigreturn_codes + 24 + 20 * (\n) | ||
58 | ARM_OK( .arm ) | ||
59 | .endm | ||
60 | |||
61 | .macro thumb_fdpic_slot n | ||
62 | .org sigreturn_codes + 24 + 20 * (\n) + 12 | ||
63 | .thumb | ||
64 | .endm | ||
65 | |||
66 | |||
54 | #if __LINUX_ARM_ARCH__ <= 4 | 67 | #if __LINUX_ARM_ARCH__ <= 4 |
55 | /* | 68 | /* |
56 | * Note we manually set minimally required arch that supports | 69 | * Note we manually set minimally required arch that supports |
@@ -90,13 +103,46 @@ ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) ) | |||
90 | movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) | 103 | movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) |
91 | swi #0 | 104 | swi #0 |
92 | 105 | ||
106 | /* ARM sigreturn restorer FDPIC bounce code snippet */ | ||
107 | arm_fdpic_slot 0 | ||
108 | ARM_OK( ldr r3, [sp, #SIGFRAME_RC3_OFFSET] ) | ||
109 | ARM_OK( ldmia r3, {r3, r9} ) | ||
110 | #ifdef CONFIG_ARM_THUMB | ||
111 | ARM_OK( bx r3 ) | ||
112 | #else | ||
113 | ARM_OK( ret r3 ) | ||
114 | #endif | ||
115 | |||
116 | /* Thumb sigreturn restorer FDPIC bounce code snippet */ | ||
117 | thumb_fdpic_slot 0 | ||
118 | ldr r3, [sp, #SIGFRAME_RC3_OFFSET] | ||
119 | ldmia r3, {r2, r3} | ||
120 | mov r9, r3 | ||
121 | bx r2 | ||
122 | |||
123 | /* ARM sigreturn_rt restorer FDPIC bounce code snippet */ | ||
124 | arm_fdpic_slot 1 | ||
125 | ARM_OK( ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET] ) | ||
126 | ARM_OK( ldmia r3, {r3, r9} ) | ||
127 | #ifdef CONFIG_ARM_THUMB | ||
128 | ARM_OK( bx r3 ) | ||
129 | #else | ||
130 | ARM_OK( ret r3 ) | ||
131 | #endif | ||
132 | |||
133 | /* Thumb sigreturn_rt restorer FDPIC bounce code snippet */ | ||
134 | thumb_fdpic_slot 1 | ||
135 | ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET] | ||
136 | ldmia r3, {r2, r3} | ||
137 | mov r9, r3 | ||
138 | bx r2 | ||
139 | |||
93 | /* | 140 | /* |
94 | * Note on addtional space: setup_return in signal.c | 141 | * Note on additional space: setup_return in signal.c |
95 | * algorithm uses two words copy regardless whether | 142 | * always copies the same number of words regardless whether |
96 | * it is thumb case or not, so we need additional | 143 | * it is thumb case or not, so we need one additional padding |
97 | * word after real last entry. | 144 | * word after the last entry. |
98 | */ | 145 | */ |
99 | arm_slot 2 | ||
100 | .space 4 | 146 | .space 4 |
101 | 147 | ||
102 | .size sigreturn_codes, . - sigreturn_codes | 148 | .size sigreturn_codes, . - sigreturn_codes |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c9a0a5299827..b4fbf00ee4ad 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -114,7 +114,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
114 | */ | 114 | */ |
115 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; | 115 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; |
116 | #ifdef CONFIG_ARM_MPU | 116 | #ifdef CONFIG_ARM_MPU |
117 | secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr; | 117 | secondary_data.mpu_rgn_info = &mpu_rgn_info; |
118 | #endif | 118 | #endif |
119 | 119 | ||
120 | #ifdef CONFIG_MMU | 120 | #ifdef CONFIG_MMU |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 0fcd82f01388..5cf04888c581 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -655,6 +655,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
655 | set_tls(regs->ARM_r0); | 655 | set_tls(regs->ARM_r0); |
656 | return 0; | 656 | return 0; |
657 | 657 | ||
658 | case NR(get_tls): | ||
659 | return current_thread_info()->tp_value[0]; | ||
660 | |||
658 | default: | 661 | default: |
659 | /* Calls 9f00xx..9f07ff are defined to return -ENOSYS | 662 | /* Calls 9f00xx..9f07ff are defined to return -ENOSYS |
660 | if not implemented, rather than raising SIGILL. This | 663 | if not implemented, rather than raising SIGILL. This |
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 8265b116218d..74c93879532a 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -6,6 +6,8 @@ | |||
6 | /* No __ro_after_init data in the .rodata section - which will always be ro */ | 6 | /* No __ro_after_init data in the .rodata section - which will always be ro */ |
7 | #define RO_AFTER_INIT_DATA | 7 | #define RO_AFTER_INIT_DATA |
8 | 8 | ||
9 | #include <linux/sizes.h> | ||
10 | |||
9 | #include <asm-generic/vmlinux.lds.h> | 11 | #include <asm-generic/vmlinux.lds.h> |
10 | #include <asm/cache.h> | 12 | #include <asm/cache.h> |
11 | #include <asm/thread_info.h> | 13 | #include <asm/thread_info.h> |
@@ -77,9 +79,7 @@ SECTIONS | |||
77 | *(.text.fixup) | 79 | *(.text.fixup) |
78 | *(__ex_table) | 80 | *(__ex_table) |
79 | #endif | 81 | #endif |
80 | #ifndef CONFIG_SMP_ON_UP | ||
81 | *(.alt.smp.init) | 82 | *(.alt.smp.init) |
82 | #endif | ||
83 | *(.discard) | 83 | *(.discard) |
84 | *(.discard.*) | 84 | *(.discard.*) |
85 | } | 85 | } |
@@ -181,19 +181,7 @@ SECTIONS | |||
181 | *(.taglist.init) | 181 | *(.taglist.init) |
182 | __tagtable_end = .; | 182 | __tagtable_end = .; |
183 | } | 183 | } |
184 | #ifdef CONFIG_SMP_ON_UP | 184 | .init.rodata : { |
185 | .init.smpalt : { | ||
186 | __smpalt_begin = .; | ||
187 | *(.alt.smp.init) | ||
188 | __smpalt_end = .; | ||
189 | } | ||
190 | #endif | ||
191 | .init.pv_table : { | ||
192 | __pv_table_begin = .; | ||
193 | *(.pv_table) | ||
194 | __pv_table_end = .; | ||
195 | } | ||
196 | .init.data : { | ||
197 | INIT_SETUP(16) | 185 | INIT_SETUP(16) |
198 | INIT_CALLS | 186 | INIT_CALLS |
199 | CON_INITCALL | 187 | CON_INITCALL |
@@ -201,48 +189,49 @@ SECTIONS | |||
201 | INIT_RAM_FS | 189 | INIT_RAM_FS |
202 | } | 190 | } |
203 | 191 | ||
204 | #ifdef CONFIG_SMP | 192 | #ifdef CONFIG_ARM_MPU |
205 | PERCPU_SECTION(L1_CACHE_BYTES) | 193 | . = ALIGN(SZ_128K); |
206 | #endif | 194 | #endif |
207 | |||
208 | _exiprom = .; /* End of XIP ROM area */ | 195 | _exiprom = .; /* End of XIP ROM area */ |
209 | __data_loc = ALIGN(4); /* location in binary */ | ||
210 | . = PAGE_OFFSET + TEXT_OFFSET; | ||
211 | 196 | ||
212 | .data : AT(__data_loc) { | 197 | /* |
213 | _data = .; /* address in memory */ | 198 | * From this point, stuff is considered writable and will be copied to RAM |
214 | _sdata = .; | 199 | */ |
215 | 200 | __data_loc = ALIGN(4); /* location in file */ | |
216 | /* | 201 | . = PAGE_OFFSET + TEXT_OFFSET; /* location in memory */ |
217 | * first, the init task union, aligned | 202 | #undef LOAD_OFFSET |
218 | * to an 8192 byte boundary. | 203 | #define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc) |
219 | */ | 204 | |
220 | INIT_TASK_DATA(THREAD_SIZE) | 205 | . = ALIGN(THREAD_SIZE); |
206 | _sdata = .; | ||
207 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | ||
208 | .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) { | ||
209 | *(.data..ro_after_init) | ||
210 | } | ||
211 | _edata = .; | ||
221 | 212 | ||
222 | . = ALIGN(PAGE_SIZE); | 213 | . = ALIGN(PAGE_SIZE); |
223 | __init_begin = .; | 214 | __init_begin = .; |
215 | .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { | ||
224 | INIT_DATA | 216 | INIT_DATA |
217 | } | ||
218 | .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { | ||
225 | ARM_EXIT_KEEP(EXIT_DATA) | 219 | ARM_EXIT_KEEP(EXIT_DATA) |
226 | . = ALIGN(PAGE_SIZE); | ||
227 | __init_end = .; | ||
228 | |||
229 | *(.data..ro_after_init) | ||
230 | |||
231 | NOSAVE_DATA | ||
232 | CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) | ||
233 | READ_MOSTLY_DATA(L1_CACHE_BYTES) | ||
234 | |||
235 | /* | ||
236 | * and the usual data section | ||
237 | */ | ||
238 | DATA_DATA | ||
239 | CONSTRUCTORS | ||
240 | |||
241 | _edata = .; | ||
242 | } | 220 | } |
243 | _edata_loc = __data_loc + SIZEOF(.data); | 221 | #ifdef CONFIG_SMP |
222 | PERCPU_SECTION(L1_CACHE_BYTES) | ||
223 | #endif | ||
224 | |||
225 | /* | ||
226 | * End of copied data. We need a dummy section to get its LMA. | ||
227 | * It is also placed before the final ALIGN() since trailing padding is | ||
228 | * not stored in the resulting binary file and is useless to copy. | ||
229 | */ | ||
230 | .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { } | ||
231 | _edata_loc = LOADADDR(.data.endmark); | ||
244 | 232 | ||
245 | BUG_TABLE | 233 | . = ALIGN(PAGE_SIZE); |
234 | __init_end = .; | ||
246 | 235 | ||
247 | #ifdef CONFIG_HAVE_TCM | 236 | #ifdef CONFIG_HAVE_TCM |
248 | /* | 237 | /* |
@@ -301,7 +290,7 @@ SECTIONS | |||
301 | } | 290 | } |
302 | #endif | 291 | #endif |
303 | 292 | ||
304 | BSS_SECTION(0, 0, 0) | 293 | BSS_SECTION(0, 0, 8) |
305 | _end = .; | 294 | _end = .; |
306 | 295 | ||
307 | STABS_DEBUG | 296 | STABS_DEBUG |
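The reworked writable-data layout above keeps every copied section at its RAM (VMA) address while AT(ADDR(...) - LOAD_OFFSET) pins its ROM (LMA) address, and the empty .data.endmark section exists only so LOADADDR() can yield _edata_loc; the startup code then has to bring _edata_loc - __data_loc bytes of .data from ROM into RAM. A small, hedged illustration of that arithmetic; the addresses below are made up and only stand in for what Kconfig and the linker would really produce.

/* xip_lma_demo.c - hedged illustration of the LOAD_OFFSET arithmetic */
#include <stdio.h>

int main(void)
{
	/* hypothetical values; real ones come from Kconfig and the linker */
	unsigned long page_offset = 0xc0000000UL, text_offset = 0x8000UL;
	unsigned long data_loc    = 0x00280000UL;	/* LMA of _sdata, in ROM */
	unsigned long load_offset = page_offset + text_offset - data_loc;

	unsigned long sdata     = page_offset + text_offset;	/* VMA in RAM */
	unsigned long endmark   = sdata + 0x42000UL;	/* VMA of .data.endmark */
	unsigned long edata_loc = endmark - load_offset;	/* LOADADDR(.data.endmark) */

	printf("copy %#lx bytes of .data from ROM %#lx to RAM %#lx\n",
	       edata_loc - data_loc, data_loc, sdata);
	return 0;
}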
@@ -322,3 +311,29 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") | |||
322 | */ | 311 | */ |
323 | ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, | 312 | ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, |
324 | "HYP init code too big or misaligned") | 313 | "HYP init code too big or misaligned") |
314 | |||
315 | #ifdef CONFIG_XIP_DEFLATED_DATA | ||
316 | /* | ||
317 | * The .bss is used as a stack area for __inflate_kernel_data() whose stack | ||
318 | * frame is 9568 bytes. Make sure it has extra room left. | ||
319 | */ | ||
320 | ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA") | ||
321 | #endif | ||
322 | |||
323 | #ifdef CONFIG_ARM_MPU | ||
324 | /* | ||
325 | * Due to the PMSAv7 restrictions on base address and size we have to | ||
326 | * enforce these minimal alignment requirements. A weaker alignment | ||
327 | * restriction on _xiprom would likely make the XIP address space | ||
328 | * span multiple MPU regions, so while reprogramming the MPU region | ||
329 | * we are currently executing from we could end up with settings | ||
330 | * that do not cover the reprogramming code itself; as soon as the | ||
331 | * MPU settings were updated we would then try to execute straight | ||
332 | * from the background region, which is XN. | ||
333 | * An alignment of 1M should suit most users. | ||
334 | * _exiprom is aligned to 1/8 of 1M so it can be covered by a | ||
335 | * subregion disable. | ||
336 | */ | ||
337 | ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues") | ||
338 | ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues") | ||
339 | #endif | ||
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index c83a7ba737d6..f73ba564b5e5 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -214,14 +214,9 @@ SECTIONS | |||
214 | *(.pv_table) | 214 | *(.pv_table) |
215 | __pv_table_end = .; | 215 | __pv_table_end = .; |
216 | } | 216 | } |
217 | .init.data : { | 217 | |
218 | INIT_DATA | 218 | INIT_DATA_SECTION(16) |
219 | INIT_SETUP(16) | 219 | |
220 | INIT_CALLS | ||
221 | CON_INITCALL | ||
222 | SECURITY_INITCALL | ||
223 | INIT_RAM_FS | ||
224 | } | ||
225 | .exit.data : { | 220 | .exit.data : { |
226 | ARM_EXIT_KEEP(EXIT_DATA) | 221 | ARM_EXIT_KEEP(EXIT_DATA) |
227 | } | 222 | } |
@@ -236,33 +231,10 @@ SECTIONS | |||
236 | . = ALIGN(THREAD_SIZE); | 231 | . = ALIGN(THREAD_SIZE); |
237 | #endif | 232 | #endif |
238 | __init_end = .; | 233 | __init_end = .; |
239 | __data_loc = .; | ||
240 | |||
241 | .data : AT(__data_loc) { | ||
242 | _data = .; /* address in memory */ | ||
243 | _sdata = .; | ||
244 | |||
245 | /* | ||
246 | * first, the init task union, aligned | ||
247 | * to an 8192 byte boundary. | ||
248 | */ | ||
249 | INIT_TASK_DATA(THREAD_SIZE) | ||
250 | |||
251 | NOSAVE_DATA | ||
252 | CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) | ||
253 | READ_MOSTLY_DATA(L1_CACHE_BYTES) | ||
254 | |||
255 | /* | ||
256 | * and the usual data section | ||
257 | */ | ||
258 | DATA_DATA | ||
259 | CONSTRUCTORS | ||
260 | |||
261 | _edata = .; | ||
262 | } | ||
263 | _edata_loc = __data_loc + SIZEOF(.data); | ||
264 | 234 | ||
265 | BUG_TABLE | 235 | _sdata = .; |
236 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | ||
237 | _edata = .; | ||
266 | 238 | ||
267 | #ifdef CONFIG_HAVE_TCM | 239 | #ifdef CONFIG_HAVE_TCM |
268 | /* | 240 | /* |
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 950d19babb5f..bdb2ec11a2e3 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ | |||
9 | 9 | ||
10 | ifneq ($(CONFIG_MMU),y) | 10 | ifneq ($(CONFIG_MMU),y) |
11 | obj-y += nommu.o | 11 | obj-y += nommu.o |
12 | obj-$(CONFIG_ARM_MPU) += pmsa-v7.o | ||
12 | endif | 13 | endif |
13 | 14 | ||
14 | obj-$(CONFIG_ARM_PTDUMP) += dump.o | 15 | obj-$(CONFIG_ARM_PTDUMP) += dump.o |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 91537d90f5f5..e4370810f4f1 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -27,259 +27,7 @@ unsigned long vectors_base; | |||
27 | 27 | ||
28 | #ifdef CONFIG_ARM_MPU | 28 | #ifdef CONFIG_ARM_MPU |
29 | struct mpu_rgn_info mpu_rgn_info; | 29 | struct mpu_rgn_info mpu_rgn_info; |
30 | 30 | #endif | |
31 | /* Region number */ | ||
32 | static void rgnr_write(u32 v) | ||
33 | { | ||
34 | asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v)); | ||
35 | } | ||
36 | |||
37 | /* Data-side / unified region attributes */ | ||
38 | |||
39 | /* Region access control register */ | ||
40 | static void dracr_write(u32 v) | ||
41 | { | ||
42 | asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v)); | ||
43 | } | ||
44 | |||
45 | /* Region size register */ | ||
46 | static void drsr_write(u32 v) | ||
47 | { | ||
48 | asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v)); | ||
49 | } | ||
50 | |||
51 | /* Region base address register */ | ||
52 | static void drbar_write(u32 v) | ||
53 | { | ||
54 | asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v)); | ||
55 | } | ||
56 | |||
57 | static u32 drbar_read(void) | ||
58 | { | ||
59 | u32 v; | ||
60 | asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v)); | ||
61 | return v; | ||
62 | } | ||
63 | /* Optional instruction-side region attributes */ | ||
64 | |||
65 | /* I-side Region access control register */ | ||
66 | static void iracr_write(u32 v) | ||
67 | { | ||
68 | asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v)); | ||
69 | } | ||
70 | |||
71 | /* I-side Region size register */ | ||
72 | static void irsr_write(u32 v) | ||
73 | { | ||
74 | asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v)); | ||
75 | } | ||
76 | |||
77 | /* I-side Region base address register */ | ||
78 | static void irbar_write(u32 v) | ||
79 | { | ||
80 | asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v)); | ||
81 | } | ||
82 | |||
83 | static unsigned long irbar_read(void) | ||
84 | { | ||
85 | unsigned long v; | ||
86 | asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v)); | ||
87 | return v; | ||
88 | } | ||
89 | |||
90 | /* MPU initialisation functions */ | ||
91 | void __init adjust_lowmem_bounds_mpu(void) | ||
92 | { | ||
93 | phys_addr_t phys_offset = PHYS_OFFSET; | ||
94 | phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; | ||
95 | struct memblock_region *reg; | ||
96 | bool first = true; | ||
97 | phys_addr_t mem_start; | ||
98 | phys_addr_t mem_end; | ||
99 | |||
100 | for_each_memblock(memory, reg) { | ||
101 | if (first) { | ||
102 | /* | ||
103 | * Initially only use memory continuous from | ||
104 | * PHYS_OFFSET */ | ||
105 | if (reg->base != phys_offset) | ||
106 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | ||
107 | |||
108 | mem_start = reg->base; | ||
109 | mem_end = reg->base + reg->size; | ||
110 | specified_mem_size = reg->size; | ||
111 | first = false; | ||
112 | } else { | ||
113 | /* | ||
114 | * memblock auto merges contiguous blocks, remove | ||
115 | * all blocks afterwards in one go (we can't remove | ||
116 | * blocks separately while iterating) | ||
117 | */ | ||
118 | pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", | ||
119 | &mem_end, ®->base); | ||
120 | memblock_remove(reg->base, 0 - reg->base); | ||
121 | break; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * MPU has curious alignment requirements: Size must be power of 2, and | ||
127 | * region start must be aligned to the region size | ||
128 | */ | ||
129 | if (phys_offset != 0) | ||
130 | pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n"); | ||
131 | |||
132 | /* | ||
133 | * Maximum aligned region might overflow phys_addr_t if phys_offset is | ||
134 | * 0. Hence we keep everything below 4G until we take the smaller of | ||
135 | * the aligned_region_size and rounded_mem_size, one of which is | ||
136 | * guaranteed to be smaller than the maximum physical address. | ||
137 | */ | ||
138 | aligned_region_size = (phys_offset - 1) ^ (phys_offset); | ||
139 | /* Find the max power-of-two sized region that fits inside our bank */ | ||
140 | rounded_mem_size = (1 << __fls(specified_mem_size)) - 1; | ||
141 | |||
142 | /* The actual region size is the smaller of the two */ | ||
143 | aligned_region_size = aligned_region_size < rounded_mem_size | ||
144 | ? aligned_region_size + 1 | ||
145 | : rounded_mem_size + 1; | ||
146 | |||
147 | if (aligned_region_size != specified_mem_size) { | ||
148 | pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", | ||
149 | &specified_mem_size, &aligned_region_size); | ||
150 | memblock_remove(mem_start + aligned_region_size, | ||
151 | specified_mem_size - aligned_region_size); | ||
152 | |||
153 | mem_end = mem_start + aligned_region_size; | ||
154 | } | ||
155 | |||
156 | pr_debug("MPU Region from %pa size %pa (end %pa))\n", | ||
157 | &phys_offset, &aligned_region_size, &mem_end); | ||
158 | |||
159 | } | ||
160 | |||
161 | static int mpu_present(void) | ||
162 | { | ||
163 | return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); | ||
164 | } | ||
165 | |||
166 | static int mpu_max_regions(void) | ||
167 | { | ||
168 | /* | ||
169 | * We don't support a different number of I/D side regions so if we | ||
170 | * have separate instruction and data memory maps then return | ||
171 | * whichever side has a smaller number of supported regions. | ||
172 | */ | ||
173 | u32 dregions, iregions, mpuir; | ||
174 | mpuir = read_cpuid(CPUID_MPUIR); | ||
175 | |||
176 | dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; | ||
177 | |||
178 | /* Check for separate d-side and i-side memory maps */ | ||
179 | if (mpuir & MPUIR_nU) | ||
180 | iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; | ||
181 | |||
182 | /* Use the smallest of the two maxima */ | ||
183 | return min(dregions, iregions); | ||
184 | } | ||
185 | |||
186 | static int mpu_iside_independent(void) | ||
187 | { | ||
188 | /* MPUIR.nU specifies whether there is *not* a unified memory map */ | ||
189 | return read_cpuid(CPUID_MPUIR) & MPUIR_nU; | ||
190 | } | ||
191 | |||
192 | static int mpu_min_region_order(void) | ||
193 | { | ||
194 | u32 drbar_result, irbar_result; | ||
195 | /* We've kept a region free for this probing */ | ||
196 | rgnr_write(MPU_PROBE_REGION); | ||
197 | isb(); | ||
198 | /* | ||
199 | * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum | ||
200 | * region order | ||
201 | */ | ||
202 | drbar_write(0xFFFFFFFC); | ||
203 | drbar_result = irbar_result = drbar_read(); | ||
204 | drbar_write(0x0); | ||
205 | /* If the MPU is non-unified, we use the larger of the two minima*/ | ||
206 | if (mpu_iside_independent()) { | ||
207 | irbar_write(0xFFFFFFFC); | ||
208 | irbar_result = irbar_read(); | ||
209 | irbar_write(0x0); | ||
210 | } | ||
211 | isb(); /* Ensure that MPU region operations have completed */ | ||
212 | /* Return whichever result is larger */ | ||
213 | return __ffs(max(drbar_result, irbar_result)); | ||
214 | } | ||
215 | |||
216 | static int mpu_setup_region(unsigned int number, phys_addr_t start, | ||
217 | unsigned int size_order, unsigned int properties) | ||
218 | { | ||
219 | u32 size_data; | ||
220 | |||
221 | /* We kept a region free for probing resolution of MPU regions*/ | ||
222 | if (number > mpu_max_regions() || number == MPU_PROBE_REGION) | ||
223 | return -ENOENT; | ||
224 | |||
225 | if (size_order > 32) | ||
226 | return -ENOMEM; | ||
227 | |||
228 | if (size_order < mpu_min_region_order()) | ||
229 | return -ENOMEM; | ||
230 | |||
231 | /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ | ||
232 | size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; | ||
233 | |||
234 | dsb(); /* Ensure all previous data accesses occur with old mappings */ | ||
235 | rgnr_write(number); | ||
236 | isb(); | ||
237 | drbar_write(start); | ||
238 | dracr_write(properties); | ||
239 | isb(); /* Propagate properties before enabling region */ | ||
240 | drsr_write(size_data); | ||
241 | |||
242 | /* Check for independent I-side registers */ | ||
243 | if (mpu_iside_independent()) { | ||
244 | irbar_write(start); | ||
245 | iracr_write(properties); | ||
246 | isb(); | ||
247 | irsr_write(size_data); | ||
248 | } | ||
249 | isb(); | ||
250 | |||
251 | /* Store region info (we treat i/d side the same, so only store d) */ | ||
252 | mpu_rgn_info.rgns[number].dracr = properties; | ||
253 | mpu_rgn_info.rgns[number].drbar = start; | ||
254 | mpu_rgn_info.rgns[number].drsr = size_data; | ||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /* | ||
259 | * Set up default MPU regions, doing nothing if there is no MPU | ||
260 | */ | ||
261 | void __init mpu_setup(void) | ||
262 | { | ||
263 | int region_err; | ||
264 | if (!mpu_present()) | ||
265 | return; | ||
266 | |||
267 | region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET, | ||
268 | ilog2(memblock.memory.regions[0].size), | ||
269 | MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL); | ||
270 | if (region_err) { | ||
271 | panic("MPU region initialization failure! %d", region_err); | ||
272 | } else { | ||
273 | pr_info("Using ARMv7 PMSA Compliant MPU. " | ||
274 | "Region independence: %s, Max regions: %d\n", | ||
275 | mpu_iside_independent() ? "Yes" : "No", | ||
276 | mpu_max_regions()); | ||
277 | } | ||
278 | } | ||
279 | #else | ||
280 | static void adjust_lowmem_bounds_mpu(void) {} | ||
281 | static void __init mpu_setup(void) {} | ||
282 | #endif /* CONFIG_ARM_MPU */ | ||
283 | 31 | ||
284 | #ifdef CONFIG_CPU_CP15 | 32 | #ifdef CONFIG_CPU_CP15 |
285 | #ifdef CONFIG_CPU_HIGH_VECTOR | 33 | #ifdef CONFIG_CPU_HIGH_VECTOR |
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c new file mode 100644 index 000000000000..976df60ac426 --- /dev/null +++ b/arch/arm/mm/pmsa-v7.c | |||
@@ -0,0 +1,484 @@ | |||
1 | /* | ||
2 | * Based on linux/arch/arm/mm/nommu.c | ||
3 | * | ||
4 | * ARM PMSAv7 supporting functions. | ||
5 | */ | ||
6 | |||
7 | #include <linux/bitops.h> | ||
8 | #include <linux/memblock.h> | ||
9 | |||
10 | #include <asm/cacheflush.h> | ||
11 | #include <asm/cp15.h> | ||
12 | #include <asm/cputype.h> | ||
13 | #include <asm/mpu.h> | ||
14 | #include <asm/sections.h> | ||
15 | |||
16 | #include "mm.h" | ||
17 | |||
18 | struct region { | ||
19 | phys_addr_t base; | ||
20 | phys_addr_t size; | ||
21 | unsigned long subreg; | ||
22 | }; | ||
23 | |||
24 | static struct region __initdata mem[MPU_MAX_REGIONS]; | ||
25 | #ifdef CONFIG_XIP_KERNEL | ||
26 | static struct region __initdata xip[MPU_MAX_REGIONS]; | ||
27 | #endif | ||
28 | |||
29 | static unsigned int __initdata mpu_min_region_order; | ||
30 | static unsigned int __initdata mpu_max_regions; | ||
31 | |||
32 | static int __init __mpu_min_region_order(void); | ||
33 | static int __init __mpu_max_regions(void); | ||
34 | |||
35 | #ifndef CONFIG_CPU_V7M | ||
36 | |||
37 | #define DRBAR __ACCESS_CP15(c6, 0, c1, 0) | ||
38 | #define IRBAR __ACCESS_CP15(c6, 0, c1, 1) | ||
39 | #define DRSR __ACCESS_CP15(c6, 0, c1, 2) | ||
40 | #define IRSR __ACCESS_CP15(c6, 0, c1, 3) | ||
41 | #define DRACR __ACCESS_CP15(c6, 0, c1, 4) | ||
42 | #define IRACR __ACCESS_CP15(c6, 0, c1, 5) | ||
43 | #define RNGNR __ACCESS_CP15(c6, 0, c2, 0) | ||
44 | |||
45 | /* Region number */ | ||
46 | static inline void rgnr_write(u32 v) | ||
47 | { | ||
48 | write_sysreg(v, RNGNR); | ||
49 | } | ||
50 | |||
51 | /* Data-side / unified region attributes */ | ||
52 | |||
53 | /* Region access control register */ | ||
54 | static inline void dracr_write(u32 v) | ||
55 | { | ||
56 | write_sysreg(v, DRACR); | ||
57 | } | ||
58 | |||
59 | /* Region size register */ | ||
60 | static inline void drsr_write(u32 v) | ||
61 | { | ||
62 | write_sysreg(v, DRSR); | ||
63 | } | ||
64 | |||
65 | /* Region base address register */ | ||
66 | static inline void drbar_write(u32 v) | ||
67 | { | ||
68 | write_sysreg(v, DRBAR); | ||
69 | } | ||
70 | |||
71 | static inline u32 drbar_read(void) | ||
72 | { | ||
73 | return read_sysreg(DRBAR); | ||
74 | } | ||
75 | /* Optional instruction-side region attributes */ | ||
76 | |||
77 | /* I-side Region access control register */ | ||
78 | static inline void iracr_write(u32 v) | ||
79 | { | ||
80 | write_sysreg(v, IRACR); | ||
81 | } | ||
82 | |||
83 | /* I-side Region size register */ | ||
84 | static inline void irsr_write(u32 v) | ||
85 | { | ||
86 | write_sysreg(v, IRSR); | ||
87 | } | ||
88 | |||
89 | /* I-side Region base address register */ | ||
90 | static inline void irbar_write(u32 v) | ||
91 | { | ||
92 | write_sysreg(v, IRBAR); | ||
93 | } | ||
94 | |||
95 | static inline u32 irbar_read(void) | ||
96 | { | ||
97 | return read_sysreg(IRBAR); | ||
98 | } | ||
99 | |||
100 | #else | ||
101 | |||
102 | static inline void rgnr_write(u32 v) | ||
103 | { | ||
104 | writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR); | ||
105 | } | ||
106 | |||
107 | /* Data-side / unified region attributes */ | ||
108 | |||
109 | /* Region access control register */ | ||
110 | static inline void dracr_write(u32 v) | ||
111 | { | ||
112 | u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0); | ||
113 | |||
114 | writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR); | ||
115 | } | ||
116 | |||
117 | /* Region size register */ | ||
118 | static inline void drsr_write(u32 v) | ||
119 | { | ||
120 | u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16); | ||
121 | |||
122 | writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR); | ||
123 | } | ||
124 | |||
125 | /* Region base address register */ | ||
126 | static inline void drbar_write(u32 v) | ||
127 | { | ||
128 | writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR); | ||
129 | } | ||
130 | |||
131 | static inline u32 drbar_read(void) | ||
132 | { | ||
133 | return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR); | ||
134 | } | ||
135 | |||
136 | /* ARMv7-M only supports a unified MPU, so I-side operations are no-ops */ | ||
137 | |||
138 | static inline void iracr_write(u32 v) {} | ||
139 | static inline void irsr_write(u32 v) {} | ||
140 | static inline void irbar_write(u32 v) {} | ||
141 | static inline unsigned long irbar_read(void) {return 0;} | ||
142 | |||
143 | #endif | ||
144 | |||
145 | static int __init mpu_present(void) | ||
146 | { | ||
147 | return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); | ||
148 | } | ||
149 | |||
150 | static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region) | ||
151 | { | ||
152 | unsigned long subreg, bslots, sslots; | ||
153 | phys_addr_t abase = base & ~(size - 1); | ||
154 | phys_addr_t asize = base + size - abase; | ||
155 | phys_addr_t p2size = 1 << __fls(asize); | ||
156 | phys_addr_t bdiff, sdiff; | ||
157 | |||
158 | if (p2size != asize) | ||
159 | p2size *= 2; | ||
160 | |||
161 | bdiff = base - abase; | ||
162 | sdiff = p2size - asize; | ||
163 | subreg = p2size / MPU_NR_SUBREGS; | ||
164 | |||
165 | if ((bdiff % subreg) || (sdiff % subreg)) | ||
166 | return false; | ||
167 | |||
168 | bslots = bdiff / subreg; | ||
169 | sslots = sdiff / subreg; | ||
170 | |||
171 | if (bslots || sslots) { | ||
172 | int i; | ||
173 | |||
174 | if (subreg < MPU_MIN_SUBREG_SIZE) | ||
175 | return false; | ||
176 | |||
177 | if (bslots + sslots > MPU_NR_SUBREGS) | ||
178 | return false; | ||
179 | |||
180 | for (i = 0; i < bslots; i++) | ||
181 | _set_bit(i, ®ion->subreg); | ||
182 | |||
183 | for (i = 1; i <= sslots; i++) | ||
184 | _set_bit(MPU_NR_SUBREGS - i, ®ion->subreg); | ||
185 | } | ||
186 | |||
187 | region->base = abase; | ||
188 | region->size = p2size; | ||
189 | |||
190 | return true; | ||
191 | } | ||
192 | |||
193 | static int __init allocate_region(phys_addr_t base, phys_addr_t size, | ||
194 | unsigned int limit, struct region *regions) | ||
195 | { | ||
196 | int count = 0; | ||
197 | phys_addr_t diff = size; | ||
198 | int attempts = MPU_MAX_REGIONS; | ||
199 | |||
200 | while (diff) { | ||
201 | /* Try to cover the region as is (maybe with the help of subregions) */ | ||
202 | if (try_split_region(base, size, ®ions[count])) { | ||
203 | count++; | ||
204 | base += size; | ||
205 | diff -= size; | ||
206 | size = diff; | ||
207 | } else { | ||
208 | /* | ||
209 | * Maximum aligned region might overflow phys_addr_t | ||
210 | * if "base" is 0. Hence we keep everything below 4G | ||
211 | * until we take the smaller of the aligned region | ||
212 | * size ("asize") and rounded region size ("p2size"), | ||
213 | * one of which is guaranteed to be smaller than the | ||
214 | * maximum physical address. | ||
215 | */ | ||
216 | phys_addr_t asize = (base - 1) ^ base; | ||
217 | phys_addr_t p2size = (1 << __fls(diff)) - 1; | ||
218 | |||
219 | size = asize < p2size ? asize + 1 : p2size + 1; | ||
220 | } | ||
221 | |||
222 | if (count > limit) | ||
223 | break; | ||
224 | |||
225 | if (!attempts) | ||
226 | break; | ||
227 | |||
228 | attempts--; | ||
229 | } | ||
230 | |||
231 | return count; | ||
232 | } | ||
233 | |||
234 | /* MPU initialisation functions */ | ||
235 | void __init adjust_lowmem_bounds_mpu(void) | ||
236 | { | ||
237 | phys_addr_t specified_mem_size = 0, total_mem_size = 0; | ||
238 | struct memblock_region *reg; | ||
239 | bool first = true; | ||
240 | phys_addr_t mem_start; | ||
241 | phys_addr_t mem_end; | ||
242 | unsigned int mem_max_regions; | ||
243 | int num, i; | ||
244 | |||
245 | if (!mpu_present()) | ||
246 | return; | ||
247 | |||
248 | /* Free-up MPU_PROBE_REGION */ | ||
249 | mpu_min_region_order = __mpu_min_region_order(); | ||
250 | |||
251 | /* How many regions are supported */ | ||
252 | mpu_max_regions = __mpu_max_regions(); | ||
253 | |||
254 | mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions); | ||
255 | |||
256 | /* We need to keep one slot for background region */ | ||
257 | mem_max_regions--; | ||
258 | |||
259 | #ifndef CONFIG_CPU_V7M | ||
260 | /* ... and one for vectors */ | ||
261 | mem_max_regions--; | ||
262 | #endif | ||
263 | |||
264 | #ifdef CONFIG_XIP_KERNEL | ||
265 | /* plus some regions to cover XIP ROM */ | ||
266 | num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR, | ||
267 | mem_max_regions, xip); | ||
268 | |||
269 | mem_max_regions -= num; | ||
270 | #endif | ||
271 | |||
272 | for_each_memblock(memory, reg) { | ||
273 | if (first) { | ||
274 | phys_addr_t phys_offset = PHYS_OFFSET; | ||
275 | |||
276 | /* | ||
277 | * Initially only use memory contiguous from | ||
278 | * PHYS_OFFSET */ | ||
279 | if (reg->base != phys_offset) | ||
280 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | ||
281 | |||
282 | mem_start = reg->base; | ||
283 | mem_end = reg->base + reg->size; | ||
284 | specified_mem_size = reg->size; | ||
285 | first = false; | ||
286 | } else { | ||
287 | /* | ||
288 | * memblock auto merges contiguous blocks, remove | ||
289 | * all blocks afterwards in one go (we can't remove | ||
290 | * blocks separately while iterating) | ||
291 | */ | ||
292 | pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", | ||
293 | &mem_end, ®->base); | ||
294 | memblock_remove(reg->base, 0 - reg->base); | ||
295 | break; | ||
296 | } | ||
297 | } | ||
298 | |||
299 | num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem); | ||
300 | |||
301 | for (i = 0; i < num; i++) { | ||
302 | unsigned long subreg = mem[i].size / MPU_NR_SUBREGS; | ||
303 | |||
304 | total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg); | ||
305 | |||
306 | pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n", | ||
307 | &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg); | ||
308 | } | ||
309 | |||
310 | if (total_mem_size != specified_mem_size) { | ||
311 | pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", | ||
312 | &specified_mem_size, &total_mem_size); | ||
313 | memblock_remove(mem_start + total_mem_size, | ||
314 | specified_mem_size - total_mem_size); | ||
315 | } | ||
316 | } | ||
317 | |||
318 | static int __init __mpu_max_regions(void) | ||
319 | { | ||
320 | /* | ||
321 | * We don't support a different number of I/D side regions so if we | ||
322 | * have separate instruction and data memory maps then return | ||
323 | * whichever side has a smaller number of supported regions. | ||
324 | */ | ||
325 | u32 dregions, iregions, mpuir; | ||
326 | |||
327 | mpuir = read_cpuid_mputype(); | ||
328 | |||
329 | dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; | ||
330 | |||
331 | /* Check for separate d-side and i-side memory maps */ | ||
332 | if (mpuir & MPUIR_nU) | ||
333 | iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; | ||
334 | |||
335 | /* Use the smallest of the two maxima */ | ||
336 | return min(dregions, iregions); | ||
337 | } | ||
338 | |||
339 | static int __init mpu_iside_independent(void) | ||
340 | { | ||
341 | /* MPUIR.nU specifies whether there is *not* a unified memory map */ | ||
342 | return read_cpuid_mputype() & MPUIR_nU; | ||
343 | } | ||
344 | |||
345 | static int __init __mpu_min_region_order(void) | ||
346 | { | ||
347 | u32 drbar_result, irbar_result; | ||
348 | |||
349 | /* We've kept a region free for this probing */ | ||
350 | rgnr_write(MPU_PROBE_REGION); | ||
351 | isb(); | ||
352 | /* | ||
353 | * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum | ||
354 | * region order | ||
355 | */ | ||
356 | drbar_write(0xFFFFFFFC); | ||
357 | drbar_result = irbar_result = drbar_read(); | ||
358 | drbar_write(0x0); | ||
359 | /* If the MPU is non-unified, we use the larger of the two minima*/ | ||
360 | if (mpu_iside_independent()) { | ||
361 | irbar_write(0xFFFFFFFC); | ||
362 | irbar_result = irbar_read(); | ||
363 | irbar_write(0x0); | ||
364 | } | ||
365 | isb(); /* Ensure that MPU region operations have completed */ | ||
366 | /* Return whichever result is larger */ | ||
367 | |||
368 | return __ffs(max(drbar_result, irbar_result)); | ||
369 | } | ||
370 | |||
371 | static int __init mpu_setup_region(unsigned int number, phys_addr_t start, | ||
372 | unsigned int size_order, unsigned int properties, | ||
373 | unsigned int subregions, bool need_flush) | ||
374 | { | ||
375 | u32 size_data; | ||
376 | |||
377 | /* We kept a region free for probing the resolution of MPU regions */ | ||
378 | if (number > mpu_max_regions | ||
379 | || number >= MPU_MAX_REGIONS) | ||
380 | return -ENOENT; | ||
381 | |||
382 | if (size_order > 32) | ||
383 | return -ENOMEM; | ||
384 | |||
385 | if (size_order < mpu_min_region_order) | ||
386 | return -ENOMEM; | ||
387 | |||
388 | /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N+1) */ | ||
389 | size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; | ||
390 | size_data |= subregions << MPU_RSR_SD; | ||
391 | |||
392 | if (need_flush) | ||
393 | flush_cache_all(); | ||
394 | |||
395 | dsb(); /* Ensure all previous data accesses occur with old mappings */ | ||
396 | rgnr_write(number); | ||
397 | isb(); | ||
398 | drbar_write(start); | ||
399 | dracr_write(properties); | ||
400 | isb(); /* Propagate properties before enabling region */ | ||
401 | drsr_write(size_data); | ||
402 | |||
403 | /* Check for independent I-side registers */ | ||
404 | if (mpu_iside_independent()) { | ||
405 | irbar_write(start); | ||
406 | iracr_write(properties); | ||
407 | isb(); | ||
408 | irsr_write(size_data); | ||
409 | } | ||
410 | isb(); | ||
411 | |||
412 | /* Store region info (we treat i/d side the same, so only store d) */ | ||
413 | mpu_rgn_info.rgns[number].dracr = properties; | ||
414 | mpu_rgn_info.rgns[number].drbar = start; | ||
415 | mpu_rgn_info.rgns[number].drsr = size_data; | ||
416 | |||
417 | mpu_rgn_info.used++; | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * Set up default MPU regions, doing nothing if there is no MPU | ||
424 | */ | ||
425 | void __init mpu_setup(void) | ||
426 | { | ||
427 | int i, region = 0, err = 0; | ||
428 | |||
429 | if (!mpu_present()) | ||
430 | return; | ||
431 | |||
432 | /* Setup MPU (order is important) */ | ||
433 | |||
434 | /* Background */ | ||
435 | err |= mpu_setup_region(region++, 0, 32, | ||
436 | MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA, | ||
437 | 0, false); | ||
438 | |||
439 | #ifdef CONFIG_XIP_KERNEL | ||
440 | /* ROM */ | ||
441 | for (i = 0; i < ARRAY_SIZE(xip); i++) { | ||
442 | /* | ||
443 | * If we overwrite the RAM region set up earlier in | ||
444 | * head-nommu.S (which is cacheable), all subsequent | ||
445 | * data accesses until RAM is set up below would go | ||
446 | * through the BG region (which is uncacheable), so we | ||
447 | * need to clean and invalidate the cache. | ||
448 | */ | ||
449 | bool need_flush = region == MPU_RAM_REGION; | ||
450 | |||
451 | if (!xip[i].size) | ||
452 | continue; | ||
453 | |||
454 | err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size), | ||
455 | MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL, | ||
456 | xip[i].subreg, need_flush); | ||
457 | } | ||
458 | #endif | ||
459 | |||
460 | /* RAM */ | ||
461 | for (i = 0; i < ARRAY_SIZE(mem); i++) { | ||
462 | if (!mem[i].size) | ||
463 | continue; | ||
464 | |||
465 | err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size), | ||
466 | MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL, | ||
467 | mem[i].subreg, false); | ||
468 | } | ||
469 | |||
470 | /* Vectors */ | ||
471 | #ifndef CONFIG_CPU_V7M | ||
472 | err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE), | ||
473 | MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL, | ||
474 | 0, false); | ||
475 | #endif | ||
476 | if (err) { | ||
477 | panic("MPU region initialization failure! %d", err); | ||
478 | } else { | ||
479 | pr_info("Using ARMv7 PMSA Compliant MPU. " | ||
480 | "Region independence: %s, Used %d of %d regions\n", | ||
481 | mpu_iside_independent() ? "Yes" : "No", | ||
482 | mpu_rgn_info.used, mpu_max_regions); | ||
483 | } | ||
484 | } | ||
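To make the region-splitting logic above more concrete, here is a hedged userspace re-derivation of what try_split_region() and mpu_setup_region() end up computing for a hypothetical 96 MiB bank at physical address 0. The DRSR bit positions used below (enable at bit 0, size field at bits 5:1, subregion disables at bits 15:8) are assumed to mirror asm/mpu.h rather than taken from this patch.

/* mpu_split_demo.c - hedged userspace re-derivation of the subregion math */
#include <stdio.h>
#include <stdint.h>

#define NR_SUBREGS	8
/* bit positions assumed to mirror asm/mpu.h: EN at 0, SZ at 1, SD at 8 */
#define RSR_EN		0
#define RSR_SZ		1
#define RSR_SD		8

/* index of the highest set bit, like the kernel's __fls() */
static unsigned int fls_ul(uint64_t x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t base = 0x00000000, size = 96ULL << 20;	/* hypothetical 96 MiB bank */
	uint64_t abase = base & ~(size - 1);
	uint64_t asize = base + size - abase;
	uint64_t p2size = 1ULL << fls_ul(asize);
	uint64_t subreg, bslots, sslots;
	uint32_t sd_mask, size_order, drsr;

	if (p2size != asize)
		p2size *= 2;			/* round up to a power of two */

	subreg = p2size / NR_SUBREGS;
	bslots = (base - abase) / subreg;	/* leading subregions to disable  */
	sslots = (p2size - asize) / subreg;	/* trailing subregions to disable */

	sd_mask = ((1u << bslots) - 1) |
		  (((1u << sslots) - 1) << (NR_SUBREGS - sslots));

	size_order = fls_ul(p2size);		/* region size is 2^size_order */
	drsr = ((size_order - 1) << RSR_SZ) | (1u << RSR_EN) | (sd_mask << RSR_SD);

	printf("region base %#llx size %#llx, disable %llu leading + %llu trailing subregions, DRSR %#x\n",
	       (unsigned long long)abase, (unsigned long long)p2size,
	       (unsigned long long)bslots, (unsigned long long)sslots, drsr);
	return 0;
}

For this input it reports one 128 MiB region at 0 with the top two 16 MiB subregions disabled, which covers exactly 96 MiB.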
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index b2f82cf6bf86..58c2bbd385ad 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt | |||
@@ -34,8 +34,8 @@ config ARCH_BINFMT_ELF_STATE | |||
34 | 34 | ||
35 | config BINFMT_ELF_FDPIC | 35 | config BINFMT_ELF_FDPIC |
36 | bool "Kernel support for FDPIC ELF binaries" | 36 | bool "Kernel support for FDPIC ELF binaries" |
37 | default y | 37 | default y if !BINFMT_ELF |
38 | depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X) | 38 | depends on (ARM || FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X) |
39 | select ELFCORE | 39 | select ELFCORE |
40 | help | 40 | help |
41 | ELF FDPIC binaries are based on ELF, but allow the individual load | 41 | ELF FDPIC binaries are based on ELF, but allow the individual load |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 73b01e474fdc..e4f7ef8294ac 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -51,6 +51,11 @@ | |||
51 | #define user_siginfo_t siginfo_t | 51 | #define user_siginfo_t siginfo_t |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | /* That's for binfmt_elf_fdpic to deal with */ | ||
55 | #ifndef elf_check_fdpic | ||
56 | #define elf_check_fdpic(ex) false | ||
57 | #endif | ||
58 | |||
54 | static int load_elf_binary(struct linux_binprm *bprm); | 59 | static int load_elf_binary(struct linux_binprm *bprm); |
55 | static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, | 60 | static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, |
56 | int, int, unsigned long); | 61 | int, int, unsigned long); |
@@ -541,7 +546,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, | |||
541 | if (interp_elf_ex->e_type != ET_EXEC && | 546 | if (interp_elf_ex->e_type != ET_EXEC && |
542 | interp_elf_ex->e_type != ET_DYN) | 547 | interp_elf_ex->e_type != ET_DYN) |
543 | goto out; | 548 | goto out; |
544 | if (!elf_check_arch(interp_elf_ex)) | 549 | if (!elf_check_arch(interp_elf_ex) || |
550 | elf_check_fdpic(interp_elf_ex)) | ||
545 | goto out; | 551 | goto out; |
546 | if (!interpreter->f_op->mmap) | 552 | if (!interpreter->f_op->mmap) |
547 | goto out; | 553 | goto out; |
@@ -718,6 +724,8 @@ static int load_elf_binary(struct linux_binprm *bprm) | |||
718 | goto out; | 724 | goto out; |
719 | if (!elf_check_arch(&loc->elf_ex)) | 725 | if (!elf_check_arch(&loc->elf_ex)) |
720 | goto out; | 726 | goto out; |
727 | if (elf_check_fdpic(&loc->elf_ex)) | ||
728 | goto out; | ||
721 | if (!bprm->file->f_op->mmap) | 729 | if (!bprm->file->f_op->mmap) |
722 | goto out; | 730 | goto out; |
723 | 731 | ||
@@ -817,7 +825,8 @@ static int load_elf_binary(struct linux_binprm *bprm) | |||
817 | if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) | 825 | if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) |
818 | goto out_free_dentry; | 826 | goto out_free_dentry; |
819 | /* Verify the interpreter has a valid arch */ | 827 | /* Verify the interpreter has a valid arch */ |
820 | if (!elf_check_arch(&loc->interp_elf_ex)) | 828 | if (!elf_check_arch(&loc->interp_elf_ex) || |
829 | elf_check_fdpic(&loc->interp_elf_ex)) | ||
821 | goto out_free_dentry; | 830 | goto out_free_dentry; |
822 | 831 | ||
823 | /* Load the interpreter program headers */ | 832 | /* Load the interpreter program headers */ |
@@ -1190,6 +1199,8 @@ static int load_elf_library(struct file *file) | |||
1190 | if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || | 1199 | if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || |
1191 | !elf_check_arch(&elf_ex) || !file->f_op->mmap) | 1200 | !elf_check_arch(&elf_ex) || !file->f_op->mmap) |
1192 | goto out; | 1201 | goto out; |
1202 | if (elf_check_fdpic(&elf_ex)) | ||
1203 | goto out; | ||
1193 | 1204 | ||
1194 | /* Now read in all of the header information */ | 1205 | /* Now read in all of the header information */ |
1195 | 1206 | ||
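The elf_check_fdpic() guards added above make the regular ELF loader (and its interpreter path) refuse FDPIC images so that only binfmt_elf_fdpic handles them. On ARM the check presumably tests an FDPIC bit in e_flags; the EF_ARM_FDPIC value used below (0x1000) is an assumption for illustration, and the sketch only inspects 32-bit headers.

/* fdpic_flag.c - hedged check of an ELF header's FDPIC flag */
#include <elf.h>
#include <stdio.h>
#include <string.h>

#ifndef EF_ARM_FDPIC
#define EF_ARM_FDPIC 0x1000	/* assumed e_flags bit marking an ARM FDPIC image */
#endif

int main(int argc, char **argv)
{
	Elf32_Ehdr eh;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb"))) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS32) {
		fprintf(stderr, "%s: not a 32-bit ELF file\n", argv[1]);
		fclose(f);
		return 1;
	}
	printf("%s: e_machine %u, FDPIC: %s\n", argv[1], eh.e_machine,
	       (eh.e_flags & EF_ARM_FDPIC) ? "yes" : "no");
	fclose(f);
	return 0;
}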
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index e70c039ac190..5429b035e249 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
@@ -378,6 +378,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) | |||
378 | executable_stack); | 378 | executable_stack); |
379 | if (retval < 0) | 379 | if (retval < 0) |
380 | goto error; | 380 | goto error; |
381 | #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
382 | retval = arch_setup_additional_pages(bprm, !!interpreter_name); | ||
383 | if (retval < 0) | ||
384 | goto error; | ||
385 | #endif | ||
381 | #endif | 386 | #endif |
382 | 387 | ||
383 | /* load the executable and interpreter into memory */ | 388 | /* load the executable and interpreter into memory */ |
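Calling arch_setup_additional_pages() here brings the FDPIC loader in line with the regular ELF loader; on ARM this hook is what maps the signal-return page and, where enabled, the vdso that is advertised through the auxiliary vector. A hedged sketch that merely observes the result from userspace:

/* vdso_probe.c - observe pages installed by arch_setup_additional_pages() */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR points at the vdso, if the kernel provided one */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vdso mapped by the kernel at %#lx\n", vdso);
	else
		printf("no AT_SYSINFO_EHDR auxv entry (no vdso mapped)\n");
	return 0;
}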
@@ -831,6 +836,9 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params, | |||
831 | if (phdr->p_vaddr >= seg->p_vaddr && | 836 | if (phdr->p_vaddr >= seg->p_vaddr && |
832 | phdr->p_vaddr + phdr->p_memsz <= | 837 | phdr->p_vaddr + phdr->p_memsz <= |
833 | seg->p_vaddr + seg->p_memsz) { | 838 | seg->p_vaddr + seg->p_memsz) { |
839 | Elf32_Dyn __user *dyn; | ||
840 | Elf32_Sword d_tag; | ||
841 | |||
834 | params->dynamic_addr = | 842 | params->dynamic_addr = |
835 | (phdr->p_vaddr - seg->p_vaddr) + | 843 | (phdr->p_vaddr - seg->p_vaddr) + |
836 | seg->addr; | 844 | seg->addr; |
@@ -843,8 +851,9 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params, | |||
843 | goto dynamic_error; | 851 | goto dynamic_error; |
844 | 852 | ||
845 | tmp = phdr->p_memsz / sizeof(Elf32_Dyn); | 853 | tmp = phdr->p_memsz / sizeof(Elf32_Dyn); |
846 | if (((Elf32_Dyn *) | 854 | dyn = (Elf32_Dyn __user *)params->dynamic_addr; |
847 | params->dynamic_addr)[tmp - 1].d_tag != 0) | 855 | __get_user(d_tag, &dyn[tmp - 1].d_tag); |
856 | if (d_tag != 0) | ||
848 | goto dynamic_error; | 857 | goto dynamic_error; |
849 | break; | 858 | break; |
850 | } | 859 | } |
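The rewritten check reads the last Elf32_Dyn entry through __get_user() because the dynamic table lives in user memory at params->dynamic_addr; the invariant being enforced is simply that PT_DYNAMIC ends with a DT_NULL terminator. A small sketch checking the same invariant from userspace on a dynamically linked program, via the _DYNAMIC array the link editor provides:

/* dyn_null_check.c - verify PT_DYNAMIC ends with DT_NULL */
#include <elf.h>
#include <link.h>
#include <stdio.h>

extern ElfW(Dyn) _DYNAMIC[];	/* present in dynamically linked programs */

int main(void)
{
	size_t n = 0;

	while (_DYNAMIC[n].d_tag != DT_NULL)
		n++;
	printf("%zu dynamic tags before the DT_NULL terminator\n", n);
	return 0;
}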