author    Linus Torvalds <torvalds@linux-foundation.org>  2017-07-08 15:17:25 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-07-08 15:17:25 -0400
commit    09b56d5a418b7ced4ca427c7cf8faf11df72364c (patch)
tree      88ad1fe20f8fd7df89131cef5c7f8ef96f15406d
parent    fe1b518075d86976db3a93e7e8b640d24d477519 (diff)
parent    98becb781e3e27d74efe5f3653b948d39f694cfb (diff)
Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:

 - add support for ftrace-with-registers, which is needed for kgraft
   and other ftrace tools

 - support for mremap() for the sigpage/vDSO so that checkpoint/restore
   can work

 - add timestamps to each line of the register dump output

 - remove the unused KTHREAD_SIZE from nommu

 - align the ARM bitops APIs with the generic API (using unsigned long
   pointers rather than void pointers)

 - make the configuration of userspace Thumb support an expert option
   so that we can default it on, and avoid some hard to debug userspace
   crashes

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8684/1: NOMMU: Remove unused KTHREAD_SIZE definition
  ARM: 8683/1: ARM32: Support mremap() for sigpage/vDSO
  ARM: 8679/1: bitops: Align prototypes to generic API
  ARM: 8678/1: ftrace: Adds support for CONFIG_DYNAMIC_FTRACE_WITH_REGS
  ARM: make configuration of userspace Thumb support an expert option
  ARM: 8673/1: Fix __show_regs output timestamps
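As a minimal sketch (not part of this merge) of what the new DYNAMIC_FTRACE_WITH_REGS
support enables: an ftrace_ops callback that asks for the traced function's full
pt_regs, which is the mechanism kgraft/livepatch-style tools build on. The probe
and the filtered symbol name ("do_sys_open") are purely illustrative.

	#include <linux/ftrace.h>
	#include <linux/module.h>
	#include <linux/string.h>

	/* Runs from the ftrace trampoline, with the caller's register state. */
	static void regs_probe(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct pt_regs *regs)
	{
		/*
		 * With FTRACE_OPS_FL_SAVE_REGS, "regs" holds the instrumented
		 * function's registers; a live patcher could rewrite the PC
		 * here to redirect execution.
		 */
		pr_info_ratelimited("hit %ps (called from %ps)\n",
				    (void *)ip, (void *)parent_ip);
	}

	static struct ftrace_ops regs_ops = {
		.func	= regs_probe,
		.flags	= FTRACE_OPS_FL_SAVE_REGS, /* needs DYNAMIC_FTRACE_WITH_REGS */
	};

	static int __init regs_probe_init(void)
	{
		int ret;

		/* Symbol name is illustrative; any traceable function works. */
		ret = ftrace_set_filter(&regs_ops, "do_sys_open",
					strlen("do_sys_open"), 0);
		if (ret)
			return ret;

		return register_ftrace_function(&regs_ops);
	}

	static void __exit regs_probe_exit(void)
	{
		unregister_ftrace_function(&regs_ops);
	}

	module_init(regs_probe_init);
	module_exit(regs_probe_exit);
	MODULE_LICENSE("GPL");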
-rw-r--r--  arch/arm/Kconfig                   |   1
-rw-r--r--  arch/arm/include/asm/bitops.h      |   8
-rw-r--r--  arch/arm/include/asm/ftrace.h      |   4
-rw-r--r--  arch/arm/include/asm/page-nommu.h  |   6
-rw-r--r--  arch/arm/kernel/entry-ftrace.S     | 100
-rw-r--r--  arch/arm/kernel/ftrace.c           |  37
-rw-r--r--  arch/arm/kernel/process.c          |  16
-rw-r--r--  arch/arm/kernel/vdso.c             |  18
-rw-r--r--  arch/arm/mm/Kconfig                |   6
-rw-r--r--  arch/x86/entry/vdso/vma.c          |   3
-rw-r--r--  mm/mmap.c                          |   4
11 files changed, 185 insertions, 18 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0b731e8ab17e..a208bfe367b5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -58,6 +58,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e943e6cee254..f308c8c40cb9 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -159,16 +159,16 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
 extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 
 /*
  * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
 extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 22b73112b75f..f379881d5cc3 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -1,6 +1,10 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 503f488053de..8f2c47bec375 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -11,12 +11,6 @@
 #ifndef _ASMARM_PAGE_NOMMU_H
 #define _ASMARM_PAGE_NOMMU_H
 
-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
-
 #define clear_page(page)	memset((page), 0, PAGE_SIZE)
 #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
 
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index c73c4030ca5d..efcd9f25a14b 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -92,12 +92,95 @@
 2:	mcount_exit
 .endm
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+.macro __ftrace_regs_caller
+
+	sub	sp, sp, #8	@ space for PC and CPSR OLD_R0,
+				@ OLD_R0 will overwrite previous LR
+
+	add	ip, sp, #12	@ move in IP the value of SP as it was
+				@ before the push {lr} of the mcount mechanism
+
+	str	lr, [sp, #0]	@ store LR instead of PC
+
+	ldr	lr, [sp, #8]	@ get previous LR
+
+	str	r0, [sp, #8]	@ write r0 as OLD_R0 over previous LR
+
+	stmdb	sp!, {ip, lr}
+	stmdb	sp!, {r0-r11, lr}
+
+	@ stack content at this point:
+	@ 0 4 48 52 56 60 64 68 72
+	@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
+
+	mov	r3, sp				@ struct pt_regs*
+
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]			@ pointer to the current
+						@ function tracing op
+
+	ldr	r1, [sp, #S_LR]			@ lr of instrumented func
+
+	ldr	lr, [sp, #S_PC]			@ get LR
+
+	mcount_adjust_addr	r0, lr		@ instrumented function
+
+	.globl ftrace_regs_call
+ftrace_regs_call:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_regs_call
+ftrace_graph_regs_call:
+	mov	r0, r0
+#endif
+
+	@ pop saved regs
+	ldmia	sp!, {r0-r12}			@ restore r0 through r12
+	ldr	ip, [sp, #8]			@ restore PC
+	ldr	lr, [sp, #4]			@ restore LR
+	ldr	sp, [sp, #0]			@ restore SP
+	mov	pc, ip				@ return
+.endm
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.macro __ftrace_graph_regs_caller
+
+	sub	r0, fp, #4		@ lr of instrumented routine (parent)
+
+	@ called from __ftrace_regs_caller
+	ldr	r1, [sp, #S_PC]		@ instrumented routine (func)
+	mcount_adjust_addr	r1, r1
+
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+
+	@ pop registers saved in ftrace_regs_caller
+	ldmia	sp!, {r0-r12}			@ restore r0 through r12
+	ldr	ip, [sp, #8]			@ restore PC
+	ldr	lr, [sp, #4]			@ restore LR
+	ldr	sp, [sp, #0]			@ restore SP
+	mov	pc, ip				@ return
+
+.endm
+#endif
+#endif
+
 .macro __ftrace_caller suffix
 	mcount_enter
 
 	mcount_get_lr	r1			@ lr of instrumented func
 	mcount_adjust_addr	r0, lr		@ instrumented function
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]			@ pointer to the current
+						@ function tracing op
+	mov	r3, #0				@ regs is NULL
+#endif
+
 	.globl ftrace_call\suffix
 ftrace_call\suffix:
 	bl	ftrace_stub
@@ -212,6 +295,15 @@ UNWIND(.fnstart)
 	__ftrace_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_regs_caller)
+#endif
+
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -220,6 +312,14 @@ UNWIND(.fnstart)
 	__ftrace_graph_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_graph_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_graph_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_regs_caller)
+#endif
 #endif
 
 .purgem mcount_enter
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 833c991075a1..5617932a83df 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 	ret = ftrace_modify_code(pc, 0, new, false);
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret) {
+		pc = (unsigned long)&ftrace_regs_call;
+		new = ftrace_call_replace(pc, (unsigned long)func);
+
+		ret = ftrace_modify_code(pc, 0, new, false);
+	}
+#endif
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret) {
 		pc = (unsigned long)&ftrace_call_old;
@@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned long ip = rec->ip;
 
 	old = ftrace_nop_replace(rec);
+
+	new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+	return ftrace_modify_code(rec->ip, old, new, true);
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		       unsigned long addr)
+{
+	unsigned long new, old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+
 	new = ftrace_call_replace(ip, adjust_address(rec, addr));
 
 	return ftrace_modify_code(rec->ip, old, new, true);
 }
 
+#endif
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 extern unsigned long ftrace_graph_call;
 extern unsigned long ftrace_graph_call_old;
 extern void ftrace_graph_caller_old(void);
+extern unsigned long ftrace_graph_regs_call;
+extern void ftrace_graph_regs_caller(void);
 
 static int __ftrace_modify_caller(unsigned long *callsite,
 				  void (*func) (void), bool enable)
@@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
 				     ftrace_graph_caller,
 				     enable);
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret)
+		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
+					     ftrace_graph_regs_caller,
+					     enable);
+#endif
+
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret)
 		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 939e8b58c59d..d96714e1858c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -123,10 +123,10 @@ void __show_regs(struct pt_regs *regs)
 
 	print_symbol("PC is at %s\n", instruction_pointer(regs));
 	print_symbol("LR is at %s\n", regs->ARM_lr);
-	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
-	       "sp : %08lx ip : %08lx fp : %08lx\n",
-	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
+	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n",
+	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
+	printk("sp : %08lx ip : %08lx fp : %08lx\n",
+	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
 	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
 	       regs->ARM_r10, regs->ARM_r9,
 	       regs->ARM_r8);
@@ -404,9 +404,17 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
 static struct page *signal_page;
 extern struct page *get_signal_page(void);
 
+static int sigpage_mremap(const struct vm_special_mapping *sm,
+		struct vm_area_struct *new_vma)
+{
+	current->mm->context.sigpage = new_vma->vm_start;
+	return 0;
+}
+
 static const struct vm_special_mapping sigpage_mapping = {
 	.name = "[sigpage]",
 	.pages = &signal_page,
+	.mremap = sigpage_mremap,
 };
 
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 53cf86cf2d1a..a4d6dc0f2427 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -54,8 +54,26 @@ static const struct vm_special_mapping vdso_data_mapping = {
 	.pages = &vdso_data_page,
 };
 
+static int vdso_mremap(const struct vm_special_mapping *sm,
+		struct vm_area_struct *new_vma)
+{
+	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+	unsigned long vdso_size;
+
+	/* without VVAR page */
+	vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
+
+	if (vdso_size != new_size)
+		return -EINVAL;
+
+	current->mm->context.vdso = new_vma->vm_start;
+
+	return 0;
+}
+
 static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
 	.name = "[vdso]",
+	.mremap = vdso_mremap,
 };
 
 struct elfinfo {
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 877a0e3fd17d..60cdfdc151aa 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -679,7 +679,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	bool
 
 config ARM_THUMB
-	bool "Support Thumb user binaries" if !CPU_THUMBONLY
+	bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
 	depends on CPU_THUMB_CAPABLE
 	default y
 	help
@@ -690,6 +690,10 @@ config ARM_THUMB
 	  instruction set resulting in smaller binaries at the expense of
 	  slightly less efficient code.
 
+	  If this option is disabled, and you run userspace that switches to
+	  Thumb mode, signal handling will not work correctly, resulting in
+	  segmentation faults or illegal instruction aborts.
+
 	  If you don't know what this all is, saying Y is a safe choice.
 
 config ARM_THUMBEE
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 139ad7726e10..726355ce8497 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -78,9 +78,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 	if (image->size != new_size)
 		return -EINVAL;
 
-	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
-		return -EFAULT;
-
 	vdso_fix_landing(image, new_vma);
 	current->mm->context.vdso = (void __user *)new_vma->vm_start;
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 5a0ba9788cdd..7f8cfe9d9b4d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3186,8 +3186,12 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma)
 {
 	struct vm_special_mapping *sm = new_vma->vm_private_data;
 
+	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+		return -EFAULT;
+
 	if (sm->mremap)
 		return sm->mremap(sm, new_vma);
+
 	return 0;
 }
 
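For reference, a condensed, self-contained sketch of the hook that
special_mapping_mremap() now dispatches to, following the pattern of the ARM
sigpage/vDSO patches above. The mapping name, mypage_mremap and the global
address variable are illustrative only; real arch code keeps the address in
the per-mm context (e.g. mm->context.sigpage) rather than a global.

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/mm_types.h>

	static struct page *my_page;		/* populated during arch init */
	static unsigned long my_page_addr;	/* illustrative; use mm->context in real code */

	/* Invoked by special_mapping_mremap() when userspace moves the VMA. */
	static int mypage_mremap(const struct vm_special_mapping *sm,
				 struct vm_area_struct *new_vma)
	{
		my_page_addr = new_vma->vm_start;	/* track the relocated mapping */
		return 0;
	}

	static const struct vm_special_mapping mypage_mapping = {
		.name	= "[mypage]",
		.pages	= &my_page,
		.mremap	= mypage_mremap,
	};

	/* Typically called from arch_setup_additional_pages() with mmap_sem held. */
	static int install_mypage(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;

		vma = _install_special_mapping(mm, addr, PAGE_SIZE,
					       VM_READ | VM_MAYREAD,
					       &mypage_mapping);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		my_page_addr = addr;
		return 0;
	}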