-rw-r--r--  arch/arm/include/asm/ftrace.h   |  19
-rw-r--r--  arch/arm/kernel/entry-common.S  |  37
-rw-r--r--  arch/arm/kernel/ftrace.c        | 155
-rwxr-xr-x  scripts/recordmcount.pl         |   2
4 files changed, 155 insertions, 58 deletions
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 103f7ee97313..4a56a2ee067c 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,12 +2,29 @@
 #define _ASM_ARM_FTRACE
 
 #ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR		((long)(mcount))
+#define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern void __gnu_mcount_nc(void);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+struct dyn_arch_ftrace {
+#ifdef CONFIG_OLD_MCOUNT
+	bool	old_mcount;
+#endif
+};
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+extern void ftrace_caller_old(void);
+extern void ftrace_call_old(void);
+#endif
+
 #endif
 
 #endif
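The dyn_arch_ftrace structure added here is embedded by the ftrace core into the record it keeps for every mcount call site, so the old_mcount flag set in arch/arm/kernel/ftrace.c below travels with each patched location. A rough sketch of that relationship, assuming a simplified layout of struct dyn_ftrace from <linux/ftrace.h> of that era (the sketch is illustrative, not part of this patch):

/* Sketch only: a stripped-down view of the per-call-site record.  The real
 * definition lives in include/linux/ftrace.h; the point is that 'arch'
 * gives architecture code a private per-site slot. */
struct dyn_arch_ftrace {
	bool old_mcount;		/* site uses the old mcount ABI */
};

struct dyn_ftrace {
	unsigned long ip;		/* address of the mcount call site */
	unsigned long flags;		/* core-managed state bits */
	struct dyn_arch_ftrace arch;	/* per-arch data, e.g. old_mcount */
};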
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f5e75de0203e..e02790f28879 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -127,6 +127,10 @@ ENDPROC(ret_from_fork)
  * clobber the ip register. This is OK because the ARM calling convention
  * allows it to be clobbered in subroutines and doesn't use it to hold
  * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
  */
 
 #ifndef CONFIG_OLD_MCOUNT
@@ -136,30 +140,45 @@ ENDPROC(ret_from_fork)
 #endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+ENTRY(__gnu_mcount_nc)
+	mov	ip, lr
+	ldmia	sp!, {lr}
+	mov	pc, ip
+ENDPROC(__gnu_mcount_nc)
+
+ENTRY(ftrace_caller)
 	stmdb	sp!, {r0-r3, lr}
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
+	ldr	r1, [sp, #20]
 
-	.globl mcount_call
-mcount_call:
+	.global ftrace_call
+ftrace_call:
 	bl	ftrace_stub
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
+ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_OLD_MCOUNT
+ENTRY(mcount)
+	stmdb	sp!, {lr}
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {pc}
 ENDPROC(mcount)
 
-ENTRY(ftrace_caller)
+ENTRY(ftrace_caller_old)
 	stmdb	sp!, {r0-r3, lr}
 	ldr	r1, [fp, #-4]
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
 
-	.globl ftrace_call
-ftrace_call:
+	.globl ftrace_call_old
+ftrace_call_old:
 	bl	ftrace_stub
 	ldr	lr, [fp, #-4]			@ restore lr
 	ldmia	sp!, {r0-r3, pc}
-ENDPROC(ftrace_caller)
+ENDPROC(ftrace_caller_old)
+#endif
 
 #else
 
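The comment added to entry-common.S above explains the two NOP encodings used later in ftrace.c: with the newer EABI profiling hook, the compiler is expected to emit a "push {lr}" immediately before "bl __gnu_mcount_nc", so whatever replaces a disabled call must still drop that saved lr from the stack, while an old-style mcount site can simply be overwritten with a do-nothing "mov r0, r0". A minimal sketch of that choice, assuming the usual GCC -pg call-site shapes (constant and function names here are illustrative; the values match the NOP/OLD_NOP definitions in the ftrace.c hunk below):

#include <stdbool.h>
#include <stdint.h>

/*
 * Assumed call-site shapes (compiler output, not part of this patch):
 *
 *   __gnu_mcount_nc site:            old mcount site:
 *       push  {lr}                       ...APCS frame setup...
 *       bl    __gnu_mcount_nc            bl    mcount
 *
 * Only the bl is rewritten when tracing is disabled, so the replacement
 * must keep the stack balanced in the __gnu_mcount_nc case.
 */
#define ARM_POP_LR	0xe8bd4000u	/* pop {lr}: undoes the push {lr} */
#define ARM_MOV_R0_R0	0xe1a00000u	/* mov r0, r0: plain no-op */

static uint32_t nop_for_site(bool old_mcount)
{
	return old_mcount ? ARM_MOV_R0_R0 : ARM_POP_LR;
}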
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0298286ad4ad..f09014cfbf2c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -2,102 +2,161 @@
  * Dynamic function tracing support.
  *
  * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
  *
  * For licencing details, see COPYING.
  *
  * Defines low-level handling of mcount calls when the kernel
  * is compiled with the -pg flag. When using dynamic ftrace, the
- * mcount call-sites get patched lazily with NOP till they are
- * enabled. All code mutation routines here take effect atomically.
+ * mcount call-sites get patched with NOP till they are enabled.
+ * All code mutation routines here are called under stop_machine().
  */
 
 #include <linux/ftrace.h>
+#include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 
-#define PC_OFFSET	8
-#define BL_OPCODE	0xeb000000
-#define BL_OFFSET_MASK	0x00ffffff
+#define	NOP		0xe8bd4000	/* pop {lr} */
 
-static unsigned long bl_insn;
-static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+#ifdef CONFIG_OLD_MCOUNT
+#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
+#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
 
-unsigned char *ftrace_nop_replace(void)
+#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
+
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+	return rec->arch.old_mcount ? OLD_NOP : NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+	if (!rec->arch.old_mcount)
+		return addr;
+
+	if (addr == MCOUNT_ADDR)
+		addr = OLD_MCOUNT_ADDR;
+	else if (addr == FTRACE_ADDR)
+		addr = OLD_FTRACE_ADDR;
+
+	return addr;
+}
+#else
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+	return NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 {
-	return (char *)&NOP;
+	return addr;
 }
+#endif
 
 /* construct a branch (BL) instruction to addr */
-unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
 	long offset;
 
-	offset = (long)addr - (long)(pc + PC_OFFSET);
+	offset = (long)addr - (long)(pc + 8);
 	if (unlikely(offset < -33554432 || offset > 33554428)) {
 		/* Can't generate branches that far (from ARM ARM). Ftrace
 		 * doesn't generate branches outside of kernel text.
 		 */
 		WARN_ON_ONCE(1);
-		return NULL;
+		return 0;
 	}
-	offset = (offset >> 2) & BL_OFFSET_MASK;
-	bl_insn = BL_OPCODE | offset;
-	return (unsigned char *)&bl_insn;
-}
 
-int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
-		       unsigned char *new_code)
-{
-	unsigned long err = 0, replaced = 0, old, new;
+	offset = (offset >> 2) & 0x00ffffff;
 
-	old = *(unsigned long *)old_code;
-	new = *(unsigned long *)new_code;
+	return 0xeb000000 | offset;
+}
 
-	__asm__ __volatile__ (
-		"1: ldr %1, [%2] \n"
-		"   cmp %1, %4 \n"
-		"2: streq %3, [%2] \n"
-		"   cmpne %1, %3 \n"
-		"   movne %0, #2 \n"
-		"3:\n"
+static int ftrace_modify_code(unsigned long pc, unsigned long old,
+			      unsigned long new)
+{
+	unsigned long replaced;
 
-		".pushsection .fixup, \"ax\"\n"
-		"4: mov %0, #1 \n"
-		"   b 3b \n"
-		".popsection\n"
+	if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+		return -EFAULT;
 
-		".pushsection __ex_table, \"a\"\n"
-		"   .long 1b, 4b \n"
-		"   .long 2b, 4b \n"
-		".popsection\n"
+	if (replaced != old)
+		return -EINVAL;
 
-		: "=r"(err), "=r"(replaced)
-		: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
-		: "memory");
+	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
+		return -EPERM;
 
-	if (!err && (replaced == old))
-		flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
 
-	return err;
+	return 0;
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	int ret;
 	unsigned long pc, old;
-	unsigned char *new;
+	unsigned long new;
+	int ret;
 
 	pc = (unsigned long)&ftrace_call;
 	memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
 	new = ftrace_call_replace(pc, (unsigned long)func);
-	ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+
+	ret = ftrace_modify_code(pc, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (!ret) {
+		pc = (unsigned long)&ftrace_call_old;
+		memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
+		new = ftrace_call_replace(pc, (unsigned long)func);
+
+		ret = ftrace_modify_code(pc, old, new);
+	}
+#endif
+
+	return ret;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long new, old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace(rec);
+	new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip = rec->ip;
+	unsigned long old;
+	unsigned long new;
+	int ret;
+
+	old = ftrace_call_replace(ip, adjust_address(rec, addr));
+	new = ftrace_nop_replace(rec);
+	ret = ftrace_modify_code(ip, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
+		rec->arch.old_mcount = true;
+
+		old = ftrace_call_replace(ip, adjust_address(rec, addr));
+		new = ftrace_nop_replace(rec);
+		ret = ftrace_modify_code(ip, old, new);
+	}
+#endif
+
 	return ret;
 }
 
-/* run from ftrace_init with irqs disabled */
 int __init ftrace_dyn_arch_init(void *data)
 {
-	ftrace_mcount_set(data);
+	*(unsigned long *)data = 0;
+
 	return 0;
 }
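ftrace_call_replace() above builds an ARM BL instruction by hand: 0xeb000000 is the BL opcode, and the low 24 bits hold a signed word offset measured from pc + 8 (the ARM pipeline prefetch), which is where the -33554432..33554428 range check (roughly +/-32 MB) comes from. A standalone userspace sketch of the same arithmetic, with made-up addresses purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same encoding as ftrace_call_replace(): immediate = (target - (pc + 8)) / 4,
 * kept to 24 bits. */
static uint32_t arm_bl_encode(uint32_t pc, uint32_t addr)
{
	int32_t offset = (int32_t)addr - (int32_t)(pc + 8);

	if (offset < -33554432 || offset > 33554428)
		return 0;	/* out of range, mirrors the WARN_ON_ONCE() path */

	return 0xeb000000u | (((uint32_t)offset >> 2) & 0x00ffffff);
}

int main(void)
{
	/* Hypothetical addresses, chosen only to show the arithmetic. */
	uint32_t pc = 0xc0008000u, target = 0xc0100000u;

	/* offset = 0xc0100000 - 0xc0008008 = 0xf7ff8 bytes = 0x3dffe words,
	 * so the result printed here is 0xeb03dffe. */
	printf("bl: 0x%08x\n", arm_bl_encode(pc, target));
	return 0;
}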
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index e67f05486087..022d4679b1b3 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -270,6 +270,8 @@ if ($arch eq "x86_64") {
 } elsif ($arch eq "arm") {
     $alignment = 2;
     $section_type = '%progbits';
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_ARM_(CALL|PC24)" .
+			"\\s+(__gnu_mcount_nc|mcount)\$";
 
 } elsif ($arch eq "ia64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";