author    Rabin Vincent <rabin@rab.in>  2010-08-10 14:43:28 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2010-09-02 10:27:40 -0400
commit    3b6c223b1b97ad60bbb0f4efda57d649414ac2a2 (patch)
tree      291dcb285e8cb64415a82ed1c65dc9681921a257 /arch/arm/kernel/ftrace.c
parent    f9810a82536e0c730c57844753e6c08cc7f77881 (diff)
ARM: 6318/1: ftrace: fix and update dynamic ftrace
This adds mcount recording and updates dynamic ftrace for ARM to work
with the new ftrace dynamic tracing implementation. It also adds
support for the mcount format used by newer ARM compilers.

With dynamic tracing, mcount() is implemented as a nop. Call sites are
patched on startup with nops, and dynamically patched to call the
ftrace_caller() routine as needed.

Acked-by: Steven Rostedt <rostedt@goodmis.org> [recordmcount.pl change]
Signed-off-by: Rabin Vincent <rabin@rab.in>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
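For illustration, a minimal user-space sketch of the call-site life
cycle described above (not part of the patch; patch_site() is a
hypothetical stand-in for the kernel's ftrace_modify_code(), which
additionally probes the address safely and flushes the icache, and
the BL encoding constant is an assumed example, worked through after
the diff below):

#include <assert.h>
#include <stdint.h>

#define NOP              0xe8bd4000u  /* pop {lr}: new-style mcount site */
#define BL_FTRACE_CALLER 0xeb03dffeu  /* bl ftrace_caller (example encoding) */

/* Rewrite one call site, verifying the old instruction first,
 * as ftrace_modify_code() does in the patch. */
static int patch_site(uint32_t *site, uint32_t old, uint32_t new)
{
        if (*site != old)
                return -1;      /* the kernel returns -EINVAL here */
        *site = new;
        return 0;
}

int main(void)
{
        uint32_t site = NOP;    /* at boot, every mcount call site is a nop */

        assert(patch_site(&site, NOP, BL_FTRACE_CALLER) == 0);  /* enable */
        assert(patch_site(&site, BL_FTRACE_CALLER, NOP) == 0);  /* disable */
        assert(site == NOP);
        return 0;
}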
Diffstat (limited to 'arch/arm/kernel/ftrace.c')
-rw-r--r--  arch/arm/kernel/ftrace.c | 155
1 file changed, 107 insertions(+), 48 deletions(-)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0298286ad4ad..f09014cfbf2c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -2,102 +2,161 @@
  * Dynamic function tracing support.
  *
  * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
  *
  * For licencing details, see COPYING.
  *
  * Defines low-level handling of mcount calls when the kernel
  * is compiled with the -pg flag. When using dynamic ftrace, the
- * mcount call-sites get patched lazily with NOP till they are
- * enabled. All code mutation routines here take effect atomically.
+ * mcount call-sites get patched with NOP till they are enabled.
+ * All code mutation routines here are called under stop_machine().
  */
 
 #include <linux/ftrace.h>
+#include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 
-#define PC_OFFSET      8
-#define BL_OPCODE      0xeb000000
-#define BL_OFFSET_MASK 0x00ffffff
+#define NOP             0xe8bd4000      /* pop {lr} */
 
-static unsigned long bl_insn;
-static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+#ifdef CONFIG_OLD_MCOUNT
+#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
+#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
 
-unsigned char *ftrace_nop_replace(void)
+#define OLD_NOP         0xe1a00000      /* mov r0, r0 */
+
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+        return rec->arch.old_mcount ? OLD_NOP : NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+        if (!rec->arch.old_mcount)
+                return addr;
+
+        if (addr == MCOUNT_ADDR)
+                addr = OLD_MCOUNT_ADDR;
+        else if (addr == FTRACE_ADDR)
+                addr = OLD_FTRACE_ADDR;
+
+        return addr;
+}
+#else
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+        return NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 {
-        return (char *)&NOP;
+        return addr;
 }
+#endif
 
 /* construct a branch (BL) instruction to addr */
-unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
         long offset;
 
-        offset = (long)addr - (long)(pc + PC_OFFSET);
+        offset = (long)addr - (long)(pc + 8);
         if (unlikely(offset < -33554432 || offset > 33554428)) {
                 /* Can't generate branches that far (from ARM ARM). Ftrace
                  * doesn't generate branches outside of kernel text.
                  */
                 WARN_ON_ONCE(1);
-                return NULL;
+                return 0;
         }
-        offset = (offset >> 2) & BL_OFFSET_MASK;
-        bl_insn = BL_OPCODE | offset;
-        return (unsigned char *)&bl_insn;
-}
 
-int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
-                       unsigned char *new_code)
-{
-        unsigned long err = 0, replaced = 0, old, new;
+        offset = (offset >> 2) & 0x00ffffff;
 
-        old = *(unsigned long *)old_code;
-        new = *(unsigned long *)new_code;
+        return 0xeb000000 | offset;
+}
 
-        __asm__ __volatile__ (
-                "1:  ldr    %1, [%2]  \n"
-                "    cmp    %1, %4    \n"
-                "2:  streq  %3, [%2]  \n"
-                "    cmpne  %1, %3    \n"
-                "    movne  %0, #2    \n"
-                "3:\n"
+static int ftrace_modify_code(unsigned long pc, unsigned long old,
+                              unsigned long new)
+{
+        unsigned long replaced;
 
-                ".pushsection .fixup, \"ax\"\n"
-                "4:  mov  %0, #1  \n"
-                "    b    3b      \n"
-                ".popsection\n"
+        if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+                return -EFAULT;
 
-                ".pushsection __ex_table, \"a\"\n"
-                "    .long 1b, 4b \n"
-                "    .long 2b, 4b \n"
-                ".popsection\n"
+        if (replaced != old)
+                return -EINVAL;
 
-                : "=r"(err), "=r"(replaced)
-                : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
-                : "memory");
+        if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
+                return -EPERM;
 
-        if (!err && (replaced == old))
-                flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+        flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
 
-        return err;
+        return 0;
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-        int ret;
         unsigned long pc, old;
-        unsigned char *new;
+        unsigned long new;
+        int ret;
 
         pc = (unsigned long)&ftrace_call;
         memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
         new = ftrace_call_replace(pc, (unsigned long)func);
-        ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+
+        ret = ftrace_modify_code(pc, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+        if (!ret) {
+                pc = (unsigned long)&ftrace_call_old;
+                memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
+                new = ftrace_call_replace(pc, (unsigned long)func);
+
+                ret = ftrace_modify_code(pc, old, new);
+        }
+#endif
+
+        return ret;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+        unsigned long new, old;
+        unsigned long ip = rec->ip;
+
+        old = ftrace_nop_replace(rec);
+        new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+        return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+                    struct dyn_ftrace *rec, unsigned long addr)
+{
+        unsigned long ip = rec->ip;
+        unsigned long old;
+        unsigned long new;
+        int ret;
+
+        old = ftrace_call_replace(ip, adjust_address(rec, addr));
+        new = ftrace_nop_replace(rec);
+        ret = ftrace_modify_code(ip, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+        if (ret == -EINVAL && addr == MCOUNT_ADDR) {
+                rec->arch.old_mcount = true;
+
+                old = ftrace_call_replace(ip, adjust_address(rec, addr));
+                new = ftrace_nop_replace(rec);
+                ret = ftrace_modify_code(ip, old, new);
+        }
+#endif
+
         return ret;
 }
 
-/* run from ftrace_init with irqs disabled */
 int __init ftrace_dyn_arch_init(void *data)
 {
-        ftrace_mcount_set(data);
+        *(unsigned long *)data = 0;
+
         return 0;
 }
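
For reference, the BL encoding performed by ftrace_call_replace()
above, as a standalone sketch with a worked example (the pc and addr
values are assumed purely for illustration):

#include <assert.h>
#include <stdint.h>

/* Encode "bl addr" for a call site at pc; ARM reads PC 8 bytes ahead. */
static uint32_t arm_bl(uint32_t pc, uint32_t addr)
{
        long offset = (long)addr - (long)(pc + 8);

        if (offset < -33554432 || offset > 33554428)    /* +/- 32 MB range */
                return 0;

        return 0xeb000000u | (((uint32_t)offset >> 2) & 0x00ffffff);
}

int main(void)
{
        /* offset = 0xc0100000 - (0xc0008000 + 8) = 0xf7ff8;
         * imm24  = 0xf7ff8 >> 2 = 0x3dffe
         */
        assert(arm_bl(0xc0008000u, 0xc0100000u) == 0xeb03dffeu);
        return 0;
}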