aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips
diff options
context:
space:
mode:
authorPetri Gynther <pgynther@google.com>2014-07-24 01:55:02 -0400
committerRalf Baechle <ralf@linux-mips.org>2014-07-30 14:37:42 -0400
commitdce0e7d54a710400c0056d86d0f0ed972133b25d (patch)
tree09c4e4a9ff99b1d11150e62c21f519264513254c /arch/mips
parentb1442d39fac2fcfbe6a4814979020e993ca59c9e (diff)
MIPS: ftrace: Fix dynamic tracing of kernel modules
Dynamic tracing of kernel modules is broken on 32-bit MIPS. When modules are loaded, the kernel crashes when dynamic tracing is enabled with:

cd /sys/kernel/debug/tracing
echo > set_ftrace_filter
echo function > current_tracer

1) arch/mips/kernel/ftrace.c

When the kernel boots, or when a module is initialized, ftrace_make_nop() modifies every _mcount call site to eliminate the ftrace overhead. However, when ftrace is later enabled for a call site, ftrace_make_call() does not currently restore the _mcount call correctly for module call sites. Added ftrace_modify_code_2r() and modified ftrace_make_call() to fix this.

2) arch/mips/kernel/mcount.S

_mcount assembly routine is supposed to have the caller's _mcount call site address in register a0. However, a0 is currently not calculated correctly for module call sites. a0 should be (ra - 20) or (ra - 24), depending on whether the kernel was built with KBUILD_MCOUNT_RA_ADDRESS or not.

This fix has been tested on Broadcom BMIPS5000 processor. Dynamic tracing now works for both built-in functions and module functions.

Signed-off-by: Petri Gynther <pgynther@google.com>
Cc: linux-mips@linux-mips.org
Cc: rostedt@goodmis.org
Cc: alcooperx@gmail.com
Cc: cminyard@mvista.com
Patchwork: https://patchwork.linux-mips.org/patch/7476/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/kernel/ftrace.c  56
-rw-r--r--  arch/mips/kernel/mcount.S  13
2 files changed, 60 insertions(+), 9 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 60e7e5e45af1..2a72208e319c 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -63,7 +63,7 @@ static inline int in_kernel_space(unsigned long ip)
63 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) 63 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
64 64
65static unsigned int insn_jal_ftrace_caller __read_mostly; 65static unsigned int insn_jal_ftrace_caller __read_mostly;
66static unsigned int insn_lui_v1_hi16_mcount __read_mostly; 66static unsigned int insn_la_mcount[2] __read_mostly;
67static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly; 67static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
68 68
69static inline void ftrace_dyn_arch_init_insns(void) 69static inline void ftrace_dyn_arch_init_insns(void)
@@ -71,10 +71,10 @@ static inline void ftrace_dyn_arch_init_insns(void)
71 u32 *buf; 71 u32 *buf;
72 unsigned int v1; 72 unsigned int v1;
73 73
74 /* lui v1, hi16_mcount */ 74 /* la v1, _mcount */
75 v1 = 3; 75 v1 = 3;
76 buf = (u32 *)&insn_lui_v1_hi16_mcount; 76 buf = (u32 *)&insn_la_mcount[0];
77 UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR); 77 UASM_i_LA(&buf, v1, MCOUNT_ADDR);
78 78
79 /* jal (ftrace_caller + 8), jump over the first two instruction */ 79 /* jal (ftrace_caller + 8), jump over the first two instruction */
80 buf = (u32 *)&insn_jal_ftrace_caller; 80 buf = (u32 *)&insn_jal_ftrace_caller;
@@ -111,14 +111,47 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
111 unsigned int new_code2) 111 unsigned int new_code2)
112{ 112{
113 int faulted; 113 int faulted;
114 mm_segment_t old_fs;
114 115
115 safe_store_code(new_code1, ip, faulted); 116 safe_store_code(new_code1, ip, faulted);
116 if (unlikely(faulted)) 117 if (unlikely(faulted))
117 return -EFAULT; 118 return -EFAULT;
118 safe_store_code(new_code2, ip + 4, faulted); 119
120 ip += 4;
121 safe_store_code(new_code2, ip, faulted);
119 if (unlikely(faulted)) 122 if (unlikely(faulted))
120 return -EFAULT; 123 return -EFAULT;
124
125 ip -= 4;
126 old_fs = get_fs();
127 set_fs(get_ds());
121 flush_icache_range(ip, ip + 8); 128 flush_icache_range(ip, ip + 8);
129 set_fs(old_fs);
130
131 return 0;
132}
133
134static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
135 unsigned int new_code2)
136{
137 int faulted;
138 mm_segment_t old_fs;
139
140 ip += 4;
141 safe_store_code(new_code2, ip, faulted);
142 if (unlikely(faulted))
143 return -EFAULT;
144
145 ip -= 4;
146 safe_store_code(new_code1, ip, faulted);
147 if (unlikely(faulted))
148 return -EFAULT;
149
150 old_fs = get_fs();
151 set_fs(get_ds());
152 flush_icache_range(ip, ip + 8);
153 set_fs(old_fs);
154
122 return 0; 155 return 0;
123} 156}
124#endif 157#endif
@@ -130,13 +163,14 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
130 * 163 *
131 * move at, ra 164 * move at, ra
132 * jal _mcount --> nop 165 * jal _mcount --> nop
166 * sub sp, sp, 8 --> nop (CONFIG_32BIT)
133 * 167 *
134 * 2. For modules: 168 * 2. For modules:
135 * 169 *
136 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT 170 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
137 * 171 *
138 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) 172 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
139 * addiu v1, v1, low_16bit_of_mcount 173 * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
140 * move at, ra 174 * move at, ra
141 * move $12, ra_address 175 * move $12, ra_address
142 * jalr v1 176 * jalr v1
@@ -145,7 +179,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
145 * 2.2 For the Other situations 179 * 2.2 For the Other situations
146 * 180 *
147 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) 181 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
148 * addiu v1, v1, low_16bit_of_mcount 182 * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
149 * move at, ra 183 * move at, ra
150 * jalr v1 184 * jalr v1
151 * nop | move $12, ra_address | sub sp, sp, 8 185 * nop | move $12, ra_address | sub sp, sp, 8
@@ -184,10 +218,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
184 unsigned int new; 218 unsigned int new;
185 unsigned long ip = rec->ip; 219 unsigned long ip = rec->ip;
186 220
187 new = in_kernel_space(ip) ? insn_jal_ftrace_caller : 221 new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
188 insn_lui_v1_hi16_mcount;
189 222
223#ifdef CONFIG_64BIT
190 return ftrace_modify_code(ip, new); 224 return ftrace_modify_code(ip, new);
225#else
226 return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
227 INSN_NOP : insn_la_mcount[1]);
228#endif
191} 229}
192 230
193#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call)) 231#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 539b6294b613..26ceb3c0eca9 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -84,6 +84,19 @@ _mcount:
84#endif 84#endif
85 85
86 PTR_SUBU a0, ra, 8 /* arg1: self address */ 86 PTR_SUBU a0, ra, 8 /* arg1: self address */
87 PTR_LA t1, _stext
88 sltu t2, a0, t1 /* t2 = (a0 < _stext) */
89 PTR_LA t1, _etext
90 sltu t3, t1, a0 /* t3 = (a0 > _etext) */
91 or t1, t2, t3
92 beqz t1, ftrace_call
93 nop
94#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
95 PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */
96#else
97 PTR_SUBU a0, a0, 12
98#endif
99
87 .globl ftrace_call 100 .globl ftrace_call
88ftrace_call: 101ftrace_call:
89 nop /* a placeholder for the call to a real tracing function */ 102 nop /* a placeholder for the call to a real tracing function */