aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWu Zhangjin <wuzhangjin@gmail.com>2011-01-19 14:28:30 -0500
committerRalf Baechle <ralf@linux-mips.org>2011-03-14 16:07:24 -0400
commit2816e325969396af5bd1d5f70c7360074ae1d63c (patch)
tree229cc2f5bed2e39ac1ff8ae46b9d1e82b2060b32
parentd9cdb2f1038143c945fcb1a366aae4fa2998419e (diff)
MIPS, Tracing: Clean up prepare_ftrace_return()
The old prepare_ftrace_return() for MIPS was confusing and had introduced some problems. This patch cleans up the names of the arguments, variables and related functions. For MIPS, the 2nd argument of prepare_ftrace_return() is not really the 'selfpc' described in ftrace-design.txt but instead the self return address. This breaks the compatibility of the generic interface but removes one unneeded calculation, since to get the current function name, the parent return address and the self return address are enough; there is no need to transform the self return address into the self address. However, set_graph_function of the function graph tracer is an exception: it does need the 2nd argument of prepare_ftrace_return() as 'selfpc', for it uses 'selfpc' to match the user's configuration of function graph entries. In reality, though, it doesn't need the 'selfpc' but rather the recorded ip address of the mcount calling site in the __mcount_loc section. So the 2nd argument of prepare_ftrace_return() is not important; the real requirement is that the right recorded ip address be calculated and assigned to trace.func. This will be fixed in the next patches. Reported-by: Zhiping Zhong <xzhong86@163.com> Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com> Cc: Steven Rostedt <srostedt@redhat.com> Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2007/ Signed-off-by: Ralf Baechle <ralf@duck.linux-mips.net>
-rw-r--r--arch/mips/kernel/ftrace.c52
1 file changed, 25 insertions, 27 deletions
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5970286131dd..40ef34ccb76b 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -190,21 +190,19 @@ int ftrace_disable_ftrace_graph_caller(void)
190#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ 190#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
191#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ 191#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
192 192
193unsigned long ftrace_get_parent_addr(unsigned long self_addr, 193unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
194 unsigned long parent, 194 old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
195 unsigned long parent_addr,
196 unsigned long fp)
197{ 195{
198 unsigned long sp, ip, ra; 196 unsigned long sp, ip, tmp;
199 unsigned int code; 197 unsigned int code;
200 int faulted; 198 int faulted;
201 199
202 /* 200 /*
203 * For module, move the ip from calling site of mcount after the 201 * For module, move the ip from the return address after the
204 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for 202 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
205 * kernel, move after the instruction "move ra, at"(offset is 16) 203 * kernel, move after the instruction "move ra, at"(offset is 16)
206 */ 204 */
207 ip = self_addr - (in_kernel_space(self_addr) ? 16 : 24); 205 ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
208 206
209 /* 207 /*
210 * search the text until finding the non-store instruction or "s{d,w} 208 * search the text until finding the non-store instruction or "s{d,w}
@@ -222,7 +220,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
222 * store the ra on the stack 220 * store the ra on the stack
223 */ 221 */
224 if ((code & S_R_SP) != S_R_SP) 222 if ((code & S_R_SP) != S_R_SP)
225 return parent_addr; 223 return parent_ra_addr;
226 224
227 /* Move to the next instruction */ 225 /* Move to the next instruction */
228 ip -= 4; 226 ip -= 4;
@@ -230,12 +228,12 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
230 228
231 sp = fp + (code & OFFSET_MASK); 229 sp = fp + (code & OFFSET_MASK);
232 230
233 /* ra = *(unsigned long *)sp; */ 231 /* tmp = *(unsigned long *)sp; */
234 safe_load_stack(ra, sp, faulted); 232 safe_load_stack(tmp, sp, faulted);
235 if (unlikely(faulted)) 233 if (unlikely(faulted))
236 return 0; 234 return 0;
237 235
238 if (ra == parent) 236 if (tmp == old_parent_ra)
239 return sp; 237 return sp;
240 return 0; 238 return 0;
241} 239}
@@ -246,10 +244,10 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
246 * Hook the return address and push it in the stack of return addrs 244 * Hook the return address and push it in the stack of return addrs
247 * in current thread info. 245 * in current thread info.
248 */ 246 */
249void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 247void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
250 unsigned long fp) 248 unsigned long fp)
251{ 249{
252 unsigned long old; 250 unsigned long old_parent_ra;
253 struct ftrace_graph_ent trace; 251 struct ftrace_graph_ent trace;
254 unsigned long return_hooker = (unsigned long) 252 unsigned long return_hooker = (unsigned long)
255 &return_to_handler; 253 &return_to_handler;
@@ -259,8 +257,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
259 return; 257 return;
260 258
261 /* 259 /*
262 * "parent" is the stack address saved the return address of the caller 260 * "parent_ra_addr" is the stack address saved the return address of
263 * of _mcount. 261 * the caller of _mcount.
264 * 262 *
265 * if the gcc < 4.5, a leaf function does not save the return address 263 * if the gcc < 4.5, a leaf function does not save the return address
266 * in the stack address, so, we "emulate" one in _mcount's stack space, 264 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +273,37 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
275 * do it in ftrace_graph_caller of mcount.S. 273 * do it in ftrace_graph_caller of mcount.S.
276 */ 274 */
277 275
278 /* old = *parent; */ 276 /* old_parent_ra = *parent_ra_addr; */
279 safe_load_stack(old, parent, faulted); 277 safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
280 if (unlikely(faulted)) 278 if (unlikely(faulted))
281 goto out; 279 goto out;
282#ifndef KBUILD_MCOUNT_RA_ADDRESS 280#ifndef KBUILD_MCOUNT_RA_ADDRESS
283 parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, 281 parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
284 (unsigned long)parent, fp); 282 old_parent_ra, (unsigned long)parent_ra_addr, fp);
285 /* 283 /*
286 * If fails when getting the stack address of the non-leaf function's 284 * If fails when getting the stack address of the non-leaf function's
287 * ra, stop function graph tracer and return 285 * ra, stop function graph tracer and return
288 */ 286 */
289 if (parent == 0) 287 if (parent_ra_addr == 0)
290 goto out; 288 goto out;
291#endif 289#endif
292 /* *parent = return_hooker; */ 290 /* *parent_ra_addr = return_hooker; */
293 safe_store_stack(return_hooker, parent, faulted); 291 safe_store_stack(return_hooker, parent_ra_addr, faulted);
294 if (unlikely(faulted)) 292 if (unlikely(faulted))
295 goto out; 293 goto out;
296 294
297 if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == 295 if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
298 -EBUSY) { 296 == -EBUSY) {
299 *parent = old; 297 *parent_ra_addr = old_parent_ra;
300 return; 298 return;
301 } 299 }
302 300
303 trace.func = self_addr; 301 trace.func = self_ra;
304 302
305 /* Only trace if the calling function expects to */ 303 /* Only trace if the calling function expects to */
306 if (!ftrace_graph_entry(&trace)) { 304 if (!ftrace_graph_entry(&trace)) {
307 current->curr_ret_stack--; 305 current->curr_ret_stack--;
308 *parent = old; 306 *parent_ra_addr = old_parent_ra;
309 } 307 }
310 return; 308 return;
311out: 309out: