Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--  arch/x86/kernel/ftrace.c  90
1 file changed, 72 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index bb137f7297ed..7ef914e6a2f6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -111,7 +111,6 @@ static void ftrace_mod_code(void)
 	 */
 	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
 					     MCOUNT_INSN_SIZE);
-
 }
 
 void ftrace_nmi_enter(void)
@@ -323,9 +322,53 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod_jmp(unsigned long ip,
+			  int old_offset, int new_offset)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+		return -EINVAL;
+
+	*(int *)(&code[1]) = new_offset;
+
+	if (do_ftrace_mod_code(ip, &code))
+		return -EPERM;
+
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
 
-#ifndef CONFIG_DYNAMIC_FTRACE
+#else /* CONFIG_DYNAMIC_FTRACE */
 
 /*
  * These functions are picked from those used on
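
The new ftrace_mod_jmp() rewrites the rel32 operand of a 5-byte near jmp (opcode 0xe9) in place, after checking that the bytes really are the jmp it expects; the enable/disable helpers then just retarget the jmp at ftrace_graph_call between ftrace_stub and ftrace_graph_caller. Since a near jmp's displacement is relative to the next instruction, both offsets are computed as target - (ip + MCOUNT_INSN_SIZE). A minimal userspace sketch of the same byte surgery (plain C, illustrative names, operating on a local buffer rather than kernel text):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define INSN_SIZE 5			/* e9 + rel32 */

/* Encode "jmp target" at address ip into buf (buf stands in for
 * the instruction bytes that live at ip). */
static void encode_jmp(unsigned char *buf, uintptr_t ip, uintptr_t target)
{
	int32_t rel = (int32_t)(target - (ip + INSN_SIZE));

	buf[0] = 0xe9;
	memcpy(&buf[1], &rel, sizeof(rel));
}

/* Re-point an existing jmp, refusing if the bytes are not the jmp
 * we expect -- the same guard ftrace_mod_jmp() applies. */
static int mod_jmp(unsigned char *buf, int32_t old_rel, int32_t new_rel)
{
	int32_t cur;

	memcpy(&cur, &buf[1], sizeof(cur));
	if (buf[0] != 0xe9 || cur != old_rel)
		return -1;
	memcpy(&buf[1], &new_rel, sizeof(new_rel));
	return 0;
}

int main(void)
{
	unsigned char text[INSN_SIZE];
	uintptr_t ip = 0x1000, stub = 0x2000, caller = 0x3000;
	int32_t rel;

	encode_jmp(text, ip, stub);
	if (mod_jmp(text, (int32_t)(stub - (ip + INSN_SIZE)),
		    (int32_t)(caller - (ip + INSN_SIZE))))
		return 1;

	memcpy(&rel, &text[1], sizeof(rel));
	printf("jmp now lands at %#lx\n", (unsigned long)(ip + INSN_SIZE + rel));
	return 0;
}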
@@ -343,11 +386,12 @@ void ftrace_nmi_exit(void)
 {
 	atomic_dec(&in_nmi);
 }
+
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
+				unsigned long func, int *depth)
 {
 	int index;
 
@@ -365,21 +409,22 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
+	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
 
 	index = current->curr_ret_stack;
 	*ret = current->ret_stack[index].ret;
-	*func = current->ret_stack[index].func;
-	*time = current->ret_stack[index].calltime;
-	*overrun = atomic_read(&current->trace_overrun);
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
 	current->curr_ret_stack--;
 }
 
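
Both hunks above are about the per-task shadow stack of pending returns: push_return_trace() now reports the index it stored at through the new *depth argument, and pop_return_trace() fills a struct ftrace_graph_ret (func, calltime, overrun, depth) instead of four loose out-parameters. A stripped-down model of that stack, with illustrative names and none of the kernel's atomics or per-task plumbing:

#include <stdio.h>

#define STACK_MAX 50		/* illustrative; not the kernel's sizing */

struct ret_entry {
	unsigned long ret;	/* saved real return address */
	unsigned long func;	/* traced function's address */
	unsigned long long calltime;
};

/* Stand-in for the bits of task_struct the tracer uses. */
struct task_model {
	int curr_ret_stack;	/* index of the top entry, -1 when empty */
	unsigned long trace_overrun;
	struct ret_entry stack[STACK_MAX];
};

/* Push a pending return; hand the index back as the call depth. */
static int push_ret(struct task_model *t, unsigned long ret,
		    unsigned long long time, unsigned long func, int *depth)
{
	if (t->curr_ret_stack == STACK_MAX - 1) {
		t->trace_overrun++;	/* stack full: count it, refuse */
		return -1;		/* stands in for -EBUSY */
	}

	t->curr_ret_stack++;
	t->stack[t->curr_ret_stack].ret = ret;
	t->stack[t->curr_ret_stack].func = func;
	t->stack[t->curr_ret_stack].calltime = time;
	*depth = t->curr_ret_stack;
	return 0;
}

/* Pop the most recent entry; the depth is the index it lived at. */
static void pop_ret(struct task_model *t, unsigned long *ret,
		    unsigned long *func, unsigned long long *calltime,
		    int *depth)
{
	int index = t->curr_ret_stack;

	*ret = t->stack[index].ret;
	*func = t->stack[index].func;
	*calltime = t->stack[index].calltime;
	*depth = index;
	t->curr_ret_stack--;
}

int main(void)
{
	struct task_model t = { .curr_ret_stack = -1 };
	unsigned long ret, func;
	unsigned long long start;
	int depth;

	push_ret(&t, 0x1111, 100, 0x2222, &depth);
	printf("entered %#lx at depth %d\n", 0x2222UL, depth);

	pop_ret(&t, &ret, &func, &start, &depth);
	printf("leaving %#lx (depth %d), resume at %#lx\n", func, depth, ret);
	return 0;
}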
@@ -389,13 +434,14 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
  */
 unsigned long ftrace_return_to_handler(void)
 {
-	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-				&trace.overrun);
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_function_return(&trace);
+	ftrace_graph_return(&trace);
 
-	return trace.ret;
+	return ret;
 }
 
 /*
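
One subtle change in ftrace_return_to_handler(): struct ftrace_graph_ret no longer carries the saved return address, so pop_return_trace() hands it back through the separate *ret out-parameter and the function returns that local rather than trace.ret. The shape of the exit path, with hypothetical names standing in for the kernel's:

#include <stdio.h>

/* Illustrative stand-in for struct ftrace_graph_ret. */
struct graph_ret {
	unsigned long func;
	unsigned long long calltime, rettime;
	unsigned long overrun;
	int depth;
};

/* Stand-in for the registered ftrace_graph_return callback. */
static void graph_return_cb(struct graph_ret *trace)
{
	printf("leave %#lx depth=%d after %lluns\n", trace->func,
	       trace->depth, trace->rettime - trace->calltime);
}

/* Model of the exit path: the tracer record and the return address
 * travel separately, so the trampoline still knows where to jump. */
static unsigned long return_handler_model(struct graph_ret *top,
					  unsigned long saved_ret,
					  unsigned long long now)
{
	top->rettime = now;		/* cpu_clock() in the real code */
	graph_return_cb(top);		/* ftrace_graph_return(&trace) */
	return saved_ret;		/* trampoline jumps here */
}

int main(void)
{
	struct graph_ret t = { .func = 0x2222, .calltime = 100, .depth = 0 };
	unsigned long ret = return_handler_model(&t, 0x1111, 350);

	printf("resume at %#lx\n", ret);
	return 0;
}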
@@ -407,6 +453,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long old;
 	unsigned long long calltime;
 	int faulted;
+	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
@@ -440,20 +487,27 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	);
 
 	if (WARN_ON(faulted)) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		return;
 	}
 
 	if (WARN_ON(!__kernel_text_address(old))) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		*parent = old;
 		return;
 	}
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+	ftrace_graph_entry(&trace);
+
 }
 
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
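
The entry side, prepare_ftrace_return(), diverts the caller's return address: *parent points at the stack slot holding it, and the hook swaps in the return_to_handler trampoline while keeping the original. The diff makes the -EBUSY path restore *parent and return before the new ftrace_graph_entry() callback runs. A toy model of that swap-and-undo pattern (illustrative names, no fault handling; the kernel performs the swap with a fault-tolerant asm sequence):

#include <stdio.h>

static unsigned long pending_ret;	/* stands in for the per-task ret_stack */
static int stack_full;			/* flip to 1 to simulate -EBUSY */

static void trampoline(void) { }	/* stands in for return_to_handler */

static void hook_parent(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old = *parent;		/* caller's return address */

	*parent = (unsigned long)&trampoline;	/* divert the return */

	if (stack_full) {
		*parent = old;			/* can't record it: undo */
		return;
	}

	pending_ret = old;			/* the push_return_trace() part */
	printf("enter %#lx, will return via trampoline\n", self_addr);
}

int main(void)
{
	unsigned long slot = 0xdeadbeef;	/* fake saved return address */

	hook_parent(&slot, 0x1234);
	printf("slot now %#lx, original %#lx\n", slot, pending_ret);
	return 0;
}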