path: root/arch/x86/kernel/ftrace.c
author    Ingo Molnar <mingo@elte.hu>  2008-12-12 06:43:05 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-12-12 06:43:05 -0500
commit    81444a799550214f549caf579cf65a0ca55e70b7 (patch)
tree      3288dac0740be2e1e7d1af4ee51d792a6e91edf3 /arch/x86/kernel/ftrace.c
parent    a64d31baed104be25305e9c71585d3ea4ee9a418 (diff)
parent    da485e0cb16726797e99a595a399b9fc721b91bc (diff)
Merge branch 'tracing/fastboot' into cpus4096
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--  arch/x86/kernel/ftrace.c  134
1 file changed, 106 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index bb137f7297ed..1b43086b097a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -111,7 +111,6 @@ static void ftrace_mod_code(void)
 	 */
 	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
 					     MCOUNT_INSN_SIZE);
-
 }
 
 void ftrace_nmi_enter(void)
@@ -323,9 +322,53 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod_jmp(unsigned long ip,
+			  int old_offset, int new_offset)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+		return -EINVAL;
+
+	*(int *)(&code[1]) = new_offset;
+
+	if (do_ftrace_mod_code(ip, &code))
+		return -EPERM;
+
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
 
-#ifndef CONFIG_DYNAMIC_FTRACE
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
+
+#else /* CONFIG_DYNAMIC_FTRACE */
 
 /*
  * These functions are picked from those used on
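
Note: ftrace_mod_jmp() patches only the 4-byte displacement of an existing
5-byte "jmp rel32" (opcode 0xe9), so enabling or disabling the graph caller
just swaps the jump target between ftrace_stub and ftrace_graph_caller
without touching the opcode. A minimal user-space sketch of the same rel32
arithmetic (all addresses below are hypothetical, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	#define JMP_INSN_SIZE 5	/* 0xe9 opcode + 4-byte displacement */

	/* An x86 "jmp rel32" target is relative to the end of the
	 * instruction, which is why the kernel computes offsets as
	 * target - (ip + MCOUNT_INSN_SIZE). */
	static int32_t rel32(uintptr_t ip, uintptr_t target)
	{
		return (int32_t)(target - (ip + JMP_INSN_SIZE));
	}

	int main(void)
	{
		uintptr_t ip     = 0x1000;	/* hypothetical ftrace_graph_call site */
		uintptr_t stub   = 0x1005;	/* hypothetical ftrace_stub */
		uintptr_t caller = 0x2000;	/* hypothetical ftrace_graph_caller */

		printf("stub offset:   %d\n", (int)rel32(ip, stub));	/* 0: fall through */
		printf("caller offset: %d\n", (int)rel32(ip, caller));
		return 0;
	}
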
@@ -343,11 +386,12 @@ void ftrace_nmi_exit(void)
 {
 	atomic_dec(&in_nmi);
 }
+
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
+				unsigned long func, int *depth)
 {
 	int index;
 
@@ -365,22 +409,34 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
+	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
 
 	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have no where to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
 	*ret = current->ret_stack[index].ret;
-	*func = current->ret_stack[index].func;
-	*time = current->ret_stack[index].calltime;
-	*overrun = atomic_read(&current->trace_overrun);
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
 	current->curr_ret_stack--;
+
 }
 
 /*
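
Note: push_return_trace()/pop_return_trace() manage a per-task shadow stack
(current->ret_stack, indexed by curr_ret_stack) that remembers the real
return addresses while they are diverted; the new "int *depth" out-parameter
and trace->depth are what lets the graph tracer print call nesting. A
stripped-down user-space model of that stack (sizes and types are
illustrative, not the kernel's; the kernel's FTRACE_RETFUNC_DEPTH is larger
and each entry also records calltime):

	#include <stdio.h>

	#define RET_STACK_MAX 8

	struct ret_entry {
		unsigned long ret;	/* real return address */
		unsigned long func;	/* traced function */
	};

	static struct ret_entry ret_stack[RET_STACK_MAX];
	static int curr_ret_stack = -1;	/* -1 means empty, as in the kernel */

	static int push_return(unsigned long ret, unsigned long func, int *depth)
	{
		if (curr_ret_stack >= RET_STACK_MAX - 1)
			return -1;	/* kernel returns -EBUSY, counts overrun */
		curr_ret_stack++;
		ret_stack[curr_ret_stack].ret = ret;
		ret_stack[curr_ret_stack].func = func;
		*depth = curr_ret_stack;	/* report call depth to the caller */
		return 0;
	}

	static unsigned long pop_return(int *depth)
	{
		int index = curr_ret_stack;

		if (index < 0)		/* underflow: kernel stops tracing here */
			return 0;
		*depth = index;
		curr_ret_stack--;
		return ret_stack[index].ret;
	}

	int main(void)
	{
		int depth;
		unsigned long ret;

		push_return(0xdead, 0xbeef, &depth);
		ret = pop_return(&depth);
		printf("popped ret %#lx from depth %d\n", ret, depth);
		return 0;
	}
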
@@ -389,13 +445,21 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
  */
 unsigned long ftrace_return_to_handler(void)
 {
-	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-				&trace.overrun);
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_function_return(&trace);
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
 
-	return trace.ret;
+	return ret;
 }
 
 /*
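
Note: the value ftrace_return_to_handler() returns is used by the
return_to_handler trampoline as the address to jump back to, so a popped ret
of 0 would mean jumping to address 0. Diverting to panic() instead turns an
unrecoverable corruption of the return stack into a loud, debuggable stop
rather than a silent wild jump, which is what the "might as well panic"
comments are about.
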
@@ -407,11 +471,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long old;
 	unsigned long long calltime;
 	int faulted;
+	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (atomic_read(&in_nmi))
+	if (unlikely(atomic_read(&in_nmi)))
+		return;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
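
Note: tracing_graph_pause is a per-task counter that lets the tracer mask
this entry hook around code that must not recurse into the graph tracer.
Its helpers live in include/linux/ftrace.h and are essentially the
following (a from-memory sketch, treat the exact spelling as an assumption):

	static inline void pause_graph_tracing(void)
	{
		atomic_inc(&current->tracing_graph_pause);
	}

	static inline void unpause_graph_tracing(void)
	{
		atomic_dec(&current->tracing_graph_pause);
	}
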
@@ -420,18 +488,16 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
-		"1: movl (%[parent_old]), %[old]\n"
-		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
+		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
 		"   movl $0, %[faulted]\n"
 
 		".section .fixup, \"ax\"\n"
 		"3: movl $1, %[faulted]\n"
 		".previous\n"
 
-		".section __ex_table, \"a\"\n"
-		"   .long 1b, 3b\n"
-		"   .long 2b, 3b\n"
-		".previous\n"
+		_ASM_EXTABLE(1b, 3b)
+		_ASM_EXTABLE(2b, 3b)
 
 		: [parent_replaced] "=r" (parent), [old] "=r" (old),
 		  [faulted] "=r" (faulted)
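
Note: the open-coded "movl" and ".long" exception-table entries were only
correct for x86-32; _ASM_MOV expands to movl or movq depending on the word
size, and _ASM_EXTABLE emits pointer-sized fixup entries, so this
fault-protected access of the parent return-address slot now builds on
x86-64 as well. Roughly what _ASM_EXTABLE(from, to) expanded to in this
era's <asm/asm.h> (a from-memory sketch, not the verbatim macro):

	#define _ASM_EXTABLE(from, to)			\
		" .section __ex_table, \"a\"\n"		\
		_ASM_ALIGN "\n"				\
		_ASM_PTR #from "," #to "\n"		\
		" .previous\n"
	/* _ASM_PTR is .long on 32-bit and .quad on 64-bit, so each
	 * (fault address, fixup address) pair matches the pointer size. */
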
@@ -439,21 +505,33 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		: "memory"
 	);
 
-	if (WARN_ON(faulted)) {
-		unregister_ftrace_return();
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
 		return;
 	}
 
-	if (WARN_ON(!__kernel_text_address(old))) {
-		unregister_ftrace_return();
+	if (unlikely(!__kernel_text_address(old))) {
+		ftrace_graph_stop();
 		*parent = old;
+		WARN_ON(1);
 		return;
 	}
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
-}
+		return;
+	}
+
+	trace.func = self_addr;
 
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		*parent = old;
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
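
Note: taken together, prepare_ftrace_return() is the entry half of the
graph tracer: read the caller's return address out of its stack slot, push
the real address (and depth) onto the shadow stack, then divert the slot to
return_to_handler; ftrace_return_to_handler() above is the exit half. A
conceptual sketch of that flow, reusing the hypothetical push_return() and
pop_return() helpers from the earlier user-space model (the kernel performs
the slot accesses with the fault-protected asm, and return_to_handler is an
assembly trampoline, declared extern here only to make the shape clear):

	extern void return_to_handler(void);	/* asm trampoline (assumed) */

	void prepare_return_hook(unsigned long *parent_slot, unsigned long self_addr)
	{
		unsigned long real_ret = *parent_slot;
		int depth;

		if (push_return(real_ret, self_addr, &depth))
			return;			/* shadow stack full: leave untraced */

		*parent_slot = (unsigned long)return_to_handler;
	}

	unsigned long return_hook(void)		/* called by the trampoline */
	{
		int depth;

		return pop_return(&depth);	/* real address to jump back to */
	}
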