author		Frederic Weisbecker <fweisbec@gmail.com>	2008-11-16 00:02:06 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-16 01:57:38 -0500
commit		e7d3737ea1b102030f44e96c97754101e41515f0
tree		44fdc3dea100d1fa639e6ba3cb1bfca2ab40e70b /kernel/trace
parent		b01c746617da5e260803eb10ed64ca043e9a1241
tracing/function-return-tracer: support for dynamic ftrace on function return tracer
This patch adds support for dynamic ftrace to the function return tracer. The main difference from normal dynamic function tracing is that we do not need to hook a particular callback: all we want is to dynamically nop or enable the calls to ftrace_caller (which is ftrace_return_caller here). Sanity checks ensure that normal function tracing and return tracing are not launched at the same time.

An example of a trace with getnstimeofday set as a filter:

ktime_get_ts+0x22/0x50 -> getnstimeofday (2283 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1396 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1382 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1825 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1426 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1464 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1524 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1382 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1382 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1434 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1464 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1502 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1404 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1397 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1051 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1314 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1344 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1163 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1390 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1374 ns)

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
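A minimal, illustrative sketch of how a client of these APIs sees the new mutual exclusion follows. Only register_ftrace_return(), register_ftrace_function(), unregister_ftrace_return() and the -EBUSY behaviour come from this patch; the module boilerplate, the hook bodies, and the assumption that trace_function_return_t takes a struct ftrace_retfunc * (as in the original return-tracer patch) are illustrative, not part of the change:

/*
 * Illustrative sketch only; assumes CONFIG_FUNCTION_RET_TRACER and
 * CONFIG_DYNAMIC_FTRACE are enabled.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ftrace.h>

/* Entry hook for the normal function tracer (illustrative). */
static void demo_entry_hook(unsigned long ip, unsigned long parent_ip)
{
}

/* Return hook; signature assumed from the original return-tracer patch. */
static void demo_return_hook(struct ftrace_retfunc *trace)
{
}

static struct ftrace_ops demo_ops = {
	.func = demo_entry_hook,
};

static int __init demo_init(void)
{
	int ret;

	/* Switches ftrace_tracing_type to FTRACE_TYPE_RETURN and starts patching. */
	ret = register_ftrace_return(demo_return_hook);
	if (ret)
		return ret;

	/*
	 * With this patch, normal function tracing is refused while the
	 * return tracer owns the patched call sites.
	 */
	ret = register_ftrace_function(&demo_ops);
	if (ret == -EBUSY)
		printk(KERN_INFO "function tracer refused: return tracer active\n");

	return 0;
}

static void __exit demo_exit(void)
{
	/* Stops patching and restores FTRACE_TYPE_ENTER. */
	unregister_ftrace_return();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");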
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                  |  1
-rw-r--r--  kernel/trace/ftrace.c                 | 58
-rw-r--r--  kernel/trace/trace_functions_return.c | 15
3 files changed, 55 insertions(+), 19 deletions(-)
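The diffstat is limited to kernel/trace, so the include/linux/ftrace.h side of the patch does not appear below. A rough reconstruction of the declarations it presumably adds, based only on the identifiers used in the hunks that follow (exact wording and placement are assumptions):

/* Reconstructed sketch -- the real hunk lives in include/linux/ftrace.h
 * and is outside this filtered diff. */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,	/* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

extern enum ftrace_tracing_type_t ftrace_tracing_type;

/* register_ftrace_return() now reports failure instead of returning void. */
int register_ftrace_return(trace_function_return_t func);
void unregister_ftrace_return(void);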
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9c89526b6b7c..b8378fad29a3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -59,7 +59,6 @@ config FUNCTION_TRACER
 
 config FUNCTION_RET_TRACER
 	bool "Kernel Function return Tracer"
-	depends on !DYNAMIC_FTRACE
 	depends on HAVE_FUNCTION_RET_TRACER
 	depends on FUNCTION_TRACER
 	help
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b42ec1de546b..2f78a45aac14 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -50,6 +50,9 @@ static int last_ftrace_enabled;
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* By default, current tracing type is normal tracing. */
+enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -385,12 +388,21 @@ static void ftrace_bug(int failed, unsigned long ip)
 	}
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
 
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
+	unsigned long ftrace_addr;
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
+		ftrace_addr = (unsigned long)ftrace_caller;
+	else
+		ftrace_addr = (unsigned long)ftrace_return_caller;
+#else
+	ftrace_addr = (unsigned long)ftrace_caller;
+#endif
 
 	ip = rec->ip;
 
@@ -450,9 +462,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	}
 
 	if (rec->flags & FTRACE_FL_ENABLED)
-		return ftrace_make_call(rec, FTRACE_ADDR);
+		return ftrace_make_call(rec, ftrace_addr);
 	else
-		return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
@@ -1405,10 +1417,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
+	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ret = __register_ftrace_function(ops);
 	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
 
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1474,16 +1493,45 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+
+/* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 			(trace_function_return_t)ftrace_stub;
-void register_ftrace_return(trace_function_return_t func)
+
+int register_ftrace_return(trace_function_return_t func)
 {
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	/*
+	 * Don't launch return tracing if normal function
+	 * tracing is already running.
+	 */
+	if (ftrace_trace_function != ftrace_stub) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
+	ftrace_startup();
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
 }
 
 void unregister_ftrace_return(void)
 {
+	mutex_lock(&ftrace_sysctl_lock);
+
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
+	ftrace_shutdown();
+	/* Restore normal tracing type */
+	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
+	mutex_unlock(&ftrace_sysctl_lock);
 }
 #endif
 
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
index 61185f756a13..a68564af022b 100644
--- a/kernel/trace/trace_functions_return.c
+++ b/kernel/trace/trace_functions_return.c
@@ -14,29 +14,18 @@
 #include "trace.h"
 
 
-static void start_return_trace(struct trace_array *tr)
-{
-	register_ftrace_return(&trace_function_return);
-}
-
-static void stop_return_trace(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
 static int return_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	start_return_trace(tr);
-	return 0;
+	return register_ftrace_return(&trace_function_return);
 }
 
 static void return_trace_reset(struct trace_array *tr)
 {
-	stop_return_trace(tr);
+	unregister_ftrace_return();
 }
 
 