Diffstat (limited to 'kernel/trace/ftrace.c')
 -rw-r--r--  kernel/trace/ftrace.c | 306
 1 file changed, 222 insertions, 84 deletions

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53042f118f23..cbf8b09f63a5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,12 +47,12 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
+
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
-/* By default, current tracing type is normal tracing. */
-enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
-
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -61,6 +61,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -70,6 +71,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -86,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (current->pid != ftrace_pid_trace)
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -96,6 +113,7 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
 }
 
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -128,20 +146,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace >= 0) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+		ftrace_trace_function = func;
 #else
-		if (ops->next == &ftrace_list_end)
-			__ftrace_trace_function = ops->func;
-		else
-			__ftrace_trace_function = ftrace_list_func;
+		__ftrace_trace_function = func;
 		ftrace_trace_function = ftrace_test_stop_func;
 #endif
 	}
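The hunk above reworks __register_ftrace_function() so the callback to install is computed once (a direct call for a single ops, ftrace_list_func for a chain) and then, when a pid filter is active, the real callback is parked in ftrace_pid_function while the hot pointer gets the thin ftrace_pid_func wrapper instead. A minimal user-space sketch of that wrapper pattern; all names here are local stand-ins, not the kernel symbols:

#include <stdio.h>
#include <unistd.h>

typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

static void stub(unsigned long ip, unsigned long parent_ip) { (void)ip; (void)parent_ip; }

static int filter_pid = -1;          /* -1 means trace everything */
static trace_fn real_fn = stub;      /* stands in for ftrace_pid_function */
static trace_fn hot_fn = stub;       /* stands in for ftrace_trace_function */

static void pid_filter(unsigned long ip, unsigned long parent_ip)
{
	if ((int)getpid() != filter_pid)
		return;
	real_fn(ip, parent_ip);      /* forward only for the matching pid */
}

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("traced ip=%#lx parent=%#lx\n", ip, parent_ip);
}

static void install(trace_fn func)
{
	if (filter_pid >= 0) {
		if (func != pid_filter)  /* never wrap the wrapper */
			real_fn = func;
		func = pid_filter;
	}
	hot_fn = func;
}

int main(void)
{
	filter_pid = (int)getpid();
	install(my_tracer);
	hot_fn(0x1000, 0x2000);          /* forwarded through pid_filter */

	filter_pid = -1;
	install(my_tracer);
	hot_fn(0x1000, 0x2000);          /* installed directly, no filter in the path */
	return 0;
}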
@@ -182,8 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace >= 0) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -192,6 +227,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace >= 0) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func != ftrace_pid_func)
+			goto out;
+
+		func = ftrace_pid_function;
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -211,6 +278,8 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
@@ -395,14 +464,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	unsigned long ip, fl;
 	unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
-		ftrace_addr = (unsigned long)ftrace_caller;
-	else
-		ftrace_addr = (unsigned long)ftrace_return_caller;
-#else
 	ftrace_addr = (unsigned long)ftrace_caller;
-#endif
 
 	ip = rec->ip;
 
@@ -535,6 +597,11 @@ static int __ftrace_modify_code(void *data)
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
+
 	return 0;
 }
 
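The two new command bits hand off to ftrace_enable_ftrace_graph_caller() / ftrace_disable_ftrace_graph_caller(), which an architecture's dynamic-ftrace code is expected to provide. On configurations without a graph caller these presumably reduce to no-op stubs along these lines (the names come from the hunk above; the bodies are an assumption, not taken from this patch):

/* Assumed shape of the fallbacks when no graph caller exists. */
static inline int ftrace_enable_ftrace_graph_caller(void)  { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }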
@@ -545,12 +612,22 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static DEFINE_MUTEX(ftrace_start_lock);
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
+
+	if (!command || !ftrace_enabled)
+		return;
+
+	ftrace_run_update_code(command);
+}
 
+static void ftrace_startup(int command)
+{
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -558,23 +635,13 @@ static void ftrace_startup(void)
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
-	if (saved_ftrace_func != ftrace_trace_function) {
-		saved_ftrace_func = ftrace_trace_function;
-		command |= FTRACE_UPDATE_TRACE_FUNC;
-	}
+	ftrace_startup_enable(command);
 
-	if (!command || !ftrace_enabled)
-		goto out;
-
-	ftrace_run_update_code(command);
- out:
 	mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_shutdown(int command)
 {
-	int command = 0;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -1262,13 +1329,10 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *d_tracer;
 	struct dentry *entry;
 
-	d_tracer = tracing_init_dentry();
-
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
 	if (!entry)
@@ -1295,8 +1359,6 @@ static __init int ftrace_init_debugfs(void)
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
 static int ftrace_convert_nops(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
@@ -1382,12 +1444,101 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup()		do { } while (0)
-# define ftrace_shutdown()		do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)	do { } while (0)
+# define ftrace_shutdown(command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace >= 0)
+		r = sprintf(buf, "%u\n", ftrace_pid_trace);
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (val < 0) {
+		/* disable pid tracing */
+		if (ftrace_pid_trace < 0)
+			goto out;
+		ftrace_pid_trace = -1;
+
+	} else {
+
+		if (ftrace_pid_trace == val)
+			goto out;
+
+		ftrace_pid_trace = val;
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
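With the new set_ftrace_pid file in place, pid filtering is driven entirely from user space. A minimal sketch of a writer, assuming debugfs is mounted at the usual /sys/kernel/debug and using a made-up pid of 1234:

#include <stdio.h>

int main(void)
{
	/* Path assumes the common debugfs mount point; adjust as needed. */
	FILE *f = fopen("/sys/kernel/debug/tracing/set_ftrace_pid", "w");

	if (!f) {
		perror("set_ftrace_pid");
		return 1;
	}

	fprintf(f, "%d\n", 1234);  /* hypothetical pid: trace only this task */
	/* Per ftrace_pid_write() above, writing a negative value such as
	 * "-1" disables pid filtering again. */

	return fclose(f) ? 1 : 0;
}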
@@ -1422,15 +1573,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
+	ftrace_startup(0);
 
- out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
@@ -1447,7 +1592,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1496,14 +1641,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-static atomic_t ftrace_retfunc_active;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-/* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-			(trace_function_return_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+			(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1549,7 +1695,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
 	int ret;
@@ -1569,52 +1715,46 @@ static int start_return_tracing(void)
 	return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	/*
-	 * Don't launch return tracing if normal function
-	 * tracing is already running.
-	 */
-	if (ftrace_trace_function != ftrace_stub) {
-		ret = -EBUSY;
-		goto out;
-	}
-	atomic_inc(&ftrace_retfunc_active);
-	ret = start_return_tracing();
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
-	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_function_return = func;
-	ftrace_startup();
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_function_return = (trace_function_return_t)ftrace_stub;
-	ftrace_shutdown();
-	/* Restore normal tracing type */
-	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
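register_ftrace_graph() now takes an entry callback alongside the return callback, and the old -EBUSY exclusion against the plain function tracer is gone since both paths share ftrace_startup(). A hedged sketch of a caller, assuming the trace_func_graph_ent_t / trace_func_graph_ret_t typedefs take struct ftrace_graph_ent * / struct ftrace_graph_ret * and return void (the typedef definitions are not shown in this diff, so treat the signatures as assumptions):

#include <linux/ftrace.h>
#include <linux/module.h>

/* Hypothetical hooks; signatures assumed from the typedef names. */
static void my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* runs when a traced function is entered */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* runs when a traced function returns */
}

static int __init my_graph_init(void)
{
	/* return callback first, entry callback second */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}

module_init(my_graph_init);
module_exit(my_graph_exit);
MODULE_LICENSE("GPL");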
@@ -1626,7 +1766,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
 	t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
 	struct ftrace_ret_stack *ret_stack = t->ret_stack;
 
@@ -1638,5 +1778,3 @@ void ftrace_retfunc_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
