Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	215
1 files changed, 91 insertions, 124 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bb60732ade0c..8c804e24f96f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 
  again:
-	rec++;
+	if (idx != 0)
+		rec++;
+
 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
 		pg = pg->next;
 		if (!pg)
@@ -766,7 +768,7 @@ static struct tracer_stat function_stats __initdata = {
 	.stat_show	= function_stat_show
 };
 
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 	struct ftrace_profile_stat *stat;
 	struct dentry *entry;
@@ -784,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 			 * The files created are permanent, if something happens
 			 * we still do not free memory.
 			 */
-			kfree(stat);
 			WARN(1,
 			     "Could not allocate stat file for cpu %d\n",
 			     cpu);
@@ -811,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
@@ -1015,71 +1016,35 @@ static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ftrace_addr;
-	unsigned long ip, fl;
+	unsigned long flag = 0UL;
 
 	ftrace_addr = (unsigned long)FTRACE_ADDR;
 
-	ip = rec->ip;
-
 	/*
-	 * If this record is not to be traced and
-	 * it is not enabled then do nothing.
+	 * If this record is not to be traced or we want to disable it,
+	 * then disable it.
 	 *
-	 * If this record is not to be traced and
-	 * it is enabled then disable it.
+	 * If we want to enable it and filtering is off, then enable it.
 	 *
+	 * If we want to enable it and filtering is on, enable it only if
+	 * it's filtered
 	 */
-	if (rec->flags & FTRACE_FL_NOTRACE) {
-		if (rec->flags & FTRACE_FL_ENABLED)
-			rec->flags &= ~FTRACE_FL_ENABLED;
-		else
-			return 0;
-
-	} else if (ftrace_filtered && enable) {
-		/*
-		 * Filtering is on:
-		 */
-
-		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
-
-		/* Record is filtered and enabled, do nothing */
-		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
-			return 0;
-
-		/* Record is not filtered or enabled, do nothing */
-		if (!fl)
-			return 0;
-
-		/* Record is not filtered but enabled, disable it */
-		if (fl == FTRACE_FL_ENABLED)
-			rec->flags &= ~FTRACE_FL_ENABLED;
-		else
-			/* Otherwise record is filtered but not enabled, enable it */
-			rec->flags |= FTRACE_FL_ENABLED;
-	} else {
-		/* Disable or not filtered */
-
-		if (enable) {
-			/* if record is enabled, do nothing */
-			if (rec->flags & FTRACE_FL_ENABLED)
-				return 0;
-
-			rec->flags |= FTRACE_FL_ENABLED;
-
-		} else {
+	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+			flag = FTRACE_FL_ENABLED;
+	}
 
-			/* if record is not enabled, do nothing */
-			if (!(rec->flags & FTRACE_FL_ENABLED))
-				return 0;
+	/* If the state of this record hasn't changed, then do nothing */
+	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+		return 0;
 
-			rec->flags &= ~FTRACE_FL_ENABLED;
-		}
+	if (flag) {
+		rec->flags |= FTRACE_FL_ENABLED;
+		return ftrace_make_call(rec, ftrace_addr);
 	}
 
-	if (rec->flags & FTRACE_FL_ENABLED)
-		return ftrace_make_call(rec, ftrace_addr);
-	else
-		return ftrace_make_nop(NULL, rec, ftrace_addr);
+	rec->flags &= ~FTRACE_FL_ENABLED;
+	return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
@@ -1224,6 +1189,13 @@ static void ftrace_shutdown(int command)
 		return;
 
 	ftrace_start_up--;
+	/*
+	 * Just warn in case of unbalance, no need to kill ftrace, it's not
+	 * critical but the ftrace_call callers may be never nopped again after
+	 * further ftrace uses.
+	 */
+	WARN_ON_ONCE(ftrace_start_up < 0);
+
 	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
@@ -1367,7 +1339,6 @@ struct ftrace_iterator {
 	unsigned		flags;
 	unsigned char		buffer[FTRACE_BUFF_MAX+1];
 	unsigned		buffer_idx;
-	unsigned		filtered;
 };
 
 static void *
@@ -1410,28 +1381,33 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
+	loff_t l;
+
+	if (!(iter->flags & FTRACE_ITER_HASH))
+		*pos = 0;
 
 	iter->flags |= FTRACE_ITER_HASH;
 
-	return t_hash_next(m, p, pos);
+	iter->hidx = 0;
+	for (l = 0; l <= *pos; ) {
+		p = t_hash_next(m, p, &l);
+		if (!p)
+			break;
+	}
+	return p;
 }
 
 static int t_hash_show(struct seq_file *m, void *v)
 {
 	struct ftrace_func_probe *rec;
 	struct hlist_node *hnd = v;
-	char str[KSYM_SYMBOL_LEN];
 
 	rec = hlist_entry(hnd, struct ftrace_func_probe, node);
 
 	if (rec->ops->print)
 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
 
-	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-	seq_printf(m, "%s:", str);
-
-	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
-	seq_printf(m, "%s", str);
+	seq_printf(m, "%pf:%pf", (void *)rec->ip, (void *)rec->ops->func);
 
 	if (rec->data)
 		seq_printf(m, ":%p", rec->data);
@@ -1460,8 +1436,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			iter->pg = iter->pg->next;
 			iter->idx = 0;
 			goto retry;
-		} else {
-			iter->idx = -1;
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
@@ -1490,6 +1464,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
+	loff_t l;
 
 	mutex_lock(&ftrace_lock);
 	/*
@@ -1501,23 +1476,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
-		(*pos)++;
 		return iter;
 	}
 
 	if (iter->flags & FTRACE_ITER_HASH)
 		return t_hash_start(m, pos);
 
-	if (*pos > 0) {
-		if (iter->idx < 0)
-			return p;
-		(*pos)--;
-		iter->idx--;
+	iter->pg = ftrace_pages_start;
+	iter->idx = 0;
+	for (l = 0; l <= *pos; ) {
+		p = t_next(m, p, &l);
+		if (!p)
+			break;
 	}
 
-	p = t_next(m, p, pos);
-
-	if (!p)
+	if (!p && iter->flags & FTRACE_ITER_FILTER)
 		return t_hash_start(m, pos);
 
 	return p;
@@ -1532,7 +1505,6 @@ static int t_show(struct seq_file *m, void *v)
 {
 	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
-	char str[KSYM_SYMBOL_LEN];
 
 	if (iter->flags & FTRACE_ITER_HASH)
 		return t_hash_show(m, v);
@@ -1545,9 +1517,7 @@ static int t_show(struct seq_file *m, void *v)
 	if (!rec)
 		return 0;
 
-	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-
-	seq_printf(m, "%s\n", str);
+	seq_printf(m, "%pf\n", (void *)rec->ip);
 
 	return 0;
 }
@@ -1586,17 +1556,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-int ftrace_avail_release(struct inode *inode, struct file *file)
-{
-	struct seq_file *m = (struct seq_file *)file->private_data;
-	struct ftrace_iterator *iter = m->private;
-
-	seq_release(inode, file);
-	kfree(iter);
-
-	return 0;
-}
-
 static int
 ftrace_failures_open(struct inode *inode, struct file *file)
 {
@@ -1647,7 +1606,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_regex_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND))
+	    (file->f_flags & O_TRUNC))
 		ftrace_filter_reset(enable);
 
 	if (file->f_mode & FMODE_READ) {
@@ -2263,7 +2222,11 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	read++;
 	cnt--;
 
-	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
+	/*
+	 * If the parser haven't finished with the last write,
+	 * continue reading the user input without skipping spaces.
+	 */
+	if (!(iter->flags & FTRACE_ITER_CONT)) {
 		/* skip white space */
 		while (cnt && isspace(ch)) {
 			ret = get_user(ch, ubuf++);
@@ -2273,8 +2236,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 			cnt--;
 		}
 
+		/* only spaces were written */
 		if (isspace(ch)) {
-			file->f_pos += read;
+			*ppos += read;
 			ret = read;
 			goto out;
 		}
@@ -2297,19 +2261,18 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	}
 
 	if (isspace(ch)) {
-		iter->filtered++;
 		iter->buffer[iter->buffer_idx] = 0;
 		ret = ftrace_process_regex(iter->buffer,
 					   iter->buffer_idx, enable);
 		if (ret)
 			goto out;
 		iter->buffer_idx = 0;
-	} else
+	} else {
 		iter->flags |= FTRACE_ITER_CONT;
+		iter->buffer[iter->buffer_idx++] = ch;
+	}
 
-
-	file->f_pos += read;
-
+	*ppos += read;
 	ret = read;
  out:
 	mutex_unlock(&ftrace_regex_lock);
@@ -2428,7 +2391,6 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 	iter = file->private_data;
 
 	if (iter->buffer_idx) {
-		iter->filtered++;
 		iter->buffer[iter->buffer_idx] = 0;
 		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
 	}
@@ -2459,14 +2421,14 @@ static const struct file_operations ftrace_avail_fops = {
 	.open = ftrace_avail_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = ftrace_avail_release,
+	.release = seq_release_private,
 };
 
 static const struct file_operations ftrace_failures_fops = {
 	.open = ftrace_failures_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = ftrace_avail_release,
+	.release = seq_release_private,
 };
 
 static const struct file_operations ftrace_filter_fops = {
@@ -2493,32 +2455,31 @@ int ftrace_graph_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *array = m->private;
-	int index = *pos;
-
-	(*pos)++;
 
-	if (index >= ftrace_graph_count)
+	if (*pos >= ftrace_graph_count)
 		return NULL;
+	return &array[*pos];
+}
 
-	return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return __g_next(m, pos);
 }
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
-	void *p = NULL;
-
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
 	if (!ftrace_graph_count && !*pos)
 		return (void *)1;
 
-	p = g_next(m, p, pos);
-
-	return p;
+	return __g_next(m, pos);
 }
 
 static void g_stop(struct seq_file *m, void *p)
@@ -2529,7 +2490,6 @@ static void g_stop(struct seq_file *m, void *p)
 static int g_show(struct seq_file *m, void *v)
 {
 	unsigned long *ptr = v;
-	char str[KSYM_SYMBOL_LEN];
 
 	if (!ptr)
 		return 0;
@@ -2539,9 +2499,7 @@ static int g_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
-	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
-
-	seq_printf(m, "%s\n", str);
+	seq_printf(m, "%pf\n", v);
 
 	return 0;
 }
@@ -2563,7 +2521,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
 	mutex_lock(&graph_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
+	    (file->f_flags & O_TRUNC)) {
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
@@ -2582,6 +2540,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 }
 
 static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
+	return 0;
+}
+
+static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
 	struct dyn_ftrace *rec;
@@ -2710,9 +2676,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 }
 
 static const struct file_operations ftrace_graph_fops = {
-	.open = ftrace_graph_open,
-	.read = seq_read,
-	.write = ftrace_graph_write,
+	.open		= ftrace_graph_open,
+	.read		= seq_read,
+	.write		= ftrace_graph_write,
+	.release	= ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -3145,10 +3112,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
-	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
 		goto out;
 
-	last_ftrace_enabled = ftrace_enabled;
+	last_ftrace_enabled = !!ftrace_enabled;
 
 	if (ftrace_enabled) {
 