Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/Kconfig            22
-rw-r--r--   kernel/trace/ftrace.c          101
-rw-r--r--   kernel/trace/trace.c           182
-rw-r--r--   kernel/trace/trace.h            10
-rw-r--r--   kernel/trace/trace_branch.c     74
-rw-r--r--   kernel/trace/trace_mmiotrace.c  16
6 files changed, 366 insertions(+), 39 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index b8378fad29a3..9cbf7761f498 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -3,6 +3,9 @@
 # select HAVE_FUNCTION_TRACER:
 #
 
+config USER_STACKTRACE_SUPPORT
+	bool
+
 config NOP_TRACER
 	bool
 
@@ -166,14 +169,29 @@ config TRACE_BRANCH_PROFILING
 	  This tracer profiles all the likely and unlikely macros
 	  in the kernel. It will display the results in:
 
-	  /debugfs/tracing/profile_likely
-	  /debugfs/tracing/profile_unlikely
+	  /debugfs/tracing/profile_annotated_branch
 
 	  Note: this will add a significant overhead; only turn this
 	  on if you need to profile the system's use of these macros.
 
 	  Say N if unsure.
 
+config PROFILE_ALL_BRANCHES
+	bool "Profile all if conditionals"
+	depends on TRACE_BRANCH_PROFILING
+	help
+	  This tracer profiles all branch conditions. Every if ()
+	  taken in the kernel is recorded, whether it hit or missed.
+	  The results will be displayed in:
+
+	  /debugfs/tracing/profile_branch
+
+	  This configuration, when enabled, will impose a great overhead
+	  on the system. It should only be enabled when the system
+	  is to be analyzed.
+
+	  Say N if unsure.
+
 config TRACING_BRANCHES
 	bool
 	help
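For context on the two knobs above: the annotated profiler only counts branches that carry an explicit likely()/unlikely() hint, while PROFILE_ALL_BRANCHES also instruments every plain if () by overriding the keyword in compiler.h, which is where the extra overhead comes from. A minimal sketch of the kind of annotated branch being counted (the helper and its type are hypothetical; only likely()/unlikely() are the real macros):

	#include <linux/compiler.h>	/* likely()/unlikely() */
	#include <linux/errno.h>

	struct item;			/* hypothetical type, for illustration only */

	static int push_item(struct item *it)
	{
		/* The annotated profiler records, per file and line, how often
		 * this unlikely() hint turns out to be right or wrong. */
		if (unlikely(it == NULL))
			return -EINVAL;
		return 0;
	}

Each such site shows up as one line in profile_annotated_branch; with PROFILE_ALL_BRANCHES enabled, every if () in the kernel is additionally counted in profile_branch.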
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f212da486689..53042f118f23 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,10 +1498,77 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
 
+static atomic_t ftrace_retfunc_active;
+
 /* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 			(trace_function_return_t)ftrace_stub;
 
+
+/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+	int i;
+	int ret = 0;
+	unsigned long flags;
+	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+	struct task_struct *g, *t;
+
+	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+					    * sizeof(struct ftrace_ret_stack),
+					    GFP_KERNEL);
+		if (!ret_stack_list[i]) {
+			start = 0;
+			end = i;
+			ret = -ENOMEM;
+			goto free;
+		}
+	}
+
+	read_lock_irqsave(&tasklist_lock, flags);
+	do_each_thread(g, t) {
+		if (start == end) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+
+		if (t->ret_stack == NULL) {
+			t->ret_stack = ret_stack_list[start++];
+			t->curr_ret_stack = -1;
+			atomic_set(&t->trace_overrun, 0);
+		}
+	} while_each_thread(g, t);
+
+unlock:
+	read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+	for (i = start; i < end; i++)
+		kfree(ret_stack_list[i]);
+	return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_return_tracing(void)
+{
+	struct ftrace_ret_stack **ret_stack_list;
+	int ret;
+
+	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+				 sizeof(struct ftrace_ret_stack *),
+				 GFP_KERNEL);
+
+	if (!ret_stack_list)
+		return -ENOMEM;
+
+	do {
+		ret = alloc_retstack_tasklist(ret_stack_list);
+	} while (ret == -EAGAIN);
+
+	kfree(ret_stack_list);
+	return ret;
+}
+
 int register_ftrace_return(trace_function_return_t func)
 {
 	int ret = 0;
@@ -1516,7 +1583,12 @@ int register_ftrace_return(trace_function_return_t func)
 		ret = -EBUSY;
 		goto out;
 	}
-
+	atomic_inc(&ftrace_retfunc_active);
+	ret = start_return_tracing();
+	if (ret) {
+		atomic_dec(&ftrace_retfunc_active);
+		goto out;
+	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
 	ftrace_startup();
@@ -1530,6 +1602,7 @@ void unregister_ftrace_return(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
+	atomic_dec(&ftrace_retfunc_active);
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
@@ -1537,6 +1610,32 @@ void unregister_ftrace_return(void)
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
+
+/* Allocate a return stack for newly created task */
+void ftrace_retfunc_init_task(struct task_struct *t)
+{
+	if (atomic_read(&ftrace_retfunc_active)) {
+		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+				       * sizeof(struct ftrace_ret_stack),
+				       GFP_KERNEL);
+		if (!t->ret_stack)
+			return;
+		t->curr_ret_stack = -1;
+		atomic_set(&t->trace_overrun, 0);
+	} else
+		t->ret_stack = NULL;
+}
+
+void ftrace_retfunc_exit_task(struct task_struct *t)
+{
+	struct ftrace_ret_stack *ret_stack = t->ret_stack;
+
+	t->ret_stack = NULL;
+	/* NULL must become visible to IRQs before we free it: */
+	barrier();
+
+	kfree(ret_stack);
+}
 #endif
 
 
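Taken together, these hunks make register_ftrace_return() pre-allocate a FTRACE_RETFUNC_DEPTH-deep return stack for every existing task (retrying while new tasks race in), while ftrace_retfunc_init_task() and ftrace_retfunc_exit_task() are meant to be called from the fork and exit paths for tasks created later. A rough sketch of how a tracer would sit on top of this API; record_duration() is hypothetical, and the struct ftrace_retfunc field names are assumptions about kernels of this vintage rather than something this patch defines:

	/* Hedged sketch of a CONFIG_FUNCTION_RET_TRACER user, not the in-tree tracer. */
	static void my_return_handler(struct ftrace_retfunc *trace)
	{
		/* assumed fields: func, ret, calltime, rettime, overrun */
		record_duration(trace->func, trace->rettime - trace->calltime);
	}

	static int my_tracer_start(void)
	{
		/* Fails with -EBUSY if another tracing type is already active,
		 * or -ENOMEM if the per-task return stacks cannot be allocated. */
		return register_ftrace_return(my_return_handler);
	}

	static void my_tracer_stop(void)
	{
		unregister_ftrace_return();
	}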
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4ee6f0375222..a45b59e53fbc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/kprobes.h>
+#include <linux/seq_file.h>
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
@@ -275,6 +276,8 @@ static const char *trace_options[] = {
 	"ftrace_preempt",
 	"branch",
 	"annotate",
+	"userstacktrace",
+	"sym-userobj",
 	NULL
 };
 
@@ -421,6 +424,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 	return trace_seq_putmem(s, hex, j);
 }
 
+static int
+trace_seq_path(struct trace_seq *s, struct path *path)
+{
+	unsigned char *p;
+
+	if (s->len >= (PAGE_SIZE - 1))
+		return 0;
+	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+	if (!IS_ERR(p)) {
+		p = mangle_path(s->buffer + s->len, p, "\n");
+		if (p) {
+			s->len = p - s->buffer;
+			return 1;
+		}
+	} else {
+		s->buffer[s->len++] = '?';
+		return 1;
+	}
+
+	return 0;
+}
+
 static void
 trace_seq_reset(struct trace_seq *s)
 {
@@ -661,6 +686,21 @@ static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
 
 /**
+ * ftrace_off_permanent - disable all ftrace code permanently
+ *
+ * This should only be called when a serious anomaly has
+ * been detected. This will turn off the function tracing,
+ * ring buffers, and other tracing utilities. It takes no
+ * locks and can be called from any context.
+ */
+void ftrace_off_permanent(void)
+{
+	tracing_disabled = 1;
+	ftrace_stop();
+	tracing_off_permanent();
+}
+
+/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
@@ -801,6 +841,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count	= pc & 0xff;
 	entry->pid		= (tsk) ? tsk->pid : 0;
+	entry->tgid		= (tsk) ? tsk->tgid : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -918,6 +959,44 @@ void __trace_stack(struct trace_array *tr,
 	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
 }
 
+static void ftrace_trace_userstack(struct trace_array *tr,
+				   struct trace_array_cpu *data,
+				   unsigned long flags, int pc)
+{
+	struct ring_buffer_event *event;
+	struct userstack_entry *entry;
+	struct stack_trace trace;
+	unsigned long irq_flags;
+
+	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
+		return;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_USER_STACK;
+
+	memset(&entry->caller, 0, sizeof(entry->caller));
+
+	trace.nr_entries	= 0;
+	trace.max_entries	= FTRACE_STACK_ENTRIES;
+	trace.skip		= 0;
+	trace.entries		= entry->caller;
+
+	save_stack_trace_user(&trace);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+}
+
+void __trace_userstack(struct trace_array *tr,
+		       struct trace_array_cpu *data,
+		       unsigned long flags)
+{
+	ftrace_trace_userstack(tr, data, flags, preempt_count());
+}
+
 static void
 ftrace_trace_special(void *__tr, void *__data,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -941,6 +1020,7 @@ ftrace_trace_special(void *__tr, void *__data,
 	entry->arg3			= arg3;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
+	ftrace_trace_userstack(tr, data, irq_flags, pc);
 
 	trace_wake_up();
 }
@@ -979,6 +1059,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_cpu	= task_cpu(next);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 	ftrace_trace_stack(tr, data, flags, 5, pc);
+	ftrace_trace_userstack(tr, data, flags, pc);
 }
 
 void
@@ -1008,6 +1089,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_cpu			= task_cpu(wakee);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 	ftrace_trace_stack(tr, data, flags, 6, pc);
+	ftrace_trace_userstack(tr, data, flags, pc);
 
 	trace_wake_up();
 }
@@ -1387,6 +1469,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 	return ret;
 }
 
+static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+				    unsigned long ip, unsigned long sym_flags)
+{
+	struct file *file = NULL;
+	unsigned long vmstart = 0;
+	int ret = 1;
+
+	if (mm) {
+		const struct vm_area_struct *vma;
+
+		down_read(&mm->mmap_sem);
+		vma = find_vma(mm, ip);
+		if (vma) {
+			file = vma->vm_file;
+			vmstart = vma->vm_start;
+		}
+		if (file) {
+			ret = trace_seq_path(s, &file->f_path);
+			if (ret)
+				ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
+		}
+		up_read(&mm->mmap_sem);
+	}
+	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
+		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+	return ret;
+}
+
+static int
+seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
+		      unsigned long sym_flags)
+{
+	struct mm_struct *mm = NULL;
+	int ret = 1;
+	unsigned int i;
+
+	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+		struct task_struct *task;
+		/*
+		 * we do the lookup on the thread group leader,
+		 * since individual threads might have already quit!
+		 */
+		rcu_read_lock();
+		task = find_task_by_vpid(entry->ent.tgid);
+		if (task)
+			mm = get_task_mm(task);
+		rcu_read_unlock();
+	}
+
+	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+		unsigned long ip = entry->caller[i];
+
+		if (ip == ULONG_MAX || !ret)
+			break;
+		if (i && ret)
+			ret = trace_seq_puts(s, " <- ");
+		if (!ip) {
+			if (ret)
+				ret = trace_seq_puts(s, "??");
+			continue;
+		}
+		if (!ret)
+			break;
+		if (ret)
+			ret = seq_print_user_ip(s, mm, ip, sym_flags);
+	}
+
+	if (mm)
+		mmput(mm);
+	return ret;
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
 	seq_puts(m, "#                _------=> CPU#            \n");
@@ -1702,6 +1856,15 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 			 field->line);
 		break;
 	}
+	case TRACE_USER_STACK: {
+		struct userstack_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_userip_objs(field, s, sym_flags);
+		trace_seq_putc(s, '\n');
+		break;
+	}
 	default:
 		trace_seq_printf(s, "Unknown type %d\n", entry->type);
 	}
@@ -1853,6 +2016,19 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 			 field->line);
 		break;
 	}
+	case TRACE_USER_STACK: {
+		struct userstack_entry *field;
+
+		trace_assign_type(field, entry);
+
+		ret = seq_print_userip_objs(field, s, sym_flags);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		ret = trace_seq_putc(s, '\n');
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		break;
+	}
 	}
 	return TRACE_TYPE_HANDLED;
 }
@@ -1912,6 +2088,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 		break;
 	}
 	case TRACE_SPECIAL:
+	case TRACE_USER_STACK:
 	case TRACE_STACK: {
 		struct special_entry *field;
 
@@ -2000,6 +2177,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 		break;
 	}
 	case TRACE_SPECIAL:
+	case TRACE_USER_STACK:
 	case TRACE_STACK: {
 		struct special_entry *field;
 
@@ -2054,6 +2232,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 		break;
 	}
 	case TRACE_SPECIAL:
+	case TRACE_USER_STACK:
 	case TRACE_STACK: {
 		struct special_entry *field;
 
@@ -3488,6 +3667,9 @@ void ftrace_dump(void)
 		atomic_inc(&global_trace.data[cpu]->disabled);
 	}
 
+	/* don't look at user memory in panic mode */
+	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
 	iter.tr = &global_trace;
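The two new trace_options[] strings above become runtime toggles in the tracing debugfs directory. Assuming debugfs is mounted where the Kconfig help text puts it, a usage sketch (not a captured session) would be:

	echo userstacktrace > /debugfs/tracing/trace_options	# append a user-space stack dump to each event
	echo sym-userobj    > /debugfs/tracing/trace_options	# resolve user IPs to "object file[+offset]"

Note that the sym-userobj resolution in seq_print_userip_objs() looks up the mm via the recorded tgid, so it can only map addresses back to files while the traced thread group leader is still alive; ftrace_dump() also clears TRACE_ITER_SYM_USEROBJ so a panic dump never touches user memory.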
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2cb12fd98f6b..28c15c2ebc22 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -26,6 +26,7 @@ enum trace_type {
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
 	TRACE_FN_RET,
+	TRACE_USER_STACK,
 
 	__TRACE_LAST_TYPE
 };
@@ -42,6 +43,7 @@ struct trace_entry {
 	unsigned char		flags;
 	unsigned char		preempt_count;
 	int			pid;
+	int			tgid;
 };
 
 /*
@@ -99,6 +101,11 @@ struct stack_entry {
 	unsigned long		caller[FTRACE_STACK_ENTRIES];
 };
 
+struct userstack_entry {
+	struct trace_entry	ent;
+	unsigned long		caller[FTRACE_STACK_ENTRIES];
+};
+
 /*
  * ftrace_printk entry:
  */
@@ -240,6 +247,7 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
 		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
+		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
@@ -500,6 +508,8 @@ enum trace_iterator_flags {
 	TRACE_ITER_PREEMPTONLY		= 0x800,
 	TRACE_ITER_BRANCH		= 0x1000,
 	TRACE_ITER_ANNOTATE		= 0x2000,
+	TRACE_ITER_USERSTACKTRACE	= 0x4000,
+	TRACE_ITER_SYM_USEROBJ		= 0x8000
 };
 
 /*
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 23f9b02ce967..877ee88e6a74 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -185,12 +185,13 @@ EXPORT_SYMBOL(ftrace_likely_update);
 struct ftrace_pointer {
 	void		*start;
 	void		*stop;
+	int		hit;
 };
 
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct ftrace_pointer *f = m->private;
+	const struct ftrace_pointer *f = m->private;
 	struct ftrace_branch_data *p = v;
 
 	(*pos)++;
@@ -223,13 +224,17 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	const struct ftrace_pointer *fp = m->private;
 	struct ftrace_branch_data *p = v;
 	const char *f;
-	unsigned long percent;
+	long percent;
 
 	if (v == (void *)1) {
-		seq_printf(m, " correct incorrect  %% "
-			      "       Function                "
+		if (fp->hit)
+			seq_printf(m, "   miss      hit    %% ");
+		else
+			seq_printf(m, " correct incorrect  %% ");
+		seq_printf(m, "       Function                "
 			      "  File              Line\n"
 			      " ------- ---------  - "
 			      "   --------                "
@@ -243,13 +248,20 @@ static int t_show(struct seq_file *m, void *v)
 		f--;
 	f++;
 
+	/*
+	 * The miss count is overlaid on correct, and hit on incorrect.
+	 */
 	if (p->correct) {
 		percent = p->incorrect * 100;
 		percent /= p->correct + p->incorrect;
 	} else
-		percent = p->incorrect ? 100 : 0;
+		percent = p->incorrect ? 100 : -1;
 
-	seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
+	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
+	if (percent < 0)
+		seq_printf(m, "  X ");
+	else
+		seq_printf(m, "%3ld ", percent);
 	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
 	return 0;
 }
@@ -261,7 +273,7 @@ static struct seq_operations tracing_likely_seq_ops = {
 	.show		= t_show,
 };
 
-static int tracing_likely_open(struct inode *inode, struct file *file)
+static int tracing_branch_open(struct inode *inode, struct file *file)
 {
 	int ret;
 
@@ -274,25 +286,30 @@ static int tracing_likely_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static struct file_operations tracing_likely_fops = {
-	.open		= tracing_likely_open,
+static const struct file_operations tracing_branch_fops = {
+	.open		= tracing_branch_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 };
 
-extern unsigned long __start_likely_profile[];
-extern unsigned long __stop_likely_profile[];
-extern unsigned long __start_unlikely_profile[];
-extern unsigned long __stop_unlikely_profile[];
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+extern unsigned long __start_branch_profile[];
+extern unsigned long __stop_branch_profile[];
 
-static struct ftrace_pointer ftrace_likely_pos = {
-	.start			= __start_likely_profile,
-	.stop			= __stop_likely_profile,
+static const struct ftrace_pointer ftrace_branch_pos = {
+	.start			= __start_branch_profile,
+	.stop			= __stop_branch_profile,
+	.hit			= 1,
 };
 
-static struct ftrace_pointer ftrace_unlikely_pos = {
-	.start			= __start_unlikely_profile,
-	.stop			= __stop_unlikely_profile,
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+extern unsigned long __start_annotated_branch_profile[];
+extern unsigned long __stop_annotated_branch_profile[];
+
+static const struct ftrace_pointer ftrace_annotated_branch_pos = {
+	.start			= __start_annotated_branch_profile,
+	.stop			= __stop_annotated_branch_profile,
 };
 
 static __init int ftrace_branch_init(void)
@@ -302,18 +319,21 @@ static __init int ftrace_branch_init(void)
 
 	d_tracer = tracing_init_dentry();
 
-	entry = debugfs_create_file("profile_likely", 0444, d_tracer,
-				    &ftrace_likely_pos,
-				    &tracing_likely_fops);
+	entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
+				    (void *)&ftrace_annotated_branch_pos,
+				    &tracing_branch_fops);
 	if (!entry)
-		pr_warning("Could not create debugfs 'profile_likely' entry\n");
+		pr_warning("Could not create debugfs "
+			   "'profile_annotated_branch' entry\n");
 
-	entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
-				    &ftrace_unlikely_pos,
-				    &tracing_likely_fops);
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+	entry = debugfs_create_file("profile_branch", 0444, d_tracer,
+				    (void *)&ftrace_branch_pos,
+				    &tracing_branch_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs"
-			" 'profile_unlikely' entry\n");
+			" 'profile_branch' entry\n");
+#endif
 
 	return 0;
 }
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 433d650eda9f..2a98a206acc2 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -18,12 +18,14 @@ struct header_iter {
 
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
+static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
 	int cpu;
 
 	overrun_detected = false;
+	prev_overruns = 0;
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
@@ -123,16 +125,12 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	int cpu;
 	unsigned long cnt = 0;
-/* FIXME: */
-#if 0
-	for_each_online_cpu(cpu) {
-		cnt += iter->overrun[cpu];
-		iter->overrun[cpu] = 0;
-	}
-#endif
-	(void)cpu;
+	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+	if (over > prev_overruns)
+		cnt = over - prev_overruns;
+	prev_overruns = over;
 	return cnt;
 }
 
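The rewritten count_overruns() replaces the old per-cpu FIXME with a delta against the ring buffer's cumulative overrun counter: if ring_buffer_overruns() now reports 150 dropped events and prev_overruns was 120 from the previous read, the function returns 30 and remembers 150, so each report only shows the overruns that happened since the last one (and mmio_reset_data() resets the baseline to 0).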