Diffstat (limited to 'kernel')

 kernel/trace/Kconfig       |  2 +-
 kernel/trace/ring_buffer.c | 56 ++++++++++++++++++++++++++++++++++++++++
 kernel/trace/trace.c       | 41 +++++++++++++++++-----------------
 3 files changed, 80 insertions(+), 19 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index b58f43bec363..33dbefd471e8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -25,7 +25,7 @@ config TRACING
         bool
         select DEBUG_FS
         select RING_BUFFER
-        select STACKTRACE
+        select STACKTRACE if STACKTRACE_SUPPORT
         select TRACEPOINTS
         select NOP_TRACER
 
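With STACKTRACE selected only on architectures that provide STACKTRACE_SUPPORT, tracing can be enabled everywhere without breaking the build on architectures that lack stack-trace support. The flip side is that the core tracer can no longer assume CONFIG_STACKTRACE is set; the trace.c hunks near the end of this diff compile the stack-dumping body out in that case (see the sketch after those hunks).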
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cedf4e268285..3f3380638646 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1022,8 +1022,23 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
         struct ring_buffer_event *event;
         u64 ts, delta;
         int commit = 0;
+        int nr_loops = 0;
 
 again:
+        /*
+         * We allow for interrupts to reenter here and do a trace.
+         * If one does, it will cause this original code to loop
+         * back here. Even with heavy interrupts happening, this
+         * should only happen a few times in a row. If this happens
+         * 1000 times in a row, there must be either an interrupt
+         * storm or we have something buggy.
+         * Bail!
+         */
+        if (unlikely(++nr_loops > 1000)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                return NULL;
+        }
+
         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
         /*
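The check added above is a bounded-retry guard: the writer jumps back to again: whenever a nested trace from an interrupt races with it, so the retry loop has to be capped to turn a potential livelock into a dropped event plus a warning. The same guard, with smaller bounds matching plausible nesting depth, is applied to rb_get_reader_page() and both peek paths in the hunks below. A minimal userspace sketch of the pattern, where try_reserve() is a hypothetical stand-in for the racy reservation step:

#include <stdio.h>

/* Hypothetical stand-in for the racy step: returns nonzero when a
 * nested writer forced a retry, 0 once the reservation succeeds. */
static int try_reserve(int attempt)
{
        return attempt < 3;     /* pretend two interrupts raced with us */
}

static void *reserve_event(void)
{
        static char slot[32];
        int nr_loops = 0;

again:
        /* Same idea as rb_reserve_next_event(): a handful of retries is
         * normal, but 1000 in a row means an interrupt storm or a bug,
         * so bail out instead of spinning forever. */
        if (++nr_loops > 1000)
                return NULL;

        if (try_reserve(nr_loops))
                goto again;

        return slot;
}

int main(void)
{
        printf("reserved: %s\n", reserve_event() ? "yes" : "no");
        return 0;
}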
@@ -1532,10 +1547,23 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
         struct buffer_page *reader = NULL;
         unsigned long flags;
+        int nr_loops = 0;
 
         spin_lock_irqsave(&cpu_buffer->lock, flags);
 
 again:
+        /*
+         * This should normally only loop twice. But because the
+         * start of the reader inserts an empty page, it causes
+         * a case where we will loop three times. There should be no
+         * reason to loop four times (that I know of).
+         */
+        if (unlikely(++nr_loops > 3)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                reader = NULL;
+                goto out;
+        }
+
         reader = cpu_buffer->reader_page;
 
         /* If there's more to read, return this page */
@@ -1665,6 +1693,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_event *event;
         struct buffer_page *reader;
+        int nr_loops = 0;
 
         if (!cpu_isset(cpu, buffer->cpumask))
                 return NULL;
@@ -1672,6 +1701,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         cpu_buffer = buffer->buffers[cpu];
 
 again:
+        /*
+         * We repeat when a timestamp is encountered. It is possible
+         * to get multiple timestamps from an interrupt entering just
+         * as one timestamp is about to be written. The max times
+         * that this can happen is the number of nested interrupts we
+         * can have. Nesting 10 deep of interrupts is clearly
+         * an anomaly.
+         */
+        if (unlikely(++nr_loops > 10)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                return NULL;
+        }
+
         reader = rb_get_reader_page(cpu_buffer);
         if (!reader)
                 return NULL;
@@ -1722,6 +1764,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
         struct ring_buffer *buffer;
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_event *event;
+        int nr_loops = 0;
 
         if (ring_buffer_iter_empty(iter))
                 return NULL;
@@ -1730,6 +1773,19 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
         buffer = cpu_buffer->buffer;
 
 again:
+        /*
+         * We repeat when a timestamp is encountered. It is possible
+         * to get multiple timestamps from an interrupt entering just
+         * as one timestamp is about to be written. The max times
+         * that this can happen is the number of nested interrupts we
+         * can have. Nesting 10 deep of interrupts is clearly
+         * an anomaly.
+         */
+        if (unlikely(++nr_loops > 10)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                return NULL;
+        }
+
         if (rb_per_cpu_empty(cpu_buffer))
                 return NULL;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8a499e2adaec..9f3b478f9171 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -705,6 +705,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
                                unsigned long flags,
                                int skip, int pc)
 {
+#ifdef CONFIG_STACKTRACE
         struct ring_buffer_event *event;
         struct stack_entry *entry;
         struct stack_trace trace;
@@ -730,6 +731,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
 
         save_stack_trace(&trace);
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
 void __trace_stack(struct trace_array *tr,
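Pairing with the Kconfig change, ftrace_trace_stack() now has to build when CONFIG_STACKTRACE is unset. Guarding the body rather than the whole function keeps every call site intact, and the compiler reduces the function to an empty no-op when the option is off. A minimal sketch of that pattern (the CONFIG_STACKTRACE macro is supplied via -D here purely for illustration):

#include <stdio.h>

/* Build with: cc -DCONFIG_STACKTRACE demo.c, or without the -D */
static void trace_stack(void)
{
#ifdef CONFIG_STACKTRACE
        puts("capturing stack trace");  /* real work only when supported */
#endif
}

int main(void)
{
        trace_stack();  /* callers never need their own #ifdef */
        return 0;
}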
@@ -1086,17 +1088,20 @@ static void s_stop(struct seq_file *m, void *p)
         mutex_unlock(&trace_types_lock);
 }
 
-#define KRETPROBE_MSG "[unknown/kretprobe'd]"
-
 #ifdef CONFIG_KRETPROBES
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-        return addr == (unsigned long)kretprobe_trampoline;
+        static const char tramp_name[] = "kretprobe_trampoline";
+        int size = sizeof(tramp_name);
+
+        if (strncmp(tramp_name, name, size) == 0)
+                return "[unknown/kretprobe'd]";
+        return name;
 }
 #else
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-        return 0;
+        return name;
 }
 #endif /* CONFIG_KRETPROBES */
 
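kretprobed() now operates on the resolved symbol name instead of the raw address, which lets the substitution live inside seq_print_sym_short() and seq_print_sym_offset() (next two hunks) rather than being repeated at every call site. Note that sizeof(tramp_name) includes the terminating NUL, so the strncmp() is effectively an exact-match test on the bare symbol name. A standalone sketch of the helper's behavior:

#include <stdio.h>
#include <string.h>

/* Mirrors the new kretprobed(): a symbol that resolves to the kretprobe
 * trampoline is reported with a placeholder, since the trampoline's own
 * name would be misleading in a trace. */
static const char *kretprobed(const char *name)
{
        static const char tramp_name[] = "kretprobe_trampoline";
        int size = sizeof(tramp_name);  /* 21: includes the NUL */

        if (strncmp(tramp_name, name, size) == 0)
                return "[unknown/kretprobe'd]";
        return name;
}

int main(void)
{
        printf("%s\n", kretprobed("kretprobe_trampoline"));    /* placeholder */
        printf("%s\n", kretprobed("schedule"));                 /* unchanged */
        return 0;
}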
@@ -1105,10 +1110,13 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
 #ifdef CONFIG_KALLSYMS
         char str[KSYM_SYMBOL_LEN];
+        const char *name;
 
         kallsyms_lookup(address, NULL, NULL, NULL, str);
 
-        return trace_seq_printf(s, fmt, str);
+        name = kretprobed(str);
+
+        return trace_seq_printf(s, fmt, name);
 #endif
         return 1;
 }
@@ -1119,9 +1127,12 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 {
 #ifdef CONFIG_KALLSYMS
         char str[KSYM_SYMBOL_LEN];
+        const char *name;
 
         sprint_symbol(str, address);
-        return trace_seq_printf(s, fmt, str);
+        name = kretprobed(str);
+
+        return trace_seq_printf(s, fmt, name);
 #endif
         return 1;
 }
@@ -1375,10 +1386,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 
                 seq_print_ip_sym(s, field->ip, sym_flags);
                 trace_seq_puts(s, " (");
-                if (kretprobed(field->parent_ip))
-                        trace_seq_puts(s, KRETPROBE_MSG);
-                else
-                        seq_print_ip_sym(s, field->parent_ip, sym_flags);
+                seq_print_ip_sym(s, field->parent_ip, sym_flags);
                 trace_seq_puts(s, ")\n");
                 break;
         }
@@ -1494,12 +1502,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                         ret = trace_seq_printf(s, " <-");
                         if (!ret)
                                 return TRACE_TYPE_PARTIAL_LINE;
-                        if (kretprobed(field->parent_ip))
-                                ret = trace_seq_puts(s, KRETPROBE_MSG);
-                        else
-                                ret = seq_print_ip_sym(s,
-                                                       field->parent_ip,
-                                                       sym_flags);
+                        ret = seq_print_ip_sym(s,
+                                               field->parent_ip,
+                                               sym_flags);
                         if (!ret)
                                 return TRACE_TYPE_PARTIAL_LINE;
                 }
