Diffstat (limited to 'kernel/trace')

 kernel/trace/trace.c | 353
 kernel/trace/trace.h |   2
 2 files changed, 309 insertions, 46 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d39f4faec7c3..a40687a4413a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -174,15 +174,20 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 {
 	int len = (PAGE_SIZE - 1) - s->len;
 	va_list ap;
+	int ret;
 
 	if (!len)
 		return 0;
 
 	va_start(ap, fmt);
-	len = vsnprintf(s->buffer + s->len, len, fmt, ap);
+	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
 	va_end(ap);
 
-	s->len += len;
+	/* If we can't write it all, don't bother writing anything */
+	if (ret > len)
+		return 0;
+
+	s->len += ret;
 
 	return len;
 }
@@ -193,7 +198,7 @@ trace_seq_puts(struct trace_seq *s, const char *str)
 	int len = strlen(str);
 
 	if (len > ((PAGE_SIZE - 1) - s->len))
-		len = (PAGE_SIZE - 1) - s->len;
+		return 0;
 
 	memcpy(s->buffer + s->len, str, len);
 	s->len += len;
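The two trace_seq hunks above switch the buffer from best-effort truncation to all-or-nothing: a record that cannot be written in full is not written at all, so a reader never sees half a line. A minimal userspace sketch of the same policy (hypothetical names standing in for the kernel's trace_seq, not its API):

    #include <stdarg.h>
    #include <stdio.h>

    #define SEQ_SIZE 4096                /* stands in for PAGE_SIZE */

    struct seq {
        char buf[SEQ_SIZE];
        int len;
    };

    /* Append a whole formatted record, or nothing at all. */
    static int seq_printf_all_or_nothing(struct seq *s, const char *fmt, ...)
    {
        int room = (SEQ_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!room)
            return 0;

        va_start(ap, fmt);
        /* vsnprintf() returns the length it *wanted* to write */
        ret = vsnprintf(s->buf + s->len, room, fmt, ap);
        va_end(ap);

        /* C's vsnprintf truncates iff ret >= room (one byte is the NUL) */
        if (ret >= room)
            return 0;                    /* s->len untouched: nothing appended */

        s->len += ret;
        return ret;
    }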
@@ -615,11 +620,13 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 {
 	struct trace_entry *entry;
 
+	spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_FN;
 	entry->fn.ip = ip;
 	entry->fn.parent_ip = parent_ip;
+	spin_unlock(&data->lock);
 }
 
 notrace void
@@ -630,6 +637,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 {
 	struct trace_entry *entry;
 
+	spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_CTX;
@@ -638,6 +646,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->ctx.prev_state = prev->state;
 	entry->ctx.next_pid = next->pid;
 	entry->ctx.next_prio = next->prio;
+	spin_unlock(&data->lock);
 }
 
 enum trace_file_type {
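Both record paths now take data->lock around reserving and filling the entry. The point becomes visible once the consumer below exists: without the lock, tracing_read_pipe() could advance the tail past an entry whose fields are still being written. A userspace sketch of the discipline, with a pthread mutex standing in for the spinlock (not the ftrace code):

    #include <pthread.h>

    struct entry { unsigned long ip, parent_ip; };

    struct cpu_buffer {
        pthread_mutex_t lock;            /* plays the role of data->lock */
        struct entry ring[1024];
        unsigned int head;               /* producer cursor */
    };

    /* Reserve + fill must be one atomic step with respect to the
     * consumer, or a reader could observe a reserved but half-filled
     * entry. */
    static void record_fn(struct cpu_buffer *b,
                          unsigned long ip, unsigned long parent_ip)
    {
        pthread_mutex_lock(&b->lock);
        struct entry *e = &b->ring[b->head++ % 1024];
        e->ip = ip;
        e->parent_ip = parent_ip;
        pthread_mutex_unlock(&b->lock);
    }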
@@ -652,7 +661,9 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
 	struct trace_entry *array;
 
 	if (iter->next_idx[cpu] >= tr->entries ||
-	    iter->next_idx[cpu] >= data->trace_idx)
+	    iter->next_idx[cpu] >= data->trace_idx ||
+	    (data->trace_head == data->trace_tail &&
+	     data->trace_head_idx == data->trace_tail_idx))
 		return NULL;
 
 	if (!iter->next_page[cpu]) {
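The extra clause makes "nothing left to read" explicit: once a consumer exists, data->trace_idx only says that entries were ever produced, while head == tail on both the page pointer and the in-page index says the ring is currently drained. The same test in isolation (toy types, hypothetical names):

    struct page_ring {
        void *head_page, *tail_page;     /* current producer / consumer page */
        int head_idx, tail_idx;          /* entry slot within each page */
    };

    /* Drained iff both cursors sit on the same page *and* the same
     * slot; comparing pages alone is not enough, since producer and
     * consumer usually share one partially-filled page. */
    static int ring_drained(const struct page_ring *r)
    {
        return r->head_page == r->tail_page &&
               r->head_idx == r->tail_idx;
    }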
@@ -702,33 +713,57 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 	return next;
 }
 
-static void *find_next_entry_inc(struct trace_iterator *iter)
-{
-	struct trace_entry *next;
-	int next_cpu = -1;
-
-	next = find_next_entry(iter, &next_cpu);
-
-	if (next) {
-		iter->idx++;
-		iter->next_idx[next_cpu]++;
-		iter->next_page_idx[next_cpu]++;
-
-		if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
-			struct trace_array_cpu *data = iter->tr->data[next_cpu];
-
-			iter->next_page_idx[next_cpu] = 0;
-			iter->next_page[next_cpu] =
-				trace_next_list(data, iter->next_page[next_cpu]);
-
-		}
-	}
+static notrace void
+trace_iterator_increment(struct trace_iterator *iter)
+{
+	iter->idx++;
+	iter->next_idx[iter->cpu]++;
+	iter->next_page_idx[iter->cpu]++;
+	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
+		struct trace_array_cpu *data = iter->tr->data[iter->cpu];
+
+		iter->next_page_idx[iter->cpu] = 0;
+		iter->next_page[iter->cpu] =
+			trace_next_list(data, iter->next_page[iter->cpu]);
+	}
+}
+
+static notrace void
+trace_consume(struct trace_iterator *iter)
+{
+	struct trace_array_cpu *data = iter->tr->data[iter->cpu];
+
+	data->trace_tail_idx++;
+	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
+		data->trace_tail = trace_next_page(data, data->trace_tail);
+		data->trace_tail_idx = 0;
+	}
+
+	/* Check if we empty it, then reset the index */
+	if (data->trace_head == data->trace_tail &&
+	    data->trace_head_idx == data->trace_tail_idx)
+		data->trace_idx = 0;
+
+	trace_iterator_increment(iter);
+}
+
+static notrace void *
+find_next_entry_inc(struct trace_iterator *iter)
+{
+	struct trace_entry *next;
+	int next_cpu = -1;
+
+	next = find_next_entry(iter, &next_cpu);
+
 	iter->prev_ent = iter->ent;
 	iter->prev_cpu = iter->cpu;
 
 	iter->ent = next;
 	iter->cpu = next_cpu;
 
+	if (next)
+		trace_iterator_increment(iter);
+
 	return next ? iter : NULL;
 }
 
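Splitting find_next_entry_inc() gives the two readers a shared stepping primitive: the seq_file iterator only moves its private cursor (trace_iterator_increment), while the pipe reader also moves the buffer's own tail (trace_consume), destroying what it reads. The (page, index) pair advances like a two-digit counter; roughly, under assumed constants and a minimal list type:

    #define ENTRIES_PER_PAGE 128         /* assumed; the kernel derives it from PAGE_SIZE */

    struct list_head { struct list_head *next, *prev; };

    struct cpu_buf {
        struct list_head *tail_page;     /* consumer's current page */
        int tail_idx;                    /* consumer's slot in that page */
    };

    /* Consume one entry: bump the slot, and on page overflow step to
     * the next page (the page list is circular, so this wraps). */
    static void consume_one(struct cpu_buf *b)
    {
        if (++b->tail_idx >= ENTRIES_PER_PAGE) {
            b->tail_page = b->tail_page->next;
            b->tail_idx = 0;
        }
    }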
@@ -815,7 +850,7 @@ static void s_stop(struct seq_file *m, void *p)
 	mutex_unlock(&trace_types_lock);
 }
 
-static void
+static int
 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
 #ifdef CONFIG_KALLSYMS
@@ -823,11 +858,12 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 
 	kallsyms_lookup(address, NULL, NULL, NULL, str);
 
-	trace_seq_printf(s, fmt, str);
+	return trace_seq_printf(s, fmt, str);
 #endif
+	return 1;
 }
 
-static void
+static int
 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 		     unsigned long address)
 {
@@ -835,8 +871,9 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 	char str[KSYM_SYMBOL_LEN];
 
 	sprint_symbol(str, address);
-	trace_seq_printf(s, fmt, str);
+	return trace_seq_printf(s, fmt, str);
 #endif
+	return 1;
 }
 
 #ifndef CONFIG_64BIT
@@ -845,21 +882,25 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 # define IP_FMT "%016lx"
 #endif
 
-static notrace void
+static notrace int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
-	if (!ip) {
-		trace_seq_printf(s, "0");
-		return;
-	}
+	int ret;
+
+	if (!ip)
+		return trace_seq_printf(s, "0");
 
 	if (sym_flags & TRACE_ITER_SYM_OFFSET)
-		seq_print_sym_offset(s, "%s", ip);
+		ret = seq_print_sym_offset(s, "%s", ip);
 	else
-		seq_print_sym_short(s, "%s", ip);
+		ret = seq_print_sym_short(s, "%s", ip);
+
+	if (!ret)
+		return 0;
 
 	if (sym_flags & TRACE_ITER_SYM_ADDR)
-		trace_seq_printf(s, " <" IP_FMT ">", ip);
+		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+	return ret;
 }
 
 static notrace void print_lat_help_header(struct seq_file *m)
@@ -1089,7 +1130,7 @@ static notrace void sync_time_offset(struct trace_iterator *iter)
 	array->time_offset += prev_t - t;
 }
 
-static notrace void
+static notrace int
 print_trace_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
@@ -1100,6 +1141,7 @@ print_trace_fmt(struct trace_iterator *iter)
 	unsigned long secs;
 	char *comm;
 	int S;
+	int ret;
 
 	sync_time_offset(iter);
 	entry = iter->ent;
@@ -1110,31 +1152,49 @@ print_trace_fmt(struct trace_iterator *iter)
 	usec_rem = do_div(t, 1000000ULL);
 	secs = (unsigned long)t;
 
-	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
-	trace_seq_printf(s, "[%02d] ", iter->cpu);
-	trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+	if (!ret)
+		return 0;
+	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+	if (!ret)
+		return 0;
+	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+	if (!ret)
+		return 0;
 
 	switch (entry->type) {
 	case TRACE_FN:
-		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+		if (!ret)
+			return 0;
 		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
 						entry->fn.parent_ip) {
-			trace_seq_printf(s, " <-");
-			seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
+			ret = trace_seq_printf(s, " <-");
+			if (!ret)
+				return 0;
+			ret = seq_print_ip_sym(s, entry->fn.parent_ip,
+					       sym_flags);
+			if (!ret)
+				return 0;
 		}
-		trace_seq_printf(s, "\n");
+		ret = trace_seq_printf(s, "\n");
+		if (!ret)
+			return 0;
 		break;
 	case TRACE_CTX:
 		S = entry->ctx.prev_state < sizeof(state_to_char) ?
 			state_to_char[entry->ctx.prev_state] : 'X';
-		trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
+		ret = trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
 				 entry->ctx.prev_pid,
 				 entry->ctx.prev_prio,
 				 S,
 				 entry->ctx.next_pid,
 				 entry->ctx.next_prio);
+		if (!ret)
+			return 0;
 		break;
 	}
+	return 1;
 }
 
 static int trace_empty(struct trace_iterator *iter)
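Every formatting helper now reports whether the whole line fit. Combined with the all-or-nothing trace_seq writes, a 0 return means the output buffer filled up on a record boundary: the caller can ship what it has and re-format the same, still-unconsumed entry on the next read. A compilable toy of that contract (hypothetical names):

    #include <stdio.h>

    /* format_entry() either emits a whole line or nothing (returns 0). */
    struct seq { char buf[64]; int len; };

    static int format_entry(struct seq *s, int value)
    {
        int room = (int)sizeof(s->buf) - s->len;
        int want = snprintf(s->buf + s->len, room, "entry %d\n", value);

        if (want >= room)
            return 0;                    /* did not fit; s->len untouched */
        s->len += want;
        return 1;
    }

    int main(void)
    {
        struct seq s = { .len = 0 };
        int next = 0;

        while (format_entry(&s, next))
            next++;                      /* "consume" only after a full line landed */
        fwrite(s.buf, 1, s.len, stdout);
        printf("stopped before entry %d; it stays queued for the next read\n",
               next);
        return 0;
    }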
@@ -1145,7 +1205,9 @@ static int trace_empty(struct trace_iterator *iter)
 	for_each_possible_cpu(cpu) {
 		data = iter->tr->data[cpu];
 
-		if (head_page(data) && data->trace_idx)
+		if (head_page(data) && data->trace_idx &&
+		    (data->trace_tail != data->trace_head ||
+		     data->trace_tail_idx != data->trace_head_idx))
 			return 0;
 	}
 	return 1;
@@ -1645,6 +1707,192 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static atomic_t tracing_reader;
+
+static int tracing_open_pipe(struct inode *inode, struct file *filp)
+{
+	struct trace_iterator *iter;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	/* We only allow for reader of the pipe */
+	if (atomic_inc_return(&tracing_reader) != 1) {
+		atomic_dec(&tracing_reader);
+		return -EBUSY;
+	}
+
+	/* create a buffer to store the information to pass to userspace */
+	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+	if (!iter)
+		return -ENOMEM;
+
+	iter->tr = &global_trace;
+
+	filp->private_data = iter;
+
+	return 0;
+}
+
+static int tracing_release_pipe(struct inode *inode, struct file *file)
+{
+	struct trace_iterator *iter = file->private_data;
+
+	kfree(iter);
+	atomic_dec(&tracing_reader);
+
+	return 0;
+}
+
+/*
+ * Consumer reader.
+ */
+static ssize_t
+tracing_read_pipe(struct file *filp, char __user *ubuf,
+		  size_t cnt, loff_t *ppos)
+{
+	struct trace_iterator *iter = filp->private_data;
+	struct trace_array_cpu *data;
+	static cpumask_t mask;
+	struct trace_entry *entry;
+	static int start;
+	unsigned long flags;
+	int read = 0;
+	int cpu;
+	int len;
+	int ret;
+
+	/* return any leftover data */
+	if (iter->seq.len > start) {
+		len = iter->seq.len - start;
+		if (cnt > len)
+			cnt = len;
+		ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
+		if (ret)
+			cnt = -EFAULT;
+
+		start += len;
+
+		return cnt;
+	}
+
+	trace_seq_reset(&iter->seq);
+	start = 0;
+
+	while (trace_empty(iter)) {
+		/*
+		 * This is a make-shift waitqueue. The reason we don't use
+		 * an actual wait queue is because:
+		 *  1) we only ever have one waiter
+		 *  2) the tracing, traces all functions, we don't want
+		 *     the overhead of calling wake_up and friends
+		 *     (and tracing them too)
+		 * Anyway, this is really very primitive wakeup.
+		 */
+		set_current_state(TASK_INTERRUPTIBLE);
+		iter->tr->waiter = current;
+
+		/* sleep for one second, and try again. */
+		schedule_timeout(HZ);
+
+		iter->tr->waiter = NULL;
+
+		if (signal_pending(current))
+			return -EINTR;
+
+		/*
+		 * We block until we read something and tracing is disabled.
+		 * We still block if tracing is disabled, but we have never
+		 * read anything. This allows a user to cat this file, and
+		 * then enable tracing. But after we have read something,
+		 * we give an EOF when tracing is again disabled.
+		 *
+		 * iter->pos will be 0 if we haven't read anything.
+		 */
+		if (!tracer_enabled && iter->pos)
+			break;
+
+		continue;
+	}
+
+	/* stop when tracing is finished */
+	if (trace_empty(iter))
+		return 0;
+
+	if (cnt >= PAGE_SIZE)
+		cnt = PAGE_SIZE - 1;
+
+	memset(iter, 0, sizeof(*iter));
+	iter->tr = &global_trace;
+	iter->pos = -1;
+
+	/*
+	 * We need to stop all tracing on all CPUS to read the
+	 * the next buffer. This is a bit expensive, but is
+	 * not done often. We fill all what we can read,
+	 * and then release the locks again.
+	 */
+
+	cpus_clear(mask);
+	local_irq_save(flags);
+	for_each_possible_cpu(cpu) {
+		data = iter->tr->data[cpu];
+
+		if (!head_page(data) || !data->trace_idx)
+			continue;
+
+		atomic_inc(&data->disabled);
+		spin_lock(&data->lock);
+		cpu_set(cpu, mask);
+	}
+
+	while ((entry = find_next_entry(iter, &cpu))) {
+
+		if (!entry)
+			break;
+
+		iter->ent = entry;
+		iter->cpu = cpu;
+
+		ret = print_trace_fmt(iter);
+		if (!ret)
+			break;
+
+		trace_consume(iter);
+
+		if (iter->seq.len >= cnt)
+			break;
+
+	}
+
+	for_each_possible_cpu(cpu) {
+		data = iter->tr->data[cpu];
+
+		if (!cpu_isset(cpu, mask))
+			continue;
+		spin_unlock(&data->lock);
+		atomic_dec(&data->disabled);
+	}
+	local_irq_restore(flags);
+
+	/* Now copy what we have to the user */
+	read = iter->seq.len;
+	if (read > cnt)
+		read = cnt;
+
+	ret = copy_to_user(ubuf, iter->seq.buffer, read);
+
+	if (read < iter->seq.len)
+		start = read;
+	else
+		trace_seq_reset(&iter->seq);
+
+	if (ret)
+		read = -EFAULT;
+
+	return read;
+}
+
 static struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
@@ -1663,6 +1911,12 @@ static struct file_operations set_tracer_fops = {
 	.write = tracing_set_trace_write,
 };
 
+static struct file_operations tracing_pipe_fops = {
+	.open = tracing_open_pipe,
+	.read = tracing_read_pipe,
+	.release = tracing_release_pipe,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static ssize_t
@@ -1763,6 +2017,11 @@ static __init void tracer_init_debugfs(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'README' entry\n");
 
+	entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
+				    NULL, &tracing_pipe_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'trace_pipe' entry\n");
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
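Once tracer_init_debugfs() has created the trace_pipe file, userspace gets a blocking, consuming stream: read() sleeps (polling about once a second via the makeshift waitqueue above) until entries arrive, and what it returns is gone from the buffer. A plausible reader, assuming debugfs is mounted at /sys/kernel/debug (the mount point is a convention, not part of this patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        /* Only one reader may hold the pipe open at a time: a second
         * open() fails with EBUSY (see tracing_open_pipe above). */
        int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

        if (fd < 0) {
            perror("open trace_pipe");
            return 1;
        }
        /* Each read() consumes the entries it returns; re-reading
         * yields only new data, unlike the seq_file "trace" view. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, (size_t)n, stdout);
        close(fd);
        return 0;
    }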
@@ -1816,6 +2075,7 @@ static int trace_alloc_page(void)
 	/* Now that we successfully allocate a page per CPU, add them */
 	for_each_possible_cpu(i) {
 		data = global_trace.data[i];
+		spin_lock_init(&data->lock);
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
@@ -1823,6 +2083,7 @@ static int trace_alloc_page(void)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
+		spin_lock_init(&data->lock);
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f5b32ca0b457..29a7ea59de50 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -55,6 +55,7 @@ struct trace_entry {
 struct trace_array_cpu {
 	struct list_head trace_pages;
 	atomic_t disabled;
+	spinlock_t lock;
 	cycle_t time_offset;
 
 	/* these fields get copied into max-trace: */
@@ -88,6 +89,7 @@ struct trace_array {
 	long ctrl;
 	int cpu;
 	cycle_t time_start;
+	struct task_struct *waiter;
 	struct trace_array_cpu *data[NR_CPUS];
 };
 