 block/blktrace.c                 | 21
 kernel/trace/kmemtrace.c         | 19
 kernel/trace/trace.c             | 94
 kernel/trace/trace.h             | 11
 kernel/trace/trace_boot.c        | 20
 kernel/trace/trace_branch.c      |  7
 kernel/trace/trace_hw_branches.c |  7
 kernel/trace/trace_mmiotrace.c   | 20
 kernel/trace/trace_power.c       | 20
 9 files changed, 102 insertions(+), 117 deletions(-)
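
This patch factors the event-emission boilerplate that every tracer had been open-coding into two helpers, trace_buffer_lock_reserve() and trace_buffer_unlock_commit(), added in the kernel/trace/trace.c hunk below: the first wraps ring_buffer_lock_reserve() and stamps the generic header (tracing_generic_entry_update() plus the event type); the second wraps ring_buffer_unlock_commit() plus the follow-up work (kernel and user stack traces, trace_wake_up()). The sketch below is a minimal user-space model of that factoring; every type and function in it is a mocked stand-in for illustration, not the kernel's real definition.

/*
 * Minimal user-space sketch of the factoring this patch performs.
 * Every tracer used to open-code "reserve an event, stamp the generic
 * header, set the event type" on the way in and "commit, record stack
 * traces, wake readers" on the way out; the patch moves both sequences
 * behind trace_buffer_lock_reserve()/trace_buffer_unlock_commit().
 */
#include <stdio.h>
#include <stdlib.h>

struct trace_entry {			/* header shared by all event types */
	unsigned char	type;
	unsigned long	flags;
	int		pc;
};

struct ring_buffer_event {		/* mock: length + payload storage */
	size_t		len;
	unsigned char	data[256];
};

static struct ring_buffer_event *ring_buffer_lock_reserve(size_t len)
{
	struct ring_buffer_event *event;

	if (len > sizeof(event->data))
		return NULL;
	event = calloc(1, sizeof(*event));
	if (event)
		event->len = len;
	return event;
}

static void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return event->data;
}

/* one place that stamps the common header for every tracer */
static struct ring_buffer_event *
trace_buffer_lock_reserve(unsigned char type, size_t len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event = ring_buffer_lock_reserve(len);

	if (event) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		ent->type  = type;
		ent->flags = flags;
		ent->pc    = pc;
	}
	return event;
}

/* one place for the post-commit work most tracers duplicated */
static void trace_buffer_unlock_commit(struct ring_buffer_event *event,
				       unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	printf("commit: type=%u len=%zu flags=%lu pc=%d\n",
	       ent->type, event->len, flags, pc);
	/* the kernel helper also records kernel/user stack traces and
	 * wakes trace_pipe readers via trace_wake_up() at this point */
	free(event);
}

struct fn_entry {			/* models a typed event payload */
	struct trace_entry	ent;
	unsigned long		ip;
};

int main(void)
{
	/* a tracer's emit path is now: reserve, fill payload, commit */
	struct ring_buffer_event *event =
		trace_buffer_lock_reserve(1 /* e.g. TRACE_FN */,
					  sizeof(struct fn_entry), 0, 0);
	struct fn_entry *entry;

	if (!event)
		return 1;
	entry = ring_buffer_event_data(event);
	entry->ip = 0x1234;
	trace_buffer_unlock_commit(event, 0, 0);
	return 0;
}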
diff --git a/block/blktrace.c b/block/blktrace.c
index 8e52f24cc8f9..834cd84037b2 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -187,19 +187,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	cpu = raw_smp_processor_id();
 
 	if (blk_tr) {
-		struct trace_entry *ent;
 		tracing_record_cmdline(current);
 
-		event = ring_buffer_lock_reserve(blk_tr->buffer,
-						 sizeof(*t) + pdu_len);
+		pc = preempt_count();
+		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+						  sizeof(*t) + pdu_len,
+						  0, pc);
 		if (!event)
 			return;
-
-		ent = ring_buffer_event_data(event);
-		t = (struct blk_io_trace *)ent;
-		pc = preempt_count();
-		tracing_generic_entry_update(ent, 0, pc);
-		ent->type = TRACE_BLK;
+		t = ring_buffer_event_data(event);
 		goto record_it;
 	}
 
@@ -241,12 +237,7 @@ record_it:
 		memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
 	if (blk_tr) {
-		ring_buffer_unlock_commit(blk_tr->buffer, event);
-		if (pid != 0 &&
-		    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
-		    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-			__trace_stack(blk_tr, 0, 5, pc);
-		trace_wake_up();
+		trace_buffer_unlock_commit(blk_tr, event, 0, pc);
 		return;
 	}
 }
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 256749d1032a..ae201b3eda89 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -276,13 +276,12 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	if (!kmem_tracing_enabled)
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
 
-	entry->ent.type = TRACE_KMEM_ALLOC;
 	entry->call_site = call_site;
 	entry->ptr = ptr;
 	entry->bytes_req = bytes_req;
@@ -290,9 +289,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	entry->gfp_flags = gfp_flags;
 	entry->node = node;
 
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 }
 EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
 
@@ -307,20 +304,16 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
 	if (!kmem_tracing_enabled)
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-
-	entry->ent.type = TRACE_KMEM_FREE;
 	entry->type_id = type_id;
 	entry->call_site = call_site;
 	entry->ptr = ptr;
 
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 }
 EXPORT_SYMBOL(kmemtrace_mark_free);
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index eb453a238a6f..8fad3776e843 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -776,6 +776,39 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
 
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags, int pc)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(tr->buffer, len);
+	if (event != NULL) {
+		struct trace_entry *ent = ring_buffer_event_data(event);
+
+		tracing_generic_entry_update(ent, flags, pc);
+		ent->type = type;
+	}
+
+	return event;
+}
+static void ftrace_trace_stack(struct trace_array *tr,
+			       unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+				   unsigned long flags, int pc);
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
+{
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+	trace_wake_up();
+}
+
 void
 trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -788,12 +821,11 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+					  flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN;
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 	ring_buffer_unlock_commit(tr->buffer, event);
@@ -811,12 +843,11 @@ static void __trace_graph_entry(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_GRAPH_ENT;
 	entry->graph_ent = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
@@ -832,12 +863,11 @@ static void __trace_graph_return(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_GRAPH_RET;
 	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
@@ -861,13 +891,11 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct stack_entry *entry;
 	struct stack_trace trace;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_STACK;
-
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	trace.nr_entries = 0;
@@ -908,12 +936,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_USER_STACK;
 
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
@@ -941,20 +968,15 @@ ftrace_trace_special(void *__tr,
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, pc);
-	entry->ent.type = TRACE_SPECIAL;
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, 0, 4, pc);
-	ftrace_trace_userstack(tr, 0, pc);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
 void
@@ -973,12 +995,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_CTX;
 	entry->prev_pid = prev->pid;
 	entry->prev_prio = prev->prio;
 	entry->prev_state = prev->state;
@@ -986,9 +1007,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_prio = next->prio;
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 5, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -1000,12 +1019,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_WAKE;
 	entry->prev_pid = curr->pid;
 	entry->prev_prio = curr->prio;
 	entry->prev_state = curr->state;
@@ -1013,11 +1031,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_prio = wakee->prio;
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -2825,12 +2839,10 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = ring_buffer_lock_reserve(tr->buffer, size);
+	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, irq_flags, pc);
-	entry->ent.type = TRACE_PRINT;
 	entry->ip = ip;
 	entry->depth = depth;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index df627a948694..e03f157c772e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -403,6 +403,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags,
+						    int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc);
+
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
 
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 4e08debf662d..7a30fc4c3642 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -143,17 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_BOOT_CALL;
 	entry->boot_call = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
 	preempt_enable();
 }
@@ -170,17 +166,13 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_BOOT_RET;
 	entry->boot_ret = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 770e52acfc10..48b2196abe37 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -52,14 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	pc = preempt_count();
+	event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		goto out;
 
-	pc = preempt_count();
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_BRANCH;
 
 	/* Strip off the path, only save the file */
 	p = f->file + strlen(f->file);
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index e720c001db2b..2aa1c9f4c7d8 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -189,16 +189,15 @@ void trace_hw_branch(u64 from, u64 to)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, from);
-	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->ent.cpu = cpu;
 	entry->from = from;
 	entry->to = to;
-	ring_buffer_unlock_commit(tr->buffer, event);
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 
 out:
 	atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 104ddebc11d1..c401b908e805 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,19 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
+	int pc = preempt_count();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
 	}
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
-	entry->ent.type = TRACE_MMIO_RW;
 	entry->rw = *rw;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -335,19 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
+	int pc = preempt_count();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
 	}
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
-	entry->ent.type = TRACE_MMIO_MAP;
 	entry->map = *map;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 3b1a292d12d2..bfc21f8079ab 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -124,17 +124,13 @@ void trace_power_end(struct power_trace *it)
 	it->end = ktime_get();
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
 	preempt_enable();
 }
@@ -159,17 +155,13 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
 	it->end = it->stamp;
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
 	preempt_enable();
 }