-rw-r--r--	block/blktrace.c			 8
-rw-r--r--	include/linux/ring_buffer.h		 9
-rw-r--r--	kernel/trace/kmemtrace.c		12
-rw-r--r--	kernel/trace/ring_buffer.c		 9
-rw-r--r--	kernel/trace/trace.c			56
-rw-r--r--	kernel/trace/trace_boot.c		12
-rw-r--r--	kernel/trace/trace_branch.c		 7
-rw-r--r--	kernel/trace/trace_hw_branches.c	 6
-rw-r--r--	kernel/trace/trace_mmiotrace.c		12
-rw-r--r--	kernel/trace/trace_power.c		12
10 files changed, 51 insertions, 92 deletions
diff --git a/block/blktrace.c b/block/blktrace.c
index d9d7146ee023..8e52f24cc8f9 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -165,7 +165,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
 	struct blk_io_trace *t;
-	unsigned long flags;
+	unsigned long flags = 0;
 	unsigned long *sequence;
 	pid_t pid;
 	int cpu, pc = 0;
@@ -191,7 +191,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		tracing_record_cmdline(current);
 
 		event = ring_buffer_lock_reserve(blk_tr->buffer,
-						 sizeof(*t) + pdu_len, &flags);
+						 sizeof(*t) + pdu_len);
 		if (!event)
 			return;
 
@@ -241,11 +241,11 @@ record_it:
 		memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
 	if (blk_tr) {
-		ring_buffer_unlock_commit(blk_tr->buffer, event, flags);
+		ring_buffer_unlock_commit(blk_tr->buffer, event);
 		if (pid != 0 &&
 		    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
 		    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-			__trace_stack(blk_tr, flags, 5, pc);
+			__trace_stack(blk_tr, 0, 5, pc);
 		trace_wake_up();
 		return;
 	}
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..3110d92e7d81 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -74,13 +74,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-			 unsigned long length,
-			 unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+						   unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-			      struct ring_buffer_event *event,
-			      unsigned long flags);
+			      struct ring_buffer_event *event);
 int ring_buffer_write(struct ring_buffer *buffer,
 		      unsigned long length, void *data);
 
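The header hunk above is the whole of the interface change: callers no longer pass an interrupt-flags pointer to the reserve side, and no longer hand the flags back at commit time. A minimal before/after sketch of a call site follows; the tr tracer pointer and struct my_entry payload are hypothetical placeholders, not code from this patch.

	/* Before: the caller shuttled IRQ flags between reserve and commit. */
	struct ring_buffer_event *event;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(struct my_entry),
					 &irq_flags);
	if (!event)
		return;
	/* ... fill ring_buffer_event_data(event) ... */
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	/* After: the ring buffer keeps that state internally. */
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(struct my_entry));
	if (!event)
		return;
	/* ... fill ring_buffer_event_data(event) ... */
	ring_buffer_unlock_commit(tr->buffer, event);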
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index f04c0625f1cd..256749d1032a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -272,13 +272,11 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	struct ring_buffer_event *event;
 	struct kmemtrace_alloc_entry *entry;
 	struct trace_array *tr = kmemtrace_array;
-	unsigned long irq_flags;
 
 	if (!kmem_tracing_enabled)
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -292,7 +290,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	entry->gfp_flags = gfp_flags;
 	entry->node = node;
 
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
@@ -305,13 +303,11 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
 	struct ring_buffer_event *event;
 	struct kmemtrace_free_entry *entry;
 	struct trace_array *tr = kmemtrace_array;
-	unsigned long irq_flags;
 
 	if (!kmem_tracing_enabled)
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -322,7 +318,7 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
 	entry->call_site = call_site;
 	entry->ptr = ptr;
 
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b36d7374ceef..aee76b3eeed2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1257,7 +1257,6 @@ static DEFINE_PER_CPU(int, rb_need_resched);
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
  * @length: the length of the data to reserve (excluding event header)
- * @flags: a pointer to save the interrupt flags
  *
  * Returns a reseverd event on the ring buffer to copy directly to.
  * The user of this interface will need to get the body to write into
@@ -1270,9 +1269,7 @@ static DEFINE_PER_CPU(int, rb_need_resched);
  * If NULL is returned, then nothing has been allocated or locked.
  */
 struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-			 unsigned long length,
-			 unsigned long *flags)
+ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -1339,15 +1336,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
  * ring_buffer_unlock_commit - commit a reserved
  * @buffer: The buffer to commit to
  * @event: The event pointer to commit.
- * @flags: the interrupt flags received from ring_buffer_lock_reserve.
  *
  * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-			      struct ring_buffer_event *event,
-			      unsigned long flags)
+			      struct ring_buffer_event *event)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	int cpu = raw_smp_processor_id();
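The kernel-doc above states the contract that survives the signature change: a successful ring_buffer_lock_reserve() must be paired with ring_buffer_unlock_commit() on the same buffer, and a NULL return means nothing was reserved or locked. A hedged sketch of a writer using the new two-argument forms; the write_sample() wrapper and the u64 payload are illustrative names only, not part of the kernel API.

	static void write_sample(struct ring_buffer *buffer, u64 value)
	{
		struct ring_buffer_event *event;
		u64 *body;

		/* Reserve room for the payload; NULL means nothing was locked. */
		event = ring_buffer_lock_reserve(buffer, sizeof(*body));
		if (!event)
			return;

		/* Fill the reserved body in place... */
		body = ring_buffer_event_data(event);
		*body = value;

		/* ...then commit, which releases whatever the reserve took. */
		ring_buffer_unlock_commit(buffer, event);
	}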
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3536ef41575d..eb453a238a6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -783,14 +783,12 @@ trace_function(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
-	unsigned long irq_flags;
 
 	/* If we are reading the ring buffer, don't trace */
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -798,7 +796,7 @@ trace_function(struct trace_array *tr,
 	entry->ent.type = TRACE_FN;
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -809,20 +807,18 @@ static void __trace_graph_entry(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ent_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type = TRACE_GRAPH_ENT;
 	entry->graph_ent = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 
 static void __trace_graph_return(struct trace_array *tr,
@@ -832,20 +828,18 @@ static void __trace_graph_return(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type = TRACE_GRAPH_RET;
 	entry->ret = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
@@ -866,10 +860,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -884,7 +876,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -912,13 +904,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
 
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -933,7 +923,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -950,10 +940,8 @@ ftrace_trace_special(void *__tr,
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -962,9 +950,9 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, irq_flags, 4, pc);
-	ftrace_trace_userstack(tr, irq_flags, pc);
+	ring_buffer_unlock_commit(tr->buffer, event);
+	ftrace_trace_stack(tr, 0, 4, pc);
+	ftrace_trace_userstack(tr, 0, pc);
 
 	trace_wake_up();
 }
@@ -984,10 +972,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -1000,7 +986,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_prio = next->prio;
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 5, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1013,10 +999,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -1029,7 +1013,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_prio = wakee->prio;
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 
@@ -2841,7 +2825,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, size);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -2852,7 +2836,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
  out_unlock:
 	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 1f07895977a0..4e08debf662d 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -132,7 +132,6 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 {
 	struct ring_buffer_event *event;
 	struct trace_boot_call *entry;
-	unsigned long irq_flags;
 	struct trace_array *tr = boot_trace;
 
 	if (!tr || !pre_initcalls_finished)
@@ -144,15 +143,14 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_BOOT_CALL;
 	entry->boot_call = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 
@@ -164,7 +162,6 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 {
 	struct ring_buffer_event *event;
 	struct trace_boot_ret *entry;
-	unsigned long irq_flags;
 	struct trace_array *tr = boot_trace;
 
 	if (!tr || !pre_initcalls_finished)
@@ -173,15 +170,14 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_BOOT_RET;
 	entry->boot_ret = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 027e83690615..770e52acfc10 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -33,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	struct trace_array *tr = branch_tracer;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
-	unsigned long flags, irq_flags;
+	unsigned long flags;
 	int cpu, pc;
 	const char *p;
 
@@ -52,8 +52,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 
@@ -75,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index fff3545fc866..e720c001db2b 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -175,7 +175,7 @@ void trace_hw_branch(u64 from, u64 to)
 	struct trace_array *tr = hw_branch_trace;
 	struct ring_buffer_event *event;
 	struct hw_branch_entry *entry;
-	unsigned long irq1, irq2;
+	unsigned long irq1;
 	int cpu;
 
 	if (unlikely(!tr))
@@ -189,7 +189,7 @@ void trace_hw_branch(u64 from, u64 to)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -198,7 +198,7 @@ void trace_hw_branch(u64 from, u64 to)
 	entry->ent.cpu = cpu;
 	entry->from = from;
 	entry->to = to;
-	ring_buffer_unlock_commit(tr->buffer, event, irq2);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index ec78e244242e..104ddebc11d1 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,10 +307,8 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -319,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
 	entry->rw = *rw;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
@@ -337,10 +335,8 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -349,7 +345,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
 	entry->map = *map;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index faa6ab7a1f5c..3b1a292d12d2 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -115,7 +115,6 @@ void trace_power_end(struct power_trace *it)
 	struct ring_buffer_event *event;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
-	unsigned long irq_flags;
 	struct trace_array *tr = power_trace;
 
 	if (!trace_power_enabled)
@@ -125,15 +124,14 @@ void trace_power_end(struct power_trace *it)
 	it->end = ktime_get();
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 
@@ -148,7 +146,6 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
 	struct ring_buffer_event *event;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
-	unsigned long irq_flags;
 	struct trace_array *tr = power_trace;
 
 	if (!trace_power_enabled)
@@ -162,15 +159,14 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
 	it->end = it->stamp;
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 