 include/linux/ftrace_event.h         |  15
 include/trace/ftrace.h               |  15
 kernel/trace/blktrace.c              |  12
 kernel/trace/trace.c                 | 117
 kernel/trace/trace.h                 |  17
 kernel/trace/trace_boot.c            |  12
 kernel/trace/trace_events.c          |   6
 kernel/trace/trace_functions_graph.c |  14
 kernel/trace/trace_mmiotrace.c       |  10
 kernel/trace/trace_power.c           |  18
 kernel/trace/trace_sched_switch.c    |  18
 kernel/trace/trace_syscalls.c        |  18
 12 files changed, 163 insertions(+), 109 deletions(-)
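The pattern repeated throughout this patch: every reserve/commit helper that used to take a struct trace_array * (or silently used global_trace) now takes the struct ring_buffer * itself, so the buffer resolved at reserve time is guaranteed to be the buffer used at commit time even if the tracer swaps buffers in between. A minimal before/after sketch of a call site (TRACE_EXAMPLE and the entry layout are hypothetical placeholders, not part of the patch):

    /* Before: each helper dereferences tr->buffer on its own, so a
     * buffer swap between reserve and commit could leave the commit
     * targeting a different buffer than the reserve. */
    event = trace_buffer_lock_reserve(tr, TRACE_EXAMPLE,
                                      sizeof(*entry), flags, pc);
    /* ... fill entry ... */
    trace_buffer_unlock_commit(tr, event, flags, pc);

    /* After: the buffer is read out of the trace_array exactly once
     * and threaded through every subsequent call. */
    struct ring_buffer *buffer = tr->buffer;
    event = trace_buffer_lock_reserve(buffer, TRACE_EXAMPLE,
                                      sizeof(*entry), flags, pc);
    /* ... fill entry ... */
    trace_buffer_unlock_commit(buffer, event, flags, pc);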
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 755480484eb6..23f7179bf74e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -93,13 +93,17 @@ void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
 				  int pc);
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
+				  int type, unsigned long len,
 				  unsigned long flags, int pc);
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
 					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
 				       unsigned long flags, int pc);
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event);
 
 void tracing_record_cmdline(struct task_struct *tsk);
 
@@ -135,7 +139,8 @@ struct ftrace_event_call {
 
 extern void destroy_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
-extern int filter_current_check_discard(struct ftrace_event_call *call,
+extern int filter_current_check_discard(struct ring_buffer *buffer,
+					struct ftrace_event_call *call,
 					void *rec,
 					struct ring_buffer_event *event);
 
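Note that trace_current_buffer_lock_reserve() gains an out-parameter rather than a plain struct ring_buffer * argument: the caller does not yet know which buffer is current, so the helper selects it and reports it back for the later commit or discard. A sketch of the intended calling sequence (the 'keep' condition is a placeholder):

    struct ring_buffer *buffer;
    struct ring_buffer_event *event;

    /* The helper picks the current buffer, stores it through the
     * out-parameter, and reserves space on it. */
    event = trace_current_buffer_lock_reserve(&buffer, type, len,
                                              flags, pc);
    if (!event)
            return;
    /* ... fill in ring_buffer_event_data(event) ... */

    if (keep)
            trace_current_buffer_unlock_commit(buffer, event, flags, pc);
    else
            trace_current_buffer_discard_commit(buffer, event);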
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index bfbc842600a1..308bafd93325 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -460,13 +460,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
  * {
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
- *	event = trace_current_buffer_lock_reserve(event_<call>.id,
+ *	event = trace_current_buffer_lock_reserve(&buffer,
+ *				  event_<call>.id,
  *				  sizeof(struct ftrace_raw_<call>),
  *				  irq_flags, pc);
  *	if (!event)
@@ -476,7 +478,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
  *	<assign>;  <-- Here we assign the entries by the __field and
  *		       __array macros.
  *
- *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(void)
@@ -568,6 +570,7 @@ static void ftrace_raw_event_##call(proto)				\
 	struct ftrace_event_call *event_call = &event_##call;		\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
+	struct ring_buffer *buffer;					\
 	unsigned long irq_flags;					\
 	int __data_size;						\
 	int pc;								\
@@ -577,7 +580,8 @@ static void ftrace_raw_event_##call(proto)				\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
-	event = trace_current_buffer_lock_reserve(event_##call.id,	\
+	event = trace_current_buffer_lock_reserve(&buffer,		\
+				 event_##call.id,			\
 				 sizeof(*entry) + __data_size,		\
 				 irq_flags, pc);			\
 	if (!event)							\
@@ -589,8 +593,9 @@ static void ftrace_raw_event_##call(proto)				\
 									\
 	{ assign; }							\
 									\
-	if (!filter_current_check_discard(event_call, entry, event))	\
-		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+		trace_nowake_buffer_unlock_commit(buffer,		\
+						  event, irq_flags, pc); \
 }									\
 									\
 static int ftrace_raw_reg_event_##call(void *ptr)			\
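For concreteness, here is roughly what the updated stage-3 macro expands to for a hypothetical one-field event "foo" (the event name, prototype, and field are illustrative only; the real expansion also computes __data_size for dynamic arrays):

    static void ftrace_raw_event_foo(int bar)
    {
            struct ftrace_event_call *event_call = &event_foo;
            struct ring_buffer_event *event;
            struct ftrace_raw_foo *entry;
            struct ring_buffer *buffer;
            unsigned long irq_flags;
            int pc;

            local_save_flags(irq_flags);
            pc = preempt_count();

            /* Reserve on the current buffer; the buffer used is
             * reported back through &buffer for the commit below. */
            event = trace_current_buffer_lock_reserve(&buffer,
                                                      event_foo.id,
                                                      sizeof(*entry),
                                                      irq_flags, pc);
            if (!event)
                    return;
            entry = ring_buffer_event_data(event);
            entry->bar = bar;

            if (!filter_current_check_discard(buffer, event_call,
                                              entry, event))
                    trace_nowake_buffer_unlock_commit(buffer, event,
                                                      irq_flags, pc);
    }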
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 1090b0aed9ba..243bafc2ec90 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -65,13 +65,15 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 {
 	struct blk_io_trace *t;
 	struct ring_buffer_event *event = NULL;
+	struct ring_buffer *buffer = NULL;
 	int pc = 0;
 	int cpu = smp_processor_id();
 	bool blk_tracer = blk_tracer_enabled;
 
 	if (blk_tracer) {
+		buffer = blk_tr->buffer;
 		pc = preempt_count();
-		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + len,
 						  0, pc);
 		if (!event)
@@ -96,7 +98,7 @@ record_it:
 		memcpy((void *) t + sizeof(*t), data, len);
 
 		if (blk_tracer)
-			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+			trace_buffer_unlock_commit(buffer, event, 0, pc);
 	}
 }
 
@@ -179,6 +181,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 {
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
+	struct ring_buffer *buffer = NULL;
 	struct blk_io_trace *t;
 	unsigned long flags = 0;
 	unsigned long *sequence;
@@ -204,8 +207,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	if (blk_tracer) {
 		tracing_record_cmdline(current);
 
+		buffer = blk_tr->buffer;
 		pc = preempt_count();
-		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + pdu_len,
 						  0, pc);
 		if (!event)
@@ -252,7 +256,7 @@ record_it:
 		memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
 		if (blk_tracer) {
-			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+			trace_buffer_unlock_commit(buffer, event, 0, pc);
 			return;
 		}
 	}
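Both blktrace paths snapshot blk_tr->buffer under the blk_tracer check before reserving, and the commit under the record_it: label reuses that snapshot, so reserve and commit can no longer disagree about the buffer. Condensed sketch of the pattern (error handling and the non-tracer branch elided):

    struct ring_buffer *buffer = NULL;

    if (blk_tracer) {
            buffer = blk_tr->buffer;        /* snapshot once */
            pc = preempt_count();
            event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                              sizeof(*t) + len, 0, pc);
            /* ... */
    }
    /* ... fill in the blk_io_trace record ... */
    if (blk_tracer)
            trace_buffer_unlock_commit(buffer, event, 0, pc);  /* same buffer */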
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0418e2650d41..0c61836e30e7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -169,10 +169,11 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, global_trace.buffer, event);
+	return filter_check_discard(call, rec, buffer, event);
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
@@ -887,14 +888,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-						    int type,
-						    unsigned long len,
-						    unsigned long flags, int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *event;
 
-	event = ring_buffer_lock_reserve(tr->buffer, len);
+	event = ring_buffer_lock_reserve(buffer, len);
 	if (event != NULL) {
 		struct trace_entry *ent = ring_buffer_event_data(event);
 
@@ -905,53 +907,59 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 	return event;
 }
 
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
-					struct ring_buffer_event *event,
-					unsigned long flags, int pc,
-					int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     unsigned long flags, int pc,
+			     int wake)
 {
-	ring_buffer_unlock_commit(tr->buffer, event);
+	ring_buffer_unlock_commit(buffer, event);
 
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_userstack(buffer, flags, pc);
 
 	if (wake)
 		trace_wake_up();
 }
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+				  int type, unsigned long len,
 				  unsigned long flags, int pc)
 {
-	return trace_buffer_lock_reserve(&global_trace,
+	*current_rb = global_trace.buffer;
+	return trace_buffer_lock_reserve(*current_rb,
 					 type, len, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event)
 {
-	ring_buffer_discard_commit(global_trace.buffer, event);
+	ring_buffer_discard_commit(buffer, event);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
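All three commit variants now funnel into the one internal helper; the only behavioral difference is the wake flag, which controls whether readers blocked on the trace file are woken after the commit. Illustrative equivalences, as the code above defines them:

    trace_buffer_unlock_commit(buffer, event, flags, pc);
            /* == __trace_buffer_unlock_commit(buffer, event, flags, pc, 1) */
    trace_current_buffer_unlock_commit(buffer, event, flags, pc);
            /* == __trace_buffer_unlock_commit(buffer, event, flags, pc, 1) */
    trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
            /* == __trace_buffer_unlock_commit(buffer, event, flags, pc, 0) */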
| @@ -961,6 +969,7 @@ trace_function(struct trace_array *tr, | |||
| 961 | int pc) | 969 | int pc) |
| 962 | { | 970 | { |
| 963 | struct ftrace_event_call *call = &event_function; | 971 | struct ftrace_event_call *call = &event_function; |
| 972 | struct ring_buffer *buffer = tr->buffer; | ||
| 964 | struct ring_buffer_event *event; | 973 | struct ring_buffer_event *event; |
| 965 | struct ftrace_entry *entry; | 974 | struct ftrace_entry *entry; |
| 966 | 975 | ||
| @@ -968,7 +977,7 @@ trace_function(struct trace_array *tr, | |||
| 968 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 977 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
| 969 | return; | 978 | return; |
| 970 | 979 | ||
| 971 | event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry), | 980 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), |
| 972 | flags, pc); | 981 | flags, pc); |
| 973 | if (!event) | 982 | if (!event) |
| 974 | return; | 983 | return; |
| @@ -976,8 +985,8 @@ trace_function(struct trace_array *tr, | |||
| 976 | entry->ip = ip; | 985 | entry->ip = ip; |
| 977 | entry->parent_ip = parent_ip; | 986 | entry->parent_ip = parent_ip; |
| 978 | 987 | ||
| 979 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 988 | if (!filter_check_discard(call, entry, buffer, event)) |
| 980 | ring_buffer_unlock_commit(tr->buffer, event); | 989 | ring_buffer_unlock_commit(buffer, event); |
| 981 | } | 990 | } |
| 982 | 991 | ||
| 983 | void | 992 | void |
| @@ -990,7 +999,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data, | |||
| 990 | } | 999 | } |
| 991 | 1000 | ||
| 992 | #ifdef CONFIG_STACKTRACE | 1001 | #ifdef CONFIG_STACKTRACE |
| 993 | static void __ftrace_trace_stack(struct trace_array *tr, | 1002 | static void __ftrace_trace_stack(struct ring_buffer *buffer, |
| 994 | unsigned long flags, | 1003 | unsigned long flags, |
| 995 | int skip, int pc) | 1004 | int skip, int pc) |
| 996 | { | 1005 | { |
| @@ -999,7 +1008,7 @@ static void __ftrace_trace_stack(struct trace_array *tr, | |||
| 999 | struct stack_entry *entry; | 1008 | struct stack_entry *entry; |
| 1000 | struct stack_trace trace; | 1009 | struct stack_trace trace; |
| 1001 | 1010 | ||
| 1002 | event = trace_buffer_lock_reserve(tr, TRACE_STACK, | 1011 | event = trace_buffer_lock_reserve(buffer, TRACE_STACK, |
| 1003 | sizeof(*entry), flags, pc); | 1012 | sizeof(*entry), flags, pc); |
| 1004 | if (!event) | 1013 | if (!event) |
| 1005 | return; | 1014 | return; |
| @@ -1012,26 +1021,27 @@ static void __ftrace_trace_stack(struct trace_array *tr, | |||
| 1012 | trace.entries = entry->caller; | 1021 | trace.entries = entry->caller; |
| 1013 | 1022 | ||
| 1014 | save_stack_trace(&trace); | 1023 | save_stack_trace(&trace); |
| 1015 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 1024 | if (!filter_check_discard(call, entry, buffer, event)) |
| 1016 | ring_buffer_unlock_commit(tr->buffer, event); | 1025 | ring_buffer_unlock_commit(buffer, event); |
| 1017 | } | 1026 | } |
| 1018 | 1027 | ||
| 1019 | void ftrace_trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 1028 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, |
| 1020 | int pc) | 1029 | int skip, int pc) |
| 1021 | { | 1030 | { |
| 1022 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 1031 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) |
| 1023 | return; | 1032 | return; |
| 1024 | 1033 | ||
| 1025 | __ftrace_trace_stack(tr, flags, skip, pc); | 1034 | __ftrace_trace_stack(buffer, flags, skip, pc); |
| 1026 | } | 1035 | } |
| 1027 | 1036 | ||
| 1028 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 1037 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
| 1029 | int pc) | 1038 | int pc) |
| 1030 | { | 1039 | { |
| 1031 | __ftrace_trace_stack(tr, flags, skip, pc); | 1040 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); |
| 1032 | } | 1041 | } |
| 1033 | 1042 | ||
| 1034 | void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc) | 1043 | void |
| 1044 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | ||
| 1035 | { | 1045 | { |
| 1036 | struct ftrace_event_call *call = &event_user_stack; | 1046 | struct ftrace_event_call *call = &event_user_stack; |
| 1037 | struct ring_buffer_event *event; | 1047 | struct ring_buffer_event *event; |
| @@ -1041,7 +1051,7 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc) | |||
| 1041 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 1051 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) |
| 1042 | return; | 1052 | return; |
| 1043 | 1053 | ||
| 1044 | event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK, | 1054 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, |
| 1045 | sizeof(*entry), flags, pc); | 1055 | sizeof(*entry), flags, pc); |
| 1046 | if (!event) | 1056 | if (!event) |
| 1047 | return; | 1057 | return; |
| @@ -1055,8 +1065,8 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc) | |||
| 1055 | trace.entries = entry->caller; | 1065 | trace.entries = entry->caller; |
| 1056 | 1066 | ||
| 1057 | save_stack_trace_user(&trace); | 1067 | save_stack_trace_user(&trace); |
| 1058 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 1068 | if (!filter_check_discard(call, entry, buffer, event)) |
| 1059 | ring_buffer_unlock_commit(tr->buffer, event); | 1069 | ring_buffer_unlock_commit(buffer, event); |
| 1060 | } | 1070 | } |
| 1061 | 1071 | ||
| 1062 | #ifdef UNUSED | 1072 | #ifdef UNUSED |
| @@ -1075,9 +1085,10 @@ ftrace_trace_special(void *__tr, | |||
| 1075 | { | 1085 | { |
| 1076 | struct ring_buffer_event *event; | 1086 | struct ring_buffer_event *event; |
| 1077 | struct trace_array *tr = __tr; | 1087 | struct trace_array *tr = __tr; |
| 1088 | struct ring_buffer *buffer = tr->buffer; | ||
| 1078 | struct special_entry *entry; | 1089 | struct special_entry *entry; |
| 1079 | 1090 | ||
| 1080 | event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL, | 1091 | event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL, |
| 1081 | sizeof(*entry), 0, pc); | 1092 | sizeof(*entry), 0, pc); |
| 1082 | if (!event) | 1093 | if (!event) |
| 1083 | return; | 1094 | return; |
| @@ -1085,7 +1096,7 @@ ftrace_trace_special(void *__tr, | |||
| 1085 | entry->arg1 = arg1; | 1096 | entry->arg1 = arg1; |
| 1086 | entry->arg2 = arg2; | 1097 | entry->arg2 = arg2; |
| 1087 | entry->arg3 = arg3; | 1098 | entry->arg3 = arg3; |
| 1088 | trace_buffer_unlock_commit(tr, event, 0, pc); | 1099 | trace_buffer_unlock_commit(buffer, event, 0, pc); |
| 1089 | } | 1100 | } |
| 1090 | 1101 | ||
| 1091 | void | 1102 | void |
| @@ -1131,6 +1142,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1131 | 1142 | ||
| 1132 | struct ftrace_event_call *call = &event_bprint; | 1143 | struct ftrace_event_call *call = &event_bprint; |
| 1133 | struct ring_buffer_event *event; | 1144 | struct ring_buffer_event *event; |
| 1145 | struct ring_buffer *buffer; | ||
| 1134 | struct trace_array *tr = &global_trace; | 1146 | struct trace_array *tr = &global_trace; |
| 1135 | struct trace_array_cpu *data; | 1147 | struct trace_array_cpu *data; |
| 1136 | struct bprint_entry *entry; | 1148 | struct bprint_entry *entry; |
| @@ -1163,7 +1175,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1163 | goto out_unlock; | 1175 | goto out_unlock; |
| 1164 | 1176 | ||
| 1165 | size = sizeof(*entry) + sizeof(u32) * len; | 1177 | size = sizeof(*entry) + sizeof(u32) * len; |
| 1166 | event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc); | 1178 | buffer = tr->buffer; |
| 1179 | event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, | ||
| 1180 | flags, pc); | ||
| 1167 | if (!event) | 1181 | if (!event) |
| 1168 | goto out_unlock; | 1182 | goto out_unlock; |
| 1169 | entry = ring_buffer_event_data(event); | 1183 | entry = ring_buffer_event_data(event); |
| @@ -1171,8 +1185,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1171 | entry->fmt = fmt; | 1185 | entry->fmt = fmt; |
| 1172 | 1186 | ||
| 1173 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); | 1187 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); |
| 1174 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 1188 | if (!filter_check_discard(call, entry, buffer, event)) |
| 1175 | ring_buffer_unlock_commit(tr->buffer, event); | 1189 | ring_buffer_unlock_commit(buffer, event); |
| 1176 | 1190 | ||
| 1177 | out_unlock: | 1191 | out_unlock: |
| 1178 | __raw_spin_unlock(&trace_buf_lock); | 1192 | __raw_spin_unlock(&trace_buf_lock); |
| @@ -1194,6 +1208,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1194 | 1208 | ||
| 1195 | struct ftrace_event_call *call = &event_print; | 1209 | struct ftrace_event_call *call = &event_print; |
| 1196 | struct ring_buffer_event *event; | 1210 | struct ring_buffer_event *event; |
| 1211 | struct ring_buffer *buffer; | ||
| 1197 | struct trace_array *tr = &global_trace; | 1212 | struct trace_array *tr = &global_trace; |
| 1198 | struct trace_array_cpu *data; | 1213 | struct trace_array_cpu *data; |
| 1199 | int cpu, len = 0, size, pc; | 1214 | int cpu, len = 0, size, pc; |
| @@ -1222,7 +1237,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1222 | trace_buf[len] = 0; | 1237 | trace_buf[len] = 0; |
| 1223 | 1238 | ||
| 1224 | size = sizeof(*entry) + len + 1; | 1239 | size = sizeof(*entry) + len + 1; |
| 1225 | event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc); | 1240 | buffer = tr->buffer; |
| 1241 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | ||
| 1242 | irq_flags, pc); | ||
| 1226 | if (!event) | 1243 | if (!event) |
| 1227 | goto out_unlock; | 1244 | goto out_unlock; |
| 1228 | entry = ring_buffer_event_data(event); | 1245 | entry = ring_buffer_event_data(event); |
| @@ -1230,8 +1247,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1230 | 1247 | ||
| 1231 | memcpy(&entry->buf, trace_buf, len); | 1248 | memcpy(&entry->buf, trace_buf, len); |
| 1232 | entry->buf[len] = 0; | 1249 | entry->buf[len] = 0; |
| 1233 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 1250 | if (!filter_check_discard(call, entry, buffer, event)) |
| 1234 | ring_buffer_unlock_commit(tr->buffer, event); | 1251 | ring_buffer_unlock_commit(buffer, event); |
| 1235 | 1252 | ||
| 1236 | out_unlock: | 1253 | out_unlock: |
| 1237 | __raw_spin_unlock(&trace_buf_lock); | 1254 | __raw_spin_unlock(&trace_buf_lock); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ca070de36227..4d30414fe19a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -415,12 +415,13 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
 struct ring_buffer_event;
 
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-						    int type,
-						    unsigned long len,
-						    unsigned long flags,
-						    int pc);
-void trace_buffer_unlock_commit(struct trace_array *tr,
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags,
+			  int pc);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc);
 
@@ -481,10 +482,10 @@ void update_max_tr_single(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
 #ifdef CONFIG_STACKTRACE
-void ftrace_trace_stack(struct trace_array *tr, unsigned long flags,
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
 			int skip, int pc);
 
-void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags,
+void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
 			    int pc);
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 863139327816..19bfc75d467e 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -130,6 +130,7 @@ struct tracer boot_tracer __read_mostly =
 void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 {
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_boot_call *entry;
 	struct trace_array *tr = boot_trace;
 
@@ -142,13 +143,14 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
 					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->boot_call = *bt;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
 	preempt_enable();
 }
@@ -156,6 +158,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 {
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_boot_ret *entry;
 	struct trace_array *tr = boot_trace;
 
@@ -165,13 +168,14 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
 					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->boot_ret = *bt;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d33bcdeffe69..78b1ed230177 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1438,6 +1438,7 @@ static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct ftrace_entry *entry;
 	unsigned long flags;
 	long disabled;
@@ -1455,7 +1456,8 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
+	event = trace_current_buffer_lock_reserve(&buffer,
+						  TRACE_FN, sizeof(*entry),
 						  flags, pc);
 	if (!event)
 		goto out;
@@ -1463,7 +1465,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	trace_nowake_buffer_unlock_commit(event, flags, pc);
+	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
 	atomic_dec(&per_cpu(test_event_disable, cpu));
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3f4a251b7d16..b3749a2c3132 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -173,19 +173,20 @@ static int __trace_graph_entry(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return 0;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 	return 1;
 }
@@ -236,19 +237,20 @@ static void __trace_graph_return(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
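The converted call sites all follow one idiom: reserve on the buffer, fill the entry, then let the filter decide between discard and commit, naming the same buffer throughout. A point the diff itself does not show: filter_current_check_discard() returns nonzero when it has already discarded the reserved event (filter mismatch), in which case no commit may follow. Condensed:

    entry = ring_buffer_event_data(event);
    entry->graph_ent = *trace;
    /* Nonzero return means the filter rejected the record and the
     * helper discarded it; only commit on a zero return. */
    if (!filter_current_check_discard(buffer, call, entry, event))
            ring_buffer_unlock_commit(buffer, event);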
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index d53b45ed0806..c4c9bbda53d3 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,11 +307,12 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_rw *rw)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
 	int pc = preempt_count();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
@@ -319,7 +320,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	}
 	entry = ring_buffer_event_data(event);
 	entry->rw = *rw;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -333,11 +334,12 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_map *map)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
 	int pc = preempt_count();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
@@ -345,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	}
 	entry = ring_buffer_event_data(event);
 	entry->map = *map;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index a5d5a4f7745b..fe1a00f1445a 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -38,6 +38,7 @@ static void probe_power_end(struct power_trace *it)
 {
 	struct ftrace_event_call *call = &event_power;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
 	struct trace_array *tr = power_trace;
@@ -45,18 +46,20 @@ static void probe_power_end(struct power_trace *it)
 	if (!trace_power_enabled)
 		return;
 
+	buffer = tr->buffer;
+
 	preempt_disable();
 	it->end = ktime_get();
 	data = tr->data[smp_processor_id()];
 
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
 					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
 	preempt_enable();
 }
@@ -66,6 +69,7 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 {
 	struct ftrace_event_call *call = &event_power;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
 	struct trace_array *tr = power_trace;
@@ -73,6 +77,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 	if (!trace_power_enabled)
 		return;
 
+	buffer = tr->buffer;
+
 	memset(it, 0, sizeof(struct power_trace));
 	it->state = level;
 	it->type = type;
@@ -81,14 +87,14 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 	it->end = it->stamp;
 	data = tr->data[smp_processor_id()];
 
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
 					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, 0);
out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e1285d7b5488..5fca0f51fde4 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -28,10 +28,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
 			   unsigned long flags, int pc)
 {
 	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -44,8 +45,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, flags, pc);
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
 static void
@@ -86,8 +87,9 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct ftrace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
+	struct ring_buffer *buffer = tr->buffer;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -100,10 +102,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
+	ftrace_trace_stack(tr->buffer, flags, 6, pc);
+	ftrace_trace_userstack(tr->buffer, flags, pc);
 }
 
 static void
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 4f5fae6fad90..8712ce3c6a0e 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -223,6 +223,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	int size;
 	int syscall_nr;
 
@@ -238,8 +239,8 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
 
 	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
 
-	event = trace_current_buffer_lock_reserve(sys_data->enter_id, size,
-							0, 0);
+	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
+						  size, 0, 0);
 	if (!event)
 		return;
 
@@ -247,8 +248,9 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-	if (!filter_current_check_discard(sys_data->enter_event, entry, event))
-		trace_current_buffer_unlock_commit(event, 0, 0);
+	if (!filter_current_check_discard(buffer, sys_data->enter_event,
+					  entry, event))
+		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
 void ftrace_syscall_exit(struct pt_regs *regs, long ret)
@@ -256,6 +258,7 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	int syscall_nr;
 
 	syscall_nr = syscall_get_nr(current, regs);
@@ -268,7 +271,7 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
 	if (!sys_data)
 		return;
 
-	event = trace_current_buffer_lock_reserve(sys_data->exit_id,
+	event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
 						  sizeof(*entry), 0, 0);
 	if (!event)
 		return;
@@ -277,8 +280,9 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);
 
-	if (!filter_current_check_discard(sys_data->exit_event, entry, event))
-		trace_current_buffer_unlock_commit(event, 0, 0);
+	if (!filter_current_check_discard(buffer, sys_data->exit_event,
+					  entry, event))
+		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
 int reg_event_syscall_enter(void *ptr)
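The syscall hooks illustrate why the out-parameter form of trace_current_buffer_lock_reserve() exists: ftrace_syscall_enter/exit have no trace_array in scope at all, so the reserve helper is the only place the buffer can come from, and it must be handed back for the filter check and commit. The full sequence for the exit event, consolidated from the hunks above:

    struct ring_buffer *buffer;

    event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
                                              sizeof(*entry), 0, 0);
    if (!event)
            return;

    entry = ring_buffer_event_data(event);
    entry->nr = syscall_nr;
    entry->ret = syscall_get_return_value(current, regs);

    /* Commit to the very buffer the reserve was made on. */
    if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                      entry, event))
            trace_current_buffer_unlock_commit(buffer, event, 0, 0);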
