Diffstat (limited to 'kernel/trace')
 kernel/trace/ftrace.c         | 18 +++++++++---------
 kernel/trace/trace.c          | 18 ++++++++++++------
 kernel/trace/trace.h          |  2 --
 kernel/trace/trace_selftest.c |  2 +-
 4 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b977..6c508ff33c62 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)			\
-	op = rcu_dereference_raw(list);			\
+	op = rcu_dereference_raw_notrace(list);		\
 	do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)				\
-	while (likely(op = rcu_dereference_raw((op)->next)) && \
+	while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
 		unlikely((op) != &ftrace_list_end))
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
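[Note: the two macros above are designed to be used as a bracketed pair. A minimal usage sketch, modeled on the list walk in ftrace.c's own dispatch code (details simplified, not part of this patch):]

	struct ftrace_ops *op;

	/* Walk every registered ftrace_ops without taking rcu_read_lock() */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (ftrace_ops_test(op, ip))
			op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);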
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;
 
-	hlist_for_each_entry_rcu(rec, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
 	hhd = &hash->buckets[key];
 
-	hlist_for_each_entry_rcu(entry, hhd, hlist) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	filter_hash = rcu_dereference_raw(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
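[Note: the common thread in the ftrace.c changes is that these list and hash walks run from the function tracer's own callbacks, so every helper on the path must itself be untraceable, otherwise the tracer re-enters itself. The _notrace accessor variants perform the same dereference without the instrumented debug checks. A minimal sketch of the resulting pattern, simplified from function_trace_probe_call() above (not part of this patch):]

	static void notrace probe_lookup(unsigned long ip, unsigned long parent_ip,
					 struct hlist_head *hhd)
	{
		struct ftrace_func_probe *entry;

		/*
		 * Preemption, not rcu_read_lock(), pins the hash: removed
		 * entries are simply leaked, so no grace period is needed.
		 */
		preempt_disable_notrace();
		hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
			if (entry->ip == ip)
				entry->ops->func(ip, parent_ip, &entry->data);
		}
		preempt_enable_notrace();
	}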
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4d79485b3237..e71a8be4a6ee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -652,8 +652,6 @@ static struct {
 	ARCH_TRACE_CLOCKS
 };
 
-int trace_clock_id;
-
 /*
  * trace_parser_get_init - gets the buffer for trace parser
  */
@@ -843,7 +841,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
-	max_data->uid = task_uid(tsk);
+	/*
+	 * If tsk == current, then use current_uid(), as that does not use
+	 * RCU. The irq tracer can be called out of RCU scope.
+	 */
+	if (tsk == current)
+		max_data->uid = current_uid();
+	else
+		max_data->uid = task_uid(tsk);
+
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 	max_data->policy = tsk->policy;
 	max_data->rt_priority = tsk->rt_priority;
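[Note: why the tsk == current special case is safe: task_uid() samples another task's credentials and so must bracket the access in rcu_read_lock(), while a task's own cred pointer is only ever replaced by the task itself, so reading it needs no RCU read lock. Approximate shape of the two accessors, paraphrased from include/linux/cred.h of this era (not part of this patch):]

	/* Another task's creds must be sampled under the RCU read lock. */
	static inline kuid_t sample_task_uid(struct task_struct *task)
	{
		kuid_t uid;

		rcu_read_lock();
		uid = __task_cred(task)->uid;
		rcu_read_unlock();
		return uid;
	}

	/* Our own creds cannot change under us: no RCU read lock needed. */
	#define sample_current_uid()	(current_cred()->uid)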
@@ -2818,7 +2824,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
-	if (trace_clocks[trace_clock_id].in_ns)
+	if (trace_clocks[tr->clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
 	/* stop the trace while dumping if we are not opening "snapshot" */
@@ -3817,7 +3823,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
 
 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
-	if (trace_clocks[trace_clock_id].in_ns)
+	if (trace_clocks[tr->clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
 	iter->cpu_file = tc->cpu;
@@ -5087,7 +5093,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
 	trace_seq_printf(s, "bytes: %ld\n", cnt);
 
-	if (trace_clocks[trace_clock_id].in_ns) {
+	if (trace_clocks[tr->clock_id].in_ns) {
 		/* local or global for trace_clock */
 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 		usec_rem = do_div(t, USEC_PER_SEC);
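[Note: all three trace.c call sites above move from the deleted global trace_clock_id to the per-instance tr->clock_id, so each trace_array carries its own clock selection. A minimal sketch of the shared pattern; the helper name is hypothetical, not from this patch:]

	/* Does this trace instance's clock report timestamps in nanoseconds? */
	static bool tracing_clock_in_ns(struct trace_array *tr)
	{
		return trace_clocks[tr->clock_id].in_ns;
	}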
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 711ca7d3e7f1..20572ed88c5c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -700,8 +700,6 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern unsigned long trace_flags;
 
-extern int trace_clock_id;
-
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 55e2cf66967b..2901e3b88590 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 