Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c         | 18
-rw-r--r--  kernel/trace/ring_buffer.c    |  3
-rw-r--r--  kernel/trace/trace.c          | 19
-rw-r--r--  kernel/trace/trace_selftest.c |  2
4 files changed, 29 insertions, 13 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b977..6c508ff33c62 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)                        \
-       op = rcu_dereference_raw(list);                         \
+       op = rcu_dereference_raw_notrace(list);                 \
        do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)                                   \
-       while (likely(op = rcu_dereference_raw((op)->next)) &&          \
+       while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
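
For context, the two macros above are meant to be used as a bracketing pair
around a loop body. A minimal sketch of a caller, roughly the shape of the
list walker elsewhere in this file (the body and the exact func signature
here are illustrative, not part of this patch):

       struct ftrace_ops *op;

       /*
        * Walk the ops list without rcu_read_lock(): removed entries are
        * leaked rather than freed, and the _notrace dereference keeps
        * the walk itself from being traced recursively.
        */
       do_for_each_ftrace_op(op, ftrace_ops_list) {
               if (ftrace_ops_test(op, ip))
                       op->func(ip, parent_ip, op, regs);
       } while_for_each_ftrace_op(op);
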
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;
 
-	hlist_for_each_entry_rcu(rec, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
 	hhd = &hash->buckets[key];
 
-	hlist_for_each_entry_rcu(entry, hhd, hlist) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	filter_hash = rcu_dereference_raw(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
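
The two hashes dereferenced above implement ftrace's filter semantics: an ip
is traced by this ops iff the filter hash is empty (meaning "trace
everything") or contains the ip, and the notrace hash does not exclude it.
A condensed form of the predicate that begins in this hunk (illustrative;
ftrace_lookup_ip() returns NULL on a miss or an empty hash):

       ret = (ftrace_hash_empty(filter_hash) ||
              ftrace_lookup_ip(filter_hash, ip)) &&
             !ftrace_lookup_ip(notrace_hash, ip);
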
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
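
Here preempt_disable_notrace() stands in for rcu_read_lock(): the probe hash
is presumably torn down behind a scheduling-based grace period
(synchronize_sched()), so simply staying non-preemptible pins the entries,
while the _notrace variants keep the function tracer from recursing into
itself. The full shape of the pattern, as a sketch including the matching
enable that falls outside this hunk:

       preempt_disable_notrace();
       hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
               /* entry cannot be freed while preemption is off */
               if (entry->ip == ip)
                       entry->ops->func(ip, parent_ip, &entry->data);
       }
       preempt_enable_notrace();
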
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b59aea2c48c2..e444ff88f0a4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -620,6 +620,9 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		work = &buffer->irq_work;
 	else {
+		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+			return -EINVAL;
+
 		cpu_buffer = buffer->buffers[cpu];
 		work = &cpu_buffer->irq_work;
 	}
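
Without the added check, polling a CPU outside buffer->cpumask would index a
buffers[] slot that was never allocated and then dereference it to reach
irq_work. The guard, written out as a caller-side sketch (illustrative):

       /* cpu must be RING_BUFFER_ALL_CPUS or a CPU this buffer covers */
       if (cpu != RING_BUFFER_ALL_CPUS &&
           !cpumask_test_cpu(cpu, buffer->cpumask))
               return -EINVAL;         /* reject before touching buffers[cpu] */
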
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ae6fa2d1cdf7..1a41023a1f88 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -843,7 +843,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
-	max_data->uid = task_uid(tsk);
+	/*
+	 * If tsk == current, then use current_uid(), as that does not use
+	 * RCU. The irq tracer can be called out of RCU scope.
+	 */
+	if (tsk == current)
+		max_data->uid = current_uid();
+	else
+		max_data->uid = task_uid(tsk);
+
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 	max_data->policy = tsk->policy;
 	max_data->rt_priority = tsk->rt_priority;
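
The distinction matters because task_uid() wraps its credential dereference
in rcu_read_lock(), while current_uid() reads the current task's own creds
directly. Condensed from include/linux/cred.h of this era (illustrative, not
verbatim):

       #define current_uid()   (current_cred()->uid)   /* no RCU involved */

       #define task_uid(task)                          \
       ({                                              \
               kuid_t __uid;                           \
               rcu_read_lock();                        \
               __uid = __task_cred(task)->uid;         \
               rcu_read_unlock();                      \
               __uid;                                  \
       })

So when the irq tracer updates the max trace from a context where RCU is not
watching, only the current_uid() path is safe.
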
@@ -6216,10 +6224,15 @@ __init static int tracer_alloc_buffers(void)
 
 	trace_init_cmdlines();
 
-	register_tracer(&nop_trace);
-
+	/*
+	 * register_tracer() might reference current_trace, so it
+	 * needs to be set before we register anything. This is
+	 * just a bootstrap of current_trace anyway.
+	 */
 	global_trace.current_trace = &nop_trace;
 
+	register_tracer(&nop_trace);
+
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 
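
The reordering closes a boot-time window: as the new comment says,
register_tracer() might reference global_trace.current_trace, and before this
change that pointer was still unset when registration ran. A sketch of the
hazard being removed (hypothetical call shape, not the exact chain):

       /* before: current_trace still NULL when register_tracer() runs */
       register_tracer(&nop_trace);            /* may read current_trace->... */
       global_trace.current_trace = &nop_trace;

       /* after: bootstrap the pointer first, then register */
       global_trace.current_trace = &nop_trace;
       register_tracer(&nop_trace);
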
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 55e2cf66967b..2901e3b88590 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
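
This selftest change tracks an API shift: trace_test_buffer() now takes the
trace_buffer embedded in the trace_array rather than the trace_array itself,
presumably matching the buffer split done for the snapshot work. Assumed
signature and resulting call shape (illustrative):

       static int trace_test_buffer(struct trace_buffer *buf,
                                    unsigned long *count);

       /* callers therefore pass the embedded buffer: */
       ret = trace_test_buffer(&tr->trace_buffer, &count);
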