author     Linus Torvalds <torvalds@linux-foundation.org>   2010-03-13 17:43:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-03-13 17:43:01 -0500
commit     4e3eaddd142e2142c048c5052a0a9d2604fccfc6 (patch)
tree       5bc45a286502e54e790c54948f22364c5afd9d89 /kernel/trace
parent     8655e7e3ddec60603c4f6c14cdf642e2ba198df8 (diff)
parent     b97c4bc16734a2e597dac7f91ee9eb78f4aeef9a (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  locking: Make sparse work with inline spinlocks and rwlocks
  x86/mce: Fix RCU lockdep splats
  rcu: Increase RCU CPU stall timeouts if PROVE_RCU
  ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
  rcu: Suppress RCU lockdep warnings during early boot
  rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare()
  rcu: Suppress __mpol_dup() false positive from RCU lockdep
  rcu: Make rcu_read_lock_sched_held() handle !PREEMPT
  rcu: Add control variables to lockdep_rcu_dereference() diagnostics
  rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU
  rcu: Use wrapper function instead of exporting tasklist_lock
  sched, rcu: Fix rcu_dereference() for RCU-lockdep
  rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use
  rcu: Fix holdoff for accelerated GPs for last non-dynticked CPU
  x86/gart: Unexport gart_iommu_aperture

Fix trivial conflicts in kernel/trace/ftrace.c
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c               | 22
-rw-r--r--  kernel/trace/trace_event_profile.c  |  4
2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bb53edbb5c8c..d9062f5cc0c0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>
 
 #include <trace/events/sched.h>
 
@@ -84,18 +85,22 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
+/*
+ * Traverse the ftrace_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	struct ftrace_ops *op = ftrace_list;
-
-	/* in case someone actually ports this to alpha! */
-	read_barrier_depends();
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
 
 	while (op != &ftrace_list_end) {
-		/* silly alpha */
-		read_barrier_depends();
 		op->func(ip, parent_ip);
-		op = op->next;
+		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 }
 
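The hunk above is the read side of the pattern. Below is a minimal kernel-style sketch of it, using a hypothetical my_ops list (my_ops, my_ops_end, my_ops_list and my_ops_run_all are illustrative names, not the real ftrace code). rcu_dereference_raw() suffices here only because, as the new comment explains, removed entries are simply leaked rather than freed, so readers never need a grace period; the accessor still orders the pointer loads for DEC Alpha and for value-speculating compilers.

#include <linux/rcupdate.h>

struct my_ops {
	struct my_ops	*next;
	void		(*func)(unsigned long ip, unsigned long parent_ip);
};

/* Terminator entry, like ftrace_list_end, so readers never see a NULL head. */
static struct my_ops my_ops_end;
static struct my_ops *my_ops_list = &my_ops_end;

static void my_ops_run_all(unsigned long ip, unsigned long parent_ip)
{
	/* Paired with rcu_assign_pointer() on the update side (next hunk). */
	struct my_ops *op = rcu_dereference_raw(my_ops_list);

	while (op != &my_ops_end) {
		op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next);
	}
}
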
@@ -150,8 +155,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
-	smp_wmb();
-	ftrace_list = ops;
+	rcu_assign_pointer(ftrace_list, ops);
 
 	if (ftrace_enabled) {
 		ftrace_func_t func;
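A matching sketch of the update side just above (same hypothetical my_ops list as before, not the real __register_ftrace_function()): rcu_assign_pointer() supplies the write barrier that the removed smp_wmb()/plain-store pair used to provide, so the new entry's fields are visible before the entry itself becomes reachable from the list head. Callers are assumed to serialize registrations, as the real code does under ftrace_lock.

static void my_ops_register(struct my_ops *ops)
{
	ops->next = my_ops_list;
	/*
	 * Publish the entry: the barrier implied by rcu_assign_pointer()
	 * orders the ->next and ->func stores before the list-head update.
	 */
	rcu_assign_pointer(my_ops_list, ops);
}
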
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index f0d693005075..c1cc3ab633de 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -138,9 +138,9 @@ __kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
 	cpu = smp_processor_id();
 
 	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 	else
-		trace_buf = rcu_dereference(perf_trace_buf);
+		trace_buf = rcu_dereference_sched(perf_trace_buf);
 
 	if (!trace_buf)
 		goto err;
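The switch to rcu_dereference_sched() above reflects the calling context: ftrace_perf_buf_prepare() runs with preemption disabled, which is an RCU-sched read-side critical section, so lockdep-RCU accepts rcu_dereference_sched() where plain rcu_dereference() would warn about a missing rcu_read_lock(). A minimal sketch of that idea, with a hypothetical my_buf pointer and my_buf_touch() helper (not the real perf_trace_buf code):

#include <linux/rcupdate.h>
#include <linux/preempt.h>

static char *my_buf;	/* assumed to be updated elsewhere via rcu_assign_pointer() */

static void my_buf_touch(void)
{
	char *buf;

	preempt_disable();			/* enter an RCU-sched read-side section */
	buf = rcu_dereference_sched(my_buf);	/* lockdep-RCU is satisfied: preemption is off */
	if (buf)
		buf[0] = 0;			/* placeholder use of the buffer */
	preempt_enable();
}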