author	Steven Rostedt <rostedt@goodmis.org>	2008-11-03 23:15:56 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-04 04:09:49 -0500
commit	182e9f5f704ed6b9175142fe8da33c9ce0c52b52 (patch)
tree	60e72119c74141cfb7b0b9b757b47d719138621f
parent	8f0a056fcb2f83a069fb5d60c2383304b7456687 (diff)
ftrace: insert in the ftrace_preempt_disable()/enable() functions
Impact: use new, consolidated APIs in ftrace plugins

This patch replaces the schedule safe preempt disable code with the
ftrace_preempt_disable() and ftrace_preempt_enable() safe functions.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/trace/ring_buffer.c	27
-rw-r--r--	kernel/trace/trace.c	8
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	13
-rw-r--r--	kernel/trace/trace_stack.c	8
4 files changed, 15 insertions(+), 41 deletions(-)
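For context: the ftrace_preempt_disable()/ftrace_preempt_enable() pair that this patch switches to was introduced in kernel/trace/trace.h by the parent commit (8f0a056fcb2f). A minimal sketch of those helpers, mirroring the open-coded sequence removed at each call site below (see the parent commit for the exact definition):

static inline int ftrace_preempt_disable(void)
{
	int resched;

	/* Remember whether NEED_RESCHED was already set ... */
	resched = need_resched();
	/* ... then disable preemption without calling back into the tracer. */
	preempt_disable_notrace();

	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	/*
	 * If the scheduler already wanted to reschedule before we disabled
	 * preemption, do not reschedule here; otherwise a tracer hooked
	 * into the scheduler could recurse into schedule().
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}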
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cedf4e268285..151f6a748676 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,6 +16,8 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 
  out:
-	if (resched)
-		preempt_enable_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 	return NULL;
 }
 
@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
-	if (preempt_count() == 1) {
-		if (per_cpu(rb_need_resched, cpu))
-			preempt_enable_no_resched_notrace();
-		else
-			preempt_enable_notrace();
-	} else
+	if (preempt_count() == 1)
+		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+	else
 		preempt_enable_no_resched_notrace();
 
 	return 0;
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 
 	return ret;
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e4c40c868d67..3e7bf5eb9007 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -904,8 +904,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -915,10 +914,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3ae93f16b565..7bc4abf6fca8 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
 	atomic_dec(&data->disabled);
 
-	/*
-	 * To prevent recursion from the scheduler, if the
-	 * resched flag was set before we entered, then
-	 * don't reschedule.
-	 */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index be682b62fe58..d39e8b7de6a2 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =