path: root/kernel/trace/ring_buffer.c
author	Steven Rostedt <rostedt@goodmis.org>	2008-11-03 23:15:56 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-04 04:09:49 -0500
commit	182e9f5f704ed6b9175142fe8da33c9ce0c52b52 (patch)
tree	60e72119c74141cfb7b0b9b757b47d719138621f /kernel/trace/ring_buffer.c
parent	8f0a056fcb2f83a069fb5d60c2383304b7456687 (diff)
ftrace: insert in the ftrace_preempt_disable()/enable() functions
Impact: use new, consolidated APIs in ftrace plugins

This patch replaces the schedule-safe preempt disable code with the
ftrace_preempt_disable() and ftrace_preempt_enable() safe functions.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
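For context, ftrace_preempt_disable() and ftrace_preempt_enable() live in kernel/trace/trace.h, added by the parent commit (8f0a056fcb2f), which is why the diff below adds an #include "trace.h". A minimal sketch of the pair, assuming the implementation from that commit:

	/* Disable preemption without tracing; report whether a reschedule was pending. */
	static inline int ftrace_preempt_disable(void)
	{
		int resched;

		resched = need_resched();
		preempt_disable_notrace();

		return resched;
	}

	/*
	 * Re-enable preemption. If a reschedule was already pending when
	 * preemption was disabled, skip the scheduling check on enable so
	 * that tracing schedule() cannot recurse into schedule() itself.
	 */
	static inline void ftrace_preempt_enable(int resched)
	{
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}

The design point is the saved resched flag: if need_resched() is already set, a plain preemption enable would call into the scheduler, and a tracer that is currently tracing schedule() would recurse. Preserving the flag lets each open-coded if/else in the diff below collapse into a single call.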
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	27
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cedf4e26828..151f6a74867 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,6 +16,8 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 
  out:
-	if (resched)
-		preempt_enable_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 	return NULL;
 }
 
@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
-	if (preempt_count() == 1) {
-		if (per_cpu(rb_need_resched, cpu))
-			preempt_enable_no_resched_notrace();
-		else
-			preempt_enable_notrace();
-	} else
+	if (preempt_count() == 1)
+		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+	else
 		preempt_enable_no_resched_notrace();
 
 	return 0;
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 
 	return ret;
 }
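After the patch, every converted call site follows the same bracketed pattern; a minimal usage sketch distilled from the hunks above (the ring-buffer work itself is elided):

	int resched;

	resched = ftrace_preempt_disable();
	/* reserve or write a ring buffer event without recursing into schedule() */
	ftrace_preempt_enable(resched);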