about summary refs log tree commit diff stats
path: root/kernel/trace/blktrace.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-09-07 02:19:51 -0400
committerIngo Molnar <mingo@elte.hu>2009-09-07 02:19:51 -0400
commita1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch)
tree0f1777542b385ebefd30b3586d830fd8ed6fda5b /kernel/trace/blktrace.c
parent75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff)
parentd28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts: arch/Kconfig kernel/trace/trace.h Merge reason: resolve the conflicts, plus adapt to the new ring-buffer APIs. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/blktrace.c')
-rw-r--r--kernel/trace/blktrace.c25
1 file changed, 10 insertions, 15 deletions
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 39af8af6fc30..3eb159c277c8 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/smp_lock.h>
25#include <linux/time.h> 26#include <linux/time.h>
26#include <linux/uaccess.h> 27#include <linux/uaccess.h>
27 28
@@ -64,13 +65,15 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
64{ 65{
65 struct blk_io_trace *t; 66 struct blk_io_trace *t;
66 struct ring_buffer_event *event = NULL; 67 struct ring_buffer_event *event = NULL;
68 struct ring_buffer *buffer = NULL;
67 int pc = 0; 69 int pc = 0;
68 int cpu = smp_processor_id(); 70 int cpu = smp_processor_id();
69 bool blk_tracer = blk_tracer_enabled; 71 bool blk_tracer = blk_tracer_enabled;
70 72
71 if (blk_tracer) { 73 if (blk_tracer) {
74 buffer = blk_tr->buffer;
72 pc = preempt_count(); 75 pc = preempt_count();
73 event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK, 76 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
74 sizeof(*t) + len, 77 sizeof(*t) + len,
75 0, pc); 78 0, pc);
76 if (!event) 79 if (!event)
@@ -95,7 +98,7 @@ record_it:
95 memcpy((void *) t + sizeof(*t), data, len); 98 memcpy((void *) t + sizeof(*t), data, len);
96 99
97 if (blk_tracer) 100 if (blk_tracer)
98 trace_buffer_unlock_commit(blk_tr, event, 0, pc); 101 trace_buffer_unlock_commit(buffer, event, 0, pc);
99 } 102 }
100} 103}
101 104
@@ -178,6 +181,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
178{ 181{
179 struct task_struct *tsk = current; 182 struct task_struct *tsk = current;
180 struct ring_buffer_event *event = NULL; 183 struct ring_buffer_event *event = NULL;
184 struct ring_buffer *buffer = NULL;
181 struct blk_io_trace *t; 185 struct blk_io_trace *t;
182 unsigned long flags = 0; 186 unsigned long flags = 0;
183 unsigned long *sequence; 187 unsigned long *sequence;
@@ -203,8 +207,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
203 if (blk_tracer) { 207 if (blk_tracer) {
204 tracing_record_cmdline(current); 208 tracing_record_cmdline(current);
205 209
210 buffer = blk_tr->buffer;
206 pc = preempt_count(); 211 pc = preempt_count();
207 event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK, 212 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
208 sizeof(*t) + pdu_len, 213 sizeof(*t) + pdu_len,
209 0, pc); 214 0, pc);
210 if (!event) 215 if (!event)
@@ -251,7 +256,7 @@ record_it:
251 memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); 256 memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
252 257
253 if (blk_tracer) { 258 if (blk_tracer) {
254 trace_buffer_unlock_commit(blk_tr, event, 0, pc); 259 trace_buffer_unlock_commit(buffer, event, 0, pc);
255 return; 260 return;
256 } 261 }
257 } 262 }
@@ -266,8 +271,8 @@ static void blk_trace_free(struct blk_trace *bt)
266{ 271{
267 debugfs_remove(bt->msg_file); 272 debugfs_remove(bt->msg_file);
268 debugfs_remove(bt->dropped_file); 273 debugfs_remove(bt->dropped_file);
269 debugfs_remove(bt->dir);
270 relay_close(bt->rchan); 274 relay_close(bt->rchan);
275 debugfs_remove(bt->dir);
271 free_percpu(bt->sequence); 276 free_percpu(bt->sequence);
272 free_percpu(bt->msg_data); 277 free_percpu(bt->msg_data);
273 kfree(bt); 278 kfree(bt);
@@ -377,18 +382,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
377 382
378static int blk_remove_buf_file_callback(struct dentry *dentry) 383static int blk_remove_buf_file_callback(struct dentry *dentry)
379{ 384{
380 struct dentry *parent = dentry->d_parent;
381 debugfs_remove(dentry); 385 debugfs_remove(dentry);
382 386
383 /*
384 * this will fail for all but the last file, but that is ok. what we
385 * care about is the top level buts->name directory going away, when
386 * the last trace file is gone. Then we don't have to rmdir() that
387 * manually on trace stop, so it nicely solves the issue with
388 * force killing of running traces.
389 */
390
391 debugfs_remove(parent);
392 return 0; 387 return 0;
393} 388}
394 389