aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorLi Zefan <lizf@cn.fujitsu.com>2009-03-26 22:20:09 -0400
committerIngo Molnar <mingo@elte.hu>2009-03-31 11:27:45 -0400
commit17ba97e347bec9bbc47a0877c7a098708982129d (patch)
treeb6a7472c354e4135f1ea47482d19ae277be65dee /kernel/trace
parent35ac51bfe4c293b67ce9f85082ba0b9bc6123c40 (diff)
blktrace: fix blk_probes_ref chaos
Impact: fix mixed ioctl and ftrace-plugin blktrace use refcount bugs ioctl-based blktrace allocates bt and registers tracepoints when ioctl(BLKTRACESETUP), and does all cleanups when ioctl(BLKTRACETEARDOWN), while ftrace-based blktrace allocates/frees bt when: # echo 1/0 > /sys/block/sda/sda1/trace/enable and registers/unregisters tracepoints when: # echo blk/nop > /debugfs/tracing/current_tracer or # echo 1/0 > /debugfs/tracing/tracing_enable The separation of allocation and registration causes 2 problems: 1. current user-space blktrace still calls ioctl(TEARDOWN) when ioctl(SETUP) failed: # echo 1 > /sys/block/sda/sda1/trace/enable # blktrace /dev/sda BLKTRACESETUP: Device or resource busy ^C and now blk_probes_ref == -1 2. Another way to make blk_probes_ref == -1: # plugin sdb && mount sdb1 # echo 1 > /sys/block/sdb/sdb1/trace/enable # remove sdb This patch does the allocation and registration when writing sdaX/trace/enable. Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> Acked-by: Jens Axboe <jens.axboe@oracle.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/blktrace.c15
1 files changed, 7 insertions, 8 deletions
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 5b28f0f119c5..8d6bd12aab10 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -478,7 +478,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
478 goto err; 478 goto err;
479 } 479 }
480 480
481 if (atomic_add_return(1, &blk_probes_ref) == 1) 481 if (atomic_inc_return(&blk_probes_ref) == 1)
482 blk_register_tracepoints(); 482 blk_register_tracepoints();
483 483
484 return 0; 484 return 0;
@@ -1091,8 +1091,6 @@ static void blk_tracer_print_header(struct seq_file *m)
1091 1091
1092static void blk_tracer_start(struct trace_array *tr) 1092static void blk_tracer_start(struct trace_array *tr)
1093{ 1093{
1094 if (atomic_add_return(1, &blk_probes_ref) == 1)
1095 blk_register_tracepoints();
1096 trace_flags &= ~TRACE_ITER_CONTEXT_INFO; 1094 trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1097} 1095}
1098 1096
@@ -1107,15 +1105,10 @@ static int blk_tracer_init(struct trace_array *tr)
1107static void blk_tracer_stop(struct trace_array *tr) 1105static void blk_tracer_stop(struct trace_array *tr)
1108{ 1106{
1109 trace_flags |= TRACE_ITER_CONTEXT_INFO; 1107 trace_flags |= TRACE_ITER_CONTEXT_INFO;
1110 if (atomic_dec_and_test(&blk_probes_ref))
1111 blk_unregister_tracepoints();
1112} 1108}
1113 1109
1114static void blk_tracer_reset(struct trace_array *tr) 1110static void blk_tracer_reset(struct trace_array *tr)
1115{ 1111{
1116 if (!atomic_read(&blk_probes_ref))
1117 return;
1118
1119 blk_tracer_enabled = false; 1112 blk_tracer_enabled = false;
1120 blk_tracer_stop(tr); 1113 blk_tracer_stop(tr);
1121} 1114}
@@ -1254,6 +1247,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
1254 if (bt == NULL) 1247 if (bt == NULL)
1255 return -EINVAL; 1248 return -EINVAL;
1256 1249
1250 if (atomic_dec_and_test(&blk_probes_ref))
1251 blk_unregister_tracepoints();
1252
1257 kfree(bt); 1253 kfree(bt);
1258 return 0; 1254 return 0;
1259} 1255}
@@ -1280,6 +1276,9 @@ static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
1280 return -EBUSY; 1276 return -EBUSY;
1281 } 1277 }
1282 1278
1279 if (atomic_inc_return(&blk_probes_ref) == 1)
1280 blk_register_tracepoints();
1281
1283 return 0; 1282 return 0;
1284} 1283}
1285 1284