 include/linux/blktrace_api.h |  2
 kernel/trace/blktrace.c      | 33
 2 files changed, 29 insertions(+), 6 deletions(-)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7c2e030e72f1..a12f6ed91c84 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -5,6 +5,7 @@
 #include <linux/relay.h>
 #include <linux/compat.h>
 #include <uapi/linux/blktrace_api.h>
+#include <linux/list.h>
 
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
 
@@ -23,6 +24,7 @@ struct blk_trace {
 	struct dentry *dir;
 	struct dentry *dropped_file;
 	struct dentry *msg_file;
+	struct list_head running_list;
 	atomic_t dropped;
 };
 
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b8b8560bfb95..7f727b34280d 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -26,6 +26,7 @@
 #include <linux/export.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
+#include <linux/list.h>
 
 #include <trace/events/block.h>
 
@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
 static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
+static LIST_HEAD(running_trace_list);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1
 
@@ -107,10 +111,18 @@ record_it:
  * Send out a notify for this process, if we haven't done so since a trace
  * started
  */
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+static void trace_note_tsk(struct task_struct *tsk)
 {
+	unsigned long flags;
+	struct blk_trace *bt;
+
 	tsk->btrace_seq = blktrace_seq;
-	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+	spin_lock_irqsave(&running_trace_lock, flags);
+	list_for_each_entry(bt, &running_trace_list, running_list) {
+		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
+			   sizeof(tsk->comm));
+	}
+	spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		goto record_it;
 	}
 
+	if (unlikely(tsk->btrace_seq != blktrace_seq))
+		trace_note_tsk(tsk);
+
 	/*
 	 * A word about the locking here - we disable interrupts to reserve
 	 * some space in the relay per-cpu buffer, to prevent an irq
 	 * from coming in and stepping on our toes.
 	 */
 	local_irq_save(flags);
-
-	if (unlikely(tsk->btrace_seq != blktrace_seq))
-		trace_note_tsk(bt, tsk);
-
 	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
 	if (t) {
 		sequence = per_cpu_ptr(bt->sequence, cpu);
@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	bt->dir = dir;
 	bt->dev = dev;
 	atomic_set(&bt->dropped, 0);
+	INIT_LIST_HEAD(&bt->running_list);
 
 	ret = -EIO;
 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
@@ -601,6 +613,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
+			spin_lock_irq(&running_trace_lock);
+			list_add(&bt->running_list, &running_trace_list);
+			spin_unlock_irq(&running_trace_lock);
 
 			trace_note_time(bt);
 			ret = 0;
@@ -608,6 +623,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
+			spin_lock_irq(&running_trace_lock);
+			list_del_init(&bt->running_list);
+			spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -1472,6 +1490,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 	if (atomic_dec_and_test(&blk_probes_ref))
 		blk_unregister_tracepoints();
 
+	spin_lock_irq(&running_trace_lock);
+	list_del(&bt->running_list);
+	spin_unlock_irq(&running_trace_lock);
 	blk_trace_free(bt);
 	return 0;
 }
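Note on the pattern: the patch moves the "announce this task" step out of the per-trace path and into a walk over a global list of running traces, guarded by an IRQ-safe spinlock and gated by a per-task sequence number, so the first event a task logs after tracing (re)starts produces one BLK_TN_PROCESS note per running trace. The sketch below is only a minimal userspace analogy of that pattern, not kernel code: the names (running_traces, trace_seq, add_trace_event) are hypothetical, a pthread mutex stands in for running_trace_lock, and printf stands in for trace_note().

/*
 * Userspace analogy of the running-trace-list pattern (hypothetical names).
 * A global sequence number is bumped when tracing (re)starts; the first
 * event a "task" logs after that broadcasts a note to every trace on the
 * running list, under a single lock.
 */
#include <pthread.h>
#include <stdio.h>

struct trace {
	const char *name;
	struct trace *next;		/* singly linked stand-in for list_head */
};

static struct trace *running_traces;	/* stand-in for running_trace_list */
static pthread_mutex_t running_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int trace_seq = 1;	/* stand-in for blktrace_seq */

struct task {
	const char *comm;
	unsigned int btrace_seq;	/* last seq this task was announced for */
};

static void trace_note_tsk(struct task *tsk)
{
	struct trace *t;

	tsk->btrace_seq = trace_seq;
	pthread_mutex_lock(&running_lock);
	for (t = running_traces; t; t = t->next)
		printf("note: task %s -> trace %s\n", tsk->comm, t->name);
	pthread_mutex_unlock(&running_lock);
}

static void add_trace_event(struct task *tsk, const char *event)
{
	/* Announce the task once per tracing session, to all running traces. */
	if (tsk->btrace_seq != trace_seq)
		trace_note_tsk(tsk);
	printf("event: %s (%s)\n", event, tsk->comm);
}

int main(void)
{
	struct trace a = { "trace-a", NULL };
	struct trace b = { "trace-b", &a };
	struct task tsk = { "dd", 0 };

	running_traces = &b;		/* two traces "running" */
	add_trace_event(&tsk, "Q");	/* first event: notes go to both traces */
	add_trace_event(&tsk, "C");	/* already announced, no extra notes */
	return 0;
}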