path: root/block
author	Jens Axboe <axboe@suse.de>	2006-03-23 14:00:26 -0500
committer	Jens Axboe <axboe@suse.de>	2006-03-23 14:00:26 -0500
commit	2056a782f8e7e65fd4bfd027506b4ce1c5e9ccd4 (patch)
tree	d4fe59a7ca0c110690937085548936a4535c39db /block
parent	6dac40a7ce2483a47b54af07afebeb84131c7228 (diff)
[PATCH] Block queue IO tracing support (blktrace) as of 2006-03-23
Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig	12
-rw-r--r--	block/Makefile	2
-rw-r--r--	block/blktrace.c	538
-rw-r--r--	block/elevator.c	4
-rw-r--r--	block/ioctl.c	6
-rw-r--r--	block/ll_rw_blk.c	44
6 files changed, 604 insertions, 2 deletions
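
The four BLKTRACE* ioctls added below (routed through block/ioctl.c to blk_trace_ioctl() in the new block/blktrace.c) are meant to be driven from user space, normally by the blktrace tool named in the Kconfig help text. What follows is a minimal, hypothetical sketch of that flow; struct blk_user_trace_setup and the BLKTRACE* request numbers come from include/linux/blktrace_api.h, which lies outside this 'block'-limited diffstat, so the header path and exact field set here are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blktrace_api.h>	/* assumed location of the new header */

int main(void)
{
	struct blk_user_trace_setup buts;
	int fd = open("/dev/sda", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* size of each relay subbuffer */
	buts.buf_nr = 4;		/* number of subbuffers per cpu */
	/* act_mask, start_lba/end_lba and pid left zero: trace everything */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0 ||
	    ioctl(fd, BLKTRACESTART) < 0)
		return 1;

	/*
	 * blk_trace_setup() copies the sanitized device name back into
	 * buts.name; events are then readable from the per-cpu relay
	 * files, e.g. /sys/kernel/debug/block/<name>/trace0, with the
	 * lost-event count in the neighbouring "dropped" file.
	 */
	printf("tracing %s\n", buts.name);

	ioctl(fd, BLKTRACESTOP);
	ioctl(fd, BLKTRACETEARDOWN);
	close(fd);
	return 0;
}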
diff --git a/block/Kconfig b/block/Kconfig
index 377f6dd20e17..96783645092d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -11,4 +11,16 @@ config LBD
 	  your machine, or if you want to have a raid or loopback device
 	  bigger than 2TB.  Otherwise say N.
 
+config BLK_DEV_IO_TRACE
+	bool "Support for tracing block io actions"
+	select RELAY
+	select DEBUG_FS
+	help
+	  Say Y here, if you want to be able to trace the block layer actions
+	  on a given queue. Tracing allows you to see any traffic happening
+	  on a block device queue. For more information (and the user space
+	  support tools needed), fetch the blktrace app from:
+
+	  git://brick.kernel.dk/data/git/blktrace.git
+
 source block/Kconfig.iosched
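
Since BLK_DEV_IO_TRACE selects RELAY and DEBUG_FS, a configuration that enables it ends up with a fragment along these lines (sketch):

CONFIG_RELAY=y
CONFIG_DEBUG_FS=y
CONFIG_BLK_DEV_IO_TRACE=y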
diff --git a/block/Makefile b/block/Makefile
index 7e4f93e2b44e..c05de0e0037f 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_AS)	+= as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+
+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
diff --git a/block/blktrace.c b/block/blktrace.c
new file mode 100644
index 000000000000..36f3a172275f
--- /dev/null
+++ b/block/blktrace.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2006 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+static unsigned int blktrace_seq __read_mostly = 1;
+
+/*
+ * Send out a notify for this process, if we haven't done so since a trace
+ * started
+ */
+static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+{
+	struct blk_io_trace *t;
+
+	t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
+	if (t) {
+		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+		t->device = bt->dev;
+		t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
+		t->pid = tsk->pid;
+		t->cpu = smp_processor_id();
+		t->pdu_len = sizeof(tsk->comm);
+		memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
+		tsk->btrace_seq = blktrace_seq;
+	}
+}
+
+static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
+			 pid_t pid)
+{
+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+		return 1;
+	if (sector < bt->start_lba || sector > bt->end_lba)
+		return 1;
+	if (bt->pid && pid != bt->pid)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Data direction bit lookup
+ */
+static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
+
+/*
+ * Bio action bits of interest
+ */
+static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC) };
+
+/*
+ * More could be added as needed, taking care to increment the decrementer
+ * to get correct indexing
+ */
+#define trace_barrier_bit(rw)	\
+	(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
+#define trace_sync_bit(rw)	\
+	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
+
+/*
+ * The worker for the various blk_add_trace*() types. Fills out a
+ * blk_io_trace structure and places it in a per-cpu subbuffer.
+ */
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+	struct task_struct *tsk = current;
+	struct blk_io_trace *t;
+	unsigned long flags;
+	unsigned long *sequence;
+	pid_t pid;
+	int cpu;
+
+	if (unlikely(bt->trace_state != Blktrace_running))
+		return;
+
+	what |= ddir_act[rw & WRITE];
+	what |= bio_act[trace_barrier_bit(rw)];
+	what |= bio_act[trace_sync_bit(rw)];
+
+	pid = tsk->pid;
+	if (unlikely(act_log_check(bt, what, sector, pid)))
+		return;
+
+	/*
+	 * A word about the locking here - we disable interrupts to reserve
+	 * some space in the relay per-cpu buffer, to prevent an irq
+	 * from coming in and stepping on our toes. Once reserved, it's
+	 * enough to get preemption disabled to prevent read of this data
+	 * before we are through filling it. get_cpu()/put_cpu() does this
+	 * for us
+	 */
+	local_irq_save(flags);
+
+	if (unlikely(tsk->btrace_seq != blktrace_seq))
+		trace_note_tsk(bt, tsk);
+
+	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+	if (t) {
+		cpu = smp_processor_id();
+		sequence = per_cpu_ptr(bt->sequence, cpu);
+
+		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+		t->sequence = ++(*sequence);
+		t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+		t->sector = sector;
+		t->bytes = bytes;
+		t->action = what;
+		t->pid = pid;
+		t->device = bt->dev;
+		t->cpu = cpu;
+		t->error = error;
+		t->pdu_len = pdu_len;
+
+		if (pdu_len)
+			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
+	}
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(__blk_add_trace);
+
+static struct dentry *blk_tree_root;
+static struct mutex blk_tree_mutex;
+static unsigned int root_users;
+
+static inline void blk_remove_root(void)
+{
+	if (blk_tree_root) {
+		debugfs_remove(blk_tree_root);
+		blk_tree_root = NULL;
+	}
+}
+
+static void blk_remove_tree(struct dentry *dir)
+{
+	mutex_lock(&blk_tree_mutex);
+	debugfs_remove(dir);
+	if (--root_users == 0)
+		blk_remove_root();
+	mutex_unlock(&blk_tree_mutex);
+}
+
+static struct dentry *blk_create_tree(const char *blk_name)
+{
+	struct dentry *dir = NULL;
+
+	mutex_lock(&blk_tree_mutex);
+
+	if (!blk_tree_root) {
+		blk_tree_root = debugfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+			goto err;
+	}
+
+	dir = debugfs_create_dir(blk_name, blk_tree_root);
+	if (dir)
+		root_users++;
+	else
+		blk_remove_root();
+
+err:
+	mutex_unlock(&blk_tree_mutex);
+	return dir;
+}
+
+static void blk_trace_cleanup(struct blk_trace *bt)
+{
+	relay_close(bt->rchan);
+	debugfs_remove(bt->dropped_file);
+	blk_remove_tree(bt->dir);
+	free_percpu(bt->sequence);
+	kfree(bt);
+}
+
+static int blk_trace_remove(request_queue_t *q)
+{
+	struct blk_trace *bt;
+
+	bt = xchg(&q->blk_trace, NULL);
+	if (!bt)
+		return -EINVAL;
+
+	if (bt->trace_state == Blktrace_setup ||
+	    bt->trace_state == Blktrace_stopped)
+		blk_trace_cleanup(bt);
+
+	return 0;
+}
+
+static int blk_dropped_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->u.generic_ip;
+
+	return 0;
+}
+
+static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	struct blk_trace *bt = filp->private_data;
+	char buf[16];
+
+	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+
+	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static struct file_operations blk_dropped_fops = {
+	.owner =	THIS_MODULE,
+	.open =		blk_dropped_open,
+	.read =		blk_dropped_read,
+};
+
+/*
+ * Keep track of how many times we encountered a full subbuffer, to aid
+ * the user space app in telling how many lost events there were.
+ */
+static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+				     void *prev_subbuf, size_t prev_padding)
+{
+	struct blk_trace *bt;
+
+	if (!relay_buf_full(buf))
+		return 1;
+
+	bt = buf->chan->private_data;
+	atomic_inc(&bt->dropped);
+	return 0;
+}
+
+static int blk_remove_buf_file_callback(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+	return 0;
+}
+
+static struct dentry *blk_create_buf_file_callback(const char *filename,
+						   struct dentry *parent,
+						   int mode,
+						   struct rchan_buf *buf,
+						   int *is_global)
+{
+	return debugfs_create_file(filename, mode, parent, buf,
+					&relay_file_operations);
+}
+
+static struct rchan_callbacks blk_relay_callbacks = {
+	.subbuf_start		= blk_subbuf_start_callback,
+	.create_buf_file	= blk_create_buf_file_callback,
+	.remove_buf_file	= blk_remove_buf_file_callback,
+};
+
+/*
+ * Setup everything required to start tracing
+ */
+static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+			   char __user *arg)
+{
+	struct blk_user_trace_setup buts;
+	struct blk_trace *old_bt, *bt = NULL;
+	struct dentry *dir = NULL;
+	char b[BDEVNAME_SIZE];
+	int ret, i;
+
+	if (copy_from_user(&buts, arg, sizeof(buts)))
+		return -EFAULT;
+
+	if (!buts.buf_size || !buts.buf_nr)
+		return -EINVAL;
+
+	strcpy(buts.name, bdevname(bdev, b));
+
+	/*
+	 * some device names have larger paths - convert the slashes
+	 * to underscores for this to work as expected
+	 */
+	for (i = 0; i < strlen(buts.name); i++)
+		if (buts.name[i] == '/')
+			buts.name[i] = '_';
+
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+		return -EFAULT;
+
+	ret = -ENOMEM;
+	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+	if (!bt)
+		goto err;
+
+	bt->sequence = alloc_percpu(unsigned long);
+	if (!bt->sequence)
+		goto err;
+
+	ret = -ENOENT;
+	dir = blk_create_tree(buts.name);
+	if (!dir)
+		goto err;
+
+	bt->dir = dir;
+	bt->dev = bdev->bd_dev;
+	atomic_set(&bt->dropped, 0);
+
+	ret = -EIO;
+	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
+	if (!bt->dropped_file)
+		goto err;
+
+	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
+	if (!bt->rchan)
+		goto err;
+	bt->rchan->private_data = bt;
+
+	bt->act_mask = buts.act_mask;
+	if (!bt->act_mask)
+		bt->act_mask = (u16) -1;
+
+	bt->start_lba = buts.start_lba;
+	bt->end_lba = buts.end_lba;
+	if (!bt->end_lba)
+		bt->end_lba = -1ULL;
+
+	bt->pid = buts.pid;
+	bt->trace_state = Blktrace_setup;
+
+	ret = -EBUSY;
+	old_bt = xchg(&q->blk_trace, bt);
+	if (old_bt) {
+		(void) xchg(&q->blk_trace, old_bt);
+		goto err;
+	}
+
+	return 0;
+err:
+	if (dir)
+		blk_remove_tree(dir);
+	if (bt) {
+		if (bt->dropped_file)
+			debugfs_remove(bt->dropped_file);
+		if (bt->sequence)
+			free_percpu(bt->sequence);
+		if (bt->rchan)
+			relay_close(bt->rchan);
+		kfree(bt);
+	}
+	return ret;
+}
+
+static int blk_trace_startstop(request_queue_t *q, int start)
+{
+	struct blk_trace *bt;
+	int ret;
+
+	if ((bt = q->blk_trace) == NULL)
+		return -EINVAL;
+
+	/*
+	 * For starting a trace, we can transition from a setup or stopped
+	 * trace. For stopping a trace, the state must be running
+	 */
+	ret = -EINVAL;
+	if (start) {
+		if (bt->trace_state == Blktrace_setup ||
+		    bt->trace_state == Blktrace_stopped) {
+			blktrace_seq++;
+			smp_mb();
+			bt->trace_state = Blktrace_running;
+			ret = 0;
+		}
+	} else {
+		if (bt->trace_state == Blktrace_running) {
+			bt->trace_state = Blktrace_stopped;
+			relay_flush(bt->rchan);
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * blk_trace_ioctl: - handle the ioctls associated with tracing
+ * @bdev:	the block device
+ * @cmd:	the ioctl cmd
+ * @arg:	the argument data, if any
+ *
+ **/
+int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
+{
+	request_queue_t *q;
+	int ret, start = 0;
+
+	q = bdev_get_queue(bdev);
+	if (!q)
+		return -ENXIO;
+
+	mutex_lock(&bdev->bd_mutex);
+
+	switch (cmd) {
+	case BLKTRACESETUP:
+		ret = blk_trace_setup(q, bdev, arg);
+		break;
+	case BLKTRACESTART:
+		start = 1;
+	case BLKTRACESTOP:
+		ret = blk_trace_startstop(q, start);
+		break;
+	case BLKTRACETEARDOWN:
+		ret = blk_trace_remove(q);
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(&bdev->bd_mutex);
+	return ret;
+}
+
+/**
+ * blk_trace_shutdown: - stop and cleanup trace structures
+ * @q:    the request queue associated with the device
+ *
+ **/
+void blk_trace_shutdown(request_queue_t *q)
+{
+	blk_trace_startstop(q, 0);
+	blk_trace_remove(q);
+}
+
+/*
+ * Average offset over two calls to sched_clock() with a gettimeofday()
+ * in the middle
+ */
+static void blk_check_time(unsigned long long *t)
+{
+	unsigned long long a, b;
+	struct timeval tv;
+
+	a = sched_clock();
+	do_gettimeofday(&tv);
+	b = sched_clock();
+
+	*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+	*t -= (a + b) / 2;
+}
+
+static void blk_trace_check_cpu_time(void *data)
+{
+	unsigned long long *t;
+	int cpu = get_cpu();
+
+	t = &per_cpu(blk_trace_cpu_offset, cpu);
+
+	/*
+	 * Just call it twice, hopefully the second call will be cache hot
+	 * and a little more precise
+	 */
+	blk_check_time(t);
+	blk_check_time(t);
+
+	put_cpu();
+}
+
+/*
+ * Call blk_trace_check_cpu_time() on each CPU to calibrate our inter-CPU
+ * timings
+ */
+static void blk_trace_calibrate_offsets(void)
+{
+	unsigned long flags;
+
+	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+	local_irq_save(flags);
+	blk_trace_check_cpu_time(NULL);
+	local_irq_restore(flags);
+}
+
+static void blk_trace_set_ht_offsets(void)
+{
+#if defined(CONFIG_SCHED_SMT)
+	int cpu, i;
+
+	/*
+	 * now make sure HT siblings have the same time offset
+	 */
+	preempt_disable();
+	for_each_online_cpu(cpu) {
+		unsigned long long *cpu_off, *sibling_off;
+
+		for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
+			if (i == cpu)
+				continue;
+
+			cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
+			sibling_off = &per_cpu(blk_trace_cpu_offset, i);
+			*sibling_off = *cpu_off;
+		}
+	}
+	preempt_enable();
+#endif
+}
+
+static __init int blk_trace_init(void)
+{
+	mutex_init(&blk_tree_mutex);
+	blk_trace_calibrate_offsets();
+	blk_trace_set_ht_offsets();
+
+	return 0;
+}
+
+module_init(blk_trace_init);
+
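
The blk_add_trace_rq(), blk_add_trace_bio(), blk_add_trace_generic(), blk_add_trace_pdu_int() and blk_add_trace_remap() calls sprinkled through the hunks below are wrappers that are not part of this 'block'-limited diffstat; they presumably live in include/linux/blktrace_api.h as inline helpers that bail out cheaply when no trace is attached to the queue and otherwise forward into __blk_add_trace() above. A sketch of one such wrapper, under that assumption:

/*
 * Assumed shape of a blktrace_api.h helper (not shown in this diff):
 * a cheap NULL check so untraced queues pay almost nothing, then the
 * bio's sector/size/rw flags are forwarded to the relay worker.
 */
static inline void blk_add_trace_bio(request_queue_t *q, struct bio *bio,
				     u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}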
diff --git a/block/elevator.c b/block/elevator.c
index db3d0d8296a0..5e558c4689a4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
+#include <linux/blktrace_api.h>
 
 #include <asm/uaccess.h>
 
@@ -333,6 +334,8 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 	struct list_head *pos;
 	unsigned ordseq;
 
+	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+
 	rq->q = q;
 
 	switch (where) {
@@ -499,6 +502,7 @@ struct request *elv_next_request(request_queue_t *q)
 		 * not be passed by new incoming requests
 		 */
 		rq->flags |= REQ_STARTED;
+		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 	}
 
 	if (!q->boundary_rq || q->boundary_rq == rq) {
diff --git a/block/ioctl.c b/block/ioctl.c
index 35fdb7dc6512..9cfa2e1ecb24 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,6 +5,7 @@
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -189,6 +190,11 @@ static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
 	case BLKGETSIZE64:
 		return put_u64(arg, bdev->bd_inode->i_size);
+	case BLKTRACESTART:
+	case BLKTRACESTOP:
+	case BLKTRACESETUP:
+	case BLKTRACETEARDOWN:
+		return blk_trace_ioctl(bdev, cmd, (char __user *) arg);
 	}
 	return -ENOIOCTLCMD;
 }
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 6c793b196aa9..062067fa7ead 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
 #include <linux/writeback.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
@@ -1556,8 +1557,10 @@ void blk_plug_device(request_queue_t *q)
 	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+	}
 }
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1621,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 	/*
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
-	if (q->unplug_fn)
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
 		q->unplug_fn(q);
+	}
 }
 
 static void blk_unplug_work(void *data)
 {
 	request_queue_t *q = data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
 	q->unplug_fn(q);
 }
 
@@ -1636,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data)
 {
 	request_queue_t *q = (request_queue_t *)data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
 	kblockd_schedule_work(&q->unplug_work);
 }
 
@@ -1753,6 +1766,9 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	if (q->blk_trace)
+		blk_trace_shutdown(q);
+
 	kmem_cache_free(requestq_cachep, q);
 }
 
@@ -2129,6 +2145,8 @@ rq_starved:
 
 	rq_init(q, rq);
 	rq->rl = rl;
+
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
 }
@@ -2157,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 	if (!rq) {
 		struct io_context *ioc;
 
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
 		io_schedule();
@@ -2210,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
 
@@ -2844,6 +2866,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 			if (!q->back_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
 			req->biotail->bi_next = bio;
 			req->biotail = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2859,6 +2883,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 			if (!q->front_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
 			bio->bi_next = req->bio;
 			req->bio = bio;
 
@@ -2976,6 +3002,7 @@ void generic_make_request(struct bio *bio)
 	request_queue_t *q;
 	sector_t maxsector;
 	int ret, nr_sectors = bio_sectors(bio);
+	dev_t old_dev;
 
 	might_sleep();
 	/* Test device or partition size, when known. */
@@ -3002,6 +3029,8 @@ void generic_make_request(struct bio *bio)
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 	 */
+	maxsector = -1;
+	old_dev = 0;
 	do {
 		char b[BDEVNAME_SIZE];
 
@@ -3034,6 +3063,15 @@ end_io:
 		 */
 		blk_partition_remap(bio);
 
+		if (maxsector != -1)
+			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+					    maxsector);
+
+		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+		maxsector = bio->bi_sector;
+		old_dev = bio->bi_bdev->bd_dev;
+
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
@@ -3153,6 +3191,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	int total_bytes, bio_nbytes, error, next_idx = 0;
 	struct bio *bio;
 
+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
 	/*
 	 * extend uptodate bool to allow < 0 value to be direct io error
 	 */
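
What lands in the relay buffers is a stream of fixed-size struct blk_io_trace records, each followed by pdu_len bytes of payload, exactly as filled in by __blk_add_trace() above. Below is a hypothetical user-space reader for one per-cpu file; the field list mirrors the assignments in __blk_add_trace(), but the precise ordering and widths are assumptions, since the defining blktrace_api.h header is outside this 'block'-limited diffstat:

#include <stdint.h>
#include <stdio.h>

struct blk_io_trace {			/* assumed layout */
	uint32_t magic;			/* BLK_IO_TRACE_MAGIC | version */
	uint32_t sequence;		/* per-cpu event number */
	uint64_t time;			/* sched_clock() minus per-cpu offset */
	uint64_t sector;
	uint32_t bytes;
	uint32_t action;		/* 'what' plus direction/barrier/sync bits */
	uint32_t pid;
	uint32_t device;
	uint32_t cpu;
	uint16_t error;
	uint16_t pdu_len;		/* payload bytes that follow the record */
};

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/block/sda/trace0", "rb");
	struct blk_io_trace t;

	if (!f)
		return 1;

	while (fread(&t, sizeof(t), 1, f) == 1) {
		printf("seq %u sector %llu bytes %u action %#x\n",
		       t.sequence, (unsigned long long) t.sector,
		       t.bytes, t.action);
		if (fseek(f, t.pdu_len, SEEK_CUR) != 0)	/* skip payload */
			break;
	}

	fclose(f);
	return 0;
}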