Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig    |   1
-rw-r--r--	block/blk-core.c |  46
-rw-r--r--	block/blktrace.c | 332
-rw-r--r--	block/elevator.c |  12
4 files changed, 366 insertions(+), 25 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 1ab7c15c8d7a..290b219fad9c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -47,6 +47,7 @@ config BLK_DEV_IO_TRACE
 	depends on SYSFS
 	select RELAY
 	select DEBUG_FS
+	select TRACEPOINTS
 	help
 	  Say Y here if you want to be able to trace the block layer actions
 	  on a given queue. Tracing allows you to see any traffic happening
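The new "select TRACEPOINTS" pulls in the static tracepoint infrastructure that every converted call site below depends on. For orientation, a block tracepoint of this era is declared once in the new <trace/block.h> header, roughly as follows (a minimal sketch assuming the header uses the stock DECLARE_TRACE/TPPROTO/TPARGS macros of this kernel generation; the header itself is not part of this diff):

    /* assumed shape of include/trace/block.h, not shown in this diff */
    #include <linux/tracepoint.h>

    struct request_queue;

    DECLARE_TRACE(block_plug,
            TPPROTO(struct request_queue *q),
            TPARGS(q));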
diff --git a/block/blk-core.c b/block/blk-core.c
index c36aa98fafa3..561e8a1b43a4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,9 +28,23 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"
 
+DEFINE_TRACE(block_plug);
+DEFINE_TRACE(block_unplug_io);
+DEFINE_TRACE(block_unplug_timer);
+DEFINE_TRACE(block_getrq);
+DEFINE_TRACE(block_sleeprq);
+DEFINE_TRACE(block_rq_requeue);
+DEFINE_TRACE(block_bio_backmerge);
+DEFINE_TRACE(block_bio_frontmerge);
+DEFINE_TRACE(block_bio_queue);
+DEFINE_TRACE(block_rq_complete);
+DEFINE_TRACE(block_remap);	/* Also used in drivers/md/dm.c */
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+
 static int __make_request(struct request_queue *q, struct bio *bio);
 
 /*
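Each DEFINE_TRACE() above instantiates one of those declarations, and the matching trace_block_*() call compiles down to a test of the tracepoint's state word while no probe is attached. Roughly, a simplified sketch of what DECLARE_TRACE() generates in this kernel generation (not the literal macro output):

    extern struct tracepoint __tracepoint_block_plug;

    static inline void trace_block_plug(struct request_queue *q)
    {
            /* probes run only while one is registered */
            if (unlikely(__tracepoint_block_plug.state))
                    __DO_TRACE(&__tracepoint_block_plug,
                               TPPROTO(struct request_queue *q), TPARGS(q));
    }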
@@ -205,7 +219,7 @@ void blk_plug_device(struct request_queue *q)
 
 	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+		trace_block_plug(q);
 	}
 }
 EXPORT_SYMBOL(blk_plug_device);
@@ -292,9 +306,7 @@ void blk_unplug_work(struct work_struct *work)
 	struct request_queue *q =
 		container_of(work, struct request_queue, unplug_work);
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_io(q);
 	q->unplug_fn(q);
 }
 
@@ -302,9 +314,7 @@ void blk_unplug_timeout(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_timer(q);
 	kblockd_schedule_work(q, &q->unplug_work);
 }
 
@@ -314,9 +324,7 @@ void blk_unplug(struct request_queue *q)
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
 	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
-
+		trace_block_unplug_io(q);
 		q->unplug_fn(q);
 	}
 }
@@ -822,7 +830,7 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+	trace_block_getrq(q, bio, rw);
 out:
 	return rq;
 }
@@ -848,7 +856,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		trace_block_sleeprq(q, bio, rw);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -928,7 +936,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	trace_block_rq_requeue(q, rq);
 
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
@@ -1167,7 +1175,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_back_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		trace_block_bio_backmerge(q, bio);
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
@@ -1186,7 +1194,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_front_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		trace_block_bio_frontmerge(q, bio);
 
 		bio->bi_next = req->bio;
 		req->bio = bio;
@@ -1269,7 +1277,7 @@ static inline void blk_partition_remap(struct bio *bio)
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
-				    bdev->bd_dev, bio->bi_sector,
-				    bio->bi_sector - p->start_sect);
+		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
+				  bdev->bd_dev, bio->bi_sector,
+				  bio->bi_sector - p->start_sect);
 	}
@@ -1441,10 +1449,10 @@ end_io:
 		goto end_io;
 
 	if (old_sector != -1)
-		blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-				    old_sector);
+		trace_block_remap(q, bio, old_dev, bio->bi_sector,
+				  old_sector);
 
-	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+	trace_block_bio_queue(q, bio);
 
 	old_sector = bio->bi_sector;
 	old_dev = bio->bi_bdev->bd_dev;
@@ -1678,7 +1686,7 @@ static int __end_that_request_first(struct request *req, int error,
 	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
 
-	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+	trace_block_rq_complete(req->q, req);
 
 	/*
 	 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
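With the hooks converted, blktrace is no longer hard-wired into the block layer: any GPL code can attach a probe to these events. A minimal sketch of a hypothetical module doing so, assuming the register_trace_block_plug()/unregister_trace_block_plug() helpers that DECLARE_TRACE() generates:

    #include <linux/module.h>
    #include <linux/blkdev.h>
    #include <trace/block.h>

    /* probe signature must match the tracepoint prototype */
    static void my_plug_probe(struct request_queue *q)
    {
            printk(KERN_INFO "queue %p plugged\n", q);
    }

    static int __init my_init(void)
    {
            return register_trace_block_plug(my_plug_probe);
    }

    static void __exit my_exit(void)
    {
            unregister_trace_block_plug(my_plug_probe);
            /* wait for in-flight probes before the module text goes away */
            tracepoint_synchronize_unregister();
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");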
diff --git a/block/blktrace.c b/block/blktrace.c
index 85049a7e7a17..b0a2cae886db 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -23,10 +23,18 @@
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
 #include <linux/time.h>
+#include <trace/block.h>
 #include <asm/uaccess.h>
 
 static unsigned int blktrace_seq __read_mostly = 1;
 
+/* Global reference count of probes */
+static DEFINE_MUTEX(blk_probe_mutex);
+static atomic_t blk_probes_ref = ATOMIC_INIT(0);
+
+static int blk_register_tracepoints(void);
+static void blk_unregister_tracepoints(void);
+
 /*
  * Send out a notify message.
  */
@@ -119,7 +127,7 @@ static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK
  * The worker for the various blk_add_trace*() types. Fills out a
  * blk_io_trace structure and places it in a per-cpu subbuffer.
  */
-void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
 {
 	struct task_struct *tsk = current;
@@ -177,8 +185,6 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	local_irq_restore(flags);
 }
 
-EXPORT_SYMBOL_GPL(__blk_add_trace);
-
 static struct dentry *blk_tree_root;
 static DEFINE_MUTEX(blk_tree_mutex);
 static unsigned int root_users;
@@ -237,6 +243,10 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
+	mutex_lock(&blk_probe_mutex);
+	if (atomic_dec_and_test(&blk_probes_ref))
+		blk_unregister_tracepoints();
+	mutex_unlock(&blk_probe_mutex);
 }
 
 int blk_trace_remove(struct request_queue *q)
@@ -428,6 +438,14 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	bt->pid = buts->pid;
 	bt->trace_state = Blktrace_setup;
 
+	mutex_lock(&blk_probe_mutex);
+	if (atomic_add_return(1, &blk_probes_ref) == 1) {
+		ret = blk_register_tracepoints();
+		if (ret)
+			goto probe_err;
+	}
+	mutex_unlock(&blk_probe_mutex);
+
 	ret = -EBUSY;
 	old_bt = xchg(&q->blk_trace, bt);
 	if (old_bt) {
@@ -436,6 +454,9 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	}
 
 	return 0;
+probe_err:
+	atomic_dec(&blk_probes_ref);
+	mutex_unlock(&blk_probe_mutex);
 err:
 	if (dir)
 		blk_remove_tree(dir);
@@ -562,3 +583,308 @@ void blk_trace_shutdown(struct request_queue *q)
 		blk_trace_remove(q);
 	}
 }
+
+/*
+ * blktrace probes
+ */
+
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q:		queue the io is for
+ * @rq:		the source request
+ * @what:	the action
+ *
+ * Description:
+ *     Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+			     u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->cmd_flags & 0x03;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_discard_rq(rq))
+		rw |= (1 << BIO_RW_DISCARD);
+
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
+				sizeof(rq->cmd), rq->cmd);
+	} else {
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+				rw, what, rq->errors, 0, NULL);
+	}
+}
+
+static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+}
+
+static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+}
+
+static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+}
+
+static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+}
+
+static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * @q:		queue the io is for
+ * @bio:	the source bio
+ * @what:	the action
+ *
+ * Description:
+ *     Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+			      u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
+			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+}
+
+static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+}
+
+static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+}
+
+static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+}
+
+static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+}
+
+static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
+{
+	if (bio)
+		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+	else {
+		struct blk_trace *bt = q->blk_trace;
+
+		if (bt)
+			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+	}
+}
+
+static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
+{
+	if (bio)
+		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+	else {
+		struct blk_trace *bt = q->blk_trace;
+
+		if (bt)
+			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL);
+	}
+}
+
+static void blk_add_trace_plug(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt)
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+}
+
+static void blk_add_trace_unplug_io(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt) {
+		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+		__be64 rpdu = cpu_to_be64(pdu);
+
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
+				sizeof(rpdu), &rpdu);
+	}
+}
+
+static void blk_add_trace_unplug_timer(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt) {
+		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+		__be64 rpdu = cpu_to_be64(pdu);
+
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
+				sizeof(rpdu), &rpdu);
+	}
+}
+
+static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
+				unsigned int pdu)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt) {
+		__be64 rpdu = cpu_to_be64(pdu);
+
+		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
+				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+				sizeof(rpdu), &rpdu);
+	}
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * @q:		queue the io is for
+ * @bio:	the source bio
+ * @dev:	target device
+ * @from:	source sector
+ * @to:		target sector
+ *
+ * Description:
+ *     Device mapper or raid targets sometimes need to split a bio because
+ *     it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+				dev_t dev, sector_t from, sector_t to)
+{
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+
+	if (likely(!bt))
+		return;
+
+	r.device = cpu_to_be32(dev);
+	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
+	r.sector = cpu_to_be64(to);
+
+	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+/**
+ * blk_add_driver_data - Add binary message with driver-specific data
+ * @q:		queue the io is for
+ * @rq:		io request
+ * @data:	driver-specific data
+ * @len:	length of driver-specific data
+ *
+ * Description:
+ *     Some drivers might want to write driver-specific data per request.
+ *
+ **/
+void blk_add_driver_data(struct request_queue *q,
+			 struct request *rq,
+			 void *data, size_t len)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_pc_request(rq))
+		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
+				rq->errors, len, data);
+	else
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+				0, BLK_TA_DRV_DATA, rq->errors, len, data);
+}
+EXPORT_SYMBOL_GPL(blk_add_driver_data);
+
+static int blk_register_tracepoints(void)
+{
+	int ret;
+
+	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
+	WARN_ON(ret);
+	ret = register_trace_block_getrq(blk_add_trace_getrq);
+	WARN_ON(ret);
+	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
+	WARN_ON(ret);
+	ret = register_trace_block_plug(blk_add_trace_plug);
+	WARN_ON(ret);
+	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+	WARN_ON(ret);
+	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
+	WARN_ON(ret);
+	ret = register_trace_block_split(blk_add_trace_split);
+	WARN_ON(ret);
+	ret = register_trace_block_remap(blk_add_trace_remap);
+	WARN_ON(ret);
+	return 0;
+}
+
+static void blk_unregister_tracepoints(void)
+{
+	unregister_trace_block_remap(blk_add_trace_remap);
+	unregister_trace_block_split(blk_add_trace_split);
+	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
+	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+	unregister_trace_block_plug(blk_add_trace_plug);
+	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
+	unregister_trace_block_getrq(blk_add_trace_getrq);
+	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
+	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
+	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
+	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
+	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
+	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
+
+	tracepoint_synchronize_unregister();
+}
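The blk_probes_ref counter keeps the probes above attached only while at least one queue is actually being traced: the first do_blk_trace_setup() registers all of them, the last blk_trace_cleanup() unregisters them, and tracepoint_synchronize_unregister() guarantees no probe is still executing afterwards. Userspace is unaffected by the conversion; the usual invocation still works, e.g.:

    blktrace -d /dev/sda -o - | blkparse -i -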
diff --git a/block/elevator.c b/block/elevator.c
index a6951f76ba0c..86836dd179c0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
 
@@ -41,6 +42,8 @@
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
+DEFINE_TRACE(block_rq_abort);
+
 /*
  * Merge hash stuff.
  */
@@ -52,6 +55,9 @@ static const int elv_hash_shift = 6;
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+DEFINE_TRACE(block_rq_insert);
+DEFINE_TRACE(block_rq_issue);
+
 /*
  * Query io scheduler to see if the current process issuing bio may be
  * merged with rq.
@@ -586,7 +592,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	unsigned ordseq;
 	int unplug_it = 1;
 
-	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
@@ -772,7 +778,7 @@ struct request *elv_next_request(struct request_queue *q)
 		 * not be passed by new incoming requests
 		 */
 		rq->cmd_flags |= REQ_STARTED;
-		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+		trace_block_rq_issue(q, rq);
 	}
 
 	if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -914,7 +920,7 @@ void elv_abort_queue(struct request_queue *q)
 	while (!list_empty(&q->queue_head)) {
 		rq = list_entry_rq(q->queue_head.next);
 		rq->cmd_flags |= REQ_QUIET;
-		blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+		trace_block_rq_abort(q, rq);
 		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
 	}
 }
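Note that DEFINE_TRACE() must appear exactly once per tracepoint kernel-wide, which is why block_rq_abort, block_rq_insert and block_rq_issue are defined here in elevator.c, next to the code that fires them, while the rest live in blk-core.c. The pattern for a further event would be (hypothetical name block_foo, following the same assumed TPPROTO/TPARGS convention as above):

    /* in include/trace/block.h -- declare once: */
    DECLARE_TRACE(block_foo,
            TPPROTO(struct request_queue *q),
            TPARGS(q));

    /* in exactly one .c file, near the code that fires it -- define once: */
    DEFINE_TRACE(block_foo);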