Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 6c793b196aa9..062067fa7ead 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
 #include <linux/writeback.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
@@ -1556,8 +1557,10 @@ void blk_plug_device(request_queue_t *q)
        if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
                return;
 
-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+       }
 }
 
 EXPORT_SYMBOL(blk_plug_device);
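
The hooks added throughout this patch are meant to cost a single pointer test when tracing is disabled. A paraphrased sketch of blk_add_trace_generic() as the companion <linux/blktrace_api.h> appears to define it (a sketch, not a verbatim copy):

    static inline void blk_add_trace_generic(struct request_queue *q,
                                             struct bio *bio, int rw, u32 what)
    {
            struct blk_trace *bt = q->blk_trace;    /* NULL unless tracing is on */

            if (likely(!bt))
                    return;                         /* disabled: one pointer test */

            if (bio)
                    __blk_add_trace(bt, bio->bi_sector, bio->bi_size, rw, what,
                                    !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
            else
                    __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
    }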
@@ -1621,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
        /*
         * devices don't necessarily have an ->unplug_fn defined
         */
-       if (q->unplug_fn)
+       if (q->unplug_fn) {
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                       q->rq.count[READ] + q->rq.count[WRITE]);
+
                q->unplug_fn(q);
+       }
 }
 
 static void blk_unplug_work(void *data)
 {
        request_queue_t *q = data;
 
+       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                               q->rq.count[READ] + q->rq.count[WRITE]);
+
        q->unplug_fn(q);
 }
 
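
Each unplug path records how much work the unplug releases: the _pdu_int variant attaches an integer payload to the event, here the number of requests currently allocated on the queue (q->rq.count[READ] + q->rq.count[WRITE]). A paraphrased sketch, assuming the companion header encodes the payload big-endian for on-disk/trace-file stability:

    static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
                                             struct bio *bio, unsigned int pdu)
    {
            struct blk_trace *bt = q->blk_trace;
            __be64 rpdu = cpu_to_be64(pdu);         /* payload in big-endian */

            if (likely(!bt))
                    return;

            if (bio)
                    __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                    what, !bio_flagged(bio, BIO_UPTODATE),
                                    sizeof(rpdu), &rpdu);
            else
                    __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
    }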
@@ -1636,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data)
 {
        request_queue_t *q = (request_queue_t *)data;
 
+       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+                               q->rq.count[READ] + q->rq.count[WRITE]);
+
        kblockd_schedule_work(&q->unplug_work);
 }
 
@@ -1753,6 +1766,9 @@ static void blk_release_queue(struct kobject *kobj)
        if (q->queue_tags)
                __blk_queue_free_tags(q);
 
+       if (q->blk_trace)
+               blk_trace_shutdown(q);
+
        kmem_cache_free(requestq_cachep, q);
 }
 
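
Tearing the trace down in blk_release_queue() keeps trace state from outliving its queue. A sketch of what blk_trace_shutdown() in the accompanying blktrace core is expected to do (stop, then remove; paraphrased, not verbatim):

    void blk_trace_shutdown(struct request_queue *q)
    {
            blk_trace_startstop(q, 0);      /* stop an in-flight trace */
            blk_trace_remove(q);            /* free buffers and debugfs files */
    }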
@@ -2129,6 +2145,8 @@ rq_starved:
 
        rq_init(q, rq);
        rq->rl = rl;
+
+       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
        return rq;
 }
@@ -2157,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                if (!rq) {
                        struct io_context *ioc;
 
+                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
                        __generic_unplug_device(q);
                        spin_unlock_irq(q->queue_lock);
                        io_schedule();
@@ -2210,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
 
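
The request-level hooks (BLK_TA_REQUEUE here, BLK_TA_COMPLETE in the last hunk) classify the request before logging it. A paraphrased sketch of blk_add_trace_rq(), assuming the companion header separates packet-command from filesystem requests:

    static inline void blk_add_trace_rq(struct request_queue *q,
                                        struct request *rq, u32 what)
    {
            struct blk_trace *bt = q->blk_trace;
            int rw = rq->flags & 0x03;              /* READ/WRITE bits */

            if (likely(!bt))
                    return;

            if (blk_pc_request(rq)) {               /* SCSI/packet command */
                    what |= BLK_TC_ACT(BLK_TC_PC);
                    __blk_add_trace(bt, 0, rq->data_len, rw, what,
                                    rq->errors, sizeof(rq->cmd), rq->cmd);
            } else {                                /* ordinary fs request */
                    what |= BLK_TC_ACT(BLK_TC_FS);
                    __blk_add_trace(bt, rq->hard_sector,
                                    rq->hard_nr_sectors << 9, rw, what,
                                    rq->errors, 0, NULL);
            }
    }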
@@ -2844,6 +2866,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->back_merge_fn(q, req, bio))
                                break;
 
+                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
                        req->biotail->bi_next = bio;
                        req->biotail = bio;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2859,6 +2883,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->front_merge_fn(q, req, bio))
                                break;
 
+                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
                        bio->bi_next = req->bio;
                        req->bio = bio;
 
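
The bio-level merge hooks log the bio's sector, size, and direction against the merge action. A paraphrased sketch of blk_add_trace_bio() from the companion header:

    static inline void blk_add_trace_bio(struct request_queue *q,
                                         struct bio *bio, u32 what)
    {
            struct blk_trace *bt = q->blk_trace;

            if (likely(!bt))
                    return;

            __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                            !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
    }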
@@ -2976,6 +3002,7 @@ void generic_make_request(struct bio *bio)
        request_queue_t *q;
        sector_t maxsector;
        int ret, nr_sectors = bio_sectors(bio);
+       dev_t old_dev;
 
        might_sleep();
        /* Test device or partition size, when known. */
@@ -3002,6 +3029,8 @@ void generic_make_request(struct bio *bio)
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
+       maxsector = -1;
+       old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];
 
@@ -3034,6 +3063,15 @@ end_io:
                 */
                blk_partition_remap(bio);
 
+               if (maxsector != -1)
+                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+                                           maxsector);
+
+               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+               maxsector = bio->bi_sector;
+               old_dev = bio->bi_bdev->bd_dev;
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
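
In this hunk the existing maxsector local is reused to carry the bio's sector from the previous loop iteration (-1 marks the first pass), and old_dev carries the previous device. Each later pass through a stacking driver can therefore emit a BLK_TA_REMAP event tying the bio's new (device, sector) location back to its old one. A paraphrased sketch of the remap hook from the companion header:

    static inline void blk_add_trace_remap(struct request_queue *q,
                                           struct bio *bio, dev_t dev,
                                           sector_t from, sector_t to)
    {
            struct blk_trace *bt = q->blk_trace;
            struct blk_io_trace_remap r;

            if (likely(!bt))
                    return;

            r.device = cpu_to_be32(dev);    /* device the bio came from */
            r.sector = cpu_to_be64(to);     /* sector on that old device */

            __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
                            !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
    }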
@@ -3153,6 +3191,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
        int total_bytes, bio_nbytes, error, next_idx = 0;
        struct bio *bio;
 
+       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
        /*
         * extend uptodate bool to allow < 0 value to be direct io error
         */
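
Taken together, the hooks bracket a request's life: BLK_TA_QUEUE when a bio enters the block layer, GETRQ/SLEEPRQ around request allocation, merge and plug/unplug events in between, and BLK_TA_COMPLETE here at completion. The events are streamed to userspace through relay buffers by the blktrace core and decoded by the userspace blktrace/blkparse tools, so pairing QUEUE with COMPLETE timestamps yields per-request latency.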