Diffstat (limited to 'block/blk-core.c')

 block/blk-core.c | 33 ++++++++++++++-------------------
 1 file changed, 14 insertions(+), 19 deletions(-)
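This patch converts the open-coded blktrace hooks in block/blk-core.c (blk_add_trace_generic(), blk_add_trace_pdu_int(), blk_add_trace_rq(), blk_add_trace_bio() and blk_add_trace_remap()) into generic trace_block_*() tracepoints, so blktrace becomes just one possible consumer of the events instead of being wired directly into the block layer. The new <trace/block.h> header is not part of this file's diff; a minimal sketch of the declarations it would need for the call sites below, using the DECLARE_TRACE/TPPROTO/TPARGS tracepoint API of this era (reconstructed from the call sites here, not copied from the actual header):

#include <linux/blkdev.h>
#include <linux/tracepoint.h>

/* Names and argument lists follow the trace_block_*() calls in this diff. */
DECLARE_TRACE(block_plug,
	TPPROTO(struct request_queue *q),
	TPARGS(q));

DECLARE_TRACE(block_getrq,
	TPPROTO(struct request_queue *q, struct bio *bio, int rw),
	TPARGS(q, bio, rw));

DECLARE_TRACE(block_rq_requeue,
	TPPROTO(struct request_queue *q, struct request *rq),
	TPARGS(q, rq));

With no probe registered, each trace_block_*() call site reduces to an unlikely() branch on the tracepoint's state, which is why the fixed BLK_TA_* event codes and pdu_int payloads can move out of blk-core.c and into the consumer.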
diff --git a/block/blk-core.c b/block/blk-core.c
index 10e8a64a5a5b..04267d66a2b9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,6 +28,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"
 
@@ -205,7 +206,7 @@ void blk_plug_device(struct request_queue *q)
 
 	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+		trace_block_plug(q);
 	}
 }
 EXPORT_SYMBOL(blk_plug_device);
@@ -292,9 +293,7 @@ void blk_unplug_work(struct work_struct *work)
 	struct request_queue *q =
 		container_of(work, struct request_queue, unplug_work);
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_io(q);
 	q->unplug_fn(q);
 }
 
@@ -302,9 +301,7 @@ void blk_unplug_timeout(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_timer(q);
 	kblockd_schedule_work(q, &q->unplug_work);
 }
 
@@ -314,9 +311,7 @@ void blk_unplug(struct request_queue *q)
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
 	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
-
+		trace_block_unplug_io(q);
 		q->unplug_fn(q);
 	}
 }
@@ -822,7 +817,7 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+	trace_block_getrq(q, bio, rw);
 out:
 	return rq;
 }
@@ -848,7 +843,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		trace_block_sleeprq(q, bio, rw);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -928,7 +923,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	trace_block_rq_requeue(q, rq);
 
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
@@ -1167,7 +1162,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_back_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		trace_block_bio_backmerge(q, bio);
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
@@ -1186,7 +1181,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_front_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		trace_block_bio_frontmerge(q, bio);
 
 		bio->bi_next = req->bio;
 		req->bio = bio;
@@ -1269,7 +1264,7 @@ static inline void blk_partition_remap(struct bio *bio)
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
-				    bdev->bd_dev, bio->bi_sector,
-				    bio->bi_sector - p->start_sect);
+		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
+				  bdev->bd_dev, bio->bi_sector,
+				  bio->bi_sector - p->start_sect);
 	}
@@ -1441,10 +1436,10 @@ end_io:
			goto end_io;
 
		if (old_sector != -1)
-			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-					    old_sector);
+			trace_block_remap(q, bio, old_dev, bio->bi_sector,
+					  old_sector);
 
-		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+		trace_block_bio_queue(q, bio);
 
		old_sector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;
@@ -1656,7 +1651,7 @@ static int __end_that_request_first(struct request *req, int error,
 	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
 
-	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+	trace_block_rq_complete(req->q, req);
 
 	/*
 	 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
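Note that the unplug hunks drop the explicit payload the old blk_add_trace_pdu_int() calls carried (q->rq.count[READ] + q->rq.count[WRITE]): the tracepoint now passes only the queue, and a probe can recompute the count itself. Below is a minimal, hypothetical consumer module sketched under that assumption; the register_/unregister_trace_block_unplug_io() helpers are the ones DECLARE_TRACE generates for block_unplug_io, while probe_block_unplug_io() and its printout are invented for illustration:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/block.h>

/* Hypothetical probe: recomputes the request count that the old
 * blk_add_trace_pdu_int() call passed explicitly. */
static void probe_block_unplug_io(struct request_queue *q)
{
	unsigned int pending = q->rq.count[READ] + q->rq.count[WRITE];

	pr_info("block_unplug_io: %u requests pending\n", pending);
}

static int __init unplug_probe_init(void)
{
	/* Once registration succeeds, the trace_block_unplug_io() call
	 * sites in blk-core.c start invoking the probe. */
	return register_trace_block_unplug_io(probe_block_unplug_io);
}

static void __exit unplug_probe_exit(void)
{
	unregister_trace_block_unplug_io(probe_block_unplug_io);
	/* Wait for in-flight probe invocations before the module unloads. */
	tracepoint_synchronize_unregister();
}

module_init(unplug_probe_init);
module_exit(unplug_probe_exit);
MODULE_LICENSE("GPL");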