Diffstat (limited to 'block')
-rw-r--r--   block/blktrace.c   51
1 file changed, 30 insertions, 21 deletions
diff --git a/block/blktrace.c b/block/blktrace.c
index 4f45b343690a..8f5c37b0f80f 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -24,7 +24,7 @@
 #include <linux/debugfs.h>
 #include <linux/time.h>
 #include <trace/block.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <../kernel/trace/trace_output.h>
 
 static unsigned int blktrace_seq __read_mostly = 1;
@@ -148,11 +148,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 /*
  * Data direction bit lookup
  */
-static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
+static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ),
+                                         BLK_TC_ACT(BLK_TC_WRITE) };
 
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
-        (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
+#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
+        (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
@@ -221,13 +222,13 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
         t->time = ktime_to_ns(ktime_get());
 record_it:
         /*
-         * These two are not needed in ftrace as they are in the
-         * generic trace_entry, filled by tracing_generic_entry_update,
-         * but for the trace_event->bin() synthesizer benefit we do it
-         * here too.
-         */
-        t->cpu = cpu;
-        t->pid = pid;
+         * These two are not needed in ftrace as they are in the
+         * generic trace_entry, filled by tracing_generic_entry_update,
+         * but for the trace_event->bin() synthesizer benefit we do it
+         * here too.
+         */
+        t->cpu = cpu;
+        t->pid = pid;
 
         t->sector = sector;
         t->bytes = bytes;
@@ -453,7 +454,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
         atomic_set(&bt->dropped, 0);
 
         ret = -EIO;
-        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
+        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+                                               &blk_dropped_fops);
         if (!bt->dropped_file)
                 goto err;
 
@@ -535,10 +537,10 @@ EXPORT_SYMBOL_GPL(blk_trace_setup);
 
 int blk_trace_startstop(struct request_queue *q, int start)
 {
-        struct blk_trace *bt;
         int ret;
+        struct blk_trace *bt = q->blk_trace;
 
-        if ((bt = q->blk_trace) == NULL)
+        if (bt == NULL)
                 return -EINVAL;
 
         /*
@@ -674,12 +676,14 @@ static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 }
 
-static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_requeue(struct request_queue *q,
+                                     struct request *rq)
 {
         blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 }
 
-static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_complete(struct request_queue *q,
+                                      struct request *rq)
 {
         blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
 }
@@ -716,12 +720,14 @@ static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
         blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
 }
 
-static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_backmerge(struct request_queue *q,
+                                        struct bio *bio)
 {
         blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 }
 
-static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_frontmerge(struct request_queue *q,
+                                         struct bio *bio)
 {
         blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 }
@@ -731,7 +737,8 @@ static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
         blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 }
 
-static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
+static void blk_add_trace_getrq(struct request_queue *q,
+                                struct bio *bio, int rw)
 {
         if (bio)
                 blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
@@ -744,7 +751,8 @@ static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw
 }
 
 
-static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
+static void blk_add_trace_sleeprq(struct request_queue *q,
+                                  struct bio *bio, int rw)
 {
         if (bio)
                 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
@@ -752,7 +760,8 @@ static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int
                 struct blk_trace *bt = q->blk_trace;
 
                 if (bt)
-                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL);
+                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
+                                        0, 0, NULL);
         }
 }
 
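For reference, the MASK_TC_BIT() macro touched above isolates one BIO_RW_* bit
from a request's rw flags and shifts it so it lands on the matching BLK_TC_*
category bit in the upper half of the trace action word (BLK_TC_ACT(x) is
x << BLK_TC_SHIFT). The stand-alone sketch below is not part of this commit;
the constant values are hypothetical stand-ins chosen for the example, and
only the shift arithmetic mirrors the macro.

/*
 * Illustrative sketch only. The constants are hypothetical, not the
 * kernel's real BIO_RW_ / BLK_TC_ values; only the shift arithmetic
 * mirrors MASK_TC_BIT() from the patch above.
 */
#include <stdio.h>

#define BIO_RW_EXAMPLE  3                /* hypothetical bit index in the rw flags */
#define BLK_TC_EXAMPLE  (1 << 2)         /* hypothetical trace-category mask */
#define BLK_TC_SHIFT    16               /* category bits sit in the upper half */
#define ILOG2(x)        __builtin_ctz(x) /* stand-in for the kernel's ilog2() */

/* Same shape as MASK_TC_BIT(rw, __name) after the cleanup above */
#define MASK_TC_BIT_EX(rw) (((rw) & (1 << BIO_RW_EXAMPLE)) << \
        (ILOG2(BLK_TC_EXAMPLE) + BLK_TC_SHIFT - BIO_RW_EXAMPLE))

int main(void)
{
        unsigned int rw = 1 << BIO_RW_EXAMPLE; /* request has the example flag set */

        /*
         * The flag ends up at bit ilog2(BLK_TC_EXAMPLE) + BLK_TC_SHIFT = 18,
         * i.e. the BLK_TC_ACT() position for that category: prints 0x40000.
         */
        printf("mapped mask: 0x%x\n", MASK_TC_BIT_EX(rw));
        return 0;
}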