about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMikulas Patocka <mpatocka@redhat.com>2015-06-09 17:22:49 -0400
committerMike Snitzer <snitzer@redhat.com>2015-06-17 12:40:41 -0400
commite262f34741522e0d821642e5449c6eeb512723fc (patch)
tree2f6608a34b9b1ef4c249267901d73a5e0d481cd0
parentdfcfac3e4cd94abef779297fab6adfd2dbcf52fa (diff)
dm stats: add support for request-based DM devices
This makes it possible to use dm stats with DM multipath.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--drivers/md/dm-stats.c5
-rw-r--r--drivers/md/dm.c26
2 files changed, 26 insertions(+), 5 deletions(-)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index faf1071ef631..8a8b48fa901a 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1155,11 +1155,6 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
1155{ 1155{
1156 int r; 1156 int r;
1157 1157
1158 if (dm_request_based(md)) {
1159 DMWARN("Statistics are only supported for bio-based devices");
1160 return -EOPNOTSUPP;
1161 }
1162
1163 /* All messages here must start with '@' */ 1158 /* All messages here must start with '@' */
1164 if (!strcasecmp(argv[0], "@stats_create")) 1159 if (!strcasecmp(argv[0], "@stats_create"))
1165 r = message_stats_create(md, argc, argv, result, maxlen); 1160 r = message_stats_create(md, argc, argv, result, maxlen);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 767bce906588..90dc49e3c78f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -86,6 +86,9 @@ struct dm_rq_target_io {
86 struct kthread_work work; 86 struct kthread_work work;
87 int error; 87 int error;
88 union map_info info; 88 union map_info info;
89 struct dm_stats_aux stats_aux;
90 unsigned long duration_jiffies;
91 unsigned n_sectors;
89}; 92};
90 93
91/* 94/*
@@ -995,6 +998,17 @@ static struct dm_rq_target_io *tio_from_request(struct request *rq)
995 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); 998 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
996} 999}
997 1000
1001static void rq_end_stats(struct mapped_device *md, struct request *orig)
1002{
1003 if (unlikely(dm_stats_used(&md->stats))) {
1004 struct dm_rq_target_io *tio = tio_from_request(orig);
1005 tio->duration_jiffies = jiffies - tio->duration_jiffies;
1006 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
1007 tio->n_sectors, true, tio->duration_jiffies,
1008 &tio->stats_aux);
1009 }
1010}
1011
998/* 1012/*
999 * Don't touch any member of the md after calling this function because 1013 * Don't touch any member of the md after calling this function because
1000 * the md may be freed in dm_put() at the end of this function. 1014 * the md may be freed in dm_put() at the end of this function.
@@ -1078,6 +1092,7 @@ static void dm_end_request(struct request *clone, int error)
1078 } 1092 }
1079 1093
1080 free_rq_clone(clone); 1094 free_rq_clone(clone);
1095 rq_end_stats(md, rq);
1081 if (!rq->q->mq_ops) 1096 if (!rq->q->mq_ops)
1082 blk_end_request_all(rq, error); 1097 blk_end_request_all(rq, error);
1083 else 1098 else
@@ -1120,6 +1135,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
1120 1135
1121 dm_unprep_request(rq); 1136 dm_unprep_request(rq);
1122 1137
1138 rq_end_stats(md, rq);
1123 if (!rq->q->mq_ops) 1139 if (!rq->q->mq_ops)
1124 old_requeue_request(rq); 1140 old_requeue_request(rq);
1125 else { 1141 else {
@@ -1211,6 +1227,7 @@ static void dm_softirq_done(struct request *rq)
1211 int rw; 1227 int rw;
1212 1228
1213 if (!clone) { 1229 if (!clone) {
1230 rq_end_stats(tio->md, rq);
1214 rw = rq_data_dir(rq); 1231 rw = rq_data_dir(rq);
1215 if (!rq->q->mq_ops) { 1232 if (!rq->q->mq_ops) {
1216 blk_end_request_all(rq, tio->error); 1233 blk_end_request_all(rq, tio->error);
@@ -1943,6 +1960,14 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
1943 md->last_rq_start_time = ktime_get(); 1960 md->last_rq_start_time = ktime_get();
1944 } 1961 }
1945 1962
1963 if (unlikely(dm_stats_used(&md->stats))) {
1964 struct dm_rq_target_io *tio = tio_from_request(orig);
1965 tio->duration_jiffies = jiffies;
1966 tio->n_sectors = blk_rq_sectors(orig);
1967 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
1968 tio->n_sectors, false, 0, &tio->stats_aux);
1969 }
1970
1946 /* 1971 /*
1947 * Hold the md reference here for the in-flight I/O. 1972 * Hold the md reference here for the in-flight I/O.
1948 * We can't rely on the reference count by device opener, 1973 * We can't rely on the reference count by device opener,
@@ -2689,6 +2714,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
2689 /* Direct call is fine since .queue_rq allows allocations */ 2714 /* Direct call is fine since .queue_rq allows allocations */
2690 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { 2715 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
2691 /* Undo dm_start_request() before requeuing */ 2716 /* Undo dm_start_request() before requeuing */
2717 rq_end_stats(md, rq);
2692 rq_completed(md, rq_data_dir(rq), false); 2718 rq_completed(md, rq_data_dir(rq), false);
2693 return BLK_MQ_RQ_QUEUE_BUSY; 2719 return BLK_MQ_RQ_QUEUE_BUSY;
2694 } 2720 }