path: root/drivers/md/dm.c
author		Keith Busch <keith.busch@intel.com>	2014-10-17 19:46:37 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2015-02-09 13:06:47 -0500
commit		466d89a6bcd500f64896b514f78b32e8d0b0303a (patch)
tree		765bf96679a9230f65a3201bcf07c21dac81a8ee /drivers/md/dm.c
parent		2eb6e1e3aa873f2bb62075bebe17fa108ee07374 (diff)
dm: prepare for allocating blk-mq clone requests in target
For blk-mq request-based DM the responsibility of allocating a cloned
request will be transferred from DM core to the target type.

To prepare for conditionally using this new model the original request's
'special' now points to the dm_rq_target_io because the clone is allocated
later in the block layer rather than in DM core.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
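As a reading aid (not part of the patch): a minimal C sketch of the pointer
re-wiring described above. The field names md, ti, orig, clone, error, work
and info are taken from this diff; the struct layout shown is abbreviated
and illustrative, not the actual definition in dm.c.

/* Illustrative, abbreviated tio; the real layout in dm.c differs. */
struct mapped_device;
struct dm_target;
struct request;

struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig;	/* original request from the block layer */
	struct request *clone;	/* clone; with blk-mq, allocated later */
	/* ... error, work, info ... */
};

/*
 * Before this patch, rq->special held the clone, and the tio was only
 * reachable through the clone:
 *
 *	struct request *clone = rq->special;
 *	struct dm_rq_target_io *tio = clone->end_io_data;
 *
 * After this patch, rq->special holds the tio itself, so both requests
 * stay reachable even before any clone has been allocated:
 *
 *	struct dm_rq_target_io *tio = rq->special;
 *	struct request *clone = tio->clone;	/* may not exist yet */
 */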
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	134
1 file changed, 66 insertions(+), 68 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f0e34070c11d..ae1219893948 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1016,7 +1016,7 @@ static void end_clone_bio(struct bio *clone, int error)
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
 	atomic_dec(&md->pending[rw]);
 
@@ -1050,7 +1050,8 @@ static void free_rq_clone(struct request *clone)
 
 /*
  * Complete the clone and the original request.
- * Must be called without queue lock.
+ * Must be called without clone's queue lock held,
+ * see end_clone_request() for more details.
  */
 static void dm_end_request(struct request *clone, int error)
 {
@@ -1079,7 +1080,8 @@ static void dm_end_request(struct request *clone, int error)
 
 static void dm_unprep_request(struct request *rq)
 {
-	struct request *clone = rq->special;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
@@ -1090,12 +1092,10 @@ static void dm_unprep_request(struct request *rq)
 /*
  * Requeue the original request of a clone.
  */
-static void dm_requeue_unmapped_request(struct request *clone)
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+						 struct request *rq)
 {
-	int rw = rq_data_dir(clone);
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-	struct request *rq = tio->orig;
+	int rw = rq_data_dir(rq);
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
@@ -1105,7 +1105,14 @@ static void dm_requeue_unmapped_request(struct request *clone)
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	rq_completed(md, rw, 0);
+	rq_completed(md, rw, false);
+}
+
+static void dm_requeue_unmapped_request(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	dm_requeue_unmapped_original_request(tio->md, tio->orig);
 }
 
 static void __stop_queue(struct request_queue *q)
@@ -1175,8 +1182,8 @@ static void dm_done(struct request *clone, int error, bool mapped)
 static void dm_softirq_done(struct request *rq)
 {
 	bool mapped = true;
-	struct request *clone = rq->completion_data;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	if (rq->cmd_flags & REQ_FAILED)
 		mapped = false;
@@ -1188,13 +1195,11 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *clone, int error)
+static void dm_complete_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
+	struct dm_rq_target_io *tio = rq->special;
 
 	tio->error = error;
-	rq->completion_data = clone;
 	blk_complete_request(rq);
 }
 
@@ -1204,20 +1209,19 @@ static void dm_complete_request(struct request *clone, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() function fails.
  */
-static void dm_kill_unmapped_request(struct request *clone, int error)
+static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
-
 	rq->cmd_flags |= REQ_FAILED;
-	dm_complete_request(clone, error);
+	dm_complete_request(rq, error);
 }
 
 /*
- * Called with the queue lock held
+ * Called with the clone's queue lock held
  */
 static void end_clone_request(struct request *clone, int error)
 {
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
 	/*
 	 * For just cleaning up the information of the queue in which
 	 * the clone was dispatched.
@@ -1228,13 +1232,13 @@ static void end_clone_request(struct request *clone, int error)
 
 	/*
 	 * Actual request completion is done in a softirq context which doesn't
-	 * hold the queue lock. Otherwise, deadlock could occur because:
+	 * hold the clone's queue lock. Otherwise, deadlock could occur because:
 	 *  - another request may be submitted by the upper level driver
 	 *    of the stacking during the completion
 	 *  - the submission which requires queue lock may be done
-	 *    against this queue
+	 *    against this clone's queue
 	 */
-	dm_complete_request(clone, error);
+	dm_complete_request(tio->orig, error);
 }
 
 /*
@@ -1712,16 +1716,17 @@ static void dm_request(struct request_queue *q, struct bio *bio)
 	_dm_request(q, bio);
 }
 
-static void dm_dispatch_request(struct request *rq)
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
 	int r;
 
-	if (blk_queue_io_stat(rq->q))
-		rq->cmd_flags |= REQ_IO_STAT;
+	if (blk_queue_io_stat(clone->q))
+		clone->cmd_flags |= REQ_IO_STAT;
 
-	rq->start_time = jiffies;
-	r = blk_insert_cloned_request(rq->q, rq);
+	clone->start_time = jiffies;
+	r = blk_insert_cloned_request(clone->q, clone);
 	if (r)
+		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
 }
 
@@ -1760,8 +1765,8 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
-static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
-				  struct dm_rq_target_io *tio, gfp_t gfp_mask)
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
+				struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	struct request *clone = alloc_clone_request(md, gfp_mask);
 
@@ -1780,10 +1785,9 @@ static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
 
 static void map_tio_request(struct kthread_work *work);
 
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
-				gfp_t gfp_mask)
+static struct dm_rq_target_io *prep_tio(struct request *rq,
+					struct mapped_device *md, gfp_t gfp_mask)
 {
-	struct request *clone;
 	struct dm_rq_target_io *tio;
 
 	tio = alloc_rq_tio(md, gfp_mask);
@@ -1798,13 +1802,12 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	memset(&tio->info, 0, sizeof(tio->info));
 	init_kthread_work(&tio->work, map_tio_request);
 
-	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
-	if (!clone) {
+	if (!clone_rq(rq, md, tio, gfp_mask)) {
 		free_rq_tio(tio);
 		return NULL;
 	}
 
-	return clone;
+	return tio;
 }
 
 /*
@@ -1813,18 +1816,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 static int dm_prep_fn(struct request_queue *q, struct request *rq)
 {
 	struct mapped_device *md = q->queuedata;
-	struct request *clone;
+	struct dm_rq_target_io *tio;
 
 	if (unlikely(rq->special)) {
 		DMWARN("Already has something in rq->special.");
 		return BLKPREP_KILL;
 	}
 
-	clone = clone_rq(rq, md, GFP_ATOMIC);
-	if (!clone)
+	tio = prep_tio(rq, md, GFP_ATOMIC);
+	if (!tio)
 		return BLKPREP_DEFER;
 
-	rq->special = clone;
+	rq->special = tio;
 	rq->cmd_flags |= REQ_DONTPREP;
 
 	return BLKPREP_OK;
@@ -1835,11 +1838,12 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
  *  0  : the request has been processed (not requeued)
  *  !0 : the request has been requeued
  */
-static int map_request(struct dm_target *ti, struct request *clone,
+static int map_request(struct dm_target *ti, struct request *rq,
 		       struct mapped_device *md)
 {
 	int r, requeued = 0;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@@ -1849,8 +1853,8 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REMAPPED:
 		/* The target has remapped the I/O so dispatch it */
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
-				     blk_rq_pos(tio->orig));
-		dm_dispatch_request(clone);
+				     blk_rq_pos(rq));
+		dm_dispatch_clone_request(clone, rq);
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
@@ -1864,7 +1868,7 @@ static int map_request(struct dm_target *ti, struct request *clone,
 		}
 
 		/* The target wants to complete the I/O */
-		dm_kill_unmapped_request(clone, r);
+		dm_kill_unmapped_request(rq, r);
 		break;
 	}
 
@@ -1875,16 +1879,13 @@ static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
 
-	map_request(tio->ti, tio->clone, tio->md);
+	map_request(tio->ti, tio->orig, tio->md);
 }
 
-static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+static void dm_start_request(struct mapped_device *md, struct request *orig)
 {
-	struct request *clone;
-
 	blk_start_request(orig);
-	clone = orig->special;
-	atomic_inc(&md->pending[rq_data_dir(clone)]);
+	atomic_inc(&md->pending[rq_data_dir(orig)]);
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1894,8 +1895,6 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
 	 * See the comment in rq_completed() too.
 	 */
 	dm_get(md);
-
-	return clone;
 }
 
 /*
@@ -1908,7 +1907,7 @@ static void dm_request_fn(struct request_queue *q)
 	int srcu_idx;
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
-	struct request *rq, *clone;
+	struct request *rq;
 	struct dm_rq_target_io *tio;
 	sector_t pos;
 
@@ -1931,19 +1930,19 @@ static void dm_request_fn(struct request_queue *q)
 		ti = dm_table_find_target(map, pos);
 		if (!dm_target_is_valid(ti)) {
 			/*
-			 * Must perform setup, that dm_done() requires,
+			 * Must perform setup, that rq_completed() requires,
 			 * before calling dm_kill_unmapped_request
 			 */
 			DMERR_LIMIT("request attempted access beyond the end of device");
-			clone = dm_start_request(md, rq);
-			dm_kill_unmapped_request(clone, -EIO);
+			dm_start_request(md, rq);
+			dm_kill_unmapped_request(rq, -EIO);
 			continue;
 		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		clone = dm_start_request(md, rq);
+		dm_start_request(md, rq);
 
 		tio = rq->special;
 		/* Establish tio->ti before queuing work (map_tio_request) */
@@ -2240,16 +2239,15 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		bioset_free(md->bs);
 		md->bs = p->bs;
 		p->bs = NULL;
-	} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
-		/*
-		 * There's no need to reload with request-based dm
-		 * because the size of front_pad doesn't change.
-		 * Note for future: If you are to reload bioset,
-		 * prep-ed requests in the queue may refer
-		 * to bio from the old bioset, so you must walk
-		 * through the queue to unprep.
-		 */
 	}
+	/*
+	 * There's no need to reload with request-based dm
+	 * because the size of front_pad doesn't change.
+	 * Note for future: If you are to reload bioset,
+	 * prep-ed requests in the queue may refer
+	 * to bio from the old bioset, so you must walk
+	 * through the queue to unprep.
+	 */
 	goto out;
 }
 