author    Mike Snitzer <snitzer@redhat.com>  2016-02-20 13:45:38 -0500
committer Mike Snitzer <snitzer@redhat.com>  2016-02-22 22:34:33 -0500
commit    eca7ee6dc01b21c669bce8c39d3d368509fb65e8
tree      e913759074c83e9aa01874ab38f38cda4cdc9463
parent    c5248f79f39e5254977a3916b2149c3ccffa2722
dm: distinguish old .request_fn (dm-old) vs dm-mq request-based DM
Rename various methods to have either a "dm_old" or "dm_mq" prefix.
Improve code comments to assist with understanding the duality of code
that handles both "dm_old" and "dm_mq" cases.

It is now much easier to quickly look at the code and _know_ that a
given method is either 1) "dm_old" only, 2) "dm_mq" only, or 3) common
to both.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--   drivers/md/dm-mpath.c |   6
-rw-r--r--   drivers/md/dm.c       | 102
2 files changed, 58 insertions(+), 50 deletions(-)
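As background for the naming split (this note and sketch are editorial additions, not part of the patch): both variants share common entry points that dispatch on whether the underlying request_queue is blk-mq, i.e. whether q->mq_ops is set, as dm_stop_queue()/dm_start_queue() do in the diff below. A minimal userspace C sketch of that dispatch pattern, with purely hypothetical names standing in for the kernel types:

/*
 * Editorial sketch (userspace, not kernel code): models how a common
 * dm_stop_queue()-style wrapper picks the "dm_old" (.request_fn) or
 * "dm_mq" (blk-mq) variant by testing q->mq_ops.  All names here are
 * hypothetical stand-ins for the kernel structures.
 */
#include <stdio.h>

struct toy_request_queue {
        const void *mq_ops;             /* non-NULL => blk-mq (dm-mq) path */
};

static void toy_dm_old_stop_queue(struct toy_request_queue *q)
{
        printf("dm_old: stopping .request_fn queue\n");
}

static void toy_dm_mq_stop_queue(struct toy_request_queue *q)
{
        printf("dm_mq: stopping blk-mq hardware queues\n");
}

/* Common wrapper: the only place that knows about both variants. */
static void toy_dm_stop_queue(struct toy_request_queue *q)
{
        if (!q->mq_ops)
                toy_dm_old_stop_queue(q);
        else
                toy_dm_mq_stop_queue(q);
}

int main(void)
{
        struct toy_request_queue legacy = { .mq_ops = NULL };
        struct toy_request_queue mq = { .mq_ops = (const void *)1 };

        toy_dm_stop_queue(&legacy);     /* takes the dm_old branch */
        toy_dm_stop_queue(&mq);         /* takes the dm_mq branch */
        return 0;
}

The renames in the patch make that ownership explicit at a glance: a "dm_old_" or "dm_mq_" prefix marks a variant-specific helper, while unprefixed wrappers such as dm_stop_queue() remain common to both.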
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7259eeba6a58..fde08c9809ef 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -426,7 +426,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
                 clone->rq_disk = bdev->bd_disk;
                 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
         } else {
-                /* blk-mq request-based interface */
+                /*
+                 * blk-mq request-based interface; used by both:
+                 * .request_fn stacked on blk-mq path(s) and
+                 * blk-mq stacked on blk-mq path(s).
+                 */
                 *__clone = blk_get_request(bdev_get_queue(bdev),
                                            rq_data_dir(rq), GFP_ATOMIC);
                 if (IS_ERR(*__clone)) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 26fedd93702e..d4040e6d4d3d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -333,7 +333,7 @@ static int __init local_init(void)
         if (!_rq_tio_cache)
                 goto out_free_io_cache;
 
-        _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
+        _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
                                       __alignof__(struct request), 0, NULL);
         if (!_rq_cache)
                 goto out_free_rq_tio_cache;
@@ -652,24 +652,24 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
         bio_put(&tio->clone);
 }
 
-static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
-                                            gfp_t gfp_mask)
+static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
+                                                gfp_t gfp_mask)
 {
         return mempool_alloc(md->io_pool, gfp_mask);
 }
 
-static void free_rq_tio(struct dm_rq_target_io *tio)
+static void free_old_rq_tio(struct dm_rq_target_io *tio)
 {
         mempool_free(tio, tio->md->io_pool);
 }
 
-static struct request *alloc_clone_request(struct mapped_device *md,
-                                           gfp_t gfp_mask)
+static struct request *alloc_old_clone_request(struct mapped_device *md,
+                                               gfp_t gfp_mask)
 {
         return mempool_alloc(md->rq_pool, gfp_mask);
 }
 
-static void free_clone_request(struct mapped_device *md, struct request *rq)
+static void free_old_clone_request(struct mapped_device *md, struct request *rq)
 {
         mempool_free(rq, md->rq_pool);
 }
@@ -1140,10 +1140,10 @@ static void free_rq_clone(struct request *clone)
                 tio->ti->type->release_clone_rq(clone);
         else if (!md->queue->mq_ops)
                 /* request_fn queue stacked on request_fn queue(s) */
-                free_clone_request(md, clone);
+                free_old_clone_request(md, clone);
 
         if (!md->queue->mq_ops)
-                free_rq_tio(tio);
+                free_old_rq_tio(tio);
 }
 
 /*
@@ -1193,13 +1193,13 @@ static void dm_unprep_request(struct request *rq)
         if (clone)
                 free_rq_clone(clone);
         else if (!tio->md->queue->mq_ops)
-                free_rq_tio(tio);
+                free_old_rq_tio(tio);
 }
 
 /*
  * Requeue the original request of a clone.
  */
-static void old_requeue_request(struct request *rq)
+static void dm_old_requeue_request(struct request *rq)
 {
         struct request_queue *q = rq->q;
         unsigned long flags;
@@ -1231,14 +1231,14 @@ static void dm_requeue_original_request(struct mapped_device *md,
 
         rq_end_stats(md, rq);
         if (!rq->q->mq_ops)
-                old_requeue_request(rq);
+                dm_old_requeue_request(rq);
         else
                 dm_mq_requeue_request(rq);
 
         rq_completed(md, rw, false);
 }
 
-static void old_stop_queue(struct request_queue *q)
+static void dm_old_stop_queue(struct request_queue *q)
 {
         unsigned long flags;
 
@@ -1252,15 +1252,15 @@ static void old_stop_queue(struct request_queue *q)
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void stop_queue(struct request_queue *q)
+static void dm_stop_queue(struct request_queue *q)
 {
         if (!q->mq_ops)
-                old_stop_queue(q);
+                dm_old_stop_queue(q);
         else
                 blk_mq_stop_hw_queues(q);
 }
 
-static void old_start_queue(struct request_queue *q)
+static void dm_old_start_queue(struct request_queue *q)
 {
         unsigned long flags;
 
@@ -1270,10 +1270,10 @@ static void old_start_queue(struct request_queue *q)
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void start_queue(struct request_queue *q)
+static void dm_start_queue(struct request_queue *q)
 {
         if (!q->mq_ops)
-                old_start_queue(q);
+                dm_old_start_queue(q);
         else {
                 blk_mq_start_stopped_hw_queues(q, true);
                 blk_mq_kick_requeue_list(q);
@@ -1328,7 +1328,7 @@ static void dm_softirq_done(struct request *rq)
         if (!rq->q->mq_ops) {
                 blk_end_request_all(rq, tio->error);
                 rq_completed(tio->md, rw, false);
-                free_rq_tio(tio);
+                free_old_rq_tio(tio);
         } else {
                 blk_mq_end_request(rq, tio->error);
                 rq_completed(tio->md, rw, false);
@@ -1370,7 +1370,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
 }
 
 /*
- * Called with the clone's queue lock held (for non-blk-mq)
+ * Called with the clone's queue lock held (in the case of .request_fn)
  */
 static void end_clone_request(struct request *clone, int error)
 {
@@ -1857,22 +1857,22 @@ static int setup_clone(struct request *clone, struct request *rq,
         return 0;
 }
 
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
-                                struct dm_rq_target_io *tio, gfp_t gfp_mask)
+static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
+                                    struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
         /*
          * Create clone for use with .request_fn request_queue
          */
         struct request *clone;
 
-        clone = alloc_clone_request(md, gfp_mask);
+        clone = alloc_old_clone_request(md, gfp_mask);
         if (!clone)
                 return NULL;
 
         blk_rq_init(NULL, clone);
         if (setup_clone(clone, rq, tio, gfp_mask)) {
                 /* -ENOMEM */
-                free_clone_request(md, clone);
+                free_old_clone_request(md, clone);
                 return NULL;
         }
 
@@ -1894,24 +1894,29 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
         init_kthread_work(&tio->work, map_tio_request);
 }
 
-static struct dm_rq_target_io *prep_tio(struct request *rq,
-                                        struct mapped_device *md, gfp_t gfp_mask)
+static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
+                                               struct mapped_device *md,
+                                               gfp_t gfp_mask)
 {
         struct dm_rq_target_io *tio;
         int srcu_idx;
         struct dm_table *table;
 
-        tio = alloc_rq_tio(md, gfp_mask);
+        tio = alloc_old_rq_tio(md, gfp_mask);
         if (!tio)
                 return NULL;
 
         init_tio(tio, rq, md);
 
         table = dm_get_live_table(md, &srcu_idx);
+        /*
+         * Must clone a request if this .request_fn DM device
+         * is stacked on .request_fn device(s).
+         */
         if (!dm_table_mq_request_based(table)) {
-                if (!clone_rq(rq, md, tio, gfp_mask)) {
+                if (!clone_old_rq(rq, md, tio, gfp_mask)) {
                         dm_put_live_table(md, srcu_idx);
-                        free_rq_tio(tio);
+                        free_old_rq_tio(tio);
                         return NULL;
                 }
         }
@@ -1923,7 +1928,7 @@ static struct dm_rq_target_io *prep_tio(struct request *rq,
 /*
  * Called with the queue lock held.
  */
-static int dm_prep_fn(struct request_queue *q, struct request *rq)
+static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 {
         struct mapped_device *md = q->queuedata;
         struct dm_rq_target_io *tio;
@@ -1933,7 +1938,7 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
                 return BLKPREP_KILL;
         }
 
-        tio = prep_tio(rq, md, GFP_ATOMIC);
+        tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
         if (!tio)
                 return BLKPREP_DEFER;
 
@@ -2236,7 +2241,7 @@ static void dm_init_md_queue(struct mapped_device *md)
         md->queue->backing_dev_info.congested_data = md;
 }
 
-static void dm_init_old_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
 {
         md->use_blk_mq = false;
         dm_init_md_queue(md);
@@ -2503,7 +2508,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
          * because request-based dm may be run just after the setting.
          */
         if (dm_table_request_based(t)) {
-                stop_queue(q);
+                dm_stop_queue(q);
                 /*
                  * Leverage the fact that request-based DM targets are
                  * immutable singletons and establish md->immutable_target
@@ -2600,7 +2605,7 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 
-static void init_rq_based_worker_thread(struct mapped_device *md)
+static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
 {
         /* Initialize the request-based DM worker thread */
         init_kthread_worker(&md->kworker);
@@ -2609,9 +2614,9 @@ static void init_rq_based_worker_thread(struct mapped_device *md)
 }
 
 /*
- * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ * Fully initialize a .request_fn request-based queue.
  */
-static int dm_init_request_based_queue(struct mapped_device *md)
+static int dm_old_init_request_queue(struct mapped_device *md)
 {
         struct request_queue *q = NULL;
 
@@ -2624,11 +2629,11 @@ static int dm_init_request_based_queue(struct mapped_device *md)
         md->seq_rq_merge_deadline_usecs = 0;
 
         md->queue = q;
-        dm_init_old_md_queue(md);
+        dm_init_normal_md_queue(md);
         blk_queue_softirq_done(md->queue, dm_softirq_done);
-        blk_queue_prep_rq(md->queue, dm_prep_fn);
+        blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
-        init_rq_based_worker_thread(md);
+        dm_old_init_rq_based_worker_thread(md);
 
         elv_register_queue(md->queue);
 
@@ -2699,9 +2704,8 @@ static struct blk_mq_ops dm_mq_ops = {
         .init_request = dm_mq_init_request,
 };
 
-static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
+static int dm_mq_init_request_queue(struct mapped_device *md)
 {
-        unsigned md_type = dm_get_md_type(md);
         struct request_queue *q;
         int err;
 
@@ -2766,21 +2770,21 @@ int dm_setup_md_queue(struct mapped_device *md)
 
         switch (md_type) {
         case DM_TYPE_REQUEST_BASED:
-                r = dm_init_request_based_queue(md);
+                r = dm_old_init_request_queue(md);
                 if (r) {
-                        DMWARN("Cannot initialize queue for request-based mapped device");
+                        DMERR("Cannot initialize queue for request-based mapped device");
                         return r;
                 }
                 break;
         case DM_TYPE_MQ_REQUEST_BASED:
-                r = dm_init_request_based_blk_mq_queue(md);
+                r = dm_mq_init_request_queue(md);
                 if (r) {
-                        DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
+                        DMERR("Cannot initialize queue for request-based dm-mq mapped device");
                         return r;
                 }
                 break;
         case DM_TYPE_BIO_BASED:
-                dm_init_old_md_queue(md);
+                dm_init_normal_md_queue(md);
                 blk_queue_make_request(md->queue, dm_make_request);
                 /*
                  * DM handles splitting bios as needed. Free the bio_split bioset
@@ -3123,7 +3127,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
          * dm defers requests to md->wq from md->queue.
          */
         if (dm_request_based(md)) {
-                stop_queue(md->queue);
+                dm_stop_queue(md->queue);
                 if (md->kworker_task)
                         flush_kthread_worker(&md->kworker);
         }
@@ -3147,7 +3151,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
         dm_queue_flush(md);
 
         if (dm_request_based(md))
-                start_queue(md->queue);
+                dm_start_queue(md->queue);
 
         unlock_fs(md);
         dm_table_presuspend_undo_targets(map);
@@ -3226,7 +3230,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
          * Request-based dm is queueing the deferred I/Os in its request_queue.
          */
         if (dm_request_based(md))
-                start_queue(md->queue);
+                dm_start_queue(md->queue);
 
         unlock_fs(md);
 