author     Igor Konopko <igor.j.konopko@intel.com>    2019-05-04 14:38:06 -0400
committer  Jens Axboe <axboe@kernel.dk>               2019-05-06 12:19:19 -0400
commit     3e03f6322ab2ab055478c92bf7af66d6ab13a35f (patch)
tree       3647edeb1c6cd73c0c601d8f38867edd28615f26
parent     f2e024570ef1a3d443875f6063321ee2e503ceb1 (diff)
lightnvm: pblk: IO path reorganization
This patch prepares the read path for a new approach to partial read handling, which is simpler than the previous one.

The most important change is to move the handling of completed and failed bios from pblk_make_rq() into the particular read and write functions. This is needed because, after the partial read path changes, the completed/failed bio can differ from the original one, so it can no longer be handled in pblk_make_rq(). The other changes are small read-path refactors that reduce the size of the following patch with the partial read changes.

The goal of this patch is not to change functionality, but to prepare the code for the changes that follow.

Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Reviewed-by: Javier González <javier@javigon.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
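To make the reorganization concrete, here is a minimal sketch of the control-flow change, simplified from the patch below. This is not the actual pblk source; the elided bodies and the submit_failed condition are illustrative placeholders.

/* Before: pblk_make_rq() interprets a return code and completes the bio
 * itself, so the bio it completes must be the same one it dispatched.
 */
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	int ret = pblk_submit_read(pblk, bio); /* or pblk_write_to_cache() */

	switch (ret) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}
	return BLK_QC_T_NONE;
}

/* After: the read/write path returns void and ends whichever bio it
 * actually handled, which may no longer be the bio the dispatcher saw
 * once the partial read path starts cloning and splitting bios.
 */
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	/* ... build and submit the request ... */
	if (submit_failed)		/* illustrative condition */
		bio_io_error(bio);
	/* on success, completion happens in the end_io callback */
}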
-rw-r--r--  drivers/lightnvm/pblk-cache.c |  8
-rw-r--r--  drivers/lightnvm/pblk-init.c  | 14
-rw-r--r--  drivers/lightnvm/pblk-read.c  | 83
-rw-r--r--  drivers/lightnvm/pblk.h       |  4
4 files changed, 48 insertions(+), 61 deletions(-)
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index c9fa26f95659..5c1034c22197 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -18,7 +18,8 @@
 
 #include "pblk.h"
 
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+			 unsigned long flags)
 {
 	struct request_queue *q = pblk->dev->q;
 	struct pblk_w_ctx w_ctx;
@@ -43,6 +44,7 @@ retry:
 		goto retry;
 	case NVM_IO_ERR:
 		pblk_pipeline_stop(pblk);
+		bio_io_error(bio);
 		goto out;
 	}
 
@@ -79,7 +81,9 @@ retry:
 out:
 	generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
 	pblk_write_should_kick(pblk);
-	return ret;
+
+	if (ret == NVM_IO_DONE)
+		bio_endio(bio);
 }
 
 /*
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 1e227a08e54a..b351c7f002de 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -50,7 +50,6 @@ struct bio_set pblk_bio_set;
 static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
 {
 	struct pblk *pblk = q->queuedata;
-	int ret;
 
 	if (bio_op(bio) == REQ_OP_DISCARD) {
 		pblk_discard(pblk, bio);
@@ -65,7 +64,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
 	 */
 	if (bio_data_dir(bio) == READ) {
 		blk_queue_split(q, &bio);
-		ret = pblk_submit_read(pblk, bio);
+		pblk_submit_read(pblk, bio);
 	} else {
 		/* Prevent deadlock in the case of a modest LUN configuration
 		 * and large user I/Os. Unless stalled, the rate limiter
@@ -74,16 +73,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
 		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
 			blk_queue_split(q, &bio);
 
-		ret = pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
-	}
-
-	switch (ret) {
-	case NVM_IO_ERR:
-		bio_io_error(bio);
-		break;
-	case NVM_IO_DONE:
-		bio_endio(bio);
-		break;
+		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
 	}
 
 	return BLK_QC_T_NONE;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 27f8a76d8bd8..f5f155d540e2 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -179,7 +179,8 @@ static void pblk_end_user_read(struct bio *bio, int error)
 {
 	if (error && error != NVM_RSP_WARN_HIGHECC)
 		bio_io_error(bio);
-	bio_endio(bio);
+	else
+		bio_endio(bio);
 }
 
 static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
@@ -389,7 +390,6 @@ err:
 
 	/* Free allocated pages in new bio */
 	pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
-	__pblk_end_io_read(pblk, rqd, false);
 	return NVM_IO_ERR;
 }
 
@@ -434,7 +434,7 @@ retry:
 	}
 }
 
-int pblk_submit_read(struct pblk *pblk, struct bio *bio)
+void pblk_submit_read(struct pblk *pblk, struct bio *bio)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct request_queue *q = dev->q;
@@ -442,9 +442,9 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	unsigned int nr_secs = pblk_get_secs(bio);
 	struct pblk_g_ctx *r_ctx;
 	struct nvm_rq *rqd;
+	struct bio *int_bio;
 	unsigned int bio_init_idx;
 	DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
-	int ret = NVM_IO_ERR;
 
 	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
 			      &pblk->disk->part0);
@@ -455,74 +455,67 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 
 	rqd->opcode = NVM_OP_PREAD;
 	rqd->nr_ppas = nr_secs;
-	rqd->bio = NULL; /* cloned bio if needed */
 	rqd->private = pblk;
 	rqd->end_io = pblk_end_io_read;
 
 	r_ctx = nvm_rq_to_pdu(rqd);
 	r_ctx->start_time = jiffies;
 	r_ctx->lba = blba;
-	r_ctx->private = bio; /* original bio */
 
 	/* Save the index for this bio's start. This is needed in case
 	 * we need to fill a partial read.
 	 */
 	bio_init_idx = pblk_get_bi_idx(bio);
 
-	if (pblk_alloc_rqd_meta(pblk, rqd))
-		goto fail_rqd_free;
+	if (pblk_alloc_rqd_meta(pblk, rqd)) {
+		bio_io_error(bio);
+		pblk_free_rqd(pblk, rqd, PBLK_READ);
+		return;
+	}
+
+	/* Clone read bio to deal internally with:
+	 * -read errors when reading from drive
+	 * -bio_advance() calls during l2p lookup and cache reads
+	 */
+	int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
 
 	if (nr_secs > 1)
 		pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
 	else
 		pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
 
+	r_ctx->private = bio; /* original bio */
+	rqd->bio = int_bio; /* internal bio */
+
 	if (bitmap_full(read_bitmap, nr_secs)) {
+		pblk_end_user_read(bio, 0);
 		atomic_inc(&pblk->inflight_io);
 		__pblk_end_io_read(pblk, rqd, false);
-		return NVM_IO_DONE;
+		return;
 	}
 
-	/* All sectors are to be read from the device */
-	if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
-		struct bio *int_bio = NULL;
-
-		/* Clone read bio to deal with read errors internally */
-		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
-		if (!int_bio) {
-			pblk_err(pblk, "could not clone read bio\n");
-			goto fail_end_io;
-		}
-
-		rqd->bio = int_bio;
-
-		if (pblk_submit_io(pblk, rqd)) {
-			pblk_err(pblk, "read IO submission failed\n");
-			ret = NVM_IO_ERR;
-			goto fail_end_io;
-		}
-
-		return NVM_IO_OK;
+	if (!bitmap_empty(read_bitmap, rqd->nr_ppas)) {
+		/* The read bio request could be partially filled by the write
+		 * buffer, but there are some holes that need to be read from
+		 * the drive.
+		 */
+		bio_put(int_bio);
+		rqd->bio = NULL;
+		if (pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
+					  nr_secs)) {
+			pblk_err(pblk, "read IO submission failed\n");
+			bio_io_error(bio);
+			__pblk_end_io_read(pblk, rqd, false);
+		}
+		return;
 	}
 
-	/* The read bio request could be partially filled by the write buffer,
-	 * but there are some holes that need to be read from the drive.
-	 */
-	ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
-				    nr_secs);
-	if (ret)
-		goto fail_meta_free;
-
-	return NVM_IO_OK;
-
-fail_meta_free:
-	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-fail_rqd_free:
-	pblk_free_rqd(pblk, rqd, PBLK_READ);
-	return ret;
-fail_end_io:
-	__pblk_end_io_read(pblk, rqd, false);
-	return ret;
+	/* All sectors are to be read from the device */
+	if (pblk_submit_io(pblk, rqd)) {
+		pblk_err(pblk, "read IO submission failed\n");
+		bio_io_error(bio);
+		__pblk_end_io_read(pblk, rqd, false);
+	}
 }
 
 static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index e304754aaa3c..17ced12db7dd 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -867,7 +867,7 @@ void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
 /*
  * pblk user I/O write path
  */
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
 			unsigned long flags);
 int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 
@@ -893,7 +893,7 @@ void pblk_write_kick(struct pblk *pblk);
  * pblk read path
  */
 extern struct bio_set pblk_bio_set;
-int pblk_submit_read(struct pblk *pblk, struct bio *bio);
+void pblk_submit_read(struct pblk *pblk, struct bio *bio);
 int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 /*
  * pblk recovery