-rw-r--r--  drivers/lightnvm/pblk-core.c      4
-rw-r--r--  drivers/lightnvm/pblk-read.c      8
-rw-r--r--  drivers/lightnvm/pblk-recovery.c  35
-rw-r--r--  drivers/lightnvm/pblk-write.c     24
4 files changed, 12 insertions, 59 deletions
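
Note on the pattern behind these removals (a hedged illustration, not part of the patch): the deleted branches guard allocations served from pre-sized mempools (pblk_alloc_rqd() and the page/ws pools) or from the bio mempool behind bio_alloc(). For a caller that is allowed to sleep, mempool_alloc() does not fail: it waits for an element to be returned to the pool, so a NULL check after it is dead code. The identifiers demo_pool, demo_ctx and demo_alloc() in the sketch below are invented for illustration only.

/* Illustrative sketch only -- not code from this patch. */
#include <linux/mempool.h>
#include <linux/gfp.h>

struct demo_ctx {
	int id;
};

/* Hypothetical pool, e.g. created at init time with
 * mempool_create_kmalloc_pool(16, sizeof(struct demo_ctx)).
 */
static mempool_t *demo_pool;

static struct demo_ctx *demo_alloc(void)
{
	/*
	 * With a blocking gfp mask such as GFP_KERNEL, mempool_alloc() never
	 * returns NULL: if the underlying allocation fails and the reserved
	 * pool is empty, it sleeps until an element is freed back to the
	 * pool. An error branch here would therefore be unreachable.
	 */
	return mempool_alloc(demo_pool, GFP_KERNEL);
}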
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 215aadb84c6e..0da58869006b 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -206,8 +206,6 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 
 	for (i = 0; i < nr_pages; i++) {
 		page = mempool_alloc(pblk->page_bio_pool, flags);
-		if (!page)
-			goto err;
 
 		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
 		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
@@ -1653,8 +1651,6 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
 	struct pblk_line_ws *line_ws;
 
 	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
-	if (!line_ws)
-		return;
 
 	line_ws->pblk = pblk;
 	line_ws->line = line;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 402c732f0970..d2b6e2a7d7d5 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -168,10 +168,6 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-	if (!new_bio) {
-		pr_err("pblk: could not alloc read bio\n");
-		return NVM_IO_ERR;
-	}
 
 	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
 		goto err;
@@ -321,10 +317,6 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	bitmap_zero(&read_bitmap, nr_secs);
 
 	rqd = pblk_alloc_rqd(pblk, READ);
-	if (IS_ERR(rqd)) {
-		pr_err_ratelimited("pblk: not able to alloc rqd");
-		return NVM_IO_ERR;
-	}
 
 	rqd->opcode = NVM_OP_PREAD;
 	rqd->bio = bio;
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index de5270712be7..6b6b4183b41e 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -34,10 +34,6 @@ void pblk_submit_rec(struct work_struct *work)
 					max_secs);
 
 	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
-	if (!bio) {
-		pr_err("pblk: not able to create recovery bio\n");
-		return;
-	}
 
 	bio->bi_iter.bi_sector = 0;
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -85,11 +81,6 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
 	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;
 
 	rec_rqd = pblk_alloc_rqd(pblk, WRITE);
-	if (IS_ERR(rec_rqd)) {
-		pr_err("pblk: could not create recovery req.\n");
-		return -ENOMEM;
-	}
-
 	rec_ctx = nvm_rq_to_pdu(rec_rqd);
 
 	/* Copy completion bitmap, but exclude the first X completed entries */
@@ -404,22 +395,18 @@ next_pad_rq:
 	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
 	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
 
-	rqd = pblk_alloc_rqd(pblk, WRITE);
-	if (IS_ERR(rqd)) {
-		ret = PTR_ERR(rqd);
-		goto fail_free_meta;
-	}
-
 	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
 						PBLK_VMALLOC_META, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		ret = PTR_ERR(bio);
-		goto fail_free_rqd;
+		goto fail_free_meta;
 	}
 
 	bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
+	rqd = pblk_alloc_rqd(pblk, WRITE);
+
 	rqd->bio = bio;
 	rqd->opcode = NVM_OP_PWRITE;
 	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
@@ -490,8 +477,6 @@ free_rq:
 
 fail_free_bio:
 	bio_put(bio);
-fail_free_rqd:
-	pblk_free_rqd(pblk, rqd, WRITE);
 fail_free_meta:
 	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
 fail_free_pad:
@@ -785,15 +770,9 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
 	dma_addr_t dma_ppa_list, dma_meta_list;
 	int done, ret = 0;
 
-	rqd = pblk_alloc_rqd(pblk, READ);
-	if (IS_ERR(rqd))
-		return PTR_ERR(rqd);
-
 	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
-	if (!meta_list) {
-		ret = -ENOMEM;
-		goto free_rqd;
-	}
+	if (!meta_list)
+		return -ENOMEM;
 
 	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
 	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
@@ -804,6 +783,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
 		goto free_meta_list;
 	}
 
+	rqd = pblk_alloc_rqd(pblk, READ);
+
 	p.ppa_list = ppa_list;
 	p.meta_list = meta_list;
 	p.rqd = rqd;
@@ -832,8 +813,6 @@ out:
 	kfree(data);
 free_meta_list:
 	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
-free_rqd:
-	pblk_free_rqd(pblk, rqd, READ);
 
 	return ret;
 }
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 26c2b8345149..0fb8f26a6311 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -111,10 +111,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
 		ppa_list = &rqd->ppa_addr;
 
 	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
-	if (!recovery) {
-		pr_err("pblk: could not allocate recovery context\n");
-		return;
-	}
+
 	INIT_LIST_HEAD(&recovery->failed);
 
 	bit = -1;
@@ -375,10 +372,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	int ret;
 
 	rqd = pblk_alloc_rqd(pblk, READ);
-	if (IS_ERR(rqd)) {
-		pr_err("pblk: cannot allocate write req.\n");
-		return PTR_ERR(rqd);
-	}
+
 	m_ctx = nvm_rq_to_pdu(rqd);
 	m_ctx->private = meta_line;
 
@@ -546,19 +540,12 @@ static int pblk_submit_write(struct pblk *pblk)
 	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
 		return 1;
 
-	rqd = pblk_alloc_rqd(pblk, WRITE);
-	if (IS_ERR(rqd)) {
-		pr_err("pblk: cannot allocate write req.\n");
-		return 1;
-	}
-
 	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
-	if (!bio) {
-		pr_err("pblk: cannot allocate write bio\n");
-		goto fail_free_rqd;
-	}
+
 	bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+	rqd = pblk_alloc_rqd(pblk, WRITE);
 	rqd->bio = bio;
 
 	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
@@ -589,7 +576,6 @@ fail_free_bio:
 	pblk_free_write_rqd(pblk, rqd);
 fail_put_bio:
 	bio_put(bio);
-fail_free_rqd:
 	pblk_free_rqd(pblk, rqd, WRITE);
 
 	return 1;