aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorIgor Konopko <igor.j.konopko@intel.com>2019-05-04 14:38:03 -0400
committerJens Axboe <axboe@kernel.dk>2019-05-06 12:19:18 -0400
commit1fc3b30569bc1087dc8c8b8eff27ca7727b807c4 (patch)
tree5fd5d77ce820e85d6e94920502fdcf87b651676b /drivers
parentd165a7a6f5aa05dfdfc164e24c11b6458a523ff7 (diff)
lightnvm: pblk: wait for inflight IOs in recovery
This patch changes the behaviour of recovery padding in order to support a case when some IOs were already submitted to the drive and some subsequent ones were not submitted due to a returned error. Currently, in case of errors we simply exit the pad function without waiting for inflight IOs, which leads to a panic on inflight IO completion. After the changes we always wait for all the inflight IOs before exiting the function. Signed-off-by: Igor Konopko <igor.j.konopko@intel.com> Signed-off-by: Matias Bjørling <mb@lightnvm.io> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/lightnvm/pblk-recovery.c25
1 file changed, 12 insertions, 13 deletions
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 124d8179b2ad..137e963cd51d 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -208,7 +208,7 @@ next_pad_rq:
208 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false); 208 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
209 if (rq_ppas < pblk->min_write_pgs) { 209 if (rq_ppas < pblk->min_write_pgs) {
210 pblk_err(pblk, "corrupted pad line %d\n", line->id); 210 pblk_err(pblk, "corrupted pad line %d\n", line->id);
211 goto fail_free_pad; 211 goto fail_complete;
212 } 212 }
213 213
214 rq_len = rq_ppas * geo->csecs; 214 rq_len = rq_ppas * geo->csecs;
@@ -217,7 +217,7 @@ next_pad_rq:
217 PBLK_VMALLOC_META, GFP_KERNEL); 217 PBLK_VMALLOC_META, GFP_KERNEL);
218 if (IS_ERR(bio)) { 218 if (IS_ERR(bio)) {
219 ret = PTR_ERR(bio); 219 ret = PTR_ERR(bio);
220 goto fail_free_pad; 220 goto fail_complete;
221 } 221 }
222 222
223 bio->bi_iter.bi_sector = 0; /* internal bio */ 223 bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -226,8 +226,11 @@ next_pad_rq:
226 rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT); 226 rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
227 227
228 ret = pblk_alloc_rqd_meta(pblk, rqd); 228 ret = pblk_alloc_rqd_meta(pblk, rqd);
229 if (ret) 229 if (ret) {
230 goto fail_free_rqd; 230 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
231 bio_put(bio);
232 goto fail_complete;
233 }
231 234
232 rqd->bio = bio; 235 rqd->bio = bio;
233 rqd->opcode = NVM_OP_PWRITE; 236 rqd->opcode = NVM_OP_PWRITE;
@@ -274,7 +277,10 @@ next_pad_rq:
274 if (ret) { 277 if (ret) {
275 pblk_err(pblk, "I/O submission failed: %d\n", ret); 278 pblk_err(pblk, "I/O submission failed: %d\n", ret);
276 pblk_up_chunk(pblk, rqd->ppa_list[0]); 279 pblk_up_chunk(pblk, rqd->ppa_list[0]);
277 goto fail_free_rqd; 280 kref_put(&pad_rq->ref, pblk_recov_complete);
281 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
282 bio_put(bio);
283 goto fail_complete;
278 } 284 }
279 285
280 left_line_ppas -= rq_ppas; 286 left_line_ppas -= rq_ppas;
@@ -282,6 +288,7 @@ next_pad_rq:
282 if (left_ppas && left_line_ppas) 288 if (left_ppas && left_line_ppas)
283 goto next_pad_rq; 289 goto next_pad_rq;
284 290
291fail_complete:
285 kref_put(&pad_rq->ref, pblk_recov_complete); 292 kref_put(&pad_rq->ref, pblk_recov_complete);
286 293
287 if (!wait_for_completion_io_timeout(&pad_rq->wait, 294 if (!wait_for_completion_io_timeout(&pad_rq->wait,
@@ -297,14 +304,6 @@ next_pad_rq:
297free_rq: 304free_rq:
298 kfree(pad_rq); 305 kfree(pad_rq);
299 return ret; 306 return ret;
300
301fail_free_rqd:
302 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
303 bio_put(bio);
304fail_free_pad:
305 kfree(pad_rq);
306 vfree(data);
307 return ret;
308} 307}
309 308
310static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line) 309static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)