Diffstat (limited to 'drivers/lightnvm/pblk-write.c')
-rw-r--r--	drivers/lightnvm/pblk-write.c	229
1 files changed, 95 insertions, 134 deletions
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 3ad9e56d2473..6c1cafafef53 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -20,7 +20,6 @@
 static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
 				struct pblk_c_ctx *c_ctx)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
 	struct bio *original_bio;
 	unsigned long ret;
 	int i;
@@ -33,16 +32,18 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
 		bio_endio(original_bio);
 	}
 
+	if (c_ctx->nr_padded)
+		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
+							c_ctx->nr_padded);
+
 #ifdef CONFIG_NVM_DEBUG
-	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
+	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
 #endif
 
 	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
 
-	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-
 	bio_put(rqd->bio);
-	pblk_free_rqd(pblk, rqd, WRITE);
+	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
 
 	return ret;
 }
@@ -107,10 +108,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
 		ppa_list = &rqd->ppa_addr;
 
 	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
-	if (!recovery) {
-		pr_err("pblk: could not allocate recovery context\n");
-		return;
-	}
+
 	INIT_LIST_HEAD(&recovery->failed);
 
 	bit = -1;
@@ -175,7 +173,6 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
 static void pblk_end_io_write_meta(struct nvm_rq *rqd)
 {
 	struct pblk *pblk = rqd->private;
-	struct nvm_tgt_dev *dev = pblk->dev;
 	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
 	struct pblk_line *line = m_ctx->private;
 	struct pblk_emeta *emeta = line->emeta;
@@ -187,19 +184,13 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
 		pblk_log_write_err(pblk, rqd);
 		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
 	}
-#ifdef CONFIG_NVM_DEBUG
-	else
-		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
-#endif
 
 	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
 	if (sync == emeta->nr_entries)
-		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
-						pblk->close_wq);
+		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
+						GFP_ATOMIC, pblk->close_wq);
 
-	bio_put(rqd->bio);
-	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-	pblk_free_rqd(pblk, rqd, READ);
+	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
 
 	atomic_dec(&pblk->inflight_io);
 }
@@ -213,7 +204,7 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	/* Setup write request */
 	rqd->opcode = NVM_OP_PWRITE;
 	rqd->nr_ppas = nr_secs;
-	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
 	rqd->private = pblk;
 	rqd->end_io = end_io;
 
@@ -229,15 +220,16 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 }
 
 static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-			struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
+			struct ppa_addr *erase_ppa)
 {
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line *e_line = pblk_line_get_erase(pblk);
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 	unsigned int valid = c_ctx->nr_valid;
 	unsigned int padded = c_ctx->nr_padded;
 	unsigned int nr_secs = valid + padded;
 	unsigned long *lun_bitmap;
-	int ret = 0;
+	int ret;
 
 	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
 	if (!lun_bitmap)
@@ -279,7 +271,7 @@ int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
 
 	rqd->ppa_status = (u64)0;
-	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
 
 	return ret;
 }
@@ -303,55 +295,6 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
 	return secs_to_sync;
 }
 
-static inline int pblk_valid_meta_ppa(struct pblk *pblk,
-				      struct pblk_line *meta_line,
-				      struct ppa_addr *ppa_list, int nr_ppas)
-{
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
-	struct pblk_line *data_line;
-	struct ppa_addr ppa, ppa_opt;
-	u64 paddr;
-	int i;
-
-	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
-	paddr = pblk_lookup_page(pblk, meta_line);
-	ppa = addr_to_gen_ppa(pblk, paddr, 0);
-
-	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
-		return 1;
-
-	/* Schedule a metadata I/O that is half the distance from the data I/O
-	 * with regards to the number of LUNs forming the pblk instance. This
-	 * balances LUN conflicts across every I/O.
-	 *
-	 * When the LUN configuration changes (e.g., due to GC), this distance
-	 * can align, which would result on a LUN deadlock. In this case, modify
-	 * the distance to not be optimal, but allow metadata I/Os to succeed.
-	 */
-	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
-	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
-		data_line->meta_distance--;
-		return 0;
-	}
-
-	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
-		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
-					ppa_list[i].g.lun == ppa_opt.g.lun)
-			return 1;
-
-	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
-		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
-			if (ppa_list[i].g.ch == ppa.g.ch &&
-						ppa_list[i].g.lun == ppa.g.lun)
-				return 0;
-
-		return 1;
-	}
-
-	return 0;
-}
-
 int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
@@ -370,11 +313,8 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	int i, j;
 	int ret;
 
-	rqd = pblk_alloc_rqd(pblk, READ);
-	if (IS_ERR(rqd)) {
-		pr_err("pblk: cannot allocate write req.\n");
-		return PTR_ERR(rqd);
-	}
+	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
+
 	m_ctx = nvm_rq_to_pdu(rqd);
 	m_ctx->private = meta_line;
 
@@ -407,8 +347,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	if (emeta->mem >= lm->emeta_len[0]) {
 		spin_lock(&l_mg->close_lock);
 		list_del(&meta_line->list);
-		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
-				"pblk: corrupt meta line %d\n", meta_line->id);
 		spin_unlock(&l_mg->close_lock);
 	}
 
@@ -428,18 +366,51 @@ fail_rollback:
 	pblk_dealloc_page(pblk, meta_line, rq_ppas);
 	list_add(&meta_line->list, &meta_line->list);
 	spin_unlock(&l_mg->close_lock);
-
-	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 fail_free_bio:
-	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
-		bio_put(bio);
+	bio_put(bio);
 fail_free_rqd:
-	pblk_free_rqd(pblk, rqd, READ);
+	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
 	return ret;
 }
 
-static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
-				int prev_n)
+static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
+				       struct pblk_line *meta_line,
+				       struct nvm_rq *data_rqd)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
+	struct pblk_line *data_line = pblk_line_get_data(pblk);
+	struct ppa_addr ppa, ppa_opt;
+	u64 paddr;
+	int pos_opt;
+
+	/* Schedule a metadata I/O that is half the distance from the data I/O
+	 * with regards to the number of LUNs forming the pblk instance. This
+	 * balances LUN conflicts across every I/O.
+	 *
+	 * When the LUN configuration changes (e.g., due to GC), this distance
+	 * can align, which would result on metadata and data I/Os colliding. In
+	 * this case, modify the distance to not be optimal, but move the
+	 * optimal in the right direction.
+	 */
+	paddr = pblk_lookup_page(pblk, meta_line);
+	ppa = addr_to_gen_ppa(pblk, paddr, 0);
+	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
+	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);
+
+	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
+				test_bit(pos_opt, data_line->blk_bitmap))
+		return true;
+
+	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
+		data_line->meta_distance--;
+
+	return false;
+}
+
+static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
+						    struct nvm_rq *data_rqd)
 {
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -449,57 +420,45 @@ static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
 retry:
 	if (list_empty(&l_mg->emeta_list)) {
 		spin_unlock(&l_mg->close_lock);
-		return 0;
+		return NULL;
 	}
 	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
-	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
+	if (meta_line->emeta->mem >= lm->emeta_len[0])
 		goto retry;
 	spin_unlock(&l_mg->close_lock);
 
-	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
-		return 0;
+	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
+		return NULL;
 
-	return pblk_submit_meta_io(pblk, meta_line);
+	return meta_line;
 }
 
 static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
 {
-	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 	struct ppa_addr erase_ppa;
+	struct pblk_line *meta_line;
 	int err;
 
 	ppa_set_empty(&erase_ppa);
 
 	/* Assign lbas to ppas and populate request structure */
-	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
+	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
 	if (err) {
 		pr_err("pblk: could not setup write request: %d\n", err);
 		return NVM_IO_ERR;
 	}
 
-	if (likely(ppa_empty(erase_ppa))) {
-		/* Submit metadata write for previous data line */
-		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
-		if (err) {
-			pr_err("pblk: metadata I/O submission failed: %d", err);
-			return NVM_IO_ERR;
-		}
+	meta_line = pblk_should_submit_meta_io(pblk, rqd);
 
-		/* Submit data write for current data line */
-		err = pblk_submit_io(pblk, rqd);
-		if (err) {
-			pr_err("pblk: data I/O submission failed: %d\n", err);
-			return NVM_IO_ERR;
-		}
-	} else {
-		/* Submit data write for current data line */
-		err = pblk_submit_io(pblk, rqd);
-		if (err) {
-			pr_err("pblk: data I/O submission failed: %d\n", err);
-			return NVM_IO_ERR;
-		}
+	/* Submit data write for current data line */
+	err = pblk_submit_io(pblk, rqd);
+	if (err) {
+		pr_err("pblk: data I/O submission failed: %d\n", err);
+		return NVM_IO_ERR;
+	}
 
-		/* Submit available erase for next data line */
+	if (!ppa_empty(erase_ppa)) {
+		/* Submit erase for next data line */
 		if (pblk_blk_erase_async(pblk, erase_ppa)) {
 			struct pblk_line *e_line = pblk_line_get_erase(pblk);
 			struct nvm_tgt_dev *dev = pblk->dev;
@@ -512,6 +471,15 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
 		}
 	}
 
+	if (meta_line) {
+		/* Submit metadata write for previous data line */
+		err = pblk_submit_meta_io(pblk, meta_line);
+		if (err) {
+			pr_err("pblk: metadata I/O submission failed: %d", err);
+			return NVM_IO_ERR;
+		}
+	}
+
 	return NVM_IO_OK;
 }
 
@@ -521,7 +489,8 @@ static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
 	struct bio *bio = rqd->bio;
 
 	if (c_ctx->nr_padded)
-		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
+		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
+							c_ctx->nr_padded);
 }
 
 static int pblk_submit_write(struct pblk *pblk)
@@ -543,31 +512,24 @@ static int pblk_submit_write(struct pblk *pblk)
 	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
 		return 1;
 
-	rqd = pblk_alloc_rqd(pblk, WRITE);
-	if (IS_ERR(rqd)) {
-		pr_err("pblk: cannot allocate write req.\n");
-		return 1;
-	}
-
-	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
-	if (!bio) {
-		pr_err("pblk: cannot allocate write bio\n");
-		goto fail_free_rqd;
-	}
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	rqd->bio = bio;
-
 	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
 	if (secs_to_sync > pblk->max_write_pgs) {
 		pr_err("pblk: bad buffer sync calculation\n");
-		goto fail_put_bio;
+		return 1;
 	}
 
 	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
 	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
 
-	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
+	bio = bio_alloc(GFP_KERNEL, secs_to_sync);
+
+	bio->bi_iter.bi_sector = 0; /* internal bio */
+	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
+	rqd->bio = bio;
+
+	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
 						secs_avail)) {
 		pr_err("pblk: corrupted write bio\n");
 		goto fail_put_bio;
@@ -586,8 +548,7 @@ fail_free_bio:
 	pblk_free_write_rqd(pblk, rqd);
 fail_put_bio:
 	bio_put(bio);
-fail_free_rqd:
-	pblk_free_rqd(pblk, rqd, WRITE);
+	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
 
 	return 1;
 }
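
A minimal standalone sketch of the metadata-distance check the patch introduces as pblk_valid_meta_ppa(): it restates the heuristic from the patch's comment with simplified stand-in types so it can be read in isolation. NR_LUNS, struct toy_line and paddr_to_lun() are illustration-only assumptions, not lightnvm structures, and the round-robin LUN mapping is a simplification of addr_to_gen_ppa()/pblk_ppa_to_pos().

/*
 * Standalone toy, not pblk code.  Mirrors the return value of
 * pblk_valid_meta_ppa() in the patch: true when the position that lies
 * meta_distance LUNs away from the metadata line's next page is already
 * covered by the in-flight data write (lun_bitmap) or sits on a bad block
 * (blk_bitmap); in that case pblk_should_submit_meta_io() hands the
 * metadata line to the submission path.  When the offset position
 * collapses onto the un-offset one, shrink the distance so the next
 * attempt moves it again (the data_line->meta_distance-- in the patch).
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_LUNS 8

struct toy_line {
	int meta_distance;	/* preferred LUN offset between data and metadata I/O */
	bool lun_busy[NR_LUNS];	/* LUNs touched by the in-flight data write */
	bool lun_bad[NR_LUNS];	/* LUNs whose block is bad on this line */
};

/* Toy stand-in for addr_to_gen_ppa() + pblk_ppa_to_pos(): round-robin striping. */
static int paddr_to_lun(unsigned long paddr)
{
	return paddr % NR_LUNS;
}

static bool toy_valid_meta_ppa(struct toy_line *line, unsigned long meta_paddr)
{
	int pos = paddr_to_lun(meta_paddr);
	int pos_opt = paddr_to_lun(meta_paddr + line->meta_distance);

	if (line->lun_busy[pos_opt] || line->lun_bad[pos_opt])
		return true;

	if (pos_opt == pos)
		line->meta_distance--;

	return false;
}

int main(void)
{
	struct toy_line line = { .meta_distance = NR_LUNS / 2 };

	line.lun_busy[4] = true;	/* pretend the data write holds LUN 4 */

	printf("paddr 0 -> %d\n", toy_valid_meta_ppa(&line, 0));	/* 1: offset lands on busy LUN 4 */
	printf("paddr 1 -> %d\n", toy_valid_meta_ppa(&line, 1));	/* 0: offset lands on free LUN 5 */
	return 0;
}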