author    Javier González <jg@lightnvm.io>  2017-06-26 05:57:15 -0400
committer Jens Axboe <axboe@kernel.dk>  2017-06-26 18:24:53 -0400
commit    d624f371d5c17a6e230ffed3f0371a4eb588bf45 (patch)
tree      8518b6bcd4c0571041142c8637fa8dc38662a005
parent    c2e9f5d457ad6a75516e749a3e544165766ab1ce (diff)
lightnvm: pblk: generalize erase path
Erase I/Os are scheduled with the following goals in mind: (i) minimize
LUN collisions with write I/Os, and (ii) even out the cost of erasing
across writes instead of concentrating it all when garbage collection
runs. This works well with the current design, but it is specific to the
default mapping algorithm.

This patch generalizes the erase path so that other mapping algorithms
can select an arbitrary line to be erased instead. It also gets rid of
the erase semaphore, since it creates jitter for user writes.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  drivers/lightnvm/pblk-core.c  |   4
-rw-r--r--  drivers/lightnvm/pblk-init.c  |   9
-rw-r--r--  drivers/lightnvm/pblk-map.c   |  40
-rw-r--r--  drivers/lightnvm/pblk-rb.c    |  33
-rw-r--r--  drivers/lightnvm/pblk-write.c | 109
-rw-r--r--  drivers/lightnvm/pblk.h       |  11
6 files changed, 116 insertions(+), 90 deletions(-)
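Editorial note on the hunks that follow: they all stem from one design change. Instead of serializing block erases behind a single pblk-wide semaphore, the mapper now asks for the erase target line with pblk_line_get_erase() and takes that line's own spinlock while it marks a block in erase_bitmap and decrements left_eblks. The fragment below is a stand-alone, user-space model of that pattern, not driver code: every name in it (erase_line, line_reserve_erase, BLKS_PER_LINE) is invented for illustration, and a pthread mutex stands in for the kernel spinlock. It builds with cc -pthread if you want to run it.

/* User-space model of the per-line erase bookkeeping this patch
 * introduces. All identifiers here are invented for illustration;
 * they are not pblk symbols.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLKS_PER_LINE 64

struct erase_line {
	int id;
	pthread_mutex_t lock;		/* stands in for e_line->lock */
	uint64_t erase_bitmap;		/* one bit per block in the line */
	atomic_int left_eblks;		/* blocks still to be erased */
};

/* Mirrors the fast path of pblk_map_erase_rq(): reserve one block of
 * the erase line for this LUN, unless it is already scheduled.
 */
static bool line_reserve_erase(struct erase_line *line, int erase_lun)
{
	bool reserved = false;

	pthread_mutex_lock(&line->lock);
	if (!(line->erase_bitmap & (UINT64_C(1) << erase_lun))) {
		line->erase_bitmap |= UINT64_C(1) << erase_lun;
		atomic_fetch_sub(&line->left_eblks, 1);
		reserved = true;
	}
	pthread_mutex_unlock(&line->lock);

	return reserved;
}

int main(void)
{
	static struct erase_line line = {
		.id = 7,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.left_eblks = BLKS_PER_LINE,
	};

	printf("lun 3 first try:  %d\n", line_reserve_erase(&line, 3)); /* 1 */
	printf("lun 3 second try: %d\n", line_reserve_erase(&line, 3)); /* 0 */
	printf("left_eblks: %d\n", atomic_load(&line.left_eblks));      /* 63 */
	return 0;
}

The point of the change, as the commit message says, is that contention is now scoped to a single line rather than to the whole target, so writers mapping to different lines (or a future mapping algorithm picking a different erase line) no longer serialize on one global semaphore.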
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 567ed5aa5a0f..a1125547e638 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -61,7 +61,6 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
 {
 	struct pblk *pblk = rqd->private;
 
-	up(&pblk->erase_sem);
 	__pblk_end_io_erase(pblk, rqd);
 	mempool_free(rqd, pblk->r_rq_pool);
 }
@@ -1373,7 +1372,8 @@ struct pblk_line *pblk_line_get_data(struct pblk *pblk)
 	return pblk->l_mg.data_line;
 }
 
-struct pblk_line *pblk_line_get_data_next(struct pblk *pblk)
+/* For now, always erase next line */
+struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
 {
 	return pblk->l_mg.data_next;
 }
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 0389068c60cb..2bf59855f43f 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -545,7 +545,7 @@ static int pblk_lines_init(struct pblk *pblk)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line *line;
 	unsigned int smeta_len, emeta_len;
-	long nr_bad_blks, nr_meta_blks, nr_free_blks;
+	long nr_bad_blks, nr_free_blks;
 	int bb_distance;
 	int i;
 	int ret;
@@ -591,9 +591,8 @@ add_emeta_page:
 	}
 	lm->emeta_bb = geo->nr_luns - i;
 
-	nr_meta_blks = (lm->smeta_sec + lm->emeta_sec +
-				(geo->sec_per_blk / 2)) / geo->sec_per_blk;
-	lm->min_blk_line = nr_meta_blks + 1;
+	lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec,
+							geo->sec_per_blk);
 
 	l_mg->nr_lines = geo->blks_per_lun;
 	l_mg->log_line = l_mg->data_line = NULL;
@@ -716,8 +715,6 @@ add_emeta_page:
 
 	pblk_set_provision(pblk, nr_free_blks);
 
-	sema_init(&pblk->erase_sem, 1);
-
 	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
 	for (i = 0; i < geo->nr_luns; i++)
 		kfree(pblk->luns[i].bb_list);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 18291c238930..84309bd400d5 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -92,8 +92,9 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct pblk_line *e_line = pblk_line_get_data_next(pblk);
+	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	struct pblk_line *e_line, *d_line;
 	unsigned int map_secs;
 	int min = pblk->min_write_pgs;
 	int i, erase_lun;
@@ -106,32 +107,49 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 		erase_lun = rqd->ppa_list[i].g.lun * geo->nr_chnls +
 							rqd->ppa_list[i].g.ch;
 
+		/* line can change after page map */
+		e_line = pblk_line_get_erase(pblk);
+		spin_lock(&e_line->lock);
 		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
-			if (down_trylock(&pblk->erase_sem))
-				continue;
-
 			set_bit(erase_lun, e_line->erase_bitmap);
 			atomic_dec(&e_line->left_eblks);
+
 			*erase_ppa = rqd->ppa_list[i];
 			erase_ppa->g.blk = e_line->id;
 
+			spin_unlock(&e_line->lock);
+
 			/* Avoid evaluating e_line->left_eblks */
 			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
 							valid_secs, i + min);
 		}
+		spin_unlock(&e_line->lock);
 	}
 
+	e_line = pblk_line_get_erase(pblk);
+	d_line = pblk_line_get_data(pblk);
+
 	/* Erase blocks that are bad in this line but might not be in next */
-	if (unlikely(ppa_empty(*erase_ppa))) {
-		struct pblk_line_meta *lm = &pblk->lm;
+	if (unlikely(ppa_empty(*erase_ppa)) &&
+			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
+		int bit = -1;
 
-		i = find_first_zero_bit(e_line->erase_bitmap, lm->blk_per_line);
-		if (i == lm->blk_per_line)
+retry:
+		bit = find_next_bit(d_line->blk_bitmap,
+						lm->blk_per_line, bit + 1);
+		if (bit >= lm->blk_per_line)
 			return;
 
-		set_bit(i, e_line->erase_bitmap);
+		spin_lock(&e_line->lock);
+		if (test_bit(bit, e_line->erase_bitmap)) {
+			spin_unlock(&e_line->lock);
+			goto retry;
+		}
+		spin_unlock(&e_line->lock);
+
+		set_bit(bit, e_line->erase_bitmap);
 		atomic_dec(&e_line->left_eblks);
-		*erase_ppa = pblk->luns[i].bppa; /* set ch and lun */
+		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
 		erase_ppa->g.blk = e_line->id;
 	}
 }
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 045384ddc1f9..d293af12aa7a 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -521,20 +521,19 @@ out:
  * This function is used by the write thread to form the write bio that will
  * persist data on the write buffer to the media.
  */
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
-				 struct pblk_c_ctx *c_ctx,
-				 unsigned int pos,
-				 unsigned int nr_entries,
-				 unsigned int count)
+unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
+				 struct bio *bio, unsigned int pos,
+				 unsigned int nr_entries, unsigned int count)
 {
 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
+	struct request_queue *q = pblk->dev->q;
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 	struct pblk_rb_entry *entry;
 	struct page *page;
-	unsigned int pad = 0, read = 0, to_read = nr_entries;
+	unsigned int pad = 0, to_read = nr_entries;
 	unsigned int user_io = 0, gc_io = 0;
 	unsigned int i;
 	int flags;
-	int ret;
 
 	if (count < nr_entries) {
 		pad = nr_entries - count;
@@ -570,17 +569,17 @@ try:
 			flags |= PBLK_SUBMITTED_ENTRY;
 			/* Release flags on context. Protect from writes */
 			smp_store_release(&entry->w_ctx.flags, flags);
-			goto out;
+			return NVM_IO_ERR;
 		}
 
-		ret = bio_add_page(bio, page, rb->seg_size, 0);
-		if (ret != rb->seg_size) {
+		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
+						rb->seg_size) {
 			pr_err("pblk: could not add page to write bio\n");
 			flags &= ~PBLK_WRITTEN_DATA;
 			flags |= PBLK_SUBMITTED_ENTRY;
 			/* Release flags on context. Protect from writes */
 			smp_store_release(&entry->w_ctx.flags, flags);
-			goto out;
+			return NVM_IO_ERR;
 		}
 
 		if (flags & PBLK_FLUSH_ENTRY) {
@@ -607,14 +606,20 @@ try:
 		pos = (pos + 1) & (rb->nr_entries - 1);
 	}
 
-	read = to_read;
+	if (pad) {
+		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
+			pr_err("pblk: could not pad page in write bio\n");
+			return NVM_IO_ERR;
+		}
+	}
+
 	pblk_rl_out(&pblk->rl, user_io, gc_io);
 #ifdef CONFIG_NVM_DEBUG
 	atomic_long_add(pad, &((struct pblk *)
 			(container_of(rb, struct pblk, rwb)))->padded_writes);
 #endif
-out:
-	return read;
+
+	return NVM_IO_OK;
 }
 
 /*
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 79b90d8dbcb3..c745a22057f8 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -219,11 +219,10 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 }
 
 static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-			   struct pblk_c_ctx *c_ctx)
+			struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
 {
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct pblk_line *e_line = pblk_line_get_data_next(pblk);
-	struct ppa_addr erase_ppa;
+	struct pblk_line *e_line = pblk_line_get_erase(pblk);
 	unsigned int valid = c_ctx->nr_valid;
 	unsigned int padded = c_ctx->nr_padded;
 	unsigned int nr_secs = valid + padded;
@@ -231,40 +230,23 @@ static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	int ret = 0;
 
 	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
-	if (!lun_bitmap) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!lun_bitmap)
+		return -ENOMEM;
 	c_ctx->lun_bitmap = lun_bitmap;
 
 	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs);
 	if (ret) {
 		kfree(lun_bitmap);
-		goto out;
+		return ret;
 	}
 
-	ppa_set_empty(&erase_ppa);
-	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
+	if (likely(!atomic_read(&e_line->left_eblks) || !e_line))
 		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
 	else
 		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
-							valid, &erase_ppa);
+							valid, erase_ppa);
 
-out:
-	if (unlikely(e_line && !ppa_empty(erase_ppa))) {
-		if (pblk_blk_erase_async(pblk, erase_ppa)) {
-			struct nvm_tgt_dev *dev = pblk->dev;
-			struct nvm_geo *geo = &dev->geo;
-			int bit;
-
-			atomic_inc(&e_line->left_eblks);
-			bit = erase_ppa.g.lun * geo->nr_chnls + erase_ppa.g.ch;
-			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
-			up(&pblk->erase_sem);
-		}
-	}
-
-	return ret;
+	return 0;
 }
 
 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -311,16 +293,60 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
 	return secs_to_sync;
 }
 
+static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+	struct ppa_addr erase_ppa;
+	int err;
+
+	ppa_set_empty(&erase_ppa);
+
+	/* Assign lbas to ppas and populate request structure */
+	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
+	if (err) {
+		pr_err("pblk: could not setup write request: %d\n", err);
+		return NVM_IO_ERR;
+	}
+
+	/* Submit write for current data line */
+	err = pblk_submit_io(pblk, rqd);
+	if (err) {
+		pr_err("pblk: I/O submission failed: %d\n", err);
+		return NVM_IO_ERR;
+	}
+
+	/* Submit available erase for next data line */
+	if (unlikely(!ppa_empty(erase_ppa)) &&
+			pblk_blk_erase_async(pblk, erase_ppa)) {
+		struct pblk_line *e_line = pblk_line_get_erase(pblk);
+		struct nvm_tgt_dev *dev = pblk->dev;
+		struct nvm_geo *geo = &dev->geo;
+		int bit;
+
+		atomic_inc(&e_line->left_eblks);
+		bit = erase_ppa.g.lun * geo->nr_chnls + erase_ppa.g.ch;
+		WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
+	}
+
+	return NVM_IO_OK;
+}
+
+static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+	struct bio *bio = rqd->bio;
+
+	if (c_ctx->nr_padded)
+		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
+}
+
 static int pblk_submit_write(struct pblk *pblk)
 {
 	struct bio *bio;
 	struct nvm_rq *rqd;
-	struct pblk_c_ctx *c_ctx;
-	unsigned int pgs_read;
 	unsigned int secs_avail, secs_to_sync, secs_to_com;
 	unsigned int secs_to_flush;
 	unsigned long pos;
-	int err;
 
 	/* If there are no sectors in the cache, flushes (bios without data)
 	 * will be cleared on the cache threads
@@ -338,7 +364,6 @@ static int pblk_submit_write(struct pblk *pblk)
338 pr_err("pblk: cannot allocate write req.\n"); 364 pr_err("pblk: cannot allocate write req.\n");
339 return 1; 365 return 1;
340 } 366 }
341 c_ctx = nvm_rq_to_pdu(rqd);
342 367
343 bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs); 368 bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
344 if (!bio) { 369 if (!bio) {
@@ -358,29 +383,14 @@ static int pblk_submit_write(struct pblk *pblk)
 	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
 	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
 
-	pgs_read = pblk_rb_read_to_bio(&pblk->rwb, bio, c_ctx, pos,
-				secs_to_sync, secs_avail);
-	if (!pgs_read) {
+	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
+								secs_avail)) {
 		pr_err("pblk: corrupted write bio\n");
 		goto fail_put_bio;
 	}
 
-	if (c_ctx->nr_padded)
-		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, c_ctx->nr_padded))
-			goto fail_put_bio;
-
-	/* Assign lbas to ppas and populate request structure */
-	err = pblk_setup_w_rq(pblk, rqd, c_ctx);
-	if (err) {
-		pr_err("pblk: could not setup write request\n");
+	if (pblk_submit_io_set(pblk, rqd))
 		goto fail_free_bio;
-	}
-
-	err = pblk_submit_io(pblk, rqd);
-	if (err) {
-		pr_err("pblk: I/O submission failed: %d\n", err);
-		goto fail_free_bio;
-	}
 
 #ifdef CONFIG_NVM_DEBUG
 	atomic_long_add(secs_to_sync, &pblk->sub_writes);
@@ -389,8 +399,7 @@ static int pblk_submit_write(struct pblk *pblk)
 	return 0;
 
 fail_free_bio:
-	if (c_ctx->nr_padded)
-		pblk_bio_free_pages(pblk, bio, secs_to_sync, c_ctx->nr_padded);
+	pblk_free_write_rqd(pblk, rqd);
 fail_put_bio:
 	bio_put(bio);
 fail_free_rqd:
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index edff59aae741..08887d34119e 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -500,7 +500,6 @@ struct pblk {
 	struct pblk_rl rl;
 
 	int sec_per_write;
-	struct semaphore erase_sem;
 
 	unsigned char instance_uuid[16];
 #ifdef CONFIG_NVM_DEBUG
@@ -583,11 +582,9 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
 struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
 
 void pblk_rb_sync_l2p(struct pblk_rb *rb);
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
-				 struct pblk_c_ctx *c_ctx,
-				 unsigned int pos,
-				 unsigned int nr_entries,
-				 unsigned int count);
+unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
+				 struct bio *bio, unsigned int pos,
+				 unsigned int nr_entries, unsigned int count);
 unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
 				      struct list_head *list,
 				      unsigned int max);
@@ -633,7 +630,7 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
 struct pblk_line *pblk_line_get_data(struct pblk *pblk);
-struct pblk_line *pblk_line_get_data_next(struct pblk *pblk);
+struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
 int pblk_line_is_full(struct pblk_line *line);
 void pblk_line_free(struct pblk *pblk, struct pblk_line *line);