author	Hans Holmberg <hans.holmberg@cnexlabs.com>	2018-06-01 10:41:05 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-06-01 11:02:53 -0400
commit	6a3abf5beef6ae46381c1fb6976e6f313c40f0c1 (patch)
tree	0cb1da46fdb14e1a0de58d656331b0cc6c0c9082 /drivers/lightnvm
parent	72b6cdbb11135ec077bd3299fa2fad4503800d37 (diff)
lightnvm: pblk: rework write error recovery path
The write error recovery path is incomplete, so rework the write error
recovery handling to do resubmits directly from the write buffer.

When a write error occurs, the remaining sectors in the chunk are mapped
out and invalidated and the request inserted in a resubmit list.

The writer thread checks if there are any requests to resubmit, scans and
invalidates any lbas that have been overwritten by later writes and
resubmits the failed entries.

Signed-off-by: Hans Holmberg <hans.holmberg@cnexlabs.com>
Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
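To make the new flow easier to follow, here is a minimal userspace sketch of the idea described above: a failed write context is parked on a resubmit list, and the writer loop drains that list before taking new work from the write buffer. All names in the sketch (resubmit_ctx, queue_resubmit, writer_iteration) are illustrative only and do not exist in pblk; the real code stores struct pblk_c_ctx entries on pblk->resubmit_list under pblk->resubmit_lock, as the diff below shows.

/*
 * Illustrative userspace model only -- not pblk code. A failed write is
 * queued as a resubmit context; the writer loop always drains the
 * resubmit list before taking new work from the write buffer.
 */
#include <stdio.h>
#include <stdlib.h>

struct resubmit_ctx {
	unsigned int sentry;		/* first write-buffer entry to retry */
	unsigned int nr_valid;		/* number of entries to retry */
	struct resubmit_ctx *next;
};

/* Single-threaded model; pblk protects its list with resubmit_lock. */
static struct resubmit_ctx *resubmit_head;

/* Completion path: a write failed, park its entries for the writer. */
static void queue_resubmit(unsigned int sentry, unsigned int nr_valid)
{
	struct resubmit_ctx *r = malloc(sizeof(*r));

	if (!r)
		return;
	r->sentry = sentry;
	r->nr_valid = nr_valid;
	r->next = resubmit_head;	/* pblk uses a FIFO list instead */
	resubmit_head = r;
}

/* One writer-thread iteration: failed writes are resubmitted first. */
static void writer_iteration(void)
{
	struct resubmit_ctx *r = resubmit_head;

	if (r) {
		resubmit_head = r->next;
		/* pblk_prepare_resubmit() would drop overwritten lbas here */
		printf("resubmitting %u entries starting at %u\n",
		       r->nr_valid, r->sentry);
		free(r);
		return;
	}
	printf("no failed writes pending, draining write buffer\n");
}

int main(void)
{
	queue_resubmit(128, 8);		/* pretend an 8-sector write failed */
	writer_iteration();		/* retries the failed entries first */
	writer_iteration();		/* then back to normal operation */
	return 0;
}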
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--	drivers/lightnvm/pblk-init.c	2
-rw-r--r--	drivers/lightnvm/pblk-rb.c	39
-rw-r--r--	drivers/lightnvm/pblk-recovery.c	91
-rw-r--r--	drivers/lightnvm/pblk-write.c	267
-rw-r--r--	drivers/lightnvm/pblk.h	11
5 files changed, 181 insertions, 229 deletions
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index f47e95c0e5da..2a3302f7e853 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -426,6 +426,7 @@ static int pblk_core_init(struct pblk *pblk)
 		goto free_r_end_wq;
 
 	INIT_LIST_HEAD(&pblk->compl_list);
+	INIT_LIST_HEAD(&pblk->resubmit_list);
 
 	return 0;
 
@@ -1185,6 +1186,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 	pblk->state = PBLK_STATE_RUNNING;
 	pblk->gc.gc_enabled = 0;
 
+	spin_lock_init(&pblk->resubmit_lock);
 	spin_lock_init(&pblk->trans_lock);
 	spin_lock_init(&pblk->lock);
 
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 58946ffebe81..1b74ec51a4ad 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -503,45 +503,6 @@ int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
 }
 
 /*
- * The caller of this function must ensure that the backpointer will not
- * overwrite the entries passed on the list.
- */
-unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
-				      struct list_head *list,
-				      unsigned int max)
-{
-	struct pblk_rb_entry *entry, *tentry;
-	struct page *page;
-	unsigned int read = 0;
-	int ret;
-
-	list_for_each_entry_safe(entry, tentry, list, index) {
-		if (read > max) {
-			pr_err("pblk: too many entries on list\n");
-			goto out;
-		}
-
-		page = virt_to_page(entry->data);
-		if (!page) {
-			pr_err("pblk: could not allocate write bio page\n");
-			goto out;
-		}
-
-		ret = bio_add_page(bio, page, rb->seg_size, 0);
-		if (ret != rb->seg_size) {
-			pr_err("pblk: could not add page to write bio\n");
-			goto out;
-		}
-
-		list_del(&entry->index);
-		read++;
-	}
-
-out:
-	return read;
-}
-
-/*
  * Read available entries on rb and add them to the given bio. To avoid a memory
  * copy, a page reference to the write buffer is used to be added to the bio.
  *
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 364ad52a5bfb..788dce87043e 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -16,97 +16,6 @@
 
 #include "pblk.h"
 
-void pblk_submit_rec(struct work_struct *work)
-{
-	struct pblk_rec_ctx *recovery =
-			container_of(work, struct pblk_rec_ctx, ws_rec);
-	struct pblk *pblk = recovery->pblk;
-	struct nvm_rq *rqd = recovery->rqd;
-	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-	struct bio *bio;
-	unsigned int nr_rec_secs;
-	unsigned int pgs_read;
-	int ret;
-
-	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
-							NVM_MAX_VLBA);
-
-	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
-
-	bio->bi_iter.bi_sector = 0;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	rqd->bio = bio;
-	rqd->nr_ppas = nr_rec_secs;
-
-	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
-								nr_rec_secs);
-	if (pgs_read != nr_rec_secs) {
-		pr_err("pblk: could not read recovery entries\n");
-		goto err;
-	}
-
-	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
-		pr_err("pblk: could not setup recovery request\n");
-		goto err;
-	}
-
-#ifdef CONFIG_NVM_DEBUG
-	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
-#endif
-
-	ret = pblk_submit_io(pblk, rqd);
-	if (ret) {
-		pr_err("pblk: I/O submission failed: %d\n", ret);
-		goto err;
-	}
-
-	mempool_free(recovery, &pblk->rec_pool);
-	return;
-
-err:
-	bio_put(bio);
-	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-}
-
-int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
-			struct pblk_rec_ctx *recovery, u64 *comp_bits,
-			unsigned int comp)
-{
-	struct nvm_rq *rec_rqd;
-	struct pblk_c_ctx *rec_ctx;
-	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;
-
-	rec_rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
-	rec_ctx = nvm_rq_to_pdu(rec_rqd);
-
-	/* Copy completion bitmap, but exclude the first X completed entries */
-	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
-				(unsigned long int *)comp_bits,
-				comp, NVM_MAX_VLBA);
-
-	/* Save the context for the entries that need to be re-written and
-	 * update current context with the completed entries.
-	 */
-	rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
-	if (comp >= c_ctx->nr_valid) {
-		rec_ctx->nr_valid = 0;
-		rec_ctx->nr_padded = nr_entries - comp;
-
-		c_ctx->nr_padded = comp - c_ctx->nr_valid;
-	} else {
-		rec_ctx->nr_valid = c_ctx->nr_valid - comp;
-		rec_ctx->nr_padded = c_ctx->nr_padded;
-
-		c_ctx->nr_valid = comp;
-		c_ctx->nr_padded = 0;
-	}
-
-	recovery->rqd = rec_rqd;
-	recovery->pblk = pblk;
-
-	return 0;
-}
-
 int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
 {
 	u32 crc;
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index aef7fa2d401d..7f9491bc64b5 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -103,68 +103,149 @@ retry:
 	pblk_rb_sync_end(&pblk->rwb, &flags);
 }
 
-/* When a write fails, we are not sure whether the block has grown bad or a page
- * range is more susceptible to write errors. If a high number of pages fail, we
- * assume that the block is bad and we mark it accordingly. In all cases, we
- * remap and resubmit the failed entries as fast as possible; if a flush is
- * waiting on a completion, the whole stack would stall otherwise.
- */
-static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
+/* Map remaining sectors in chunk, starting from ppa */
+static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
 {
-	void *comp_bits = &rqd->ppa_status;
-	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-	struct pblk_rec_ctx *recovery;
-	struct ppa_addr *ppa_list = rqd->ppa_list;
-	int nr_ppas = rqd->nr_ppas;
-	unsigned int c_entries;
-	int bit, ret;
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct pblk_line *line;
+	struct ppa_addr map_ppa = *ppa;
+	u64 paddr;
+	int done = 0;
 
-	if (unlikely(nr_ppas == 1))
-		ppa_list = &rqd->ppa_addr;
+	line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+	spin_lock(&line->lock);
 
-	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
+	while (!done) {
+		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);
 
-	INIT_LIST_HEAD(&recovery->failed);
+		if (!test_and_set_bit(paddr, line->map_bitmap))
+			line->left_msecs--;
 
-	bit = -1;
-	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
-		struct pblk_rb_entry *entry;
-		struct ppa_addr ppa;
+		if (!test_and_set_bit(paddr, line->invalid_bitmap))
+			le32_add_cpu(line->vsc, -1);
 
-		/* Logic error */
-		if (bit > c_ctx->nr_valid) {
-			WARN_ONCE(1, "pblk: corrupted write request\n");
-			mempool_free(recovery, &pblk->rec_pool);
-			goto out;
+		if (geo->version == NVM_OCSSD_SPEC_12) {
+			map_ppa.ppa++;
+			if (map_ppa.g.pg == geo->num_pg)
+				done = 1;
+		} else {
+			map_ppa.m.sec++;
+			if (map_ppa.m.sec == geo->clba)
+				done = 1;
 		}
+	}
 
-		ppa = ppa_list[bit];
-		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
-		if (!entry) {
-			pr_err("pblk: could not scan entry on write failure\n");
-			mempool_free(recovery, &pblk->rec_pool);
-			goto out;
-		}
+	spin_unlock(&line->lock);
+}
+
+static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
+				  unsigned int nr_entries)
+{
+	struct pblk_rb *rb = &pblk->rwb;
+	struct pblk_rb_entry *entry;
+	struct pblk_line *line;
+	struct pblk_w_ctx *w_ctx;
+	struct ppa_addr ppa_l2p;
+	int flags;
+	unsigned int pos, i;
+
+	spin_lock(&pblk->trans_lock);
+	pos = sentry;
+	for (i = 0; i < nr_entries; i++) {
+		entry = &rb->entries[pos];
+		w_ctx = &entry->w_ctx;
+
+		/* Check if the lba has been overwritten */
+		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
+		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
+			w_ctx->lba = ADDR_EMPTY;
+
+		/* Mark up the entry as submittable again */
+		flags = READ_ONCE(w_ctx->flags);
+		flags |= PBLK_WRITTEN_DATA;
+		/* Release flags on write context. Protect from writes */
+		smp_store_release(&w_ctx->flags, flags);
 
-		/* The list is filled first and emptied afterwards. No need for
-		 * protecting it with a lock
+		/* Decrese the reference count to the line as we will
+		 * re-map these entries
 		 */
-		list_add_tail(&entry->index, &recovery->failed);
+		line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
+		kref_put(&line->ref, pblk_line_put);
+
+		pos = (pos + 1) & (rb->nr_entries - 1);
 	}
+	spin_unlock(&pblk->trans_lock);
+}
 
-	c_entries = find_first_bit(comp_bits, nr_ppas);
-	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
-	if (ret) {
-		pr_err("pblk: could not recover from write failure\n");
-		mempool_free(recovery, &pblk->rec_pool);
-		goto out;
+static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
+{
+	struct pblk_c_ctx *r_ctx;
+
+	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
+	if (!r_ctx)
+		return;
+
+	r_ctx->lun_bitmap = NULL;
+	r_ctx->sentry = c_ctx->sentry;
+	r_ctx->nr_valid = c_ctx->nr_valid;
+	r_ctx->nr_padded = c_ctx->nr_padded;
+
+	spin_lock(&pblk->resubmit_lock);
+	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
+	spin_unlock(&pblk->resubmit_lock);
+
+#ifdef CONFIG_NVM_DEBUG
+	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
+#endif
+}
+
+static void pblk_submit_rec(struct work_struct *work)
+{
+	struct pblk_rec_ctx *recovery =
+			container_of(work, struct pblk_rec_ctx, ws_rec);
+	struct pblk *pblk = recovery->pblk;
+	struct nvm_rq *rqd = recovery->rqd;
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+	struct ppa_addr *ppa_list;
+
+	pblk_log_write_err(pblk, rqd);
+
+	if (rqd->nr_ppas == 1)
+		ppa_list = &rqd->ppa_addr;
+	else
+		ppa_list = rqd->ppa_list;
+
+	pblk_map_remaining(pblk, ppa_list);
+	pblk_queue_resubmit(pblk, c_ctx);
+
+	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
+	if (c_ctx->nr_padded)
+		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
+							c_ctx->nr_padded);
+	bio_put(rqd->bio);
+	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
+	mempool_free(recovery, &pblk->rec_pool);
+
+	atomic_dec(&pblk->inflight_io);
+}
+
+
+static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	struct pblk_rec_ctx *recovery;
+
+	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
+	if (!recovery) {
+		pr_err("pblk: could not allocate recovery work\n");
+		return;
 	}
 
+	recovery->pblk = pblk;
+	recovery->rqd = rqd;
+
 	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
 	queue_work(pblk->close_wq, &recovery->ws_rec);
-
-out:
-	pblk_complete_write(pblk, rqd, c_ctx);
 }
 
 static void pblk_end_io_write(struct nvm_rq *rqd)
@@ -173,8 +254,8 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
 	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 
 	if (rqd->error) {
-		pblk_log_write_err(pblk, rqd);
-		return pblk_end_w_fail(pblk, rqd);
+		pblk_end_w_fail(pblk, rqd);
+		return;
 	}
 #ifdef CONFIG_NVM_DEBUG
 	else
@@ -266,31 +347,6 @@ static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	return 0;
 }
 
-int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
-			struct pblk_c_ctx *c_ctx)
-{
-	struct pblk_line_meta *lm = &pblk->lm;
-	unsigned long *lun_bitmap;
-	int ret;
-
-	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
-	if (!lun_bitmap)
-		return -ENOMEM;
-
-	c_ctx->lun_bitmap = lun_bitmap;
-
-	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
-	if (ret)
-		return ret;
-
-	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
-
-	rqd->ppa_status = (u64)0;
-	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
-
-	return ret;
-}
-
 static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
 				  unsigned int secs_to_flush)
 {
@@ -339,6 +395,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
 					l_mg->emeta_alloc_type, GFP_KERNEL);
 	if (IS_ERR(bio)) {
+		pr_err("pblk: failed to map emeta io");
 		ret = PTR_ERR(bio);
 		goto fail_free_rqd;
 	}
@@ -515,26 +572,54 @@ static int pblk_submit_write(struct pblk *pblk)
 	unsigned int secs_avail, secs_to_sync, secs_to_com;
 	unsigned int secs_to_flush;
 	unsigned long pos;
+	unsigned int resubmit;
 
-	/* If there are no sectors in the cache, flushes (bios without data)
-	 * will be cleared on the cache threads
-	 */
-	secs_avail = pblk_rb_read_count(&pblk->rwb);
-	if (!secs_avail)
-		return 1;
-
-	secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
-	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
-		return 1;
-
-	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
-	if (secs_to_sync > pblk->max_write_pgs) {
-		pr_err("pblk: bad buffer sync calculation\n");
-		return 1;
-	}
+	spin_lock(&pblk->resubmit_lock);
+	resubmit = !list_empty(&pblk->resubmit_list);
+	spin_unlock(&pblk->resubmit_lock);
+
+	/* Resubmit failed writes first */
+	if (resubmit) {
+		struct pblk_c_ctx *r_ctx;
+
+		spin_lock(&pblk->resubmit_lock);
+		r_ctx = list_first_entry(&pblk->resubmit_list,
+					struct pblk_c_ctx, list);
+		list_del(&r_ctx->list);
+		spin_unlock(&pblk->resubmit_lock);
+
+		secs_avail = r_ctx->nr_valid;
+		pos = r_ctx->sentry;
+
+		pblk_prepare_resubmit(pblk, pos, secs_avail);
+		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
+				secs_avail);
 
-	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
-	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
+		kfree(r_ctx);
+	} else {
+		/* If there are no sectors in the cache,
+		 * flushes (bios without data) will be cleared on
+		 * the cache threads
+		 */
+		secs_avail = pblk_rb_read_count(&pblk->rwb);
+		if (!secs_avail)
+			return 1;
+
+		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
+		if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
+			return 1;
+
+		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
+					secs_to_flush);
+		if (secs_to_sync > pblk->max_write_pgs) {
+			pr_err("pblk: bad buffer sync calculation\n");
+			return 1;
+		}
+
+		secs_to_com = (secs_to_sync > secs_avail) ?
+				secs_avail : secs_to_sync;
+		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
+	}
 
 	bio = bio_alloc(GFP_KERNEL, secs_to_sync);
 
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 9f22bbb54a6d..043a851a8e6c 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -128,7 +128,6 @@ struct pblk_pad_rq {
 struct pblk_rec_ctx {
 	struct pblk *pblk;
 	struct nvm_rq *rqd;
-	struct list_head failed;
 	struct work_struct ws_rec;
 };
 
@@ -664,6 +663,9 @@ struct pblk {
 
 	struct list_head compl_list;
 
+	spinlock_t resubmit_lock;	/* Resubmit list lock */
+	struct list_head resubmit_list; /* Resubmit list for failed writes*/
+
 	mempool_t page_bio_pool;
 	mempool_t gen_ws_pool;
 	mempool_t rec_pool;
@@ -713,9 +715,6 @@ void pblk_rb_sync_l2p(struct pblk_rb *rb);
 unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
 				 unsigned int pos, unsigned int nr_entries,
 				 unsigned int count);
-unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
-				      struct list_head *list,
-				      unsigned int max);
 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
@@ -848,13 +847,9 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 /*
  * pblk recovery
  */
-void pblk_submit_rec(struct work_struct *work);
 struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
 int pblk_recov_pad(struct pblk *pblk);
 int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
-int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
-			struct pblk_rec_ctx *recovery, u64 *comp_bits,
-			unsigned int comp);
 
 /*
  * pblk gc