Diffstat (limited to 'drivers/lightnvm/pblk-read.c')
-rw-r--r--   drivers/lightnvm/pblk-read.c | 274
1 file changed, 164 insertions(+), 110 deletions(-)
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index d682e89e6493..ca79d8fb3e60 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -39,21 +39,15 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
 }
 
 static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                                 unsigned long *read_bitmap)
+                                 sector_t blba, unsigned long *read_bitmap)
 {
+        struct pblk_sec_meta *meta_list = rqd->meta_list;
         struct bio *bio = rqd->bio;
         struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
-        sector_t blba = pblk_get_lba(bio);
         int nr_secs = rqd->nr_ppas;
         bool advanced_bio = false;
         int i, j = 0;
 
-        /* logic error: lba out-of-bounds. Ignore read request */
-        if (blba + nr_secs >= pblk->rl.nr_secs) {
-                WARN(1, "pblk: read lbas out of bounds\n");
-                return;
-        }
-
         pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
 
         for (i = 0; i < nr_secs; i++) {
@@ -63,6 +57,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
 retry:
                 if (pblk_ppa_empty(p)) {
                         WARN_ON(test_and_set_bit(i, read_bitmap));
+                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
 
                         if (unlikely(!advanced_bio)) {
                                 bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
@@ -82,6 +77,7 @@ retry:
                                 goto retry;
                         }
                         WARN_ON(test_and_set_bit(i, read_bitmap));
+                        meta_list[i].lba = cpu_to_le64(lba);
                         advanced_bio = true;
 #ifdef CONFIG_NVM_DEBUG
                         atomic_long_inc(&pblk->cache_reads);
@@ -117,10 +113,51 @@ static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
         return NVM_IO_OK;
 }
 
-static void pblk_end_io_read(struct nvm_rq *rqd)
+static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
+                            sector_t blba)
+{
+        struct pblk_sec_meta *meta_list = rqd->meta_list;
+        int nr_lbas = rqd->nr_ppas;
+        int i;
+
+        for (i = 0; i < nr_lbas; i++) {
+                u64 lba = le64_to_cpu(meta_list[i].lba);
+
+                if (lba == ADDR_EMPTY)
+                        continue;
+
+                WARN(lba != blba + i, "pblk: corrupted read LBA\n");
+        }
+}
+
+static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
+{
+        struct ppa_addr *ppa_list;
+        int i;
+
+        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+        for (i = 0; i < rqd->nr_ppas; i++) {
+                struct ppa_addr ppa = ppa_list[i];
+                struct pblk_line *line;
+
+                line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+                kref_put(&line->ref, pblk_line_put_wq);
+        }
+}
+
+static void pblk_end_user_read(struct bio *bio)
+{
+#ifdef CONFIG_NVM_DEBUG
+        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
+#endif
+        bio_endio(bio);
+        bio_put(bio);
+}
+
+static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
+                               bool put_line)
 {
-        struct pblk *pblk = rqd->private;
-        struct nvm_tgt_dev *dev = pblk->dev;
         struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
         struct bio *bio = rqd->bio;
 
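A note on the new helpers above (context, not part of the diff): device-resident reads hold a kref on every pblk_line they touch, so GC cannot erase a line while reads from it are in flight; the matching gets happen on the submit side during the L2P lookup, which this file does not show. pblk_read_put_rqd_kref() drops one reference per PPA at completion, and the pblk_line_put_wq callback name suggests the final put is deferred to a workqueue, since releasing the last reference can trigger work unsuitable for I/O completion context. A reduced sketch of that pattern, with invented stand-in types:

	#include <linux/kref.h>
	#include <linux/workqueue.h>

	/* Reduced stand-in for struct pblk_line: just the lifetime parts. */
	struct line_sketch {
		struct kref ref;
		struct work_struct put_work;
	};

	/* Final put may erase/recycle the line and may sleep, so defer it
	 * (models what a pblk_line_put_wq-style callback would do). */
	static void line_put_work(struct work_struct *work)
	{
		/* ... erase/recycle the line ... */
	}

	static void line_release(struct kref *ref)
	{
		struct line_sketch *line =
			container_of(ref, struct line_sketch, ref);

		schedule_work(&line->put_work);
	}

	static void line_init(struct line_sketch *line)
	{
		kref_init(&line->ref);
		INIT_WORK(&line->put_work, line_put_work);
	}

	/* One get per mapped sector at submit time ... */
	static void line_get_for_read(struct line_sketch *line)
	{
		kref_get(&line->ref);
	}

	/* ... and one put per sector when the read completes. */
	static void line_put_for_read(struct line_sketch *line)
	{
		kref_put(&line->ref, line_release);
	}

The put_line flag on __pblk_end_io_read() exists because the partial-read path drops its references inline while filling holes (see the do/while loop further down) and must not drop them a second time at completion.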
@@ -131,47 +168,51 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
         WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
 #endif
 
-        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
+        pblk_read_check(pblk, rqd, r_ctx->lba);
 
         bio_put(bio);
-        if (r_ctx->private) {
-                struct bio *orig_bio = r_ctx->private;
+        if (r_ctx->private)
+                pblk_end_user_read((struct bio *)r_ctx->private);
 
-#ifdef CONFIG_NVM_DEBUG
-                WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
-#endif
-                bio_endio(orig_bio);
-                bio_put(orig_bio);
-        }
+        if (put_line)
+                pblk_read_put_rqd_kref(pblk, rqd);
 
 #ifdef CONFIG_NVM_DEBUG
         atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
         atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
 #endif
 
-        pblk_free_rqd(pblk, rqd, READ);
+        pblk_free_rqd(pblk, rqd, PBLK_READ);
         atomic_dec(&pblk->inflight_io);
 }
 
+static void pblk_end_io_read(struct nvm_rq *rqd)
+{
+        struct pblk *pblk = rqd->private;
+
+        __pblk_end_io_read(pblk, rqd, true);
+}
+
 static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                       unsigned int bio_init_idx,
                                       unsigned long *read_bitmap)
 {
         struct bio *new_bio, *bio = rqd->bio;
+        struct pblk_sec_meta *meta_list = rqd->meta_list;
         struct bio_vec src_bv, dst_bv;
         void *ppa_ptr = NULL;
         void *src_p, *dst_p;
         dma_addr_t dma_ppa_list = 0;
+        __le64 *lba_list_mem, *lba_list_media;
         int nr_secs = rqd->nr_ppas;
         int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
         int i, ret, hole;
-        DECLARE_COMPLETION_ONSTACK(wait);
+
+        /* Re-use allocated memory for intermediate lbas */
+        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
 
         new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-        if (!new_bio) {
-                pr_err("pblk: could not alloc read bio\n");
-                return NVM_IO_ERR;
-        }
 
         if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                 goto err;
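The two __le64 scratch arrays above are carved out of the request's existing DMA area rather than allocated separately: per the patch's own comment, the allocation behind meta_list evidently leaves room past the PPA list. A rough userspace model of the layout the pointer arithmetic assumes (sizes are illustrative; the real ones are pblk_dma_meta_size and pblk_dma_ppa_size):

	#include <stdint.h>
	#include <stdlib.h>

	#define NR_SECS		64			/* PBLK_MAX_REQ_ADDRS */
	#define DMA_META_SIZE	(NR_SECS * 16)		/* per-sector OOB metadata */
	#define DMA_PPA_SIZE	(NR_SECS * sizeof(uint64_t))

	/*
	 * One DMA-able allocation, four regions:
	 *   [ meta_list | ppa_list | lba_list_mem | lba_list_media ]
	 */
	int main(void)
	{
		char *meta_list = malloc(DMA_META_SIZE + 3 * DMA_PPA_SIZE);
		char *ppa_list, *lba_list_mem, *lba_list_media;

		if (!meta_list)
			return 1;

		ppa_list       = meta_list + DMA_META_SIZE;
		lba_list_mem   = ppa_list + DMA_PPA_SIZE;	/* saved host-side lbas */
		lba_list_media = ppa_list + 2 * DMA_PPA_SIZE;	/* lbas read from media */

		(void)lba_list_mem;
		(void)lba_list_media;
		free(meta_list);
		return 0;
	}

One pblk_dma_ppa_size worth of space per array holds PBLK_MAX_REQ_ADDRS 64-bit entries, matching the single-unsigned-long read_bitmap limit noted in pblk_submit_read().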
@@ -181,34 +222,29 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                 goto err;
         }
 
+        for (i = 0; i < nr_secs; i++)
+                lba_list_mem[i] = meta_list[i].lba;
+
         new_bio->bi_iter.bi_sector = 0; /* internal bio */
         bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-        new_bio->bi_private = &wait;
-        new_bio->bi_end_io = pblk_end_bio_sync;
 
         rqd->bio = new_bio;
         rqd->nr_ppas = nr_holes;
         rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-        rqd->end_io = NULL;
 
-        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
+        if (unlikely(nr_holes == 1)) {
                 ppa_ptr = rqd->ppa_list;
                 dma_ppa_list = rqd->dma_ppa_list;
                 rqd->ppa_addr = rqd->ppa_list[0];
         }
 
-        ret = pblk_submit_read_io(pblk, rqd);
+        ret = pblk_submit_io_sync(pblk, rqd);
         if (ret) {
                 bio_put(rqd->bio);
-                pr_err("pblk: read IO submission failed\n");
+                pr_err("pblk: sync read IO submission failed\n");
                 goto err;
         }
 
-        if (!wait_for_completion_io_timeout(&wait,
-                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
-                pr_err("pblk: partial read I/O timed out\n");
-        }
-
         if (rqd->error) {
                 atomic_long_inc(&pblk->read_failed);
 #ifdef CONFIG_NVM_DEBUG
@@ -216,15 +252,31 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
 #endif
         }
 
-        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
+        if (unlikely(nr_holes == 1)) {
+                struct ppa_addr ppa;
+
+                ppa = rqd->ppa_addr;
                 rqd->ppa_list = ppa_ptr;
                 rqd->dma_ppa_list = dma_ppa_list;
+                rqd->ppa_list[0] = ppa;
+        }
+
+        for (i = 0; i < nr_secs; i++) {
+                lba_list_media[i] = meta_list[i].lba;
+                meta_list[i].lba = lba_list_mem[i];
         }
 
         /* Fill the holes in the original bio */
         i = 0;
         hole = find_first_zero_bit(read_bitmap, nr_secs);
         do {
+                int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+                struct pblk_line *line = &pblk->lines[line_id];
+
+                kref_put(&line->ref, pblk_line_put);
+
+                meta_list[hole].lba = lba_list_media[i];
+
                 src_bv = new_bio->bi_io_vec[i++];
                 dst_bv = bio->bi_io_vec[bio_init_idx + hole];
 
@@ -238,7 +290,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                 kunmap_atomic(src_p);
                 kunmap_atomic(dst_p);
 
-                mempool_free(src_bv.bv_page, pblk->page_pool);
+                mempool_free(src_bv.bv_page, pblk->page_bio_pool);
 
                 hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
         } while (hole < nr_secs);
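To summarize the metadata shuffle in the two hunks above: the hole sectors' LBAs arrive from the device compacted at the front of meta_list, so the code snapshots them into lba_list_media, restores the pre-read values from lba_list_mem, and then scatters each media LBA back to the hole position it belongs to while walking the zero bits of read_bitmap. A standalone sketch of that scatter step (a simplified bitmap helper stands in for find_next_zero_bit):

	#include <stdint.h>
	#include <stdbool.h>

	static bool test_bit_sketch(const uint64_t *bitmap, int bit)
	{
		return (bitmap[bit / 64] >> (bit % 64)) & 1;
	}

	/*
	 * Scatter the nr_holes device-reported LBAs, compacted in
	 * lba_media[0..nr_holes), back to the zero bits of read_bitmap
	 * (a set bit = sector served from cache, which keeps its saved LBA).
	 */
	static void scatter_media_lbas(uint64_t *meta_lba,
				       const uint64_t *read_bitmap,
				       int nr_secs, const uint64_t *lba_media)
	{
		int i = 0, hole;

		for (hole = 0; hole < nr_secs; hole++) {
			if (test_bit_sketch(read_bitmap, hole))
				continue;
			meta_lba[hole] = lba_media[i++];
		}
	}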
@@ -246,34 +298,26 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
         bio_put(new_bio);
 
         /* Complete the original bio and associated request */
+        bio_endio(bio);
         rqd->bio = bio;
         rqd->nr_ppas = nr_secs;
-        rqd->private = pblk;
 
-        bio_endio(bio);
-        pblk_end_io_read(rqd);
+        __pblk_end_io_read(pblk, rqd, false);
         return NVM_IO_OK;
 
 err:
         /* Free allocated pages in new bio */
         pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
-        rqd->private = pblk;
-        pblk_end_io_read(rqd);
+        __pblk_end_io_read(pblk, rqd, false);
         return NVM_IO_ERR;
 }
 
 static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                         unsigned long *read_bitmap)
+                         sector_t lba, unsigned long *read_bitmap)
 {
+        struct pblk_sec_meta *meta_list = rqd->meta_list;
         struct bio *bio = rqd->bio;
         struct ppa_addr ppa;
-        sector_t lba = pblk_get_lba(bio);
-
-        /* logic error: lba out-of-bounds. Ignore read request */
-        if (lba >= pblk->rl.nr_secs) {
-                WARN(1, "pblk: read lba out of bounds\n");
-                return;
-        }
 
         pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
 
@@ -284,6 +328,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
 retry:
         if (pblk_ppa_empty(ppa)) {
                 WARN_ON(test_and_set_bit(0, read_bitmap));
+                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                 return;
         }
 
@@ -295,9 +340,12 @@ retry:
                         pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                         goto retry;
                 }
+
                 WARN_ON(test_and_set_bit(0, read_bitmap));
+                meta_list[0].lba = cpu_to_le64(lba);
+
 #ifdef CONFIG_NVM_DEBUG
                 atomic_long_inc(&pblk->cache_reads);
 #endif
         } else {
                 rqd->ppa_addr = ppa;
@@ -309,22 +357,24 @@ retry:
 int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 {
         struct nvm_tgt_dev *dev = pblk->dev;
+        sector_t blba = pblk_get_lba(bio);
         unsigned int nr_secs = pblk_get_secs(bio);
+        struct pblk_g_ctx *r_ctx;
         struct nvm_rq *rqd;
-        unsigned long read_bitmap; /* Max 64 ppas per request */
         unsigned int bio_init_idx;
+        unsigned long read_bitmap; /* Max 64 ppas per request */
         int ret = NVM_IO_ERR;
 
-        if (nr_secs > PBLK_MAX_REQ_ADDRS)
+        /* logic error: lba out-of-bounds. Ignore read request */
+        if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
+                WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
+                                        (unsigned long long)blba, nr_secs);
                 return NVM_IO_ERR;
+        }
 
         bitmap_zero(&read_bitmap, nr_secs);
 
-        rqd = pblk_alloc_rqd(pblk, READ);
-        if (IS_ERR(rqd)) {
-                pr_err_ratelimited("pblk: not able to alloc rqd");
-                return NVM_IO_ERR;
-        }
+        rqd = pblk_alloc_rqd(pblk, PBLK_READ);
 
         rqd->opcode = NVM_OP_PREAD;
         rqd->bio = bio;
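With blba now derived once in pblk_submit_read() and passed down, the out-of-bounds guard lives at the single entry point instead of being duplicated in pblk_read_ppalist_rq() and pblk_read_rq(). A trivial sketch of a consolidated predicate (illustrative names; deliberately slightly stricter than the patch, which rejects only a starting LBA past the device):

	#include <stdbool.h>
	#include <stdint.h>

	#define PBLK_MAX_REQ_ADDRS 64

	/* One check at the entry point covers every downstream path. */
	static bool read_req_in_bounds(uint64_t blba, unsigned int nr_secs,
				       uint64_t dev_nr_secs)
	{
		return nr_secs >= 1 &&
		       nr_secs <= PBLK_MAX_REQ_ADDRS &&
		       blba < dev_nr_secs &&
		       blba + nr_secs <= dev_nr_secs;
	}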
@@ -332,6 +382,9 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
         rqd->private = pblk;
         rqd->end_io = pblk_end_io_read;
 
+        r_ctx = nvm_rq_to_pdu(rqd);
+        r_ctx->lba = blba;
+
         /* Save the index for this bio's start. This is needed in case
          * we need to fill a partial read.
          */
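The start LBA now travels with the request: nvm_rq_to_pdu() returns the per-request context area allocated immediately behind struct nvm_rq, and r_ctx->lba is what pblk_read_check() later compares against. A generic userspace sketch of that embedded-pdu pattern (names invented for illustration):

	#include <stdint.h>
	#include <stdlib.h>

	struct rq_sketch { int nr_ppas; /* ... */ };

	/* Per-read private context stored right behind the request. */
	struct g_ctx_sketch { uint64_t lba; };

	/* models nvm_rq_to_pdu(): the pdu lives at rqd + 1 */
	static struct g_ctx_sketch *rq_to_pdu(struct rq_sketch *rqd)
	{
		return (struct g_ctx_sketch *)(rqd + 1);
	}

	static struct rq_sketch *alloc_rq_with_pdu(void)
	{
		/* one allocation covers the request and its pdu */
		return calloc(1, sizeof(struct rq_sketch) +
				 sizeof(struct g_ctx_sketch));
	}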
@@ -348,23 +401,22 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
                 rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                 rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
 
-                pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
+                pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
         } else {
-                pblk_read_rq(pblk, rqd, &read_bitmap);
+                pblk_read_rq(pblk, rqd, blba, &read_bitmap);
         }
 
         bio_get(bio);
         if (bitmap_full(&read_bitmap, nr_secs)) {
                 bio_endio(bio);
                 atomic_inc(&pblk->inflight_io);
-                pblk_end_io_read(rqd);
+                __pblk_end_io_read(pblk, rqd, false);
                 return NVM_IO_OK;
         }
 
         /* All sectors are to be read from the device */
         if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                 struct bio *int_bio = NULL;
-                struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 
                 /* Clone read bio to deal with read errors internally */
                 int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
@@ -399,40 +451,46 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
         return NVM_IO_OK;
 
 fail_rqd_free:
-        pblk_free_rqd(pblk, rqd, READ);
+        pblk_free_rqd(pblk, rqd, PBLK_READ);
         return ret;
 }
 
 static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                               struct pblk_line *line, u64 *lba_list,
-                              unsigned int nr_secs)
+                              u64 *paddr_list_gc, unsigned int nr_secs)
 {
-        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
+        struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
+        struct ppa_addr ppa_gc;
         int valid_secs = 0;
         int i;
 
-        pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);
+        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);
 
         for (i = 0; i < nr_secs; i++) {
-                if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
-                                                pblk_ppa_empty(ppas[i])) {
-                        lba_list[i] = ADDR_EMPTY;
+                if (lba_list[i] == ADDR_EMPTY)
+                        continue;
+
+                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
+                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
+                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                         continue;
                 }
 
-                rqd->ppa_list[valid_secs++] = ppas[i];
+                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
         }
 
 #ifdef CONFIG_NVM_DEBUG
         atomic_long_add(valid_secs, &pblk->inflight_reads);
 #endif
+
         return valid_secs;
 }
 
 static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
-                      struct pblk_line *line, sector_t lba)
+                      struct pblk_line *line, sector_t lba,
+                      u64 paddr_gc)
 {
-        struct ppa_addr ppa;
+        struct ppa_addr ppa_l2p, ppa_gc;
         int valid_secs = 0;
 
         if (lba == ADDR_EMPTY)
@@ -445,15 +503,14 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
         }
 
         spin_lock(&pblk->trans_lock);
-        ppa = pblk_trans_map_get(pblk, lba);
+        ppa_l2p = pblk_trans_map_get(pblk, lba);
         spin_unlock(&pblk->trans_lock);
 
-        /* Ignore updated values until the moment */
-        if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
-                                                        pblk_ppa_empty(ppa))
+        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
+        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                 goto out;
 
-        rqd->ppa_addr = ppa;
+        rqd->ppa_addr = ppa_l2p;
         valid_secs = 1;
 
 #ifdef CONFIG_NVM_DEBUG
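The invalidation test both GC helpers now share deserves a note: instead of heuristics (address cached, block id mismatch, empty entry), the line's own physical address for the sector is reconstructed from the GC-supplied paddr and compared against the live L2P entry; any mismatch means the sector was rewritten since GC selected it and must be skipped. A hedged sketch of that test, with addr_to_gen_ppa()/pblk_ppa_comp() modeled as plain helpers over a reduced address type:

	#include <stdbool.h>
	#include <stdint.h>

	/* Simplified generic PPA: just enough structure for equality. */
	struct gen_ppa { uint64_t line_id, paddr; };

	/* models addr_to_gen_ppa(): line-local paddr -> device address */
	static struct gen_ppa to_gen_ppa(uint64_t paddr, uint64_t line_id)
	{
		return (struct gen_ppa){ .line_id = line_id, .paddr = paddr };
	}

	/* models pblk_ppa_comp() */
	static bool ppa_equal(struct gen_ppa a, struct gen_ppa b)
	{
		return a.line_id == b.line_id && a.paddr == b.paddr;
	}

	/*
	 * The sector is still worth moving only if the L2P map still
	 * points at the exact address GC is about to read from.
	 */
	static bool gc_sector_still_valid(struct gen_ppa l2p_entry,
					  uint64_t paddr_gc, uint64_t line_id)
	{
		return ppa_equal(l2p_entry, to_gen_ppa(paddr_gc, line_id));
	}

read_ppalist_rq_gc() applies the same test per sector, additionally clearing paddr_list_gc[i] and lba_list[i] so the GC writer skips the stale entry.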
@@ -464,42 +521,44 @@ out:
         return valid_secs;
 }
 
-int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
-                        unsigned int nr_secs, unsigned int *secs_to_gc,
-                        struct pblk_line *line)
+int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 {
         struct nvm_tgt_dev *dev = pblk->dev;
         struct nvm_geo *geo = &dev->geo;
         struct bio *bio;
         struct nvm_rq rqd;
-        int ret, data_len;
-        DECLARE_COMPLETION_ONSTACK(wait);
+        int data_len;
+        int ret = NVM_IO_OK;
 
         memset(&rqd, 0, sizeof(struct nvm_rq));
 
         rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                         &rqd.dma_meta_list);
         if (!rqd.meta_list)
-                return NVM_IO_ERR;
+                return -ENOMEM;
 
-        if (nr_secs > 1) {
+        if (gc_rq->nr_secs > 1) {
                 rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                 rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
 
-                *secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
-                                                nr_secs);
-                if (*secs_to_gc == 1)
+                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
+                                                        gc_rq->lba_list,
+                                                        gc_rq->paddr_list,
+                                                        gc_rq->nr_secs);
+                if (gc_rq->secs_to_gc == 1)
                         rqd.ppa_addr = rqd.ppa_list[0];
         } else {
-                *secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
+                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list[0],
+                                                        gc_rq->paddr_list[0]);
         }
 
-        if (!(*secs_to_gc))
+        if (!(gc_rq->secs_to_gc))
                 goto out;
 
-        data_len = (*secs_to_gc) * geo->sec_size;
-        bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
-                                                PBLK_KMALLOC_META, GFP_KERNEL);
+        data_len = (gc_rq->secs_to_gc) * geo->sec_size;
+        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
+                                                PBLK_VMALLOC_META, GFP_KERNEL);
         if (IS_ERR(bio)) {
                 pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
                 goto err_free_dma;
@@ -509,23 +568,16 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
         bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
         rqd.opcode = NVM_OP_PREAD;
-        rqd.end_io = pblk_end_io_sync;
-        rqd.private = &wait;
-        rqd.nr_ppas = *secs_to_gc;
+        rqd.nr_ppas = gc_rq->secs_to_gc;
         rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
         rqd.bio = bio;
 
-        ret = pblk_submit_read_io(pblk, &rqd);
-        if (ret) {
-                bio_endio(bio);
+        if (pblk_submit_io_sync(pblk, &rqd)) {
+                ret = -EIO;
                 pr_err("pblk: GC read request failed\n");
-                goto err_free_dma;
+                goto err_free_bio;
         }
 
-        if (!wait_for_completion_io_timeout(&wait,
-                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
-                pr_err("pblk: GC read I/O timed out\n");
-        }
         atomic_dec(&pblk->inflight_io);
 
         if (rqd.error) {
@@ -536,16 +588,18 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
         }
 
 #ifdef CONFIG_NVM_DEBUG
-        atomic_long_add(*secs_to_gc, &pblk->sync_reads);
-        atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
-        atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
+        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
+        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
+        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
 #endif
 
 out:
         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-        return NVM_IO_OK;
+        return ret;
 
+err_free_bio:
+        bio_put(bio);
 err_free_dma:
         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-        return NVM_IO_ERR;
+        return ret;
 }
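Both call sites that previously open-coded a DECLARE_COMPLETION_ONSTACK() plus wait_for_completion_io_timeout() now delegate to pblk_submit_io_sync(), which also lets the GC error path drop the bio exactly once via the new err_free_bio label. The real pblk_submit_io_sync() lives in pblk-core.c and its internals are not shown in this diff; the following is an assumption about its shape built from the pattern the removed code used, not a quote:

	/* Sketch only: synchronous submit built on the removed
	 * completion pattern (pblk_end_io_sync completes rqd->private). */
	static int pblk_submit_io_sync_sketch(struct pblk *pblk,
					      struct nvm_rq *rqd)
	{
		DECLARE_COMPLETION_ONSTACK(wait);
		int ret;

		rqd->end_io = pblk_end_io_sync;
		rqd->private = &wait;

		ret = pblk_submit_io(pblk, rqd);
		if (ret)
			return ret;

		/* one wait here replaces the copies in each caller */
		if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS)))
			pr_err("pblk: sync I/O timed out\n");

		return 0;
	}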