author | Matias Bjørling <m@bjorling.me> | 2016-05-06 14:02:56 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-05-06 14:51:10 -0400 |
commit | 1145e6351a9fefe0965df4c6dba2a04156dc47d2 (patch) | |
tree | 4c92d803aea93a7f85de048de36e1981550add98 /drivers/lightnvm | |
parent | ecfb40c6aa5691257054eac81bc8bdfd5442e8e5 (diff) |
lightnvm: implement nvm_submit_ppa_list
The nvm_submit_ppa function assumes that users manage all plane
blocks as a single block. Extend the API with nvm_submit_ppa_list to
let users submit their own PPA list. If more than a single PPA is
submitted, the user must take care to allocate and free the
corresponding PPA list.
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
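
To illustrate the ownership rule described above, here is a minimal caller sketch of the new interface (not part of the patch): it allocates a two-entry PPA list, submits a synchronous read at explicitly chosen addresses, and frees the list afterwards. The example_read_two_sectors name, the geometry values, the zero flags argument, and the use of nvm_dev_dma_alloc()/nvm_dev_dma_free() for the list are illustrative assumptions, not something this commit mandates.

```c
#include <linux/lightnvm.h>

/*
 * Illustrative sketch only: synchronous two-sector read at caller-chosen
 * PPAs via the new nvm_submit_ppa_list(). The caller owns the list, as the
 * commit message requires; it is taken from nvm_dev_dma_alloc() here on the
 * assumption that the backing driver wants a DMA-able list. All placement
 * values are made up.
 */
static int example_read_two_sectors(struct nvm_dev *dev, void *buf, int len)
{
	struct ppa_addr *ppas;
	dma_addr_t dma_ppas;
	int ret;

	ppas = nvm_dev_dma_alloc(dev, GFP_KERNEL, &dma_ppas);
	if (!ppas)
		return -ENOMEM;

	memset(ppas, 0, 2 * sizeof(struct ppa_addr));

	/* explicit placement: no automatic plane unfolding happens here */
	ppas[0].g.ch = 0;
	ppas[0].g.lun = 0;
	ppas[0].g.blk = 10;
	ppas[0].g.pg = 0;
	ppas[0].g.sec = 0;

	ppas[1] = ppas[0];
	ppas[1].g.sec = 1;

	/* blocks until completion; returns device status or a negative errno */
	ret = nvm_submit_ppa_list(dev, ppas, 2, NVM_OP_PREAD, 0, buf, len);

	nvm_dev_dma_free(dev, ppas, dma_ppas);
	return ret;
}
```

As the diff below shows, nvm_submit_ppa_list() itself rejects lists longer than dev->ops->max_phys_sect with -EINVAL, and a caller-supplied list is passed through without plane unfolding.
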
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r-- | drivers/lightnvm/core.c | 88 |
1 file changed, 69 insertions, 19 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c2ef53a0d7f8..f4e04a505859 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -322,11 +322,10 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
 	complete(waiting);
 }
 
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
-				int opcode, int flags, void *buf, int len)
+int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+				int flags, void *buf, int len)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
-	struct nvm_rq rqd;
 	struct bio *bio;
 	int ret;
 	unsigned long hang_check;
@@ -335,24 +334,17 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
 	if (IS_ERR_OR_NULL(bio))
 		return -ENOMEM;
 
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
-	if (ret) {
-		bio_put(bio);
-		return ret;
-	}
+	nvm_generic_to_addr_mode(dev, rqd);
 
-	rqd.opcode = opcode;
-	rqd.bio = bio;
-	rqd.wait = &wait;
-	rqd.dev = dev;
-	rqd.end_io = nvm_end_io_sync;
-	rqd.flags = flags;
-	nvm_generic_to_addr_mode(dev, &rqd);
+	rqd->dev = dev;
+	rqd->opcode = opcode;
+	rqd->flags = flags;
+	rqd->bio = bio;
+	rqd->wait = &wait;
+	rqd->end_io = nvm_end_io_sync;
 
-	ret = dev->ops->submit_io(dev, &rqd);
+	ret = dev->ops->submit_io(dev, rqd);
 	if (ret) {
-		nvm_free_rqd_ppalist(dev, &rqd);
 		bio_put(bio);
 		return ret;
 	}
@@ -364,9 +356,67 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
 	else
 		wait_for_completion_io(&wait);
 
+	return rqd->error;
+}
+
+/**
+ * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
+ *			 take to free ppa list if necessary.
+ * @dev: device
+ * @ppa_list: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
+			int nr_ppas, int opcode, int flags, void *buf, int len)
+{
+	struct nvm_rq rqd;
+
+	if (dev->ops->max_phys_sect < nr_ppas)
+		return -EINVAL;
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	rqd.nr_pages = nr_ppas;
+	if (nr_ppas > 1)
+		rqd.ppa_list = ppa_list;
+	else
+		rqd.ppa_addr = ppa_list[0];
+
+	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+}
+EXPORT_SYMBOL(nvm_submit_ppa_list);
+
+/**
+ * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
+ *		    as single, dual, quad plane PPAs depending on device type.
+ * @dev: device
+ * @ppa: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+				int opcode, int flags, void *buf, int len)
+{
+	struct nvm_rq rqd;
+	int ret;
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+	if (ret)
+		return ret;
+
+	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+
 	nvm_free_rqd_ppalist(dev, &rqd);
 
-	return rqd.error;
+	return ret;
 }
 EXPORT_SYMBOL(nvm_submit_ppa);
 
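
For contrast, here is a hedged sketch of a caller on the pre-existing nvm_submit_ppa() path, which keeps the automatic plane handling: the caller passes a single generic PPA and nvm_set_rqd_ppalist() inside the helper unfolds it into single, dual, or quad plane addresses, so the per-plane list never reaches the caller. The example_read_page name and the geometry values are made up for illustration.

```c
#include <linux/lightnvm.h>

/*
 * Illustrative sketch only: read one page through the refactored
 * nvm_submit_ppa(). A single generic PPA goes in; plane unfolding
 * still happens inside the helper via nvm_set_rqd_ppalist(), unlike
 * with a caller-built list passed to nvm_submit_ppa_list().
 */
static int example_read_page(struct nvm_dev *dev, u32 blk, void *buf, int len)
{
	struct ppa_addr ppa;

	memset(&ppa, 0, sizeof(ppa));
	ppa.g.ch = 0;		/* made-up placement */
	ppa.g.lun = 0;
	ppa.g.blk = blk;
	ppa.g.pg = 0;

	/* synchronous; returns the device status or a negative errno */
	return nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf, len);
}
```

This mirrors the split the patch makes: the synchronous submission machinery lives in __nvm_submit_ppa(), while the two exported wrappers differ only in who builds and owns the PPA list.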