diff options
author | Matias Bjørling <m@bjorling.me> | 2016-01-12 01:49:30 -0500 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-01-12 10:21:17 -0500 |
commit | 09719b62fdab031e39b39a6470364a372abdf3f4 (patch) | |
tree | 149130e72879af4aa048967694c3927d3e397a63 /drivers/lightnvm/core.c | |
parent | 72d256ecc5d0c8cbcc0bd5c6d983b434df556cb4 (diff) |
lightnvm: introduce nvm_submit_ppa
Internal logic for both core and media managers, does not have a
backing bio for issuing I/Os. Introduce nvm_submit_ppa to allow raw
I/Os to be submitted to the underlying device driver.
The function takes the device, ppa, data buffer and its length and
will submit the I/O synchronously to the device. The return value may
therefore be used to detect any errors regarding the issued I/O.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r-- | drivers/lightnvm/core.c | 45 |
1 file changed, 44 insertions, 1 deletion
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index dad84ddbefb4..dc83e010d084 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
@@ -296,7 +296,7 @@ void nvm_end_io(struct nvm_rq *rqd, int error) | |||
296 | } | 296 | } |
297 | EXPORT_SYMBOL(nvm_end_io); | 297 | EXPORT_SYMBOL(nvm_end_io); |
298 | 298 | ||
299 | static void nvm_end_io_sync(struct nvm_rq *rqd, int errors) | 299 | static void nvm_end_io_sync(struct nvm_rq *rqd) |
300 | { | 300 | { |
301 | struct completion *waiting = rqd->wait; | 301 | struct completion *waiting = rqd->wait; |
302 | 302 | ||
@@ -305,6 +305,49 @@ static void nvm_end_io_sync(struct nvm_rq *rqd, int errors) | |||
305 | complete(waiting); | 305 | complete(waiting); |
306 | } | 306 | } |
307 | 307 | ||
308 | int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, | ||
309 | int opcode, int flags, void *buf, int len) | ||
310 | { | ||
311 | DECLARE_COMPLETION_ONSTACK(wait); | ||
312 | struct nvm_rq rqd; | ||
313 | struct bio *bio; | ||
314 | int ret; | ||
315 | unsigned long hang_check; | ||
316 | |||
317 | bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL); | ||
318 | if (IS_ERR_OR_NULL(bio)) | ||
319 | return -ENOMEM; | ||
320 | |||
321 | memset(&rqd, 0, sizeof(struct nvm_rq)); | ||
322 | ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas); | ||
323 | if (ret) { | ||
324 | bio_put(bio); | ||
325 | return ret; | ||
326 | } | ||
327 | |||
328 | rqd.opcode = opcode; | ||
329 | rqd.bio = bio; | ||
330 | rqd.wait = &wait; | ||
331 | rqd.dev = dev; | ||
332 | rqd.end_io = nvm_end_io_sync; | ||
333 | rqd.flags = flags; | ||
334 | nvm_generic_to_addr_mode(dev, &rqd); | ||
335 | |||
336 | ret = dev->ops->submit_io(dev, &rqd); | ||
337 | |||
338 | /* Prevent hang_check timer from firing at us during very long I/O */ | ||
339 | hang_check = sysctl_hung_task_timeout_secs; | ||
340 | if (hang_check) | ||
341 | while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2))); | ||
342 | else | ||
343 | wait_for_completion_io(&wait); | ||
344 | |||
345 | nvm_free_rqd_ppalist(dev, &rqd); | ||
346 | |||
347 | return rqd.error; | ||
348 | } | ||
349 | EXPORT_SYMBOL(nvm_submit_ppa); | ||
350 | |||
308 | static int nvm_core_init(struct nvm_dev *dev) | 351 | static int nvm_core_init(struct nvm_dev *dev) |
309 | { | 352 | { |
310 | struct nvm_id *id = &dev->identity; | 353 | struct nvm_id *id = &dev->identity; |