author		Richard Weinberger <richard@nod.at>	2015-01-10 16:52:14 -0500
committer	Richard Weinberger <richard@nod.at>	2015-01-28 10:05:20 -0500
commit		ff1f48ee3bb3af226f1f8993af0103794b4d4eab (patch)
tree		f0a8541666b2b64a5d90cde7b310f50cbd2ce5e5
parent		9ff08979e17423f0f691c1d76f35dfec72a5e459 (diff)
UBI: Block: Add blk-mq support
Convert the driver to blk-mq.
Besides moving to the modern block interface, this change also boosts
the driver's performance.
nand: device found, Manufacturer ID: 0x2c, Chip ID: 0xda
nand: Micron NAND 256MiB 3,3V 8-bit
nand: 256MiB, SLC, page size: 2048, OOB size: 64
root@debian-armhf:~# dd if=/dev/ubiblock0_0 of=/dev/zero bs=1M
243+1 records in
243+1 records out
255080448 bytes (255 MB) copied, 4.39295 s, 58.1 MB/s
vs. (after the conversion to blk-mq):
root@debian-armhf:~# dd if=/dev/ubiblock0_0 of=/dev/zero bs=1M
243+1 records in
243+1 records out
255080448 bytes (255 MB) copied, 2.87676 s, 88.7 MB/s
Cc: hch@infradead.org
Cc: axboe@fb.com
Cc: tom.leiming@gmail.com
Signed-off-by: Richard Weinberger <richard@nod.at>
Tested-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
Reviewed-by: Jens Axboe <axboe@fb.com>
Acked-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
 drivers/mtd/ubi/block.c | 202
 1 file changed, 94 insertions(+), 108 deletions(-)
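The core of the conversion is the standard blk-mq pattern: register a tag set
whose cmd_size makes blk-mq preallocate a per-request payload (here
struct ubiblock_pdu), accept requests in a ->queue_rq() hook, and complete
them with blk_mq_end_request(). Since UBI reads sleep, the driver cannot
service a request inside ->queue_rq() and instead defers it to a workqueue.
A minimal sketch of that pattern, condensed from the patch below against the
3.19-era blk-mq API (the my_* names are placeholders, not driver symbols):

	#include <linux/kernel.h>
	#include <linux/blk-mq.h>
	#include <linux/workqueue.h>

	/* Assumed to be created elsewhere, e.g. with alloc_workqueue() */
	static struct workqueue_struct *my_wq;

	/* Per-request context; blk-mq preallocates one per request because
	 * the tag set is registered with .cmd_size = sizeof(struct my_pdu). */
	struct my_pdu {
		struct work_struct work;
	};

	/* Runs in process context, so sleeping I/O (like UBI reads) is fine */
	static void my_do_work(struct work_struct *work)
	{
		struct my_pdu *pdu = container_of(work, struct my_pdu, work);
		struct request *req = blk_mq_rq_from_pdu(pdu);
		int ret = 0;

		blk_mq_start_request(req);
		/* ... perform the actual transfer into the request's pages ... */
		blk_mq_end_request(req, ret);
	}

	/* ->queue_rq() is no place for sleeping I/O; only hand the request off */
	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
			       const struct blk_mq_queue_data *bd)
	{
		struct my_pdu *pdu = blk_mq_rq_to_pdu(bd->rq);

		queue_work(my_wq, &pdu->work);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	/* Called once per preallocated request when the tag set is created */
	static int my_init_request(void *data, struct request *req,
				   unsigned int hctx_idx,
				   unsigned int request_idx,
				   unsigned int numa_node)
	{
		struct my_pdu *pdu = blk_mq_rq_to_pdu(req);

		INIT_WORK(&pdu->work, my_do_work);
		return 0;
	}

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq     = my_queue_rq,
		.init_request = my_init_request,
		.map_queue    = blk_mq_map_queue, /* single hardware queue */
	};

The driver's own work handler additionally maps the request onto a UBI
scatter-gather list (blk_rq_map_sg() into pdu->usgl.sg) so that ubi_read_sg()
can fill the request's pages in one pass, instead of the old per-segment
__blk_end_request_cur() loop — presumably where much of the throughput gain
above comes from.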
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 6b6bce28bd63..00caf460337e 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -42,11 +42,12 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/mtd/ubi.h>
 #include <linux/workqueue.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/hdreg.h>
+#include <linux/scatterlist.h>
 #include <asm/div64.h>
 
 #include "ubi-media.h"
@@ -67,6 +68,11 @@ struct ubiblock_param {
 	char name[UBIBLOCK_PARAM_LEN+1];
 };
 
+struct ubiblock_pdu {
+	struct work_struct work;
+	struct ubi_sgl usgl;
+};
+
 /* Numbers of elements set in the @ubiblock_param array */
 static int ubiblock_devs __initdata;
 
@@ -84,11 +90,10 @@ struct ubiblock {
 	struct request_queue *rq;
 
 	struct workqueue_struct *wq;
-	struct work_struct work;
 
 	struct mutex dev_mutex;
-	spinlock_t queue_lock;
 	struct list_head list;
+	struct blk_mq_tag_set tag_set;
 };
 
 /* Linked list of all ubiblock instances */
@@ -181,31 +186,20 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
 	return NULL;
 }
 
-static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
-				int leb, int offset, int len)
+static int ubiblock_read(struct ubiblock_pdu *pdu)
 {
-	int ret;
+	int ret, leb, offset, bytes_left, to_read;
+	u64 pos;
+	struct request *req = blk_mq_rq_from_pdu(pdu);
+	struct ubiblock *dev = req->q->queuedata;
 
-	ret = ubi_read(dev->desc, leb, buffer, offset, len);
-	if (ret) {
-		dev_err(disk_to_dev(dev->gd), "%d while reading from LEB %d (offset %d, length %d)",
-			ret, leb, offset, len);
-		return ret;
-	}
-	return 0;
-}
-
-static int ubiblock_read(struct ubiblock *dev, char *buffer,
-			 sector_t sec, int len)
-{
-	int ret, leb, offset;
-	int bytes_left = len;
-	int to_read = len;
-	u64 pos = sec << 9;
+	to_read = blk_rq_bytes(req);
+	pos = blk_rq_pos(req) << 9;
 
 	/* Get LEB:offset address to read from */
 	offset = do_div(pos, dev->leb_size);
 	leb = pos;
+	bytes_left = to_read;
 
 	while (bytes_left) {
 		/*
@@ -215,11 +209,10 @@ static int ubiblock_read(struct ubiblock *dev, char *buffer,
 		if (offset + to_read > dev->leb_size)
 			to_read = dev->leb_size - offset;
 
-		ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
-		if (ret)
+		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
+		if (ret < 0)
 			return ret;
 
-		buffer += to_read;
 		bytes_left -= to_read;
 		to_read = bytes_left;
 		leb += 1;
@@ -228,79 +221,6 @@ static int ubiblock_read(struct ubiblock *dev, char *buffer,
 	return 0;
 }
 
-static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
-{
-	int len, ret;
-	sector_t sec;
-
-	if (req->cmd_type != REQ_TYPE_FS)
-		return -EIO;
-
-	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
-	    get_capacity(req->rq_disk))
-		return -EIO;
-
-	if (rq_data_dir(req) != READ)
-		return -ENOSYS; /* Write not implemented */
-
-	sec = blk_rq_pos(req);
-	len = blk_rq_cur_bytes(req);
-
-	/*
-	 * Let's prevent the device from being removed while we're doing I/O
-	 * work. Notice that this means we serialize all the I/O operations,
-	 * but it's probably of no impact given the NAND core serializes
-	 * flash access anyway.
-	 */
-	mutex_lock(&dev->dev_mutex);
-	ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
-	mutex_unlock(&dev->dev_mutex);
-
-	return ret;
-}
-
-static void ubiblock_do_work(struct work_struct *work)
-{
-	struct ubiblock *dev =
-		container_of(work, struct ubiblock, work);
-	struct request_queue *rq = dev->rq;
-	struct request *req;
-	int res;
-
-	spin_lock_irq(rq->queue_lock);
-
-	req = blk_fetch_request(rq);
-	while (req) {
-
-		spin_unlock_irq(rq->queue_lock);
-		res = do_ubiblock_request(dev, req);
-		spin_lock_irq(rq->queue_lock);
-
-		/*
-		 * If we're done with this request,
-		 * we need to fetch a new one
-		 */
-		if (!__blk_end_request_cur(req, res))
-			req = blk_fetch_request(rq);
-	}
-
-	spin_unlock_irq(rq->queue_lock);
-}
-
-static void ubiblock_request(struct request_queue *rq)
-{
-	struct ubiblock *dev;
-	struct request *req;
-
-	dev = rq->queuedata;
-
-	if (!dev)
-		while ((req = blk_fetch_request(rq)) != NULL)
-			__blk_end_request_all(req, -ENODEV);
-	else
-		queue_work(dev->wq, &dev->work);
-}
-
 static int ubiblock_open(struct block_device *bdev, fmode_t mode)
 {
 	struct ubiblock *dev = bdev->bd_disk->private_data;
@@ -374,6 +294,57 @@ static const struct block_device_operations ubiblock_ops = {
 	.getgeo	= ubiblock_getgeo,
 };
 
+static void ubiblock_do_work(struct work_struct *work)
+{
+	int ret;
+	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
+	struct request *req = blk_mq_rq_from_pdu(pdu);
+
+	blk_mq_start_request(req);
+	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+
+	ret = ubiblock_read(pdu);
+	blk_mq_end_request(req, ret);
+}
+
+static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+			     const struct blk_mq_queue_data *bd)
+{
+	struct request *req = bd->rq;
+	struct ubiblock *dev = hctx->queue->queuedata;
+	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+	if (req->cmd_type != REQ_TYPE_FS)
+		return BLK_MQ_RQ_QUEUE_ERROR;
+
+	if (rq_data_dir(req) != READ)
+		return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
+
+	ubi_sgl_init(&pdu->usgl);
+	queue_work(dev->wq, &pdu->work);
+
+	return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static int ubiblock_init_request(void *data, struct request *req,
+				 unsigned int hctx_idx,
+				 unsigned int request_idx,
+				 unsigned int numa_node)
+{
+	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
+	INIT_WORK(&pdu->work, ubiblock_do_work);
+
+	return 0;
+}
+
+static struct blk_mq_ops ubiblock_mq_ops = {
+	.queue_rq       = ubiblock_queue_rq,
+	.init_request   = ubiblock_init_request,
+	.map_queue      = blk_mq_map_queue,
+};
+
 int ubiblock_create(struct ubi_volume_info *vi)
 {
 	struct ubiblock *dev;
@@ -417,13 +388,27 @@ int ubiblock_create(struct ubi_volume_info *vi)
 	set_capacity(gd, disk_capacity);
 	dev->gd = gd;
 
-	spin_lock_init(&dev->queue_lock);
-	dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
+	dev->tag_set.ops = &ubiblock_mq_ops;
+	dev->tag_set.queue_depth = 64;
+	dev->tag_set.numa_node = NUMA_NO_NODE;
+	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
+	dev->tag_set.driver_data = dev;
+	dev->tag_set.nr_hw_queues = 1;
+
+	ret = blk_mq_alloc_tag_set(&dev->tag_set);
+	if (ret) {
+		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
+		goto out_put_disk;
+	}
+
+	dev->rq = blk_mq_init_queue(&dev->tag_set);
 	if (!dev->rq) {
-		dev_err(disk_to_dev(gd), "blk_init_queue failed");
+		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
 		ret = -ENODEV;
-		goto out_put_disk;
+		goto out_free_tags;
 	}
+	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
 
 	dev->rq->queuedata = dev;
 	dev->gd->queue = dev->rq;
@@ -437,7 +422,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
 		ret = -ENOMEM;
 		goto out_free_queue;
 	}
-	INIT_WORK(&dev->work, ubiblock_do_work);
 
 	mutex_lock(&devices_mutex);
 	list_add_tail(&dev->list, &ubiblock_devices);
@@ -451,6 +435,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
 
 out_free_queue:
 	blk_cleanup_queue(dev->rq);
+out_free_tags:
+	blk_mq_free_tag_set(&dev->tag_set);
 out_put_disk:
 	put_disk(dev->gd);
 out_free_dev:
@@ -461,8 +447,13 @@ out_free_dev:
 
 static void ubiblock_cleanup(struct ubiblock *dev)
 {
+	/* Stop new requests to arrive */
 	del_gendisk(dev->gd);
+	/* Flush pending work */
+	destroy_workqueue(dev->wq);
+	/* Finally destroy the blk queue */
 	blk_cleanup_queue(dev->rq);
+	blk_mq_free_tag_set(&dev->tag_set);
 	dev_info(disk_to_dev(dev->gd), "released");
 	put_disk(dev->gd);
 }
@@ -490,9 +481,6 @@ int ubiblock_remove(struct ubi_volume_info *vi)
 	list_del(&dev->list);
 	mutex_unlock(&devices_mutex);
 
-	/* Flush pending work and stop this workqueue */
-	destroy_workqueue(dev->wq);
-
 	ubiblock_cleanup(dev);
 	mutex_unlock(&dev->dev_mutex);
 	kfree(dev);
@@ -620,8 +608,6 @@ static void ubiblock_remove_all(void)
 	struct ubiblock *dev;
 
 	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
-		/* Flush pending work and stop workqueue */
-		destroy_workqueue(dev->wq);
 		/* The module is being forcefully removed */
 		WARN_ON(dev->desc);
 		/* Remove from device list */
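One teardown detail worth noting: destroy_workqueue() moves out of
ubiblock_remove() and ubiblock_remove_all() and into ubiblock_cleanup(),
between del_gendisk() and blk_cleanup_queue(). Per the comments added there,
the order is deliberate: del_gendisk() stops new requests from arriving,
destroy_workqueue() then flushes any reads still in flight, and only after
that are the request queue and the tag set torn down.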