commit 4e6da0fe8058df9bfa937902fcd9cb2b7b89b2df
tree   f7c02f970203896f6bbdc87da13dc935762c7d0c
parent 6d1f9dfde7343c4ebfb8f84dcb333af571bb3b22
Author:     Richard Weinberger <richard@nod.at>
AuthorDate: 2017-11-26 07:33:11 -0500
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: 2018-10-14 14:48:50 -0400
um: Convert ubd driver to blk-mq
Convert the driver to the modern blk-mq framework.
As a byproduct we get rid of our open-coded restart logic and let
blk-mq handle it.
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
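For readers new to blk-mq, the patch follows the standard registration
pattern: the driver fills in a struct blk_mq_tag_set (ops, queue depth,
per-request payload size), allocates the tag set, and builds a request queue
from it; blk-mq then invokes .queue_rq for each request and owns the
requeue/restart bookkeeping that the old driver tracked by hand. The sketch
below shows that pattern in isolation. It is not the ubd code itself: the
my_* names are hypothetical, and only the blk-mq calls (blk_mq_alloc_tag_set(),
blk_mq_init_queue(), blk_mq_rq_to_pdu(), blk_mq_start_request()) are real API
of this kernel era.

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* Per-request payload; blk-mq allocates cmd_size bytes per request. */
	struct my_pdu {
		int progress;
	};

	struct my_dev {
		struct blk_mq_tag_set tag_set;
		struct request_queue *queue;
	};

	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
	{
		struct request *req = bd->rq;
		struct my_pdu *pdu = blk_mq_rq_to_pdu(req);

		blk_mq_start_request(req);	/* the request is now ours */
		pdu->progress = 0;
		/* submit the I/O; complete later via blk_mq_end_request() */
		return BLK_STS_OK;
	}

	static const struct blk_mq_ops my_mq_ops = {
		.queue_rq = my_queue_rq,
	};

	static int my_setup_queue(struct my_dev *dev)
	{
		int err;

		dev->tag_set.ops = &my_mq_ops;
		dev->tag_set.nr_hw_queues = 1;
		dev->tag_set.queue_depth = 64;
		dev->tag_set.numa_node = NUMA_NO_NODE;
		dev->tag_set.cmd_size = sizeof(struct my_pdu);
		dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

		err = blk_mq_alloc_tag_set(&dev->tag_set);
		if (err)
			return err;

		dev->queue = blk_mq_init_queue(&dev->tag_set);
		if (IS_ERR(dev->queue)) {
			blk_mq_free_tag_set(&dev->tag_set);
			return PTR_ERR(dev->queue);
		}
		dev->queue->queuedata = dev;
		return 0;
	}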
Diffstat (limited to 'arch/um/drivers')
-rw-r--r--  arch/um/drivers/ubd_kern.c | 178
1 file changed, 93 insertions(+), 85 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 6ee4c56032f7..9cb0cabb4e02 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/ata.h>
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>
@@ -142,7 +143,6 @@ struct cow {
 #define MAX_SG 64
 
 struct ubd {
-	struct list_head restart;
 	/* name (and fd, below) of the file opened for writing, either the
 	 * backing or the cow file. */
 	char *file;
@@ -156,9 +156,12 @@ struct ubd {
 	struct cow cow;
 	struct platform_device pdev;
 	struct request_queue *queue;
+	struct blk_mq_tag_set tag_set;
 	spinlock_t lock;
+};
+
+struct ubd_pdu {
 	struct scatterlist sg[MAX_SG];
-	struct request *request;
 	int start_sg, end_sg;
 	sector_t rq_pos;
 };
@@ -182,10 +185,6 @@ struct ubd {
 	.shared = 0, \
 	.cow = DEFAULT_COW, \
 	.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
-	.request = NULL, \
-	.start_sg = 0, \
-	.end_sg = 0, \
-	.rq_pos = 0, \
 }
 
 /* Protected by ubd_lock */
@@ -196,6 +195,12 @@ static int fake_ide = 0;
 static struct proc_dir_entry *proc_ide_root = NULL;
 static struct proc_dir_entry *proc_ide = NULL;
 
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				 const struct blk_mq_queue_data *bd);
+static int ubd_init_request(struct blk_mq_tag_set *set,
+			    struct request *req, unsigned int hctx_idx,
+			    unsigned int numa_node);
+
 static void make_proc_ide(void)
 {
 	proc_ide_root = proc_mkdir("ide", NULL);
@@ -436,11 +441,8 @@ __uml_help(udb_setup,
 "    in the boot output.\n\n"
 );
 
-static void do_ubd_request(struct request_queue * q);
-
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-static LIST_HEAD(restart);
 
 /* Function to read several request pointers at a time
  * handling fractional reads if (and as) needed
@@ -498,9 +500,6 @@ static int bulk_req_safe_read(
 /* Called without dev->lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
-	struct ubd *ubd;
-	struct list_head *list, *next_ele;
-	unsigned long flags;
 	int n;
 	int count;
 
@@ -520,23 +519,17 @@ static void ubd_handler(void)
 			return;
 		}
 		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
-			blk_end_request(
-				(*irq_req_buffer)[count]->req,
-				BLK_STS_OK,
-				(*irq_req_buffer)[count]->length
-			);
-			kfree((*irq_req_buffer)[count]);
+			struct io_thread_req *io_req = (*irq_req_buffer)[count];
+			int err = io_req->error ? BLK_STS_IOERR : BLK_STS_OK;
+
+			if (!blk_update_request(io_req->req, err, io_req->length))
+				__blk_mq_end_request(io_req->req, err);
+
+			kfree(io_req);
 		}
 	}
-	reactivate_fd(thread_fd, UBD_IRQ);
 
-	list_for_each_safe(list, next_ele, &restart){
-		ubd = container_of(list, struct ubd, restart);
-		list_del_init(&ubd->restart);
-		spin_lock_irqsave(&ubd->lock, flags);
-		do_ubd_request(ubd->queue);
-		spin_unlock_irqrestore(&ubd->lock, flags);
-	}
+	reactivate_fd(thread_fd, UBD_IRQ);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
@@ -857,6 +850,7 @@ static void ubd_device_release(struct device *dev)
 	struct ubd *ubd_dev = dev_get_drvdata(dev);
 
 	blk_cleanup_queue(ubd_dev->queue);
+	blk_mq_free_tag_set(&ubd_dev->tag_set);
 	*ubd_dev = ((struct ubd) DEFAULT_UBD);
 }
 
@@ -899,6 +893,11 @@ static int ubd_disk_register(int major, u64 size, int unit,
 
 #define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9))
 
+static const struct blk_mq_ops ubd_mq_ops = {
+	.queue_rq = ubd_queue_rq,
+	.init_request = ubd_init_request,
+};
+
 static int ubd_add(int n, char **error_out)
 {
 	struct ubd *ubd_dev = &ubd_devs[n];
@@ -915,15 +914,24 @@ static int ubd_add(int n, char **error_out)
 
 	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
 
-	INIT_LIST_HEAD(&ubd_dev->restart);
-	sg_init_table(ubd_dev->sg, MAX_SG);
+	ubd_dev->tag_set.ops = &ubd_mq_ops;
+	ubd_dev->tag_set.queue_depth = 64;
+	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
+	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ubd_dev->tag_set.cmd_size = sizeof(struct ubd_pdu);
+	ubd_dev->tag_set.driver_data = ubd_dev;
+	ubd_dev->tag_set.nr_hw_queues = 1;
 
-	err = -ENOMEM;
-	ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
-	if (ubd_dev->queue == NULL) {
-		*error_out = "Failed to initialize device queue";
+	err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
+	if (err)
 		goto out;
+
+	ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
+	if (IS_ERR(ubd_dev->queue)) {
+		err = PTR_ERR(ubd_dev->queue);
+		goto out_cleanup;
 	}
+
 	ubd_dev->queue->queuedata = ubd_dev;
 	blk_queue_write_cache(ubd_dev->queue, true, false);
 
@@ -931,7 +939,7 @@ static int ubd_add(int n, char **error_out)
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
 	if(err){
 		*error_out = "Failed to register device";
-		goto out_cleanup;
+		goto out_cleanup_tags;
 	}
 
 	if (fake_major != UBD_MAJOR)
@@ -949,6 +957,8 @@ static int ubd_add(int n, char **error_out)
 out:
 	return err;
 
+out_cleanup_tags:
+	blk_mq_free_tag_set(&ubd_dev->tag_set);
 out_cleanup:
 	blk_cleanup_queue(ubd_dev->queue);
 	goto out;
@@ -1333,80 +1343,78 @@ static void prepare_flush_request(struct request *req,
 	io_req->op = UBD_FLUSH;
 }
 
-static bool submit_request(struct io_thread_req *io_req, struct ubd *dev)
+static void submit_request(struct io_thread_req *io_req, struct ubd *dev)
 {
 	int n = os_write_file(thread_fd, &io_req,
 			sizeof(io_req));
+
 	if (n != sizeof(io_req)) {
 		if (n != -EAGAIN)
-			printk("write to io thread failed, "
-			       "errno = %d\n", -n);
-		else if (list_empty(&dev->restart))
-			list_add(&dev->restart, &restart);
+			pr_err("write to io thread failed: %d\n", -n);
 
+		blk_mq_requeue_request(io_req->req, true);
 		kfree(io_req);
-		return false;
 	}
-	return true;
 }
 
-/* Called with dev->lock held */
-static void do_ubd_request(struct request_queue *q)
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				 const struct blk_mq_queue_data *bd)
 {
+	struct request *req = bd->rq;
+	struct ubd *dev = hctx->queue->queuedata;
+	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
 	struct io_thread_req *io_req;
-	struct request *req;
 
-	while(1){
-		struct ubd *dev = q->queuedata;
-		if(dev->request == NULL){
-			struct request *req = blk_fetch_request(q);
-			if(req == NULL)
-				return;
+	blk_mq_start_request(req);
+
+	pdu->rq_pos = blk_rq_pos(req);
+	pdu->start_sg = 0;
+	pdu->end_sg = blk_rq_map_sg(req->q, req, pdu->sg);
 
-			dev->request = req;
-			dev->rq_pos = blk_rq_pos(req);
-			dev->start_sg = 0;
-			dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
+	if (req_op(req) == REQ_OP_FLUSH) {
+		io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+		if (io_req == NULL) {
+			blk_mq_requeue_request(req, true);
+			goto done;
 		}
+		prepare_flush_request(req, io_req);
+		submit_request(io_req, dev);
 
-		req = dev->request;
+		goto done;
+	}
 
-		if (req_op(req) == REQ_OP_FLUSH) {
-			io_req = kmalloc(sizeof(struct io_thread_req),
-					 GFP_ATOMIC);
-			if (io_req == NULL) {
-				if (list_empty(&dev->restart))
-					list_add(&dev->restart, &restart);
-				return;
-			}
-			prepare_flush_request(req, io_req);
-			if (submit_request(io_req, dev) == false)
-				return;
+	while (pdu->start_sg < pdu->end_sg) {
+		struct scatterlist *sg = &pdu->sg[pdu->start_sg];
+
+		io_req = kmalloc(sizeof(struct io_thread_req),
+				 GFP_ATOMIC);
+		if (io_req == NULL) {
+			blk_mq_requeue_request(req, true);
+			goto done;
 		}
+		prepare_request(req, io_req,
+				(unsigned long long)pdu->rq_pos << 9,
+				sg->offset, sg->length, sg_page(sg));
 
-		while(dev->start_sg < dev->end_sg){
-			struct scatterlist *sg = &dev->sg[dev->start_sg];
+		submit_request(io_req, dev);
 
-			io_req = kmalloc(sizeof(struct io_thread_req),
-					 GFP_ATOMIC);
-			if(io_req == NULL){
-				if(list_empty(&dev->restart))
-					list_add(&dev->restart, &restart);
-				return;
-			}
-			prepare_request(req, io_req,
-					(unsigned long long)dev->rq_pos << 9,
-					sg->offset, sg->length, sg_page(sg));
+		pdu->rq_pos += sg->length >> 9;
+		pdu->start_sg++;
+	}
 
-			if (submit_request(io_req, dev) == false)
-				return;
+done:
+	return BLK_STS_OK;
+}
 
-			dev->rq_pos += sg->length >> 9;
-			dev->start_sg++;
-		}
-		dev->end_sg = 0;
-		dev->request = NULL;
-	}
+static int ubd_init_request(struct blk_mq_tag_set *set,
+			    struct request *req, unsigned int hctx_idx,
+			    unsigned int numa_node)
+{
+	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+	sg_init_table(pdu->sg, MAX_SG);
+
+	return 0;
 }
 
 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
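A note on the completion path in ubd_handler() above: each io_thread_req
covers a single scatter-gather segment, so a request is finished piecewise
from interrupt context. blk_update_request() consumes the completed bytes and
returns false once the request has nothing left pending, at which point the
whole request is ended with __blk_mq_end_request(). The helper below is a
sketch of that idiom; the function name is illustrative and not part of the
patch.

	/* Illustrative helper: retire one completed segment of a request. */
	static void my_complete_segment(struct request *req, blk_status_t err,
					unsigned int bytes)
	{
		if (!blk_update_request(req, err, bytes))
			__blk_mq_end_request(req, err);
	}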