-rw-r--r--	drivers/block/loop.c	327
-rw-r--r--	drivers/block/loop.h	17
2 files changed, 174 insertions(+), 170 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6cb1beb47c25..c678eb24a7b5 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include <linux/blk-mq.h>
 #include "loop.h"
 
 #include <asm/uaccess.h>
@@ -85,6 +86,8 @@ static DEFINE_MUTEX(loop_index_mutex);
 static int max_part;
 static int part_shift;
 
+static struct workqueue_struct *loop_wq;
+
 /*
  * Transfer functions
  */
@@ -466,109 +469,36 @@ out:
 	return ret;
 }
 
-/*
- * Add bio to back of pending list
- */
-static void loop_add_bio(struct loop_device *lo, struct bio *bio)
-{
-	lo->lo_bio_count++;
-	bio_list_add(&lo->lo_bio_list, bio);
-}
-
-/*
- * Grab first pending buffer
- */
-static struct bio *loop_get_bio(struct loop_device *lo)
-{
-	lo->lo_bio_count--;
-	return bio_list_pop(&lo->lo_bio_list);
-}
-
-static void loop_make_request(struct request_queue *q, struct bio *old_bio)
-{
-	struct loop_device *lo = q->queuedata;
-	int rw = bio_rw(old_bio);
-
-	if (rw == READA)
-		rw = READ;
-
-	BUG_ON(!lo || (rw != READ && rw != WRITE));
-
-	spin_lock_irq(&lo->lo_lock);
-	if (lo->lo_state != Lo_bound)
-		goto out;
-	if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
-		goto out;
-	if (lo->lo_bio_count >= q->nr_congestion_on)
-		wait_event_lock_irq(lo->lo_req_wait,
-				    lo->lo_bio_count < q->nr_congestion_off,
-				    lo->lo_lock);
-	loop_add_bio(lo, old_bio);
-	wake_up(&lo->lo_event);
-	spin_unlock_irq(&lo->lo_lock);
-	return;
-
-out:
-	spin_unlock_irq(&lo->lo_lock);
-	bio_io_error(old_bio);
-}
-
 struct switch_request {
 	struct file *file;
 	struct completion wait;
 };
 
-static void do_loop_switch(struct loop_device *, struct switch_request *);
-
-static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
+static inline int loop_handle_bio(struct loop_device *lo, struct bio *bio)
 {
-	if (unlikely(!bio->bi_bdev)) {
-		do_loop_switch(lo, bio->bi_private);
-		bio_put(bio);
-	} else {
-		int ret = do_bio_filebacked(lo, bio);
-		bio_endio(bio, ret);
-	}
+	return do_bio_filebacked(lo, bio);
 }
 
 /*
- * worker thread that handles reads/writes to file backed loop devices,
- * to avoid blocking in our make_request_fn. it also does loop decrypting
- * on reads for block backed loop, as that is too heavy to do from
- * b_end_io context where irqs may be disabled.
- *
- * Loop explanation: loop_clr_fd() sets lo_state to Lo_rundown before
- * calling kthread_stop(). Therefore once kthread_should_stop() is
- * true, make_request will not place any more requests. Therefore
- * once kthread_should_stop() is true and lo_bio is NULL, we are
- * done with the loop.
+ * Do the actual switch; called from the BIO completion routine
  */
-static int loop_thread(void *data)
+static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
 {
-	struct loop_device *lo = data;
-	struct bio *bio;
-
-	set_user_nice(current, MIN_NICE);
-
-	while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
-
-		wait_event_interruptible(lo->lo_event,
-				!bio_list_empty(&lo->lo_bio_list) ||
-				kthread_should_stop());
-
-		if (bio_list_empty(&lo->lo_bio_list))
-			continue;
-		spin_lock_irq(&lo->lo_lock);
-		bio = loop_get_bio(lo);
-		if (lo->lo_bio_count < lo->lo_queue->nr_congestion_off)
-			wake_up(&lo->lo_req_wait);
-		spin_unlock_irq(&lo->lo_lock);
+	struct file *file = p->file;
+	struct file *old_file = lo->lo_backing_file;
+	struct address_space *mapping;
 
-		BUG_ON(!bio);
-		loop_handle_bio(lo, bio);
-	}
+	/* if no new file, only flush of queued bios requested */
+	if (!file)
+		return;
 
-	return 0;
+	mapping = file->f_mapping;
+	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+	lo->lo_backing_file = file;
+	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
+		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
+	lo->old_gfp_mask = mapping_gfp_mask(mapping);
+	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 }
 
 /*
@@ -579,15 +509,18 @@ static int loop_thread(void *data)
 static int loop_switch(struct loop_device *lo, struct file *file)
 {
 	struct switch_request w;
-	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
-	if (!bio)
-		return -ENOMEM;
-	init_completion(&w.wait);
+
 	w.file = file;
-	bio->bi_private = &w;
-	bio->bi_bdev = NULL;
-	loop_make_request(lo->lo_queue, bio);
-	wait_for_completion(&w.wait);
+
+	/* freeze queue and wait for completion of scheduled requests */
+	blk_mq_freeze_queue(lo->lo_queue);
+
+	/* do the switch action */
+	do_loop_switch(lo, &w);
+
+	/* unfreeze */
+	blk_mq_unfreeze_queue(lo->lo_queue);
+
 	return 0;
 }
 
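[Note] The rewritten loop_switch() above drops the old trick of threading a magic zero-size bio through the I/O path and instead drains the queue with blk-mq's freeze/unfreeze primitives. A minimal sketch of that drain pattern, with the loop-specific state change reduced to a hypothetical callback (swap_backing_state, update and data are illustrative names, not from this patch):

	#include <linux/blk-mq.h>

	/*
	 * Sketch: mutate driver state with the queue frozen.
	 * blk_mq_freeze_queue() waits for all in-flight requests to
	 * complete and keeps new ones out of ->queue_rq(), so the
	 * update runs with no I/O racing against it.
	 */
	static void swap_backing_state(struct request_queue *q,
				       void (*update)(void *), void *data)
	{
		blk_mq_freeze_queue(q);		/* drain and block the queue */
		update(data);			/* no requests in flight here */
		blk_mq_unfreeze_queue(q);	/* resume dispatch */
	}
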
@@ -596,39 +529,10 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
-	/* loop not yet configured, no running thread, nothing to flush */
-	if (!lo->lo_thread)
-		return 0;
-
 	return loop_switch(lo, NULL);
 }
 
 /*
- * Do the actual switch; called from the BIO completion routine
- */
-static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
-{
-	struct file *file = p->file;
-	struct file *old_file = lo->lo_backing_file;
-	struct address_space *mapping;
-
-	/* if no new file, only flush of queued bios requested */
-	if (!file)
-		goto out;
-
-	mapping = file->f_mapping;
-	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
-	lo->lo_backing_file = file;
-	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
-		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
-	lo->old_gfp_mask = mapping_gfp_mask(mapping);
-	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
-out:
-	complete(&p->wait);
-}
-
-
-/*
  * loop_change_fd switched the backing store of a loopback device to
  * a new file. This is useful for operating system installers to free up
  * the original file and in High Availability environments to switch to
@@ -889,12 +793,9 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	lo->transfer = transfer_none;
 	lo->ioctl = NULL;
 	lo->lo_sizelimit = 0;
-	lo->lo_bio_count = 0;
 	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 
-	bio_list_init(&lo->lo_bio_list);
-
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 
@@ -906,14 +807,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
 	set_blocksize(bdev, lo_blocksize);
 
-	lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
-						lo->lo_number);
-	if (IS_ERR(lo->lo_thread)) {
-		error = PTR_ERR(lo->lo_thread);
-		goto out_clr;
-	}
 	lo->lo_state = Lo_bound;
-	wake_up_process(lo->lo_thread);
 	if (part_shift)
 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
 	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
@@ -925,18 +819,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	bdgrab(bdev);
 	return 0;
 
-out_clr:
-	loop_sysfs_exit(lo);
-	lo->lo_thread = NULL;
-	lo->lo_device = NULL;
-	lo->lo_backing_file = NULL;
-	lo->lo_flags = 0;
-	set_capacity(lo->lo_disk, 0);
-	invalidate_bdev(bdev);
-	bd_set_size(bdev, 0);
-	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
-	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
-	lo->lo_state = Lo_unbound;
 out_putf:
 	fput(file);
 out:
@@ -1012,11 +894,6 @@ static int loop_clr_fd(struct loop_device *lo)
 
 	spin_lock_irq(&lo->lo_lock);
 	lo->lo_state = Lo_rundown;
-	spin_unlock_irq(&lo->lo_lock);
-
-	kthread_stop(lo->lo_thread);
-
-	spin_lock_irq(&lo->lo_lock);
 	lo->lo_backing_file = NULL;
 	spin_unlock_irq(&lo->lo_lock);
 
@@ -1028,7 +905,6 @@ static int loop_clr_fd(struct loop_device *lo)
 	lo->lo_offset = 0;
 	lo->lo_sizelimit = 0;
 	lo->lo_encrypt_key_size = 0;
-	lo->lo_thread = NULL;
 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -1601,6 +1477,108 @@ int loop_unregister_transfer(int number)
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);
 
+static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+		const struct blk_mq_queue_data *bd)
+{
+	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+
+	blk_mq_start_request(bd->rq);
+
+	if (cmd->rq->cmd_flags & REQ_WRITE) {
+		struct loop_device *lo = cmd->rq->q->queuedata;
+		bool need_sched = true;
+
+		spin_lock_irq(&lo->lo_lock);
+		if (lo->write_started)
+			need_sched = false;
+		else
+			lo->write_started = true;
+		list_add_tail(&cmd->list, &lo->write_cmd_head);
+		spin_unlock_irq(&lo->lo_lock);
+
+		if (need_sched)
+			queue_work(loop_wq, &lo->write_work);
+	} else {
+		queue_work(loop_wq, &cmd->read_work);
+	}
+
+	return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static void loop_handle_cmd(struct loop_cmd *cmd)
+{
+	const bool write = cmd->rq->cmd_flags & REQ_WRITE;
+	struct loop_device *lo = cmd->rq->q->queuedata;
+	int ret = -EIO;
+	struct bio *bio;
+
+	if (lo->lo_state != Lo_bound)
+		goto failed;
+
+	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+		goto failed;
+
+	ret = 0;
+	__rq_for_each_bio(bio, cmd->rq)
+		ret |= loop_handle_bio(lo, bio);
+
+ failed:
+	if (ret)
+		cmd->rq->errors = -EIO;
+	blk_mq_complete_request(cmd->rq);
+}
+
+static void loop_queue_write_work(struct work_struct *work)
+{
+	struct loop_device *lo =
+		container_of(work, struct loop_device, write_work);
+	LIST_HEAD(cmd_list);
+
+	spin_lock_irq(&lo->lo_lock);
+ repeat:
+	list_splice_init(&lo->write_cmd_head, &cmd_list);
+	spin_unlock_irq(&lo->lo_lock);
+
+	while (!list_empty(&cmd_list)) {
+		struct loop_cmd *cmd = list_first_entry(&cmd_list,
+				struct loop_cmd, list);
+		list_del_init(&cmd->list);
+		loop_handle_cmd(cmd);
+	}
+
+	spin_lock_irq(&lo->lo_lock);
+	if (!list_empty(&lo->write_cmd_head))
+		goto repeat;
+	lo->write_started = false;
+	spin_unlock_irq(&lo->lo_lock);
+}
+
+static void loop_queue_read_work(struct work_struct *work)
+{
+	struct loop_cmd *cmd =
+		container_of(work, struct loop_cmd, read_work);
+
+	loop_handle_cmd(cmd);
+}
+
+static int loop_init_request(void *data, struct request *rq,
+		unsigned int hctx_idx, unsigned int request_idx,
+		unsigned int numa_node)
+{
+	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+	cmd->rq = rq;
+	INIT_WORK(&cmd->read_work, loop_queue_read_work);
+
+	return 0;
+}
+
+static struct blk_mq_ops loop_mq_ops = {
+	.queue_rq       = loop_queue_rq,
+	.map_queue      = blk_mq_map_queue,
+	.init_request	= loop_init_request,
+};
+
 static int loop_add(struct loop_device **l, int i)
 {
 	struct loop_device *lo;
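[Note] This hunk is the heart of the conversion: loop_cmd lives in each request's per-driver payload (sized by tag_set.cmd_size in the next hunk), so there is no per-I/O allocation, and writes are funneled through a single per-device work item while each read gets its own. A hedged sketch of the payload pattern in isolation (my_cmd and my_queue_rq are illustrative names, not from this patch):

	/*
	 * Sketch: blk-mq reserves cmd_size extra bytes behind every
	 * struct request; blk_mq_rq_to_pdu() returns that area, so a
	 * driver-private command costs nothing extra per I/O.
	 */
	struct my_cmd {
		struct request *rq;
	};

	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
			       const struct blk_mq_queue_data *bd)
	{
		struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

		cmd->rq = bd->rq;
		blk_mq_start_request(bd->rq);
		/* hand cmd off to a worker, as loop does above */
		return BLK_MQ_RQ_QUEUE_OK;
	}
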
@@ -1627,16 +1605,28 @@ static int loop_add(struct loop_device **l, int i)
 	i = err;
 
 	err = -ENOMEM;
-	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
-	if (!lo->lo_queue)
+	lo->tag_set.ops = &loop_mq_ops;
+	lo->tag_set.nr_hw_queues = 1;
+	lo->tag_set.queue_depth = 128;
+	lo->tag_set.numa_node = NUMA_NO_NODE;
+	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
+	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+	lo->tag_set.driver_data = lo;
+
+	err = blk_mq_alloc_tag_set(&lo->tag_set);
+	if (err)
 		goto out_free_idr;
 
-	/*
-	 * set queue make_request_fn
-	 */
-	blk_queue_make_request(lo->lo_queue, loop_make_request);
+	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
+	if (IS_ERR_OR_NULL(lo->lo_queue)) {
+		err = PTR_ERR(lo->lo_queue);
+		goto out_cleanup_tags;
+	}
 	lo->lo_queue->queuedata = lo;
 
+	INIT_LIST_HEAD(&lo->write_cmd_head);
+	INIT_WORK(&lo->write_work, loop_queue_write_work);
+
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);
 	if (!disk)
 		goto out_free_queue;
@@ -1664,9 +1654,6 @@ static int loop_add(struct loop_device **l, int i)
 	disk->flags |= GENHD_FL_EXT_DEVT;
 	mutex_init(&lo->lo_ctl_mutex);
 	lo->lo_number		= i;
-	lo->lo_thread		= NULL;
-	init_waitqueue_head(&lo->lo_event);
-	init_waitqueue_head(&lo->lo_req_wait);
 	spin_lock_init(&lo->lo_lock);
 	disk->major		= LOOP_MAJOR;
 	disk->first_minor	= i << part_shift;
@@ -1680,6 +1667,8 @@ static int loop_add(struct loop_device **l, int i)
 
 out_free_queue:
 	blk_cleanup_queue(lo->lo_queue);
+out_cleanup_tags:
+	blk_mq_free_tag_set(&lo->tag_set);
 out_free_idr:
 	idr_remove(&loop_index_idr, i);
 out_free_dev:
@@ -1692,6 +1681,7 @@ static void loop_remove(struct loop_device *lo)
 {
 	del_gendisk(lo->lo_disk);
 	blk_cleanup_queue(lo->lo_queue);
+	blk_mq_free_tag_set(&lo->tag_set);
 	put_disk(lo->lo_disk);
 	kfree(lo);
 }
@@ -1875,6 +1865,13 @@ static int __init loop_init(void)
 		goto misc_out;
 	}
 
+	loop_wq = alloc_workqueue("kloopd",
+			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
+	if (!loop_wq) {
+		err = -ENOMEM;
+		goto misc_out;
+	}
+
 	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
 				  THIS_MODULE, loop_probe, NULL, NULL);
 
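[Note] The flag choice above matters: a loop device can sit in the writeback path, so its worker must make forward progress during memory reclaim, and WQ_MEM_RECLAIM guarantees a rescuer thread for exactly that case, while WQ_UNBOUND lets work run on any CPU. A small sketch of the same allocation pattern (the demo names are hypothetical):

	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;

	static int __init demo_init(void)
	{
		/* WQ_MEM_RECLAIM keeps a rescuer thread so queued work
		 * can still run when new kworkers cannot be spawned */
		demo_wq = alloc_workqueue("demo_wq",
				WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
		return demo_wq ? 0 : -ENOMEM;
	}
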
@@ -1912,6 +1909,8 @@ static void __exit loop_exit(void)
 	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
 	unregister_blkdev(LOOP_MAJOR, "loop");
 
+	destroy_workqueue(loop_wq);
+
 	misc_deregister(&loop_misc);
 }
 
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 90df5d6485b6..e20cdbbff7d5 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/workqueue.h>
 #include <uapi/linux/loop.h>
 
 /* Possible states of device */
@@ -52,19 +53,23 @@ struct loop_device {
 	gfp_t		old_gfp_mask;
 
 	spinlock_t		lo_lock;
-	struct bio_list		lo_bio_list;
-	unsigned int		lo_bio_count;
+	struct list_head	write_cmd_head;
+	struct work_struct	write_work;
+	bool			write_started;
 	int			lo_state;
 	struct mutex		lo_ctl_mutex;
-	struct task_struct	*lo_thread;
-	wait_queue_head_t	lo_event;
-	/* wait queue for incoming requests */
-	wait_queue_head_t	lo_req_wait;
 
 	struct request_queue	*lo_queue;
+	struct blk_mq_tag_set	tag_set;
 	struct gendisk		*lo_disk;
 };
 
+struct loop_cmd {
+	struct work_struct read_work;
+	struct request *rq;
+	struct list_head list;
+};
+
 /* Support for loadable transfer modules */
 struct loop_func_table {
 	int number;	/* filter type */
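
[Note] The new loop.h fields make the dispatch policy explicit: one work_struct per command for reads, but a single shared write_work plus write_cmd_head list per device, so writes stay ordered against the backing file. A standalone sketch of that single-consumer funnel (wr_queue and its helpers are illustrative, not part of the patch):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct wr_queue {
		spinlock_t		lock;
		struct list_head	head;		/* pending writes */
		bool			running;	/* worker scheduled? */
		struct work_struct	work;		/* single consumer */
	};

	/*
	 * Producer: append a command; only the first submitter on an
	 * idle queue schedules the worker, so writes are consumed by
	 * at most one context at a time and keep submission order.
	 */
	static void wr_submit(struct wr_queue *q, struct list_head *cmd)
	{
		bool need_sched;

		spin_lock_irq(&q->lock);
		need_sched = !q->running;
		q->running = true;
		list_add_tail(cmd, &q->head);
		spin_unlock_irq(&q->lock);

		if (need_sched)
			schedule_work(&q->work);
	}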