path: root/drivers/block
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-08 17:12:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-08 17:12:17 -0500
commit	80201fe175cbf7f3e372f53eba0a881a702ad926 (patch)
tree	8026c68d52763614268a9c3c80759ad386bd5967 /drivers/block
parent	4221b807d1f73c03d22543416d303b60a5d1ef31 (diff)
parent	aaeee62c841cc1e48231e1d60c304d2da9c4e41c (diff)
Merge tag 'for-5.1/block-20190302' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
 "Not a huge amount of changes in this round, the biggest one is that we
  finally have Ming's multi-page bvec support merged. Apart from that,
  this pull request contains:

   - Small series that avoids quiescing the queue for sysfs changes
     that match what we currently have (Aleksei)

   - Series of bcache fixes (via Coly)

   - Series of lightnvm fixes (via Mathias)

   - NVMe pull request from Christoph. Nothing major, just SPDX/license
     cleanups, RR mp policy (Hannes), and little fixes (Bart, Chaitanya).

   - BFQ series (Paolo)

   - Save blk-mq cpu -> hw queue mapping, removing a pointer
     indirection for the fast path (Jianchao)

   - fops->iopoll() added for async IO polling, this is a feature that
     the upcoming io_uring interface will use (Christoph, me)

   - Partition scan loop fixes (Dongli)

   - mtip32xx conversion from managed resource API (Christoph)

   - cdrom registration race fix (Guenter)

   - MD pull from Song, two minor fixes.

   - Various documentation fixes (Marcos)

   - Multi-page bvec feature. This brings a lot of nice improvements
     with it, like more efficient splitting, larger IOs can be
     supported without growing the bvec table size, and so on. (Ming)

   - Various little fixes to core and drivers"

* tag 'for-5.1/block-20190302' of git://git.kernel.dk/linux-block: (117 commits)
  block: fix updating bio's front segment size
  block: Replace function name in string with __func__
  nbd: propagate genlmsg_reply return code
  floppy: remove set but not used variable 'q'
  null_blk: fix checking for REQ_FUA
  block: fix NULL pointer dereference in register_disk
  fs: fix guard_bio_eod to check for real EOD errors
  blk-mq: use HCTX_TYPE_DEFAULT but not 0 to index blk_mq_tag_set->map
  block: optimize bvec iteration in bvec_iter_advance
  block: introduce mp_bvec_for_each_page() for iterating over page
  block: optimize blk_bio_segment_split for single-page bvec
  block: optimize __blk_segment_map_sg() for single-page bvec
  block: introduce bvec_nth_page()
  iomap: wire up the iopoll method
  block: add bio_set_polled() helper
  block: wire up block device iopoll method
  fs: add an iopoll method to struct file_operations
  loop: set GENHD_FL_NO_PART_SCAN after blkdev_reread_part()
  loop: do not print warn message if partition scan is successful
  block: bounce: make sure that bvec table is updated
  ...
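For readers skimming the loop.c conversion in the diff below, here is a minimal sketch (not part of this pull; the helper name count_request_bvecs is hypothetical) of the rq_for_each_bvec() counting pattern the loop driver now uses instead of summing bio_segments() per bio:

/*
 * Illustrative sketch only: count the (possibly multi-page) bvecs of a
 * request with rq_for_each_bvec(), as the loop.c hunks below do.
 */
#include <linux/blkdev.h>

static int count_request_bvecs(struct request *rq)
{
	struct req_iterator rq_iter;
	struct bio_vec bv;
	int nr_bvec = 0;

	/* Each iteration yields one bvec, which may span multiple pages. */
	rq_for_each_bvec(bv, rq, rq_iter)
		nr_bvec++;

	return nr_bvec;
}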
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/floppy.c             |  3
-rw-r--r--  drivers/block/loop.c               | 48
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  | 37
-rw-r--r--  drivers/block/nbd.c                |  5
-rw-r--r--  drivers/block/null_blk_main.c      |  3
-rw-r--r--  drivers/block/rbd.c                |  2
-rw-r--r--  drivers/block/skd_main.c           |  1
-rw-r--r--  drivers/block/xen-blkfront.c       |  2
8 files changed, 52 insertions, 49 deletions
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 55481b40df9a..95f608d1a098 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2230,7 +2230,6 @@ static void floppy_end_request(struct request *req, blk_status_t error)
 static void request_done(int uptodate)
 {
 	struct request *req = current_req;
-	struct request_queue *q;
 	int block;
 	char msg[sizeof("request done ") + sizeof(int) * 3];
 
@@ -2243,8 +2242,6 @@ static void request_done(int uptodate)
 		return;
 	}
 
-	q = req->q;
-
 	if (uptodate) {
 		/* maintain values for invalidation on geometry
 		 * change */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cf5538942834..1e6edd568214 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,21 +511,22 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 		     loff_t pos, bool rw)
 {
 	struct iov_iter iter;
+	struct req_iterator rq_iter;
 	struct bio_vec *bvec;
 	struct request *rq = blk_mq_rq_from_pdu(cmd);
 	struct bio *bio = rq->bio;
 	struct file *file = lo->lo_backing_file;
+	struct bio_vec tmp;
 	unsigned int offset;
-	int segments = 0;
+	int nr_bvec = 0;
 	int ret;
 
+	rq_for_each_bvec(tmp, rq, rq_iter)
+		nr_bvec++;
+
 	if (rq->bio != rq->biotail) {
-		struct req_iterator iter;
-		struct bio_vec tmp;
 
-		__rq_for_each_bio(bio, rq)
-			segments += bio_segments(bio);
-		bvec = kmalloc_array(segments, sizeof(struct bio_vec),
+		bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
 				     GFP_NOIO);
 		if (!bvec)
 			return -EIO;
@@ -534,10 +535,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 		/*
 		 * The bios of the request may be started from the middle of
 		 * the 'bvec' because of bio splitting, so we can't directly
-		 * copy bio->bi_iov_vec to new bvec. The rq_for_each_segment
+		 * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec
 		 * API will take care of all details for us.
 		 */
-		rq_for_each_segment(tmp, rq, iter) {
+		rq_for_each_bvec(tmp, rq, rq_iter) {
 			*bvec = tmp;
 			bvec++;
 		}
@@ -551,11 +552,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 		 */
 		offset = bio->bi_iter.bi_bvec_done;
 		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
-		segments = bio_segments(bio);
 	}
 	atomic_set(&cmd->ref, 2);
 
-	iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq));
+	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
 	iter.iov_offset = offset;
 
 	cmd->iocb.ki_pos = pos;
@@ -1089,16 +1089,12 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 	}
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
-	lo->lo_state = Lo_unbound;
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
 	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
 	lo_number = lo->lo_number;
-	lo->lo_flags = 0;
-	if (!part_shift)
-		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 	loop_unprepare_queue(lo);
 out_unlock:
 	mutex_unlock(&loop_ctl_mutex);
@@ -1115,11 +1111,29 @@ out_unlock:
 			err = __blkdev_reread_part(bdev);
 		else
 			err = blkdev_reread_part(bdev);
-		pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
-			__func__, lo_number, err);
+		if (err)
+			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+				__func__, lo_number, err);
 		/* Device is gone, no point in returning error */
 		err = 0;
 	}
+
+	/*
+	 * lo->lo_state is set to Lo_unbound here after above partscan has
+	 * finished.
+	 *
+	 * There cannot be anybody else entering __loop_clr_fd() as
+	 * lo->lo_backing_file is already cleared and Lo_rundown state
+	 * protects us from all the other places trying to change the 'lo'
+	 * device.
+	 */
+	mutex_lock(&loop_ctl_mutex);
+	lo->lo_flags = 0;
+	if (!part_shift)
+		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+	lo->lo_state = Lo_unbound;
+	mutex_unlock(&loop_ctl_mutex);
+
 	/*
 	 * Need not hold loop_ctl_mutex to fput backing file.
 	 * Calling fput holding loop_ctl_mutex triggers a circular
@@ -1937,7 +1951,7 @@ static int loop_add(struct loop_device **l, int i)
 	lo->tag_set.queue_depth = 128;
 	lo->tag_set.numa_node = NUMA_NO_NODE;
 	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
-	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	lo->tag_set.driver_data = lo;
 
 	err = blk_mq_alloc_tag_set(&lo->tag_set);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 2f3ee4d6af82..83302ecdc8db 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1416,7 +1416,7 @@ static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba,
 	WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
 
 	/* Allocate a DMA buffer for the trim structure */
-	buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
+	buf = dma_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
 			GFP_KERNEL);
 	if (!buf)
 		return BLK_STS_RESOURCE;
@@ -1453,7 +1453,7 @@ static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba,
 					MTIP_TRIM_TIMEOUT_MS) < 0)
 		ret = BLK_STS_IOERR;
 
-	dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
+	dma_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
 	return ret;
 }
 
@@ -1656,7 +1656,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
 		if (!user_buffer)
 			return -EFAULT;
 
-		buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+		buf = dma_alloc_coherent(&port->dd->pdev->dev,
 				ATA_SECT_SIZE * xfer_sz,
 				&dma_addr,
 				GFP_KERNEL);
@@ -1734,7 +1734,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
 	}
 exit_drive_command:
 	if (buf)
-		dmam_free_coherent(&port->dd->pdev->dev,
+		dma_free_coherent(&port->dd->pdev->dev,
 				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
 	return rv;
 }
@@ -2838,11 +2838,11 @@ static void mtip_dma_free(struct driver_data *dd)
 	struct mtip_port *port = dd->port;
 
 	if (port->block1)
-		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
 					port->block1, port->block1_dma);
 
 	if (port->command_list) {
-		dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+		dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
 				port->command_list, port->command_list_dma);
 	}
 }
@@ -2861,7 +2861,7 @@ static int mtip_dma_alloc(struct driver_data *dd)
 
 	/* Allocate dma memory for RX Fis, Identify, and Sector Bufffer */
 	port->block1 =
-		dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+		dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
 					&port->block1_dma, GFP_KERNEL);
 	if (!port->block1)
 		return -ENOMEM;
@@ -2869,10 +2869,10 @@ static int mtip_dma_alloc(struct driver_data *dd)
 
 	/* Allocate dma memory for command list */
 	port->command_list =
-		dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+		dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
 					&port->command_list_dma, GFP_KERNEL);
 	if (!port->command_list) {
-		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
 					port->block1, port->block1_dma);
 		port->block1 = NULL;
 		port->block1_dma = 0;
@@ -3057,13 +3057,8 @@ static int mtip_hw_init(struct driver_data *dd)
 	mtip_start_port(dd->port);
 
 	/* Setup the ISR and enable interrupts. */
-	rv = devm_request_irq(&dd->pdev->dev,
-				dd->pdev->irq,
-				mtip_irq_handler,
-				IRQF_SHARED,
-				dev_driver_string(&dd->pdev->dev),
-				dd);
-
+	rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
+			 dev_driver_string(&dd->pdev->dev), dd);
 	if (rv) {
 		dev_err(&dd->pdev->dev,
 			"Unable to allocate IRQ %d\n", dd->pdev->irq);
@@ -3091,7 +3086,7 @@ out3:
 
 	/* Release the IRQ. */
 	irq_set_affinity_hint(dd->pdev->irq, NULL);
-	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+	free_irq(dd->pdev->irq, dd);
 
 out2:
 	mtip_deinit_port(dd->port);
@@ -3146,7 +3141,7 @@ static int mtip_hw_exit(struct driver_data *dd)
 
 	/* Release the IRQ. */
 	irq_set_affinity_hint(dd->pdev->irq, NULL);
-	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+	free_irq(dd->pdev->irq, dd);
 	msleep(1000);
 
 	/* Free dma regions */
@@ -3610,8 +3605,8 @@ static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
 	if (!cmd->command)
 		return;
 
-	dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
-				cmd->command, cmd->command_dma);
+	dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command,
+			  cmd->command_dma);
 }
 
 static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3620,7 +3615,7 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
 	struct driver_data *dd = set->driver_data;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+	cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
 			&cmd->command_dma, GFP_KERNEL);
 	if (!cmd->command)
 		return -ENOMEM;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7c9a949e876b..90ba9f4c03f3 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1571,7 +1571,7 @@ static int nbd_dev_add(int index)
 	nbd->tag_set.numa_node = NUMA_NO_NODE;
 	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
 	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
-		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
+		BLK_MQ_F_BLOCKING;
 	nbd->tag_set.driver_data = nbd;
 
 	err = blk_mq_alloc_tag_set(&nbd->tag_set);
@@ -2118,8 +2118,7 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
 	}
 	nla_nest_end(reply, dev_list);
 	genlmsg_end(reply, reply_head);
-	genlmsg_reply(reply, info);
-	ret = 0;
+	ret = genlmsg_reply(reply, info);
 out:
 	mutex_unlock(&nbd_index_mutex);
 	return ret;
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 62c9654b9ce8..417a9f15c116 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1104,7 +1104,7 @@ static int null_handle_bio(struct nullb_cmd *cmd)
 		len = bvec.bv_len;
 		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
 				     op_is_write(bio_op(bio)), sector,
-				     bio_op(bio) & REQ_FUA);
+				     bio->bi_opf & REQ_FUA);
 		if (err) {
 			spin_unlock_irq(&nullb->lock);
 			return err;
@@ -1678,7 +1678,6 @@ static int null_add_dev(struct nullb_device *dev)
 	if (dev->cache_size > 0) {
 		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
 		blk_queue_write_cache(nullb->q, true, true);
-		blk_queue_flush_queueable(nullb->q, true);
 	}
 
 	if (dev->zoned) {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 282e2e82d849..74088d8dbaf3 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3987,7 +3987,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	rbd_dev->tag_set.ops = &rbd_mq_ops;
 	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
 	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
-	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	rbd_dev->tag_set.nr_hw_queues = 1;
 	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
 
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index ab893a7571a2..7d3ad6c22ee5 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2843,7 +2843,6 @@ static int skd_cons_disk(struct skd_device *skdev)
 		skdev->sgs_per_request * sizeof(struct scatterlist);
 	skdev->tag_set.numa_node = NUMA_NO_NODE;
 	skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
-		BLK_MQ_F_SG_MERGE |
 		BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
 	skdev->tag_set.driver_data = skdev;
 	rc = blk_mq_alloc_tag_set(&skdev->tag_set);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0ed4b200fa58..d43a5677ccbc 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -977,7 +977,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	} else
 		info->tag_set.queue_depth = BLK_RING_SIZE(info);
 	info->tag_set.numa_node = NUMA_NO_NODE;
-	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	info->tag_set.cmd_size = sizeof(struct blkif_req);
 	info->tag_set.driver_data = info;
 