author	Linus Torvalds <torvalds@linux-foundation.org>	2019-04-07 19:28:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-04-07 19:28:36 -0400
commit	429fba106e82e2792010a825b9dbeadd00bf9e9c (patch)
tree	acaa2cefe9a35a7d25467957ea07f20eef63ecbc
parent	3b04689147085f5c8f47835d1c7e48203cba80d3 (diff)
parent	47b16820c490149c2923e8474048f2c6e7557cab (diff)
Merge tag 'for-linus-20190407' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - Fixups for the pf/pcd queue handling (YueHaibing)

 - Revert of the three direct issue changes as they have been proven to
   cause an issue with dm-mpath (Bart)

 - Plug rq_count reset fix (Dongli)

 - io_uring double free in fileset registration error handling (me)

 - Make null_blk handle bad numa node passed in (John)

 - BFQ ifdef fix (Konstantin)

 - Flush queue leak fix (Shenghui)

 - Plug trace fix (Yufen)

* tag 'for-linus-20190407' of git://git.kernel.dk/linux-block:
  xsysace: Fix error handling in ace_setup
  null_blk: prevent crash from bad home_node value
  block: Revert v5.0 blk_mq_request_issue_directly() changes
  paride/pcd: Fix potential NULL pointer dereference and mem leak
  blk-mq: do not reset plug->rq_count before the list is sorted
  paride/pf: Fix potential NULL pointer dereference
  io_uring: fix double free in case of fileset registration failure
  blk-mq: add trace block plug and unplug for multiple queues
  block: use blk_free_flush_queue() to free hctx->fq in blk_mq_init_hctx
  block/bfq: fix ifdef for CONFIG_BFQ_GROUP_IOSCHED=y
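The plug rq_count fix is purely an ordering problem: blk_mq_flush_plug_list() zeroed plug->rq_count before the rq_count > 2 check that decides whether to sort the plug list, so the sort never ran. A minimal userspace sketch of the same pattern (struct and function names here are illustrative, not the kernel's):

#include <stdio.h>

/* Simplified stand-in for struct blk_plug; illustrative only. */
struct plug {
	int rq_count;
	int multiple_queues;
};

/* Buggy order: the counter is reset before it is read, so the
 * "sort when more than two requests are plugged" branch is dead. */
static void flush_buggy(struct plug *p)
{
	p->rq_count = 0;
	if (p->rq_count > 2 && p->multiple_queues)
		puts("sorting plug list");	/* never reached */
}

/* Fixed order, as in the patch below: read first, reset after. */
static void flush_fixed(struct plug *p)
{
	if (p->rq_count > 2 && p->multiple_queues)
		puts("sorting plug list");
	p->rq_count = 0;
}

int main(void)
{
	struct plug p = { .rq_count = 4, .multiple_queues = 1 };

	flush_buggy(&p);
	p.rq_count = 4;
	flush_fixed(&p);	/* prints "sorting plug list" */
	return 0;
}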
-rw-r--r--	block/bfq-iosched.c	2
-rw-r--r--	block/bfq-wf2q.c	2
-rw-r--r--	block/blk-core.c	4
-rw-r--r--	block/blk-mq-sched.c	8
-rw-r--r--	block/blk-mq.c	129
-rw-r--r--	block/blk-mq.h	6
-rw-r--r--	drivers/block/null_blk_main.c	5
-rw-r--r--	drivers/block/paride/pcd.c	14
-rw-r--r--	drivers/block/paride/pf.c	12
-rw-r--r--	drivers/block/xsysace.c	2
-rw-r--r--	fs/io_uring.c	1
11 files changed, 110 insertions(+), 75 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4c592496a16a..fac188dd78fa 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
 	 * at least two nodes.
 	 */
 	return !(varied_queue_weights || multiple_classes_busy
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	       || bfqd->num_groups_with_pending_reqs > 0
 #endif
 	       );
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 63311d1ff1ed..a11bef75483d 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
 		entity->on_st = true;
 	}
 
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
 		struct bfq_group *bfqg =
 			container_of(entity, struct bfq_group, entity);
diff --git a/block/blk-core.c b/block/blk-core.c
index 4673ebe42255..a55389ba8779 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-	blk_qc_t unused;
-
 	if (blk_cloned_rq_check_limits(q, rq))
 		return BLK_STS_IOERR;
 
@@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 	 * bypass a potential scheduler on the bottom device for
 	 * insert.
 	 */
-	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+	return blk_mq_request_issue_directly(rq, true);
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 40905539afed..aa6bc5c02643 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 		 * busy in case of 'none' scheduler, and this way may save
 		 * us one extra enqueue & dequeue to sw queue.
 		 */
-		if (!hctx->dispatch_busy && !e && !run_queue_async)
+		if (!hctx->dispatch_busy && !e && !run_queue_async) {
 			blk_mq_try_issue_list_directly(hctx, list);
-		else
-			blk_mq_insert_requests(hctx, ctx, list);
+			if (list_empty(list))
+				return;
+		}
+		blk_mq_insert_requests(hctx, ctx, list);
 	}
 
 	blk_mq_run_hw_queue(hctx, run_queue_async);
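This restores the pre-v5.0 contract: blk_mq_try_issue_list_directly() issues what it can and leaves the remainder on the list, and the caller falls back to blk_mq_insert_requests() for whatever is left. A rough userspace sketch of that contract (an array stands in for the request list; names invented for illustration):

#include <stdio.h>

#define NREQ 5

/* Pretend the driver can take only 'budget' requests directly. */
static int budget = 3;

/* Issues requests from the front of the "list" until the driver
 * pushes back; leftovers stay between *pos and len for the caller. */
static void try_issue_list(const int reqs[], int len, int *pos)
{
	while (*pos < len && budget-- > 0)
		printf("issued request %d directly\n", reqs[(*pos)++]);
}

int main(void)
{
	int reqs[NREQ] = { 1, 2, 3, 4, 5 };
	int pos = 0;

	try_issue_list(reqs, NREQ, &pos);
	/* Caller's fallback, as in blk_mq_sched_insert_requests(). */
	for (; pos < NREQ; pos++)
		printf("inserted request %d for async dispatch\n", reqs[pos]);
	return 0;
}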
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ff3d7b49969..a9354835cf51 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1711,11 +1711,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	unsigned int depth;
 
 	list_splice_init(&plug->mq_list, &list);
-	plug->rq_count = 0;
 
 	if (plug->rq_count > 2 && plug->multiple_queues)
 		list_sort(NULL, &list, plug_rq_cmp);
 
+	plug->rq_count = 0;
+
 	this_q = NULL;
 	this_hctx = NULL;
 	this_ctx = NULL;
@@ -1800,74 +1801,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 						struct request *rq,
 						blk_qc_t *cookie,
-						bool bypass, bool last)
+						bool bypass_insert, bool last)
 {
 	struct request_queue *q = rq->q;
 	bool run_queue = true;
-	blk_status_t ret = BLK_STS_RESOURCE;
-	int srcu_idx;
-	bool force = false;
 
-	hctx_lock(hctx, &srcu_idx);
 	/*
-	 * hctx_lock is needed before checking quiesced flag.
+	 * RCU or SRCU read lock is needed before checking quiesced flag.
 	 *
-	 * When queue is stopped or quiesced, ignore 'bypass', insert
-	 * and return BLK_STS_OK to caller, and avoid driver to try to
-	 * dispatch again.
+	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
+	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
+	 * and avoid driver to try to dispatch again.
 	 */
-	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
 		run_queue = false;
-		bypass = false;
-		goto out_unlock;
+		bypass_insert = false;
+		goto insert;
 	}
 
-	if (unlikely(q->elevator && !bypass))
-		goto out_unlock;
+	if (q->elevator && !bypass_insert)
+		goto insert;
 
 	if (!blk_mq_get_dispatch_budget(hctx))
-		goto out_unlock;
+		goto insert;
 
 	if (!blk_mq_get_driver_tag(rq)) {
 		blk_mq_put_dispatch_budget(hctx);
-		goto out_unlock;
+		goto insert;
 	}
 
-	/*
-	 * Always add a request that has been through
-	 *.queue_rq() to the hardware dispatch list.
-	 */
-	force = true;
-	ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+	return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+	if (bypass_insert)
+		return BLK_STS_RESOURCE;
+
+	blk_mq_request_bypass_insert(rq, run_queue);
+	return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, blk_qc_t *cookie)
+{
+	blk_status_t ret;
+	int srcu_idx;
+
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+	hctx_lock(hctx, &srcu_idx);
+
+	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+		blk_mq_request_bypass_insert(rq, true);
+	else if (ret != BLK_STS_OK)
+		blk_mq_end_request(rq, ret);
+
+	hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+	blk_status_t ret;
+	int srcu_idx;
+	blk_qc_t unused_cookie;
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+	hctx_lock(hctx, &srcu_idx);
+	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
 	hctx_unlock(hctx, srcu_idx);
-	switch (ret) {
-	case BLK_STS_OK:
-		break;
-	case BLK_STS_DEV_RESOURCE:
-	case BLK_STS_RESOURCE:
-		if (force) {
-			blk_mq_request_bypass_insert(rq, run_queue);
-			/*
-			 * We have to return BLK_STS_OK for the DM
-			 * to avoid livelock. Otherwise, we return
-			 * the real result to indicate whether the
-			 * request is direct-issued successfully.
-			 */
-			ret = bypass ? BLK_STS_OK : ret;
-		} else if (!bypass) {
-			blk_mq_sched_insert_request(rq, false,
-						run_queue, false);
-		}
-		break;
-	default:
-		if (!bypass)
-			blk_mq_end_request(rq, ret);
-		break;
-	}
 
 	return ret;
 }
@@ -1875,20 +1878,22 @@ out_unlock:
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
-	blk_qc_t unused;
-	blk_status_t ret = BLK_STS_OK;
-
 	while (!list_empty(list)) {
+		blk_status_t ret;
 		struct request *rq = list_first_entry(list, struct request,
 				queuelist);
 
 		list_del_init(&rq->queuelist);
-		if (ret == BLK_STS_OK)
-			ret = blk_mq_try_issue_directly(hctx, rq, &unused,
-					false,
+		ret = blk_mq_request_issue_directly(rq, list_empty(list));
+		if (ret != BLK_STS_OK) {
+			if (ret == BLK_STS_RESOURCE ||
+					ret == BLK_STS_DEV_RESOURCE) {
+				blk_mq_request_bypass_insert(rq,
 					list_empty(list));
-		else
-			blk_mq_sched_insert_request(rq, false, true, false);
+				break;
+			}
+			blk_mq_end_request(rq, ret);
+		}
 	}
 
 	/*
1893 1898
1894 /* 1899 /*
@@ -1896,7 +1901,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	 * the driver there was more coming, but that turned out to
 	 * be a lie.
 	 */
-	if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
 		hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
1902 1907
@@ -2003,19 +2008,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			plug->rq_count--;
 		}
 		blk_add_rq_to_plug(plug, rq);
+		trace_block_plug(q);
 
 		blk_mq_put_ctx(data.ctx);
 
 		if (same_queue_rq) {
 			data.hctx = same_queue_rq->mq_hctx;
+			trace_block_unplug(q, 1, true);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-					&cookie, false, true);
+					&cookie);
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
 			!data.hctx->dispatch_busy)) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
@@ -2332,7 +2339,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	return 0;
 
  free_fq:
-	kfree(hctx->fq);
+	blk_free_flush_queue(hctx->fq);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
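The free_fq fix matters because hctx->fq is not a flat allocation: blk_alloc_flush_queue() also allocates the embedded flush request, so a bare kfree() leaks it. The general pairing rule, in a small userspace sketch (type and function names invented for illustration):

#include <stdlib.h>

/* A compound object whose constructor also allocates a nested buffer,
 * analogous to the flush queue's embedded flush_rq. */
struct flushq {
	char *flush_rq;
};

static struct flushq *flushq_alloc(size_t cmd_size)
{
	struct flushq *fq = calloc(1, sizeof(*fq));

	if (!fq)
		return NULL;
	fq->flush_rq = calloc(1, cmd_size);
	if (!fq->flush_rq) {
		free(fq);
		return NULL;
	}
	return fq;
}

/* Matching destructor: frees the nested allocation too. A bare
 * free(fq) -- like the kfree(hctx->fq) this patch removes -- would
 * leak fq->flush_rq. */
static void flushq_free(struct flushq *fq)
{
	if (!fq)
		return;
	free(fq->flush_rq);
	free(fq);
}

int main(void)
{
	struct flushq *fq = flushq_alloc(64);

	flushq_free(fq);
	return 0;
}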
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d704fc7766f4..423ea88ab6fb 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -70,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-						struct request *rq,
-						blk_qc_t *cookie,
-						bool bypass, bool last);
+/* Used by blk_insert_cloned_request() to issue request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 					struct list_head *list);
 
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 417a9f15c116..d7ac09c092f2 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
 		return -EINVAL;
 	}
 
+	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+		pr_err("null_blk: invalid home_node value\n");
+		g_home_node = NUMA_NO_NODE;
+	}
+
 	if (g_queue_mode == NULL_Q_RQ) {
 		pr_err("null_blk: legacy IO path no longer available\n");
 		return -EINVAL;
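g_home_node comes straight from a module parameter, so an out-of-range value would previously reach the NUMA-aware allocators and crash. A userspace sketch of the added sanity check (nr_online_nodes is faked here; real values come from the kernel's node map):

#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Stand-in for the kernel's nr_online_nodes; illustrative value. */
static int nr_online_nodes = 2;

/* Mirrors the new null_init() check: an out-of-range home node is
 * demoted to NUMA_NO_NODE instead of being handed to the allocator. */
static int sanitize_home_node(int node)
{
	if (node != NUMA_NO_NODE && node >= nr_online_nodes) {
		fprintf(stderr, "null_blk: invalid home_node value\n");
		return NUMA_NO_NODE;
	}
	return node;
}

int main(void)
{
	printf("%d\n", sanitize_home_node(1));	/* valid: prints 1 */
	printf("%d\n", sanitize_home_node(7));	/* clamped: prints -1 */
	return 0;
}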
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 377a694dc228..6d415b20fb70 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
 		disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
 						   1, BLK_MQ_F_SHOULD_MERGE);
 		if (IS_ERR(disk->queue)) {
+			put_disk(disk);
 			disk->queue = NULL;
 			continue;
 		}
@@ -750,6 +751,8 @@ static int pcd_detect(void)
 
 	printk("%s: No CD-ROM drive found\n", name);
 	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		if (!cd->disk)
+			continue;
 		blk_cleanup_queue(cd->disk->queue);
 		cd->disk->queue = NULL;
 		blk_mq_free_tag_set(&cd->tag_set);
@@ -1010,8 +1013,14 @@ static int __init pcd_init(void)
 	pcd_probe_capabilities();
 
 	if (register_blkdev(major, name)) {
-		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+			if (!cd->disk)
+				continue;
+
+			blk_cleanup_queue(cd->disk->queue);
+			blk_mq_free_tag_set(&cd->tag_set);
 			put_disk(cd->disk);
+		}
 		return -EBUSY;
 	}
 
1017 1026
@@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void)
 	int unit;
 
 	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		if (!cd->disk)
+			continue;
+
 		if (cd->present) {
 			del_gendisk(cd->disk);
 			pi_release(cd->pi);
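Both paride fixes guard loops over a fixed-size unit array in which some entries may never have received a disk (per-unit setup can fail), so cleanup must skip the empty slots rather than dereference disk->queue through a NULL disk. The same guard appears in the pf driver below; a compact userspace sketch of the pattern (names invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define UNITS 4

struct unit {
	char *disk;	/* NULL when allocation failed for this unit */
};

/* Cleanup in the style of the pcd/pf fixes: units whose disk was
 * never allocated are skipped, avoiding a NULL pointer dereference. */
static void cleanup_units(struct unit *u, int n)
{
	for (int i = 0; i < n; i++) {
		if (!u[i].disk)
			continue;	/* the added guard */
		printf("cleaning unit %d\n", i);
		free(u[i].disk);
		u[i].disk = NULL;
	}
}

int main(void)
{
	struct unit units[UNITS] = { 0 };

	units[0].disk = malloc(1);
	units[2].disk = malloc(1);	/* units 1 and 3 stay NULL */
	cleanup_units(units, UNITS);
	return 0;
}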
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 103b617cdc31..35e6e271b219 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -762,6 +762,8 @@ static int pf_detect(void)
 
 	printk("%s: No ATAPI disk detected\n", name);
 	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+		if (!pf->disk)
+			continue;
 		blk_cleanup_queue(pf->disk->queue);
 		pf->disk->queue = NULL;
 		blk_mq_free_tag_set(&pf->tag_set);
@@ -1029,8 +1031,13 @@ static int __init pf_init(void)
 	pf_busy = 0;
 
 	if (register_blkdev(major, name)) {
-		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+			if (!pf->disk)
+				continue;
+			blk_cleanup_queue(pf->disk->queue);
+			blk_mq_free_tag_set(&pf->tag_set);
 			put_disk(pf->disk);
+		}
 		return -EBUSY;
 	}
 
1036 1043
@@ -1051,6 +1058,9 @@ static void __exit pf_exit(void)
 	int unit;
 	unregister_blkdev(major, name);
 	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+		if (!pf->disk)
+			continue;
+
 		if (pf->present)
 			del_gendisk(pf->disk);
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 87ccef4bd69e..32a21b8d1d85 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
 	return 0;
 
 err_read:
+	/* prevent double queue cleanup */
+	ace->gd->queue = NULL;
 	put_disk(ace->gd);
 err_alloc_disk:
 	blk_cleanup_queue(ace->queue);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bbdbd56cf2ac..07d6ef195d05 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2215,6 +2215,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		fput(ctx->user_files[i]);
 
 	kfree(ctx->user_files);
+	ctx->user_files = NULL;
 	ctx->nr_user_files = 0;
 	return ret;
 }
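Clearing ctx->user_files after kfree() is the classic double-free guard: a later teardown path that tries to free the fileset again now sees a NULL pointer and does nothing. Sketched in userspace C (struct shape simplified; free(NULL) is a no-op, matching kfree(NULL)):

#include <stdlib.h>

struct ctx {
	int *user_files;
	unsigned nr_user_files;
};

/* Error-path teardown in the style of the io_uring fix: clearing the
 * pointer after freeing means a second teardown cannot free it twice. */
static void files_unregister_on_error(struct ctx *ctx)
{
	free(ctx->user_files);
	ctx->user_files = NULL;	/* the added line */
	ctx->nr_user_files = 0;
}

int main(void)
{
	struct ctx ctx = {
		.user_files = calloc(4, sizeof(int)),
		.nr_user_files = 4,
	};

	files_unregister_on_error(&ctx);
	files_unregister_on_error(&ctx);	/* now harmless */
	return 0;
}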