Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  129
1 file changed, 68 insertions(+), 61 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ff3d7b49969..a9354835cf51 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1711,11 +1711,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	unsigned int depth;
 
 	list_splice_init(&plug->mq_list, &list);
-	plug->rq_count = 0;
 
 	if (plug->rq_count > 2 && plug->multiple_queues)
 		list_sort(NULL, &list, plug_rq_cmp);
 
+	plug->rq_count = 0;
+
 	this_q = NULL;
 	this_hctx = NULL;
 	this_ctx = NULL;
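
Note on the hunk above: list_sort() is gated on plug->rq_count > 2, so zeroing rq_count before that test meant the plugged batch could never be sorted; the reset now happens only after the count has been consumed. A minimal user-space sketch of the ordering pattern (the demo names are hypothetical, not from the kernel):

#include <stdbool.h>
#include <stdio.h>

struct demo_plug { int rq_count; bool multiple_queues; };

static void demo_flush_plug(struct demo_plug *plug)
{
	/* Old order: "plug->rq_count = 0;" stood here, making the
	 * test below always false, so the batch was never sorted. */
	if (plug->rq_count > 2 && plug->multiple_queues)
		puts("sorting batch");	/* stands in for list_sort() */

	plug->rq_count = 0;	/* reset only after the count was used */
}

int main(void)
{
	struct demo_plug p = { .rq_count = 4, .multiple_queues = true };
	demo_flush_plug(&p);	/* prints "sorting batch" in fixed order */
	return 0;
}
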
@@ -1800,74 +1801,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-					struct request *rq,
-					blk_qc_t *cookie,
-					bool bypass, bool last)
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+						struct request *rq,
+						blk_qc_t *cookie,
+						bool bypass_insert, bool last)
 {
 	struct request_queue *q = rq->q;
 	bool run_queue = true;
-	blk_status_t ret = BLK_STS_RESOURCE;
-	int srcu_idx;
-	bool force = false;
 
-	hctx_lock(hctx, &srcu_idx);
 	/*
-	 * hctx_lock is needed before checking quiesced flag.
+	 * RCU or SRCU read lock is needed before checking quiesced flag.
 	 *
-	 * When queue is stopped or quiesced, ignore 'bypass', insert
-	 * and return BLK_STS_OK to caller, and avoid driver to try to
-	 * dispatch again.
+	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
+	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
+	 * and avoid driver to try to dispatch again.
 	 */
-	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
 		run_queue = false;
-		bypass = false;
-		goto out_unlock;
+		bypass_insert = false;
+		goto insert;
 	}
 
-	if (unlikely(q->elevator && !bypass))
-		goto out_unlock;
+	if (q->elevator && !bypass_insert)
+		goto insert;
 
 	if (!blk_mq_get_dispatch_budget(hctx))
-		goto out_unlock;
+		goto insert;
 
 	if (!blk_mq_get_driver_tag(rq)) {
 		blk_mq_put_dispatch_budget(hctx);
-		goto out_unlock;
+		goto insert;
 	}
 
-	/*
-	 * Always add a request that has been through
-	 * .queue_rq() to the hardware dispatch list.
-	 */
-	force = true;
-	ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+	return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+	if (bypass_insert)
+		return BLK_STS_RESOURCE;
+
+	blk_mq_request_bypass_insert(rq, run_queue);
+	return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, blk_qc_t *cookie)
+{
+	blk_status_t ret;
+	int srcu_idx;
+
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+	hctx_lock(hctx, &srcu_idx);
+
+	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+		blk_mq_request_bypass_insert(rq, true);
+	else if (ret != BLK_STS_OK)
+		blk_mq_end_request(rq, ret);
+
+	hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+	blk_status_t ret;
+	int srcu_idx;
+	blk_qc_t unused_cookie;
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+	hctx_lock(hctx, &srcu_idx);
+	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
 	hctx_unlock(hctx, srcu_idx);
-	switch (ret) {
-	case BLK_STS_OK:
-		break;
-	case BLK_STS_DEV_RESOURCE:
-	case BLK_STS_RESOURCE:
-		if (force) {
-			blk_mq_request_bypass_insert(rq, run_queue);
-			/*
-			 * We have to return BLK_STS_OK for the DM
-			 * to avoid livelock. Otherwise, we return
-			 * the real result to indicate whether the
-			 * request is direct-issued successfully.
-			 */
-			ret = bypass ? BLK_STS_OK : ret;
-		} else if (!bypass) {
-			blk_mq_sched_insert_request(rq, false,
-						    run_queue, false);
-		}
-		break;
-	default:
-		if (!bypass)
-			blk_mq_end_request(rq, ret);
-		break;
-	}
 
 	return ret;
 }
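
Note on the hunk above: it splits the old all-in-one blk_mq_try_issue_directly() into three pieces. __blk_mq_try_issue_directly() performs the stopped/quiesced, elevator, budget, and driver-tag checks and reports a status; the void blk_mq_try_issue_directly() wrapper used on the bio-submission path handles its own fallback (bypass-insert on resource exhaustion, complete on hard error); and blk_mq_request_issue_directly() passes bypass_insert = true so callers such as dm-rq see the real BLK_STS_RESOURCE instead of a silent insert. A hedged sketch of consuming that status; the demo_* names are hypothetical, and blk_mq_request_issue_directly() is declared in the private header block/blk-mq.h, so this only builds in-tree:

#include <linux/blk-mq.h>
#include <linux/errno.h>

/* Hypothetical helper: park the request and retry later. */
static void demo_requeue(struct request *rq)
{
	blk_mq_requeue_request(rq, false);	/* kick the list manually later */
}

static int demo_dispatch_one(struct request *rq, bool last)
{
	blk_status_t ret = blk_mq_request_issue_directly(rq, last);

	switch (ret) {
	case BLK_STS_OK:
		return 0;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		demo_requeue(rq);		/* transient: try again later */
		return -EAGAIN;
	default:
		blk_mq_end_request(rq, ret);	/* terminal: complete with error */
		return -EIO;
	}
}
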
@@ -1875,20 +1878,22 @@ out_unlock:
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
-	blk_qc_t unused;
-	blk_status_t ret = BLK_STS_OK;
-
 	while (!list_empty(list)) {
+		blk_status_t ret;
 		struct request *rq = list_first_entry(list, struct request,
 				queuelist);
 
 		list_del_init(&rq->queuelist);
-		if (ret == BLK_STS_OK)
-			ret = blk_mq_try_issue_directly(hctx, rq, &unused,
-							false,
+		ret = blk_mq_request_issue_directly(rq, list_empty(list));
+		if (ret != BLK_STS_OK) {
+			if (ret == BLK_STS_RESOURCE ||
+					ret == BLK_STS_DEV_RESOURCE) {
+				blk_mq_request_bypass_insert(rq,
 							list_empty(list));
-		else
-			blk_mq_sched_insert_request(rq, false, true, false);
+				break;
+			}
+			blk_mq_end_request(rq, ret);
+		}
 	}
 
 	/*
@@ -1896,7 +1901,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	 * the driver there was more coming, but that turned out to
 	 * be a lie.
 	 */
-	if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
 		hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
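Note on the two hunks above: the list path now stops at the first BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE, re-inserts that request, and leaves the remainder on the list, so "!list_empty(list)" replaces "ret != BLK_STS_OK" as the signal that earlier requests were queued with last == false and ->commit_rqs() must still be called. A hedged sketch of what a driver's ->commit_rqs() typically does; the demo_* type and fields are hypothetical:

#include <linux/blk-mq.h>
#include <linux/io.h>

/* Hypothetical per-hctx driver state for the sketch. */
struct demo_hctx_data {
	u32 __iomem *doorbell;
	u32 sq_tail;
};

/*
 * When a series of ->queue_rq() calls with last == false is cut
 * short, commit_rqs() must ring the doorbell for whatever was
 * already written to the submission ring.
 */
static void demo_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct demo_hctx_data *dd = hctx->driver_data;

	writel(dd->sq_tail, dd->doorbell);	/* flush queued entries */
}
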
@@ -2003,19 +2008,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			plug->rq_count--;
 		}
 		blk_add_rq_to_plug(plug, rq);
+		trace_block_plug(q);
 
 		blk_mq_put_ctx(data.ctx);
 
 		if (same_queue_rq) {
 			data.hctx = same_queue_rq->mq_hctx;
+			trace_block_unplug(q, 1, true);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-					&cookie, false, true);
+					&cookie);
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
 			!data.hctx->dispatch_busy)) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
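
Note on the hunk above: besides dropping the bypass/last arguments from blk_mq_try_issue_directly(), it adds the missing tracepoints on the plugged same-queue path: trace_block_plug() when the request is parked in the plug, and trace_block_unplug(q, 1, true) when one request is pulled back out for direct issue, so blktrace sees balanced plug/unplug events here too. A hedged sketch of the pairing; demo_issue() is hypothetical:

#include <linux/blk-mq.h>
#include <trace/events/block.h>

static void demo_issue(struct request *rq);	/* hypothetical direct issue */

static void demo_plug_then_issue(struct request_queue *q, struct request *rq)
{
	trace_block_plug(q);		/* rq was just added to the plug */

	trace_block_unplug(q, 1, true);	/* one request, explicit unplug */
	demo_issue(rq);
}
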
@@ -2332,7 +2339,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	return 0;
 
  free_fq:
-	kfree(hctx->fq);
+	blk_free_flush_queue(hctx->fq);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
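
Note on the hunk above: hctx->fq comes from blk_alloc_flush_queue(), which also allocates the embedded flush request, so unwinding with a bare kfree() leaked fq->flush_rq; the error path must use the matching blk_free_flush_queue(). A hedged sketch of the pairing rule; both functions live in the private header block/blk.h, so this is in-tree context only, and demo_init_fq() is hypothetical:

#include <linux/blk-mq.h>
#include <linux/errno.h>
#include "blk.h"	/* blk_alloc_flush_queue()/blk_free_flush_queue() */

static int demo_init_fq(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			int cmd_size, bool later_step_fails)
{
	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, cmd_size);
	if (!hctx->fq)
		return -ENOMEM;

	if (later_step_fails) {
		/* Unwind with the matching free, never kfree(hctx->fq):
		 * only the flush-queue free knows about fq->flush_rq.  */
		blk_free_flush_queue(hctx->fq);
		return -ENOMEM;
	}
	return 0;
}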