author     Jianchao Wang <jianchao.w.wang@oracle.com>    2018-12-13 20:28:18 -0500
committer  Jens Axboe <axboe@kernel.dk>                  2018-12-16 10:33:57 -0500
commit     7f556a44e61d0b62d78db9a2662a5f0daef010f2 (patch)
tree       e4712b5c0ac800c08863d55fe4c4781d7ffc4f73 /block
parent     4c9770c90fc5b6d6b6d190d108c061015f5804f7 (diff)
blk-mq: refactor the code of issue request directly
Merge blk_mq_try_issue_directly() and __blk_mq_try_issue_directly() into a single interface for issuing requests directly. The merged interface takes over the request completely: based on the return value of .queue_rq() and the 'bypass' parameter, it either inserts the request, ends it, or does nothing, so the caller no longer needs any follow-up handling and the code can be cleaned up.

In addition, commit c616cbee ("blk-mq: punt failed direct issue to dispatch list") inserts requests into the hctx dispatch list whenever BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE is returned. That is overkill and hurts merging; it only needs to be done for requests that have actually been through .queue_rq(). This patch fixes that as well.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
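As a reading aid (not part of the patch), here is a small stand-alone C sketch of the caller contract the merged interface implements. It is a toy model, not kernel code: the enum values, the printouts, and the name model_try_issue_directly are illustrative stand-ins; only the branching mirrors the switch statement added in the hunk below.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in status codes; the real blk_status_t values differ. */
enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE, BLK_STS_DEV_RESOURCE, BLK_STS_IOERR };

/*
 * reached_queue_rq plays the role of 'force' in the patch: it is true only
 * when the request has actually been through .queue_rq().
 */
static enum blk_status model_try_issue_directly(enum blk_status queue_rq_ret,
                                                bool reached_queue_rq, bool bypass)
{
        enum blk_status ret = reached_queue_rq ? queue_rq_ret : BLK_STS_RESOURCE;

        switch (ret) {
        case BLK_STS_OK:
                break;
        case BLK_STS_RESOURCE:
        case BLK_STS_DEV_RESOURCE:
                if (reached_queue_rq) {
                        /* Saw .queue_rq(): punt to the hctx dispatch list... */
                        printf("bypass insert to hctx dispatch list\n");
                        /* ...and report OK to a bypass caller to avoid a dm livelock. */
                        ret = bypass ? BLK_STS_OK : ret;
                } else if (!bypass) {
                        /* Never reached .queue_rq(): normal scheduler insert. */
                        printf("scheduler insert\n");
                }
                break;
        default:
                /* Real error: only a non-bypass caller ends the request here. */
                if (!bypass)
                        printf("end request with error\n");
                break;
        }
        return ret;
}

int main(void)
{
        /* bypass == true (blk_mq_request_issue_directly() path) */
        printf("-> %d\n", model_try_issue_directly(BLK_STS_RESOURCE, true, true));  /* 0: reported OK */
        printf("-> %d\n", model_try_issue_directly(BLK_STS_RESOURCE, false, true)); /* 1: real resource status */
        /* bypass == false (blk_mq_make_request() path): return value can be ignored */
        model_try_issue_directly(BLK_STS_DEV_RESOURCE, true, false);
        return 0;
}

In both modes the caller hands the request over completely; only a bypass caller such as blk_mq_request_issue_directly() looks at the returned status.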
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  103
1 file changed, 54 insertions(+), 49 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9690f4f8de7e..af4dc8227954 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1792,78 +1792,83 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
         return ret;
 }
 
-static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                                                 struct request *rq,
                                                 blk_qc_t *cookie,
-                                                bool bypass_insert, bool last)
+                                                bool bypass, bool last)
 {
         struct request_queue *q = rq->q;
         bool run_queue = true;
+        blk_status_t ret = BLK_STS_RESOURCE;
+        int srcu_idx;
+        bool force = false;
 
+        hctx_lock(hctx, &srcu_idx);
         /*
-         * RCU or SRCU read lock is needed before checking quiesced flag.
+         * hctx_lock is needed before checking quiesced flag.
          *
-         * When queue is stopped or quiesced, ignore 'bypass_insert' from
-         * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
-         * and avoid driver to try to dispatch again.
+         * When queue is stopped or quiesced, ignore 'bypass', insert
+         * and return BLK_STS_OK to caller, and avoid driver to try to
+         * dispatch again.
          */
-        if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
                 run_queue = false;
-                bypass_insert = false;
-                goto insert;
+                bypass = false;
+                goto out_unlock;
         }
 
-        if (q->elevator && !bypass_insert)
-                goto insert;
+        if (unlikely(q->elevator && !bypass))
+                goto out_unlock;
 
         if (!blk_mq_get_dispatch_budget(hctx))
-                goto insert;
+                goto out_unlock;
 
         if (!blk_mq_get_driver_tag(rq)) {
                 blk_mq_put_dispatch_budget(hctx);
-                goto insert;
+                goto out_unlock;
         }
 
-        return __blk_mq_issue_directly(hctx, rq, cookie, last);
-insert:
-        if (bypass_insert)
-                return BLK_STS_RESOURCE;
-
-        blk_mq_request_bypass_insert(rq, run_queue);
-        return BLK_STS_OK;
-}
-
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-                struct request *rq, blk_qc_t *cookie)
-{
-        blk_status_t ret;
-        int srcu_idx;
-
-        might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
-        hctx_lock(hctx, &srcu_idx);
-
-        ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
-        if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-                blk_mq_request_bypass_insert(rq, true);
-        else if (ret != BLK_STS_OK)
-                blk_mq_end_request(rq, ret);
-
+        /*
+         * Always add a request that has been through
+         *.queue_rq() to the hardware dispatch list.
+         */
+        force = true;
+        ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
+out_unlock:
         hctx_unlock(hctx, srcu_idx);
+        switch (ret) {
+        case BLK_STS_OK:
+                break;
+        case BLK_STS_DEV_RESOURCE:
+        case BLK_STS_RESOURCE:
+                if (force) {
+                        blk_mq_request_bypass_insert(rq, run_queue);
+                        /*
+                         * We have to return BLK_STS_OK for the DM
+                         * to avoid livelock. Otherwise, we return
+                         * the real result to indicate whether the
+                         * request is direct-issued successfully.
+                         */
+                        ret = bypass ? BLK_STS_OK : ret;
+                } else if (!bypass) {
+                        blk_mq_sched_insert_request(rq, false,
+                                                run_queue, false);
+                }
+                break;
+        default:
+                if (!bypass)
+                        blk_mq_end_request(rq, ret);
+                break;
+        }
+
+        return ret;
 }
 
 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
-        blk_status_t ret;
-        int srcu_idx;
-        blk_qc_t unused_cookie;
-        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
-        hctx_lock(hctx, &srcu_idx);
-        ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
-        hctx_unlock(hctx, srcu_idx);
+        blk_qc_t unused;
 
-        return ret;
+        return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
 }
 
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2004,13 +2009,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 if (same_queue_rq) {
                         data.hctx = same_queue_rq->mq_hctx;
                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-                                        &cookie);
+                                        &cookie, false, true);
                 }
         } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
                         !data.hctx->dispatch_busy)) {
                 blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
-                blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+                blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
         } else {
                 blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);