author    Jens Axboe <axboe@kernel.dk>  2017-06-12 10:30:13 -0400
committer Jens Axboe <axboe@kernel.dk>  2017-06-12 10:30:13 -0400
commit    8f66439eec46d652255b9351abebb540ee5b2fd9
tree      94f4a41dc343cf769cd92f1f7711e9ce8ad43728 /block/blk-mq.c
parent    22ec656bcc3f38207ad5476ebad1e5005fb0f1ff
parent    32c1431eea4881a6b17bd7c639315010aeefa452
Merge tag 'v4.12-rc5' into for-4.13/block
We've already got a few conflicts, and upcoming work depends on some of the changes that have gone into mainline as regression fixes for this series. Pull in 4.12-rc5 to resolve these conflicts and make it easier for downstream trees to continue working on 4.13 changes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
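A back-merge like this is typically produced with a short git sequence; the following is only a rough sketch of the workflow described above (the remote name and the exact set of conflicting files are assumptions, not taken from this page):

    git checkout for-4.13/block
    git fetch --tags origin          # pick up the v4.12-rc5 release tag
    git merge v4.12-rc5              # stops if files such as block/blk-mq.c conflict
    # resolve the conflicts in the working tree, then
    git add block/blk-mq.c
    git commit --signoff             # completes the merge and adds the Signed-off-by line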
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  |  37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7af78b1e9db9..da2f21961525 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1396,22 +1396,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
         return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-                bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                        struct request *rq,
+                                        blk_qc_t *cookie, bool may_sleep)
 {
         struct request_queue *q = rq->q;
         struct blk_mq_queue_data bd = {
                 .rq = rq,
                 .last = true,
         };
-        struct blk_mq_hw_ctx *hctx;
         blk_qc_t new_cookie;
-        blk_status_t ret;
+        int ret;
+        bool run_queue = true;
+
+        if (blk_mq_hctx_stopped(hctx)) {
+                run_queue = false;
+                goto insert;
+        }
 
         if (q->elevator)
                 goto insert;
 
-        if (!blk_mq_get_driver_tag(rq, &hctx, false))
+        if (!blk_mq_get_driver_tag(rq, NULL, false))
                 goto insert;
 
         new_cookie = request_to_qc_t(hctx, rq);
@@ -1436,7 +1442,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
         }
 
 insert:
-        blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+        blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1444,7 +1450,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                 rcu_read_lock();
-                __blk_mq_try_issue_directly(rq, cookie, false);
+                __blk_mq_try_issue_directly(hctx, rq, cookie, false);
                 rcu_read_unlock();
         } else {
                 unsigned int srcu_idx;
@@ -1452,7 +1458,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                 might_sleep();
 
                 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-                __blk_mq_try_issue_directly(rq, cookie, true);
+                __blk_mq_try_issue_directly(hctx, rq, cookie, true);
                 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
         }
 }
@@ -1555,9 +1561,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                 blk_mq_put_ctx(data.ctx);
 
-                if (same_queue_rq)
+                if (same_queue_rq) {
+                        data.hctx = blk_mq_map_queue(q,
+                                        same_queue_rq->mq_ctx->cpu);
                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                         &cookie);
+                }
         } else if (q->nr_hw_queues > 1 && is_sync) {
                 blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
@@ -2578,7 +2587,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
         return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+                                         int nr_hw_queues)
 {
         struct request_queue *q;
 
@@ -2602,6 +2612,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
         list_for_each_entry(q, &set->tag_list, tag_set_list)
                 blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+        mutex_lock(&set->tag_list_lock);
+        __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+        mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */