author		Linus Torvalds <torvalds@linux-foundation.org>	2016-09-23 19:24:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-09-23 19:24:36 -0400
commit		4ee69866258620b105d06ae07da3bbe9fdb1699a (patch)
tree		e17e4aea5675b4abcf548f1d87f9586e1cd19541
parent		b22734a55067adbc10216e459762dbd7dcef29d5 (diff)
parent		c8712c6a674e3382fe4d26d108251ccfa55d08e0 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Three fixes, two regressions and one that poses a problem in blk-mq
  with the new nvmef code"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: skip unmapped queues in blk_mq_alloc_request_hctx
  nvme-rdma: only clear queue flags after successful connect
  blk-throttle: Extend slice if throttle group is not empty
-rw-r--r--	block/blk-mq.c			16
-rw-r--r--	block/blk-throttle.c		6
-rw-r--r--	drivers/nvme/host/rdma.c	2

3 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f5a6c1de76..c207fa9870eb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
+	/*
+	 * Check if the hardware context is actually mapped to anything.
+	 * If not tell the caller that it should skip this queue.
+	 */
 	hctx = q->queue_hw_ctx[hctx_idx];
+	if (!blk_mq_hw_queue_mapped(hctx)) {
+		ret = -EXDEV;
+		goto out_queue_exit;
+	}
 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 	if (!rq) {
-		blk_queue_exit(q);
-		return ERR_PTR(-EWOULDBLOCK);
+		ret = -EWOULDBLOCK;
+		goto out_queue_exit;
 	}
 
 	return rq;
+
+out_queue_exit:
+	blk_queue_exit(q);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
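A minimal caller-side sketch (not part of the patch): with this change, blk_mq_alloc_request_hctx() reports an unmapped hardware context as ERR_PTR(-EXDEV) instead of dereferencing the hctx's empty cpumask, so a fabrics-style connect loop can skip such queues. The function name connect_io_queues() and the use of WRITE plus BLK_MQ_REQ_RESERVED are illustrative assumptions, not code from this series.

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Hypothetical connect loop: allocate one reserved request per hctx. */
static int connect_io_queues(struct request_queue *q, unsigned int nr_hw_queues)
{
	struct request *rq;
	unsigned int i;

	for (i = 0; i < nr_hw_queues; i++) {
		rq = blk_mq_alloc_request_hctx(q, WRITE, BLK_MQ_REQ_RESERVED, i);
		if (IS_ERR(rq)) {
			/* -EXDEV now means "hctx not mapped": skip, don't fail */
			if (PTR_ERR(rq) == -EXDEV)
				continue;
			return PTR_ERR(rq);
		}
		/* ...issue the connect command, then release the tag... */
		blk_mq_free_request(rq);
	}
	return 0;
}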
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f1aba26f4719..a3ea8260c94c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	/*
 	 * If previous slice expired, start a new one otherwise renew/extend
 	 * existing slice to make sure it is at least throtl_slice interval
-	 * long since now.
+	 * long since now. New slice is started only for empty throttle group.
+	 * If there is queued bio, that means there should be an active
+	 * slice and it should be extended instead.
 	 */
-	if (throtl_slice_used(tg, rw))
+	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 		throtl_start_new_slice(tg, rw);
 	else {
 		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
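The one-line condition, restated in isolation (an illustrative sketch assuming the surrounding blk-throttle.c helpers; tg_update_slice_sketch() is a hypothetical name, not a drop-in function): a new slice may only start when no bio is queued for that direction, otherwise the active slice is extended so queued bios keep being charged against the group's budget.

#include <linux/jiffies.h>

/* Annotated restatement of the fixed dispatch-time slice decision. */
static void tg_update_slice_sketch(struct throtl_grp *tg, bool rw)
{
	if (throtl_slice_used(tg, rw) && !tg->service_queue.nr_queued[rw]) {
		/* Group is idle and the old slice expired: start fresh. */
		throtl_start_new_slice(tg, rw);
	} else {
		/*
		 * Bios are still queued (or the slice is live): extend the
		 * active slice to at least throtl_slice from now so queued
		 * work keeps counting against the group's bandwidth budget.
		 */
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}
}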
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c2c2c28e6eb5..fbdb2267e460 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
 	queue = &ctrl->queues[idx];
 	queue->ctrl = ctrl;
-	queue->flags = 0;
 	init_completion(&queue->cm_done);
 
 	if (idx > 0)
@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 		goto out_destroy_cm_id;
 	}
 
+	clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
 	set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
 
 	return 0;
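A sketch of why the ordering matters, under the assumption (from this kernel's nvme-rdma design) that queue teardown is gated by test_and_set_bit() on NVME_RDMA_Q_DELETING; the helper name below is illustrative. Unconditionally zeroing queue->flags at init wiped that gate during reconnect; clearing only the DELETING bit, and only after the connect succeeds, keeps the gate closed for queues that never came up.

/*
 * Illustrative teardown gate: only the first caller may proceed. A
 * reconnect path must not reset DELETING before connect succeeds, or
 * a half-initialized queue could be stopped and freed twice.
 */
static void example_stop_and_free_queue(struct nvme_rdma_queue *queue)
{
	if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
		return;	/* someone else already owns teardown */
	/* ...stop the queue and free its RDMA resources exactly once... */
}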