author		David S. Miller <davem@davemloft.net>	2014-09-08 00:41:53 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-08 00:41:53 -0400
commit		eb84d6b60491a3ca3d90d62ee5346b007770d40d (patch)
tree		22aadf9ada15e1ae5ba4c400aafab6f2541996e6 /block/blk-mq.c
parent		97a13e5289baa96eaddd06e61d277457d837af3a (diff)
parent		d030671f3f261e528dc6e396a13f10859a74ae7c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	36
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5189cb1e478a..4aac82615a46 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
+	bool freeze;
+
 	spin_lock_irq(q->queue_lock);
-	q->mq_freeze_depth++;
+	freeze = !q->mq_freeze_depth++;
 	spin_unlock_irq(q->queue_lock);
 
-	percpu_ref_kill(&q->mq_usage_counter);
-	blk_mq_run_queues(q, false);
+	if (freeze) {
+		percpu_ref_kill(&q->mq_usage_counter);
+		blk_mq_run_queues(q, false);
+	}
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	bool wake = false;
+	bool wake;
 
 	spin_lock_irq(q->queue_lock);
 	wake = !--q->mq_freeze_depth;
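
With this hunk, blk_mq_freeze_queue() nests: only the first freeze (depth going 0 -> 1) kills the percpu ref and runs the queues, and blk_mq_unfreeze_queue() only wakes waiters once the depth drops back to 0. The following is a minimal userspace sketch of that first/last-transition pattern, for illustration only: the struct, the pthread mutex, and the printf calls are stand-ins for q->queue_lock and the percpu_ref/run-queue work, not kernel code.

/* Sketch only: nested freeze depth, heavy work on the 0->1 transition. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	int freeze_depth;	/* stands in for q->mq_freeze_depth */
};

static void fake_freeze(struct fake_queue *q)
{
	bool freeze;

	pthread_mutex_lock(&q->lock);
	freeze = !q->freeze_depth++;	/* true only for the first freezer */
	pthread_mutex_unlock(&q->lock);

	if (freeze)
		printf("first freeze: kill ref, run queues\n");
}

static void fake_unfreeze(struct fake_queue *q)
{
	bool wake;

	pthread_mutex_lock(&q->lock);
	wake = !--q->freeze_depth;	/* true only for the last unfreezer */
	pthread_mutex_unlock(&q->lock);

	if (wake)
		printf("last unfreeze: wake waiters\n");
}

int main(void)
{
	struct fake_queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	fake_freeze(&q);	/* prints */
	fake_freeze(&q);	/* nested freeze: silent */
	fake_unfreeze(&q);	/* still frozen: silent */
	fake_unfreeze(&q);	/* prints */
	return 0;
}
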
@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	/* tag was already set */
 	rq->errors = 0;
 
+	rq->cmd = rq->__cmd;
+
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
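
The added rq->cmd = rq->__cmd; points the request's command pointer back at its inline command buffer during init, presumably so a pointer left behind by a previous user of the request is not carried over. A sketch of that reset-to-inline-buffer idea, with invented names (ex_request, inline_cmd) standing in for the kernel's struct request, rq->cmd and rq->__cmd:

/* Illustration only, not the kernel struct request. */
#include <stdio.h>

struct ex_request {
	unsigned char inline_cmd[16];	/* corresponds to rq->__cmd */
	unsigned char *cmd;		/* may point elsewhere while in use */
};

static void ex_request_init(struct ex_request *rq)
{
	rq->cmd = rq->inline_cmd;	/* reset to the inline buffer */
}

int main(void)
{
	static unsigned char big_cdb[32];
	struct ex_request rq;

	rq.cmd = big_cdb;		/* a previous user attached its own buffer */
	ex_request_init(&rq);
	printf("cmd points at inline buffer: %d\n", rq.cmd == rq.inline_cmd);
	return 0;
}
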
@@ -1068,13 +1074,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 	blk_account_io_start(rq, 1);
 }
 
+static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+{
+	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+		!blk_queue_nomerges(hctx->queue);
+}
+
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 					 struct blk_mq_ctx *ctx,
 					 struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = hctx->queue;
-
-	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+	if (!hctx_allow_merges(hctx)) {
 		blk_mq_bio_to_request(rq, bio);
 		spin_lock(&ctx->lock);
 insert_rq:
@@ -1082,6 +1092,8 @@ insert_rq:
 		spin_unlock(&ctx->lock);
 		return false;
 	} else {
+		struct request_queue *q = hctx->queue;
+
 		spin_lock(&ctx->lock);
 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
 			blk_mq_bio_to_request(rq, bio);
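
The new hctx_allow_merges() helper gates merging on both the per-hctx BLK_MQ_F_SHOULD_MERGE flag and the queue-wide nomerges setting; the old inline check only tested the flag. A small userspace sketch of the same predicate, with invented flag and struct names (EX_F_SHOULD_MERGE, ex_hw_ctx) standing in for the kernel ones:

/* Illustration only: merge allowed iff the hctx opts in and the queue does not opt out. */
#include <stdbool.h>
#include <stdio.h>

#define EX_F_SHOULD_MERGE	(1u << 0)	/* hypothetical per-hctx flag */

struct ex_queue {
	bool nomerges;				/* hypothetical queue-wide setting */
};

struct ex_hw_ctx {
	unsigned int flags;
	struct ex_queue *queue;
};

static bool ex_allow_merges(const struct ex_hw_ctx *hctx)
{
	return (hctx->flags & EX_F_SHOULD_MERGE) && !hctx->queue->nomerges;
}

int main(void)
{
	struct ex_queue q = { .nomerges = false };
	struct ex_hw_ctx hctx = { .flags = EX_F_SHOULD_MERGE, .queue = &q };

	printf("merges allowed: %d\n", ex_allow_merges(&hctx));	/* 1 */
	q.nomerges = true;
	printf("merges allowed: %d\n", ex_allow_merges(&hctx));	/* 0 */
	return 0;
}
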
@@ -1574,7 +1586,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->tags = set->tags[i];
 
 		/*
-		 * Allocate space for all possible cpus to avoid allocation in
+		 * Allocate space for all possible cpus to avoid allocation at
 		 * runtime
 		 */
 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@ -1662,8 +1674,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
-		 * If not software queues are mapped to this hardware queue,
-		 * disable it and free the request entries
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
 			struct blk_mq_tag_set *set = q->tag_set;
@@ -1713,14 +1725,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	blk_mq_freeze_queue(q);
-
 	mutex_lock(&set->tag_list_lock);
 	list_del_init(&q->tag_set_list);
 	blk_mq_update_tag_set_depth(set);
 	mutex_unlock(&set->tag_list_lock);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,