author      Jens Axboe <axboe@fb.com>    2014-09-11 11:31:18 -0400
committer   Jens Axboe <axboe@fb.com>    2014-09-11 11:31:18 -0400
commit      b207892b061da7608878e273ae22ba9bf9be264b (patch)
tree        51daa46b89b83cad422941f52110b19571b85b79 /block/blk-mq.c
parent      018a17bdc8658ad448497c84d4ba21b6985820ec (diff)
parent      a516440542afcb9647f88d12c35640baf02d07ea (diff)
Merge branch 'for-linus' into for-3.18/core
A bit of churn on the for-linus side that would be nice to have in the core bits for 3.18, so pull it in to catch us up and make forward progress easier.

Signed-off-by: Jens Axboe <axboe@fb.com>

Conflicts:
        block/scsi_ioctl.c
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  127
1 file changed, 94 insertions, 33 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 940aa8a34b70..067e600002d3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
+        bool freeze;
+
         spin_lock_irq(q->queue_lock);
-        q->mq_freeze_depth++;
+        freeze = !q->mq_freeze_depth++;
         spin_unlock_irq(q->queue_lock);
 
-        percpu_ref_kill(&q->mq_usage_counter);
-        blk_mq_run_queues(q, false);
+        if (freeze) {
+                percpu_ref_kill(&q->mq_usage_counter);
+                blk_mq_run_queues(q, false);
+        }
         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-        bool wake = false;
+        bool wake;
 
         spin_lock_irq(q->queue_lock);
         wake = !--q->mq_freeze_depth;
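The hunk above makes queue freezing reference counted: only the 0 -> 1 transition of mq_freeze_depth kills the percpu ref and runs the queues, and only the transition back to 0 in blk_mq_unfreeze_queue wakes waiters, so nested freeze/unfreeze pairs stay balanced. A minimal standalone sketch of that counting pattern, with hypothetical names and a pthread mutex standing in for queue_lock (the re-enable/wake side is not shown in this hunk):

#include <pthread.h>
#include <stdbool.h>

struct freezable {
        pthread_mutex_t lock;
        unsigned int freeze_depth;      /* mirrors q->mq_freeze_depth */
};

/* Only the outermost freeze (depth 0 -> 1) does the expensive work. */
static void freezable_freeze(struct freezable *f)
{
        bool freeze;

        pthread_mutex_lock(&f->lock);
        freeze = !f->freeze_depth++;
        pthread_mutex_unlock(&f->lock);

        if (freeze) {
                /* in blk-mq: percpu_ref_kill() + blk_mq_run_queues() */
        }
        /* outer and nested callers then wait for in-flight users to drain */
}

/* Only the last unfreeze (depth 1 -> 0) re-enables and wakes waiters. */
static void freezable_unfreeze(struct freezable *f)
{
        bool wake;

        pthread_mutex_lock(&f->lock);
        wake = !--f->freeze_depth;
        pthread_mutex_unlock(&f->lock);

        if (wake) {
                /* in blk-mq: resurrect the ref and wake the freeze waitqueue */
        }
}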
@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
         /* tag was already set */
         rq->errors = 0;
 
+        rq->cmd = rq->__cmd;
+
         rq->extra_len = 0;
         rq->sense_len = 0;
         rq->resid_len = 0;
@@ -1072,13 +1078,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
         blk_account_io_start(rq, 1);
 }
 
+static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+{
+        return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+                !blk_queue_nomerges(hctx->queue);
+}
+
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx,
                                          struct request *rq, struct bio *bio)
 {
-        struct request_queue *q = hctx->queue;
-
-        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+        if (!hctx_allow_merges(hctx)) {
                 blk_mq_bio_to_request(rq, bio);
                 spin_lock(&ctx->lock);
 insert_rq:
@@ -1086,6 +1096,8 @@ insert_rq:
                 spin_unlock(&ctx->lock);
                 return false;
         } else {
+                struct request_queue *q = hctx->queue;
+
                 spin_lock(&ctx->lock);
                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
                         blk_mq_bio_to_request(rq, bio);
@@ -1313,6 +1325,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                         continue;
                 set->ops->exit_request(set->driver_data, tags->rqs[i],
                                         hctx_idx, i);
+                tags->rqs[i] = NULL;
         }
 }
 
@@ -1346,8 +1359,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
         INIT_LIST_HEAD(&tags->page_list);
 
-        tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-                                 GFP_KERNEL, set->numa_node);
+        tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                 set->numa_node);
         if (!tags->rqs) {
                 blk_mq_free_tags(tags);
                 return NULL;
@@ -1371,8 +1385,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                 this_order--;
 
                 do {
-                        page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-                                                this_order);
+                        page = alloc_pages_node(set->numa_node,
+                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                this_order);
                         if (page)
                                 break;
                         if (!this_order--)
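The two allocation hunks above add __GFP_NOWARN | __GFP_NORETRY so a failed high-order allocation neither logs a warning nor triggers aggressive reclaim; the loop simply drops to a smaller order, and the rq-map setup further down falls back to a smaller queue depth. A rough standalone illustration of that opportunistic, shrink-on-failure allocation (hypothetical helper; malloc() stands in for alloc_pages_node()):

#include <stdlib.h>

/*
 * Try the largest chunk first, then progressively smaller ones,
 * mirroring the "start at this_order and decrement" loop above.
 * Hypothetical helper; malloc() stands in for alloc_pages_node().
 */
static void *alloc_shrinking(size_t base, int max_order, size_t *got)
{
        int order;

        for (order = max_order; order >= 0; order--) {
                void *p = malloc(base << order);

                if (p) {
                        *got = base << order;   /* caller learns the size */
                        return p;
                }
                /* no warning, no retry: just try the next smaller order */
        }
        return NULL;
}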
@@ -1396,8 +1411,10 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                 if (set->ops->init_request) {
                         if (set->ops->init_request(set->driver_data,
                                         tags->rqs[i], hctx_idx, i,
-                                        set->numa_node))
+                                        set->numa_node)) {
+                                tags->rqs[i] = NULL;
                                 goto fail;
+                        }
                 }
 
                 p += rq_size;
@@ -1408,7 +1425,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
         return tags;
 
 fail:
-        pr_warn("%s: failed to allocate requests\n", __func__);
         blk_mq_free_rq_map(set, tags, hctx_idx);
         return NULL;
 }
@@ -1578,7 +1594,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
                 hctx->tags = set->tags[i];
 
                 /*
-                 * Allocate space for all possible cpus to avoid allocation in
+                 * Allocate space for all possible cpus to avoid allocation at
                  * runtime
                  */
                 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@ -1666,8 +1682,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
         queue_for_each_hw_ctx(q, hctx, i) {
                 /*
-                 * If not software queues are mapped to this hardware queue,
-                 * disable it and free the request entries
+                 * If no software queues are mapped to this hardware queue,
+                 * disable it and free the request entries.
                  */
                 if (!hctx->nr_ctx) {
                         struct blk_mq_tag_set *set = q->tag_set;
@@ -1717,14 +1733,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 {
         struct blk_mq_tag_set *set = q->tag_set;
 
-        blk_mq_freeze_queue(q);
-
         mutex_lock(&set->tag_list_lock);
         list_del_init(&q->tag_set_list);
         blk_mq_update_tag_set_depth(set);
         mutex_unlock(&set->tag_list_lock);
-
-        blk_mq_unfreeze_queue(q);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -1932,6 +1944,61 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
         return NOTIFY_OK;
 }
 
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+        int i;
+
+        for (i = 0; i < set->nr_hw_queues; i++) {
+                set->tags[i] = blk_mq_init_rq_map(set, i);
+                if (!set->tags[i])
+                        goto out_unwind;
+        }
+
+        return 0;
+
+out_unwind:
+        while (--i >= 0)
+                blk_mq_free_rq_map(set, set->tags[i], i);
+
+        set->tags = NULL;
+        return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+        unsigned int depth;
+        int err;
+
+        depth = set->queue_depth;
+        do {
+                err = __blk_mq_alloc_rq_maps(set);
+                if (!err)
+                        break;
+
+                set->queue_depth >>= 1;
+                if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+                        err = -ENOMEM;
+                        break;
+                }
+        } while (set->queue_depth);
+
+        if (!set->queue_depth || err) {
+                pr_err("blk-mq: failed to allocate request map\n");
+                return -ENOMEM;
+        }
+
+        if (depth != set->queue_depth)
+                pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+                                                depth, set->queue_depth);
+
+        return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
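The new blk_mq_alloc_rq_maps() above retries with the depth halved each time __blk_mq_alloc_rq_maps() fails, giving up once the depth would drop below reserved_tags + BLK_MQ_TAG_MIN. The same retry-with-a-smaller-budget idea as a self-contained sketch (hypothetical names; the callback stands in for __blk_mq_alloc_rq_maps()):

#include <stdio.h>

/*
 * Halve the requested depth on each allocation failure until it
 * succeeds or hits the floor, as blk_mq_alloc_rq_maps() does above.
 * alloc_at_depth() is a hypothetical stand-in returning 0 on success.
 */
static int alloc_with_depth_fallback(unsigned int *depth, unsigned int floor,
                                     int (*alloc_at_depth)(unsigned int))
{
        unsigned int asked = *depth;

        while (*depth >= floor) {
                if (!alloc_at_depth(*depth)) {
                        if (*depth != asked)
                                fprintf(stderr, "reduced depth (%u -> %u)\n",
                                        asked, *depth);
                        return 0;
                }
                *depth >>= 1;
        }
        return -1;      /* could not allocate even at the minimum depth */
}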
@@ -1940,8 +2007,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-        int i;
-
         if (!set->nr_hw_queues)
                 return -EINVAL;
         if (!set->queue_depth)
@@ -1962,23 +2027,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                                  sizeof(struct blk_mq_tags *),
                                  GFP_KERNEL, set->numa_node);
         if (!set->tags)
-                goto out;
+                return -ENOMEM;
 
-        for (i = 0; i < set->nr_hw_queues; i++) {
-                set->tags[i] = blk_mq_init_rq_map(set, i);
-                if (!set->tags[i])
-                        goto out_unwind;
-        }
+        if (blk_mq_alloc_rq_maps(set))
+                goto enomem;
 
         mutex_init(&set->tag_list_lock);
         INIT_LIST_HEAD(&set->tag_list);
 
         return 0;
-
-out_unwind:
-        while (--i >= 0)
-                blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+        kfree(set->tags);
+        set->tags = NULL;
         return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1993,6 +2053,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
         }
 
         kfree(set->tags);
+        set->tags = NULL;
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
 