summaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJianchao Wang <jianchao.w.wang@oracle.com>2018-10-12 06:07:26 -0400
committerJens Axboe <axboe@kernel.dk>2018-10-13 17:42:01 -0400
commit5b202853ffbc54b29f23c4b1b5f3948efab489a2 (patch)
tree115c95fa117934258563e7c6530e94153651a4dc /block
parent477e19dedc9d3e1f4443a1d4ae00572a988120ea (diff)
blk-mq: change gfp flags to GFP_NOIO in blk_mq_realloc_hw_ctxs
blk_mq_realloc_hw_ctxs could be invoked during update hw queues. At the moment, IO is blocked. Change the gfp flags from GFP_KERNEL to GFP_NOIO to avoid a permanent hang during memory allocation in blk_mq_realloc_hw_ctxs. Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-flush.c6
-rw-r--r--block/blk-mq.c17
-rw-r--r--block/blk.h2
4 files changed, 15 insertions, 12 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index f12d2b65e5a5..c5539eed0202 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1163,7 +1163,7 @@ int blk_init_allocated_queue(struct request_queue *q)
1163{ 1163{
1164 WARN_ON_ONCE(q->mq_ops); 1164 WARN_ON_ONCE(q->mq_ops);
1165 1165
1166 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size); 1166 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
1167 if (!q->fq) 1167 if (!q->fq)
1168 return -ENOMEM; 1168 return -ENOMEM;
1169 1169
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ce41f666de3e..8b44b86779da 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -566,12 +566,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
566EXPORT_SYMBOL(blkdev_issue_flush); 566EXPORT_SYMBOL(blkdev_issue_flush);
567 567
568struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, 568struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
569 int node, int cmd_size) 569 int node, int cmd_size, gfp_t flags)
570{ 570{
571 struct blk_flush_queue *fq; 571 struct blk_flush_queue *fq;
572 int rq_sz = sizeof(struct request); 572 int rq_sz = sizeof(struct request);
573 573
574 fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); 574 fq = kzalloc_node(sizeof(*fq), flags, node);
575 if (!fq) 575 if (!fq)
576 goto fail; 576 goto fail;
577 577
@@ -579,7 +579,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
579 spin_lock_init(&fq->mq_flush_lock); 579 spin_lock_init(&fq->mq_flush_lock);
580 580
581 rq_sz = round_up(rq_sz + cmd_size, cache_line_size()); 581 rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
582 fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); 582 fq->flush_rq = kzalloc_node(rq_sz, flags, node);
583 if (!fq->flush_rq) 583 if (!fq->flush_rq)
584 goto fail_rq; 584 goto fail_rq;
585 585
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 99a72c650728..6b734461fd39 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2210,12 +2210,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
2210 * runtime 2210 * runtime
2211 */ 2211 */
2212 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 2212 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2213 GFP_KERNEL, node); 2213 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
2214 if (!hctx->ctxs) 2214 if (!hctx->ctxs)
2215 goto unregister_cpu_notifier; 2215 goto unregister_cpu_notifier;
2216 2216
2217 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL, 2217 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2218 node)) 2218 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
2219 goto free_ctxs; 2219 goto free_ctxs;
2220 2220
2221 hctx->nr_ctx = 0; 2221 hctx->nr_ctx = 0;
@@ -2228,7 +2228,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
2228 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 2228 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2229 goto free_bitmap; 2229 goto free_bitmap;
2230 2230
2231 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); 2231 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
2232 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
2232 if (!hctx->fq) 2233 if (!hctx->fq)
2233 goto exit_hctx; 2234 goto exit_hctx;
2234 2235
@@ -2536,12 +2537,14 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2536 2537
2537 node = blk_mq_hw_queue_to_node(q->mq_map, i); 2538 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2538 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set), 2539 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2539 GFP_KERNEL, node); 2540 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2541 node);
2540 if (!hctxs[i]) 2542 if (!hctxs[i])
2541 break; 2543 break;
2542 2544
2543 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL, 2545 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask,
2544 node)) { 2546 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2547 node)) {
2545 kfree(hctxs[i]); 2548 kfree(hctxs[i]);
2546 hctxs[i] = NULL; 2549 hctxs[i] = NULL;
2547 break; 2550 break;
diff --git a/block/blk.h b/block/blk.h
index 58c030f727e9..3d2aecba96a4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -125,7 +125,7 @@ static inline void __blk_get_queue(struct request_queue *q)
125} 125}
126 126
127struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, 127struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
128 int node, int cmd_size); 128 int node, int cmd_size, gfp_t flags);
129void blk_free_flush_queue(struct blk_flush_queue *q); 129void blk_free_flush_queue(struct blk_flush_queue *q);
130 130
131int blk_init_rl(struct request_list *rl, struct request_queue *q, 131int blk_init_rl(struct request_list *rl, struct request_queue *q,