author	Keith Busch <keith.busch@intel.com>	2015-03-11 23:56:39 -0400
committer	Jens Axboe <axboe@fb.com>	2015-03-13 10:30:55 -0400
commit	bfd343aa1718457d34b99ce6573085ac340da288 (patch)
tree	43e7156fc29628569a5a065e38e54719fa754325 /block
parent	b94ec296403e99d5ac9a8c48332cec4118d44b94 (diff)
blk-mq: don't wait in blk_mq_queue_enter() if __GFP_WAIT isn't set
Return -EBUSY if we're unable to enter a queue immediately when
allocating a blk-mq request without __GFP_WAIT.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
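For context, a minimal caller-side sketch (not part of this commit; the function name and error handling are illustrative, and the trailing "reserved" argument is assumed from this kernel's blk_mq_alloc_request() signature) of how the new -EBUSY fast-fail surfaces to a caller that passes a gfp mask without __GFP_WAIT, such as GFP_ATOMIC:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

/* Hypothetical caller: allocate a blk-mq request without sleeping. */
static int example_alloc_nowait(struct request_queue *q)
{
	struct request *rq;

	/*
	 * GFP_ATOMIC does not include __GFP_WAIT, so with this patch a
	 * frozen queue makes the allocation fail fast with
	 * ERR_PTR(-EBUSY) instead of sleeping on mq_freeze_wq.
	 */
	rq = blk_mq_alloc_request(q, WRITE, GFP_ATOMIC, false);
	if (IS_ERR(rq)) {
		if (PTR_ERR(rq) == -EBUSY)
			return -EBUSY;	/* queue frozen; caller may retry later */
		return PTR_ERR(rq);
	}

	/* ... set up and dispatch the request ... */
	blk_mq_free_request(rq);
	return 0;
}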
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	9
1 files changed, 6 insertions, 3 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 06614ce0f475..59fa23935a0f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -77,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -85,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -256,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 		bio_endio(bio, -EIO);
 		return NULL;
 	}