author    Christoph Hellwig <hch@lst.de>    2014-05-27 14:59:48 -0400
committer Jens Axboe <axboe@fb.com>         2014-05-28 11:49:23 -0400
commit    a3bd77567cae6af700dcd245148befc73fc89a50 (patch)
tree      465934c4f1fb0687da5d862e5221d54c0d34eb2f /block
parent    5dee857720db15e2c8ef0c03f7eeac00c4c63cb2 (diff)
blk-mq: remove blk_mq_wait_for_tags
The current logic for blocking tag allocation is rather confusing: we first allocate and then immediately free a tag in blk_mq_wait_for_tags, only to attempt a non-blocking allocation afterwards and repeat if someone else managed to grab the tag before us.

Instead, change blk_mq_alloc_request_pinned to simply do a blocking tag allocation itself and use the request it gets back.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
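For illustration only, below is a minimal userspace sketch of the two schemes the message describes, modeled as a toy tag pool protected by a mutex and condition variable. The names (tag_pool, get_tag_nonblock, wait_for_tags, alloc_tag_old, alloc_tag_blocking) and the pthread-based implementation are stand-ins invented for this example; the actual kernel change is in the diff below.

/* Toy model of a tag pool; not the blk-mq implementation. */
#include <pthread.h>
#include <stdio.h>

struct tag_pool {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             free_tags;
};

/* Non-blocking attempt: returns a tag, or -1 if the pool is exhausted. */
static int get_tag_nonblock(struct tag_pool *tp)
{
	int tag = -1;

	pthread_mutex_lock(&tp->lock);
	if (tp->free_tags > 0)
		tag = --tp->free_tags;
	pthread_mutex_unlock(&tp->lock);
	return tag;
}

static void put_tag(struct tag_pool *tp)
{
	pthread_mutex_lock(&tp->lock);
	tp->free_tags++;
	pthread_cond_signal(&tp->wait);
	pthread_mutex_unlock(&tp->lock);
}

/* Old scheme: block by taking a tag and handing it straight back, then
 * retry non-blocking -- another thread may grab the freed tag first. */
static void wait_for_tags(struct tag_pool *tp)
{
	pthread_mutex_lock(&tp->lock);
	while (tp->free_tags == 0)
		pthread_cond_wait(&tp->wait, &tp->lock);
	tp->free_tags--;        /* "allocate" a tag ...            */
	tp->free_tags++;        /* ... and immediately free it again */
	pthread_cond_signal(&tp->wait);
	pthread_mutex_unlock(&tp->lock);
}

static int alloc_tag_old(struct tag_pool *tp)
{
	int tag;

	while ((tag = get_tag_nonblock(tp)) < 0)
		wait_for_tags(tp);
	return tag;
}

/* New scheme: a single blocking allocation that keeps the tag it got. */
static int alloc_tag_blocking(struct tag_pool *tp)
{
	int tag;

	pthread_mutex_lock(&tp->lock);
	while (tp->free_tags == 0)
		pthread_cond_wait(&tp->wait, &tp->lock);
	tag = --tp->free_tags;
	pthread_mutex_unlock(&tp->lock);
	return tag;
}

int main(void)
{
	struct tag_pool tp = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.free_tags = 2,
	};

	printf("old scheme got tag %d\n", alloc_tag_old(&tp));
	printf("new scheme got tag %d\n", alloc_tag_blocking(&tp));
	put_tag(&tp);
	put_tag(&tp);
	return 0;
}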
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq-tag.c   8
-rw-r--r--   block/blk-mq-tag.h   1
-rw-r--r--   block/blk-mq.c       13
3 files changed, 6 insertions, 16 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 05e2baf4fa0d..0d0640d38a06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -7,14 +7,6 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
-{
-	int tag, zero = 0;
-
-	tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
-	blk_mq_put_tag(hctx, tag, &zero);
-}
-
 static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
 {
 	int i;
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 2e5e6872d089..c959de58d2a5 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -49,7 +49,6 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
 
 extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
-extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved);
 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 04ef7ecb3c7f..3224888d329a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -264,31 +264,30 @@ __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 	return NULL;
 }
 
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
 {
+	bool gfp_mask = gfp & ~__GFP_WAIT;
 	struct request *rq;
 
 	do {
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
+		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp_mask,
 						reserved);
 		if (rq)
 			break;
 
-		if (gfp & __GFP_WAIT) {
-			__blk_mq_run_hw_queue(hctx);
-			blk_mq_put_ctx(ctx);
-		} else {
+		if (!(gfp & __GFP_WAIT)) {
 			blk_mq_put_ctx(ctx);
 			break;
 		}
 
-		blk_mq_wait_for_tags(hctx, reserved);
+		__blk_mq_run_hw_queue(hctx);
+		blk_mq_put_ctx(ctx);
+		gfp_mask = gfp;
 	} while (1);
 
 	return rq;