| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-04 17:26:35 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-04 17:26:35 -0400 |
| commit | f1a7cd0ffe8c4ecd57a55aab7cfd4c813e0f5f3d | |
| tree | 93d657a5c14b86f00b839a5d2af3ce7f8d3f1401 | |
| parent | aaeb2554337217dfa4eac2fcc90da7be540b9a73 | |
| parent | 14b83e172f0bc83b8dcf78ee8b1844beeffb418d | |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block into next
Pull block follow-up bits from Jens Axboe:
"A few minor (but important) fixes for blk-mq for the -rc1 window.
- Hot removal potential oops fix for single queue devices. From me.
- Two merged patches in late May meant that we accidentally lost a
fix for freeing an active queue. Fix that up. From me.
- A change of the blk_mq_tag_to_rq() API, passing in blk_mq_tags, to
make life considerably easier for scsi-mq. From me.
- A schedule-while-atomic fix from Ming Lei, which would hit if the
tag space was exhausted.
- Missing __percpu annotation in one place in blk-mq. Found by the
magic Wu compile bot due to code being moved around by the previous
patch, but it's actually an older issue. From Ming Lei.
- Clearing of tag of a flush request at end_io time. From Ming Lei"
* 'for-linus' of git://git.kernel.dk/linux-block:
block: mq flush: clear flush_rq's tag in flush_end_io()
blk-mq: let blk_mq_tag_to_rq() take blk_mq_tags as the main parameter
blk-mq: fix regression from commit 624dbe475416
blk-mq: handle NULL req return from blk_map_request in single queue mode
blk-mq: fix sparse warning on missed __percpu annotation
blk-mq: fix schedule from atomic context
blk-mq: move blk_mq_get_ctx/blk_mq_put_ctx to mq private header
| file | lines changed |
|---|---|
| block/blk-flush.c | 2 |
| block/blk-mq-tag.c | 48 |
| block/blk-mq-tag.h | 2 |
| block/blk-mq.c | 83 |
| block/blk-mq.h | 45 |
| drivers/block/mtip32xx/mtip32xx.c | 4 |
| include/linux/blk-mq.h | 2 |
| include/linux/blkdev.h | 2 |

8 files changed, 123 insertions(+), 65 deletions(-)
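The blk_mq_tag_to_rq() API change called out in the pull message is visible in the mtip32xx and include/linux/blk-mq.h hunks below: the lookup now takes the hardware context's tag set rather than the hctx itself, so a caller that only holds a `struct blk_mq_tags` pointer (the scsi-mq case mentioned above) can resolve a tag directly. A hedged sketch of the driver-side effect, using made-up names `my_dev` and `my_rq_from_tag`; the in-tree equivalent is the mtip32xx hunk further down.

```c
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct my_dev {
	struct request_queue *queue;	/* single hardware queue assumed */
};

static struct request *my_rq_from_tag(struct my_dev *dd, unsigned int tag)
{
	/* old API: return blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag); */
	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];

	/* new API: pass the tag set of that hardware context */
	return blk_mq_tag_to_rq(hctx->tags, tag);
}
```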
```diff
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ff87c664b7df..8ffee4b5f93d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -225,7 +225,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 
 	if (q->mq_ops) {
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
-		q->flush_rq->cmd_flags = 0;
+		q->flush_rq->tag = -1;
 	}
 
 	running = &q->flush_queue[q->flush_running_idx];
```
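This reset matters because the mq flush machinery identifies its internal flush request purely by tag comparison. The check that the reset protects, condensed from the blk-mq.c hunk further down (shown here only for context, not as new code): without clearing `flush_rq->tag` at end_io time, a normal request that later reuses the same tag value could be mistaken for the flush request.

```c
/* condensed from the blk_mq_tag_to_rq() rewrite in block/blk-mq.c below */
static inline bool is_flush_request(struct request *rq, unsigned int tag)
{
	return (rq->cmd_flags & REQ_FLUSH_SEQ) &&
	       rq->q->flush_rq->tag == tag;
}
```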
```diff
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d90c4aeb7dd3..1aab39f71d95 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -221,8 +221,10 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
 	return bs;
 }
 
-static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
-		  unsigned int *last_tag, gfp_t gfp)
+static int bt_get(struct blk_mq_alloc_data *data,
+		struct blk_mq_bitmap_tags *bt,
+		struct blk_mq_hw_ctx *hctx,
+		unsigned int *last_tag)
 {
 	struct bt_wait_state *bs;
 	DEFINE_WAIT(wait);
@@ -232,7 +234,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 	if (tag != -1)
 		return tag;
 
-	if (!(gfp & __GFP_WAIT))
+	if (!(data->gfp & __GFP_WAIT))
 		return -1;
 
 	bs = bt_wait_ptr(bt, hctx);
@@ -249,50 +251,62 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 		if (was_empty)
 			atomic_set(&bs->wait_cnt, bt->wake_cnt);
 
+		blk_mq_put_ctx(data->ctx);
+
 		io_schedule();
+
+		data->ctx = blk_mq_get_ctx(data->q);
+		data->hctx = data->q->mq_ops->map_queue(data->q,
+				data->ctx->cpu);
+		if (data->reserved) {
+			bt = &data->hctx->tags->breserved_tags;
+		} else {
+			last_tag = &data->ctx->last_tag;
+			hctx = data->hctx;
+			bt = &hctx->tags->bitmap_tags;
+		}
+		finish_wait(&bs->wait, &wait);
+		bs = bt_wait_ptr(bt, hctx);
 	} while (1);
 
 	finish_wait(&bs->wait, &wait);
 	return tag;
 }
 
-static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
-				     struct blk_mq_hw_ctx *hctx,
-				     unsigned int *last_tag, gfp_t gfp)
+static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
 {
 	int tag;
 
-	tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
+	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
+			&data->ctx->last_tag);
 	if (tag >= 0)
-		return tag + tags->nr_reserved_tags;
+		return tag + data->hctx->tags->nr_reserved_tags;
 
 	return BLK_MQ_TAG_FAIL;
 }
 
-static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
-					      gfp_t gfp)
+static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
 {
 	int tag, zero = 0;
 
-	if (unlikely(!tags->nr_reserved_tags)) {
+	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
 		WARN_ON_ONCE(1);
 		return BLK_MQ_TAG_FAIL;
 	}
 
-	tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
+	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 
 	return tag;
 }
 
-unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
-			    gfp_t gfp, bool reserved)
+unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 {
-	if (!reserved)
-		return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);
+	if (!data->reserved)
+		return __blk_mq_get_tag(data);
 
-	return __blk_mq_get_reserved_tag(hctx->tags, gfp);
+	return __blk_mq_get_reserved_tag(data);
 }
 
 static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
```
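This is the schedule-from-atomic fix from the pull message: blk_mq_get_ctx() does a get_cpu(), so holding the software-queue context across io_schedule() meant sleeping with preemption disabled when the tag space was exhausted. A conceptual sketch of the rule the new bt_get() enforces, with `try_grab_tag()` as a hypothetical stand-in for the wait-queue and bitmap bookkeeping (the real code is in the hunk above, and `struct blk_mq_alloc_data` lives in the private block/blk-mq.h header):

```c
static int wait_for_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	for (;;) {
		tag = try_grab_tag(data);	/* hypothetical helper */
		if (tag != -1)
			return tag;

		if (!(data->gfp & __GFP_WAIT))	/* caller cannot sleep */
			return -1;

		/*
		 * blk_mq_get_ctx() took get_cpu(), so data->ctx pins
		 * preemption off.  Drop it before sleeping ...
		 */
		blk_mq_put_ctx(data->ctx);
		io_schedule();

		/*
		 * ... and re-resolve ctx/hctx afterwards: the task may
		 * have woken up on a different CPU.
		 */
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
							data->ctx->cpu);
	}
}
```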
```diff
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index c959de58d2a5..98696a65d4d4 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -48,7 +48,7 @@ struct blk_mq_tags {
 extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
 
-extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
+extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
```
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0f5879c42dcd..4e4cd6208052 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,28 +33,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-					   unsigned int cpu)
-{
-	return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
-	return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
-	put_cpu();
-}
-
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
@@ -232,24 +210,23 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *
-__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-		       struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 {
 	struct request *rq;
 	unsigned int tag;
 
-	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+	tag = blk_mq_get_tag(data);
 	if (tag != BLK_MQ_TAG_FAIL) {
-		rq = hctx->tags->rqs[tag];
+		rq = data->hctx->tags->rqs[tag];
 
 		rq->cmd_flags = 0;
-		if (blk_mq_tag_busy(hctx)) {
+		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
-			atomic_inc(&hctx->nr_active);
+			atomic_inc(&data->hctx->nr_active);
 		}
 
 		rq->tag = tag;
-		blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
 		return rq;
 	}
 
@@ -262,22 +239,27 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
+	struct blk_mq_alloc_data alloc_data;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
+			reserved, ctx, hctx);
 
-	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
-				    reserved);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (!rq && (gfp & __GFP_WAIT)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
+		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
+				hctx);
+		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
 	return rq;
@@ -547,15 +529,20 @@ void blk_mq_kick_requeue_list(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
-struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+static inline bool is_flush_request(struct request *rq, unsigned int tag)
 {
-	struct request_queue *q = hctx->queue;
+	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
+			rq->q->flush_rq->tag == tag);
+}
+
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+{
+	struct request *rq = tags->rqs[tag];
 
-	if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
-	    q->flush_rq->tag == tag)
-		return q->flush_rq;
+	if (!is_flush_request(rq, tag))
+		return rq;
 
-	return hctx->tags->rqs[tag];
+	return rq->q->flush_rq;
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
@@ -584,7 +571,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
 		if (tag >= hctx->tags->nr_tags)
 			break;
 
-		rq = blk_mq_tag_to_rq(hctx, tag++);
+		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
 		if (rq->q != hctx->queue)
 			continue;
 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
@@ -1158,6 +1145,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
 	int rw = bio_data_dir(bio);
+	struct blk_mq_alloc_data alloc_data;
 
 	if (unlikely(blk_mq_queue_enter(q))) {
 		bio_endio(bio, -EIO);
@@ -1171,7 +1159,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		rw |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
+	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
+			hctx);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (unlikely(!rq)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
@@ -1179,8 +1169,11 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
-				__GFP_WAIT|GFP_ATOMIC, false);
+		blk_mq_set_alloc_data(&alloc_data, q,
+				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
+		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		ctx = alloc_data.ctx;
+		hctx = alloc_data.hctx;
 	}
 
 	hctx->queued++;
@@ -1288,6 +1281,8 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		return;
 
 	rq = blk_mq_map_request(q, bio, &data);
+	if (unlikely(!rq))
+		return;
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1562,6 +1557,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 		if (i == nr_queue)
 			break;
 
+		blk_mq_tag_idle(hctx);
+
 		if (set->ops->exit_hctx)
 			set->ops->exit_hctx(hctx, i);
 
@@ -1779,7 +1776,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct blk_mq_hw_ctx **hctxs;
-	struct blk_mq_ctx *ctx;
+	struct blk_mq_ctx __percpu *ctx;
 	struct request_queue *q;
 	unsigned int *map;
 	int i;
```
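The blk_sq_make_request() hunk above adds the missing NULL check for the single-queue path: when the tag space is exhausted and the allocation cannot wait, or the queue is being torn down, blk_mq_map_request() returns NULL. The same rule applies to external callers of blk_mq_alloc_request(), whose signature is shown in the include/linux/blk-mq.h hunk below. A hedged usage sketch (`my_submit_rq` is a made-up example name):

```c
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static int my_submit_rq(struct request_queue *q)
{
	struct request *rq;

	/*
	 * GFP_KERNEL includes __GFP_WAIT, so this may block for a tag, but
	 * it can still return NULL (e.g. blk_mq_queue_enter() failing while
	 * the queue goes away), so the result must always be checked.
	 */
	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
	if (!rq)
		return -ENOMEM;

	/* ... fill in and insert the request here ... */

	blk_mq_free_request(rq);
	return 0;
}
```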
```diff
diff --git a/block/blk-mq.h b/block/blk-mq.h
index de7b3bbd5bd6..26460884c6cd 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -69,4 +69,49 @@ struct blk_align_bitmap {
 	unsigned long depth;
 } ____cacheline_aligned_in_smp;
 
+static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+					   unsigned int cpu)
+{
+	return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queueing queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+	return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+	put_cpu();
+}
+
+struct blk_mq_alloc_data {
+	/* input parameter */
+	struct request_queue *q;
+	gfp_t gfp;
+	bool reserved;
+
+	/* input & output parameter */
+	struct blk_mq_ctx *ctx;
+	struct blk_mq_hw_ctx *hctx;
+};
+
+static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
+		struct request_queue *q, gfp_t gfp, bool reserved,
+		struct blk_mq_ctx *ctx,
+		struct blk_mq_hw_ctx *hctx)
+{
+	data->q = q;
+	data->gfp = gfp;
+	data->reserved = reserved;
+	data->ctx = ctx;
+	data->hctx = hctx;
+}
+
 #endif
```
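The new `blk_mq_alloc_data` bundle exists so the allocation path can hand ctx/hctx back to the caller after a sleep. A hedged sketch of the intended calling pattern, condensed from the blk_mq_map_request() hunk above; `example_map_request` is a made-up name and this only compiles inside block/blk-mq.c, where __blk_mq_alloc_request() is visible:

```c
static struct request *example_map_request(struct request_queue *q,
					   struct bio *bio)
{
	struct blk_mq_alloc_data alloc_data;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;

	ctx = blk_mq_get_ctx(q);			/* get_cpu() inside */
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * ctx and hctx go in as hints; with a waiting allocation bt_get()
	 * may sleep, migrate and re-map them, so read them back afterwards.
	 */
	blk_mq_set_alloc_data(&alloc_data, q, __GFP_WAIT | GFP_ATOMIC,
			      false, ctx, hctx);
	rq = __blk_mq_alloc_request(&alloc_data, bio_data_dir(bio));

	ctx = alloc_data.ctx;
	hctx = alloc_data.hctx;
	blk_mq_put_ctx(ctx);				/* put_cpu() */

	return rq;
}
```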
```diff
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index abc858b3528b..74abd49fabdc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -193,7 +193,9 @@ static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
 static struct request *mtip_rq_from_tag(struct driver_data *dd,
 					unsigned int tag)
 {
-	return blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag);
+	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
+
+	return blk_mq_tag_to_rq(hctx->tags, tag);
 }
 
 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
```
```diff
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c15128833100..0feedebfde48 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -155,7 +155,7 @@ void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);
-struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag);
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
```
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8aba35f46f87..5c6f836afa1b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -335,7 +335,7 @@ struct request_queue {
 	unsigned int		*mq_map;
 
 	/* sw queues */
-	struct blk_mq_ctx	*queue_ctx;
+	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int		nr_queues;
 
 	/* hw dispatch queues */
```
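The `__percpu` annotations on `queue_ctx` and the local `ctx` in blk_mq_init_queue() address the sparse warning mentioned in the pull message: per-CPU data lives in a separate sparse address space, so any pointer holding an alloc_percpu() result or passed to per_cpu_ptr() must carry the annotation. A hedged, generic illustration with made-up names (`struct foo`, `foo_init`), not code from this series:

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct foo {
	int counter;
};

struct foo __percpu *foo_init(void)
{
	struct foo __percpu *p;	/* drop __percpu here and sparse warns */
	int cpu;

	p = alloc_percpu(struct foo);
	if (!p)
		return NULL;

	/* per_cpu_ptr() converts the __percpu cookie into a real pointer */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(p, cpu)->counter = 0;

	return p;
}
```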
