author     Linus Torvalds <torvalds@linux-foundation.org>  2014-03-07 12:59:44 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-03-07 12:59:44 -0500
commit     2a75184d52f41db1f54ad9eee7fa8b3ad820f4ac (patch)
tree       126192eee76510426ee08b9f3a2197ff766a0d50
parent     8ab47d3ec77d94ad9a6bb01efd696e1e34cfe80d (diff)
parent     739c3eea711a255df5ed1246face0a4dce5e589f (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"Small collection of fixes for 3.14-rc. It contains:
  - Three minor updates to blk-mq from Christoph.
  - Reduce the number of unaligned (< 4kb) in-flight writes on mtip32xx
    to two. From Micron.
  - Make the blk-mq CPU notify spinlock raw, since it can't be a
    sleeping lock on RT. From Mike Galbraith.
  - Drop a now-bogus BUG_ON() for bio iteration with blk integrity.
    From Nic Bellinger.
  - Properly propagate the SYNC flag on requests. From Shaohua"
* 'for-linus' of git://git.kernel.dk/linux-block:
blk-mq: add REQ_SYNC early
rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
bio-integrity: Drop bio_integrity_verify BUG_ON in post bip->bip_iter world
blk-mq: support partial I/O completions
blk-mq: merge blk_mq_insert_request and blk_mq_run_request
blk-mq: remove blk_mq_alloc_rq
mtip32xx: Reduce the number of unaligned writes to 2
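The headline API change in this series is the consolidation of the two insert paths: blk_mq_run_request() is removed and its async argument is folded into blk_mq_insert_request(), which now derives the queue from rq->q instead of taking it as a parameter. A sketch of the caller mapping, reconstructed from the hunks below:

/*
 * Caller mapping implied by the diffs that follow (illustrative,
 * not an exhaustive list of call sites):
 *
 *   old: blk_mq_insert_request(q, rq, at_head, run_queue);
 *   old: blk_mq_run_request(rq, run_queue, async);   (internal-only)
 *
 *   new: blk_mq_insert_request(rq, at_head, run_queue, async);
 */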
 block/blk-exec.c                  |   2
 block/blk-flush.c                 |   4
 block/blk-mq-cpu.c                |  14
 block/blk-mq.c                    | 102
 block/blk-mq.h                    |   1
 drivers/block/mtip32xx/mtip32xx.h |   2
 fs/bio-integrity.c                |   5
 include/linux/blk-mq.h            |  11
 8 files changed, 39 insertions(+), 102 deletions(-)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613bb4c79..dbf4502b1d67 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
 		return;
 	}
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b697f5db..f598f794c3c6 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
 	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146befb56aa..136ef8643bba 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
 #include "blk-mq.h"
 
 static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
 
 static int blk_mq_main_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
 	unsigned int cpu = (unsigned long) hcpu;
 	struct blk_mq_cpu_notifier *notify;
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 
 	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
 		notify->notify(notify->data, action, cpu);
 
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 	return NOTIFY_OK;
 }
 
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
 	BUG_ON(!notifier->notify);
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_del(&notifier->list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
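The conversion above only changes behaviour on PREEMPT_RT, where a plain spinlock_t becomes a sleeping rt_mutex; the CPU-hotplug notifier path that takes this lock must not sleep, so the lock has to stay a genuinely spinning raw_spinlock_t. A minimal sketch of the pattern, with a hypothetical lock name:

#include <linux/spinlock.h>

/* Hypothetical example of the same pattern: raw_spinlock_t remains a
 * true spinning lock even on PREEMPT_RT, so it is safe in contexts
 * that must not sleep, such as CPU notifier callbacks. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	/* keep this short: other CPUs spin while the lock is held */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}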
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd153fde..883f72089015 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 	set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
-				       bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+					      gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved)
-{
-	return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
-	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
-
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-	struct bio *bio = rq->bio;
-	unsigned int bytes = 0;
-
-	trace_block_rq_complete(rq->q, rq);
-
-	while (bio) {
-		struct bio *next = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bytes += bio->bi_iter.bi_size;
-		blk_mq_bio_endio(rq, bio, error);
-		bio = next;
-	}
-
-	blk_account_io_completion(rq, bytes);
+	if (blk_update_request(rq, error, nr_bytes))
+		return true;
 
 	blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
+	return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+			   bool async)
 {
+	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
 		blk_insert_flush(rq);
 	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
-	}
-
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
 	}
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
 
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
+	if (is_sync)
+		rw |= REQ_SYNC;
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
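Two behavioural changes hide in this file. First, the completion path is rebuilt on blk_update_request(), so a driver can retire a request piecewise: blk_mq_end_io_partial() returns true while bytes remain outstanding and false once the request has been fully completed and released. Second, blk_mq_make_request() now ORs REQ_SYNC into rw for synchronous bios, so the sync hint actually reaches the allocated request. A hedged driver-side sketch of the partial-completion contract (the function name is hypothetical):

/* Hypothetical driver completion path using the new primitive. */
static void my_complete_io(struct request *rq, int error,
			   unsigned int done_bytes)
{
	if (blk_mq_end_io_partial(rq, error, done_bytes)) {
		/* done_bytes were retired, but the request still has
		 * bytes outstanding: keep servicing rq */
		return;
	}
	/* fully completed: rq was freed or handed to rq->end_io(),
	 * so it must not be touched again */
}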
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ed0035cd458e..72beba1f9d55 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };
 
 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b52e9a6d6aad..54174cb32feb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -53,7 +53,7 @@
 #define MTIP_FTL_REBUILD_TIMEOUT_MS	2400000
 
 /* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS	8
+#define MTIP_MAX_UNALIGNED_SLOTS	2
 
 /* Macro to extract the tag bit number from a tag value. */
 #define MTIP_TAG_BIT(tag)	(tag & 0x1F)
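MTIP_MAX_UNALIGNED_SLOTS caps how many unaligned (< 4kb) writes the driver keeps in flight; Micron's fix narrows that window from 8 to 2. Below is a sketch of the counting-semaphore throttle such a constant typically feeds; it illustrates the pattern and is not the driver's exact code:

#include <linux/semaphore.h>

/* Illustrative throttle: a counting semaphore initialized to the
 * slot limit bounds concurrent unaligned writes. */
static struct semaphore unaligned_sem;	/* hypothetical name */

static void unaligned_throttle_init(void)
{
	sema_init(&unaligned_sem, 2);	/* MTIP_MAX_UNALIGNED_SLOTS */
}

static int unaligned_write_begin(void)
{
	return down_interruptible(&unaligned_sem);	/* may block */
}

static void unaligned_write_end(void)
{
	up(&unaligned_sem);
}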
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0129b78a6908..4f70f383132c 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -458,11 +458,10 @@ static int bio_integrity_verify(struct bio *bio)
 	struct blk_integrity_exchg bix;
 	struct bio_vec *bv;
 	sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
-	unsigned int sectors, total, ret;
+	unsigned int sectors, ret = 0;
 	void *prot_buf = bio->bi_integrity->bip_buf;
 	int i;
 
-	ret = total = 0;
 	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
 	bix.sector_size = bi->sector_size;
 
@@ -484,8 +483,6 @@ static int bio_integrity_verify(struct bio *bio)
 		sectors = bv->bv_len / bi->sector_size;
 		sector += sectors;
 		prot_buf += sectors * bi->tuple_size;
-		total += sectors * bi->tuple_size;
-		BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
 		kunmap_atomic(kaddr);
 	}
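The arithmetic the dropped BUG_ON() duplicated is worth spelling out; the worked numbers below assume 512-byte sectors and 8-byte T10 DIF tuples, which are typical values rather than anything this diff states:

/*
 * Per bio_vec, with sector_size = 512 and tuple_size = 8:
 *
 *   bv->bv_len = 4096  ->  sectors   = 4096 / 512 = 8
 *                          prot_buf += 8 * 8      = 64 bytes of PI
 *
 * The removed lines accumulated that same 64 bytes into `total` and
 * asserted total <= bip_iter.bi_size; with 3.14's immutable bip_iter
 * that bound need no longer hold by the time verification runs, so
 * the assertion could fire on valid I/O.
 */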
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 18ba8a627f46..2ff2e8d982be 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
-void blk_mq_insert_request(struct request_queue *, struct request *,
-			   bool, bool);
+void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
-void blk_mq_end_io(struct request *rq, int error);
+bool blk_mq_end_io_partial(struct request *rq, int error,
+			   unsigned int nr_bytes);
+static inline void blk_mq_end_io(struct request *rq, int error)
+{
+	bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
+	BUG_ON(!done);
+}
 
 void blk_mq_complete_request(struct request *rq);
 
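Note the compatibility shim in this header: blk_mq_end_io() survives as a static inline that requests full completion via blk_rq_bytes(rq) and BUG()s if anything were left outstanding, so existing callers compile unchanged while blk_mq_end_io_partial() is the one exported symbol. A hedged usage sketch (hypothetical caller):

/* Drivers without partial completions keep the one-shot API; the
 * inline forwards the request's full byte count. */
static void my_simple_complete(struct request *rq, int error)
{
	blk_mq_end_io(rq, error);	/* must fully complete rq */
}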