diff options
| author | Coly Li <colyli@suse.de> | 2018-03-18 20:36:24 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2018-03-18 22:15:20 -0400 |
| commit | 27a40ab9269e79b55672312b324f8f29d94463d4 (patch) | |
| tree | 82fd33e12b58e2ac79f5146984be7e02cd62f62a /drivers/md | |
| parent | df2b94313ae5b4f60d49e01d4dff5acb4c2757cf (diff) | |
bcache: add backing_request_endio() for bi_end_io
In order to catch I/O errors of the backing device, a separate bi_end_io
callback is required. Then a per-backing-device counter can record the
number of I/O errors and retire the backing device if the counter reaches
a per-backing-device I/O error limit.
This patch adds backing_request_endio() to the bcache backing device I/O code
path; this is a preparation for further, more complicated backing device
failure handling. So far there is no real code logic change; I make this a
separate patch to make sure it is stable and reliable for further work.
Changelog:
v2: Fix code comments typo, remove a redundant bch_writeback_add() line
added in v4 patch set.
v1: indeed this is newly added in this patch set.
[mlyle: truncated commit subject]
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Michael Lyle <mlyle@lyle.org>
Cc: Junhui Tang <tang.junhui@zte.com.cn>
Cc: Michael Lyle <mlyle@lyle.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md')
| -rw-r--r-- | drivers/md/bcache/request.c | 93 | ||||
| -rw-r--r-- | drivers/md/bcache/super.c | 1 | ||||
| -rw-r--r-- | drivers/md/bcache/writeback.c | 1 |
3 files changed, 79 insertions, 16 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 5c8ae69c8502..b4a5768afbe9 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
| @@ -139,6 +139,7 @@ static void bch_data_invalidate(struct closure *cl) | |||
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | op->insert_data_done = true; | 141 | op->insert_data_done = true; |
| 142 | /* get in bch_data_insert() */ | ||
| 142 | bio_put(bio); | 143 | bio_put(bio); |
| 143 | out: | 144 | out: |
| 144 | continue_at(cl, bch_data_insert_keys, op->wq); | 145 | continue_at(cl, bch_data_insert_keys, op->wq); |
| @@ -630,6 +631,38 @@ static void request_endio(struct bio *bio) | |||
| 630 | closure_put(cl); | 631 | closure_put(cl); |
| 631 | } | 632 | } |
| 632 | 633 | ||
| 634 | static void backing_request_endio(struct bio *bio) | ||
| 635 | { | ||
| 636 | struct closure *cl = bio->bi_private; | ||
| 637 | |||
| 638 | if (bio->bi_status) { | ||
| 639 | struct search *s = container_of(cl, struct search, cl); | ||
| 640 | /* | ||
| 641 | * If a bio has REQ_PREFLUSH for writeback mode, it is | ||
| 642 | * specially assembled in cached_dev_write() for a non-zero | ||
| 643 | * write request which has REQ_PREFLUSH. we don't set | ||
| 644 | * s->iop.status by this failure, the status will be decided | ||
| 645 | * by result of bch_data_insert() operation. | ||
| 646 | */ | ||
| 647 | if (unlikely(s->iop.writeback && | ||
| 648 | bio->bi_opf & REQ_PREFLUSH)) { | ||
| 649 | char buf[BDEVNAME_SIZE]; | ||
| 650 | |||
| 651 | bio_devname(bio, buf); | ||
| 652 | pr_err("Can't flush %s: returned bi_status %i", | ||
| 653 | buf, bio->bi_status); | ||
| 654 | } else { | ||
| 655 | /* set to orig_bio->bi_status in bio_complete() */ | ||
| 656 | s->iop.status = bio->bi_status; | ||
| 657 | } | ||
| 658 | s->recoverable = false; | ||
| 659 | /* should count I/O error for backing device here */ | ||
| 660 | } | ||
| 661 | |||
| 662 | bio_put(bio); | ||
| 663 | closure_put(cl); | ||
| 664 | } | ||
| 665 | |||
| 633 | static void bio_complete(struct search *s) | 666 | static void bio_complete(struct search *s) |
| 634 | { | 667 | { |
| 635 | if (s->orig_bio) { | 668 | if (s->orig_bio) { |
| @@ -644,13 +677,21 @@ static void bio_complete(struct search *s) | |||
| 644 | } | 677 | } |
| 645 | } | 678 | } |
| 646 | 679 | ||
| 647 | static void do_bio_hook(struct search *s, struct bio *orig_bio) | 680 | static void do_bio_hook(struct search *s, |
| 681 | struct bio *orig_bio, | ||
| 682 | bio_end_io_t *end_io_fn) | ||
| 648 | { | 683 | { |
| 649 | struct bio *bio = &s->bio.bio; | 684 | struct bio *bio = &s->bio.bio; |
| 650 | 685 | ||
| 651 | bio_init(bio, NULL, 0); | 686 | bio_init(bio, NULL, 0); |
| 652 | __bio_clone_fast(bio, orig_bio); | 687 | __bio_clone_fast(bio, orig_bio); |
| 653 | bio->bi_end_io = request_endio; | 688 | /* |
| 689 | * bi_end_io can be set separately somewhere else, e.g. the | ||
| 690 | * variants in, | ||
| 691 | * - cache_bio->bi_end_io from cached_dev_cache_miss() | ||
| 692 | * - n->bi_end_io from cache_lookup_fn() | ||
| 693 | */ | ||
| 694 | bio->bi_end_io = end_io_fn; | ||
| 654 | bio->bi_private = &s->cl; | 695 | bio->bi_private = &s->cl; |
| 655 | 696 | ||
| 656 | bio_cnt_set(bio, 3); | 697 | bio_cnt_set(bio, 3); |
| @@ -676,7 +717,7 @@ static inline struct search *search_alloc(struct bio *bio, | |||
| 676 | s = mempool_alloc(d->c->search, GFP_NOIO); | 717 | s = mempool_alloc(d->c->search, GFP_NOIO); |
| 677 | 718 | ||
| 678 | closure_init(&s->cl, NULL); | 719 | closure_init(&s->cl, NULL); |
| 679 | do_bio_hook(s, bio); | 720 | do_bio_hook(s, bio, request_endio); |
| 680 | 721 | ||
| 681 | s->orig_bio = bio; | 722 | s->orig_bio = bio; |
| 682 | s->cache_miss = NULL; | 723 | s->cache_miss = NULL; |
| @@ -743,10 +784,11 @@ static void cached_dev_read_error(struct closure *cl) | |||
| 743 | trace_bcache_read_retry(s->orig_bio); | 784 | trace_bcache_read_retry(s->orig_bio); |
| 744 | 785 | ||
| 745 | s->iop.status = 0; | 786 | s->iop.status = 0; |
| 746 | do_bio_hook(s, s->orig_bio); | 787 | do_bio_hook(s, s->orig_bio, backing_request_endio); |
| 747 | 788 | ||
| 748 | /* XXX: invalidate cache */ | 789 | /* XXX: invalidate cache */ |
| 749 | 790 | ||
| 791 | /* I/O request sent to backing device */ | ||
| 750 | closure_bio_submit(s->iop.c, bio, cl); | 792 | closure_bio_submit(s->iop.c, bio, cl); |
| 751 | } | 793 | } |
| 752 | 794 | ||
| @@ -859,7 +901,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
| 859 | bio_copy_dev(cache_bio, miss); | 901 | bio_copy_dev(cache_bio, miss); |
| 860 | cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; | 902 | cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; |
| 861 | 903 | ||
| 862 | cache_bio->bi_end_io = request_endio; | 904 | cache_bio->bi_end_io = backing_request_endio; |
| 863 | cache_bio->bi_private = &s->cl; | 905 | cache_bio->bi_private = &s->cl; |
| 864 | 906 | ||
| 865 | bch_bio_map(cache_bio, NULL); | 907 | bch_bio_map(cache_bio, NULL); |
| @@ -872,14 +914,16 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
| 872 | s->cache_miss = miss; | 914 | s->cache_miss = miss; |
| 873 | s->iop.bio = cache_bio; | 915 | s->iop.bio = cache_bio; |
| 874 | bio_get(cache_bio); | 916 | bio_get(cache_bio); |
| 917 | /* I/O request sent to backing device */ | ||
| 875 | closure_bio_submit(s->iop.c, cache_bio, &s->cl); | 918 | closure_bio_submit(s->iop.c, cache_bio, &s->cl); |
| 876 | 919 | ||
| 877 | return ret; | 920 | return ret; |
| 878 | out_put: | 921 | out_put: |
| 879 | bio_put(cache_bio); | 922 | bio_put(cache_bio); |
| 880 | out_submit: | 923 | out_submit: |
| 881 | miss->bi_end_io = request_endio; | 924 | miss->bi_end_io = backing_request_endio; |
| 882 | miss->bi_private = &s->cl; | 925 | miss->bi_private = &s->cl; |
| 926 | /* I/O request sent to backing device */ | ||
| 883 | closure_bio_submit(s->iop.c, miss, &s->cl); | 927 | closure_bio_submit(s->iop.c, miss, &s->cl); |
| 884 | return ret; | 928 | return ret; |
| 885 | } | 929 | } |
| @@ -943,31 +987,46 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) | |||
| 943 | s->iop.bio = s->orig_bio; | 987 | s->iop.bio = s->orig_bio; |
| 944 | bio_get(s->iop.bio); | 988 | bio_get(s->iop.bio); |
| 945 | 989 | ||
| 946 | if ((bio_op(bio) != REQ_OP_DISCARD) || | 990 | if (bio_op(bio) == REQ_OP_DISCARD && |
| 947 | blk_queue_discard(bdev_get_queue(dc->bdev))) | 991 | !blk_queue_discard(bdev_get_queue(dc->bdev))) |
| 948 | closure_bio_submit(s->iop.c, bio, cl); | 992 | goto insert_data; |
| 993 | |||
| 994 | /* I/O request sent to backing device */ | ||
| 995 | bio->bi_end_io = backing_request_endio; | ||
| 996 | closure_bio_submit(s->iop.c, bio, cl); | ||
| 997 | |||
| 949 | } else if (s->iop.writeback) { | 998 | } else if (s->iop.writeback) { |
| 950 | bch_writeback_add(dc); | 999 | bch_writeback_add(dc); |
| 951 | s->iop.bio = bio; | 1000 | s->iop.bio = bio; |
| 952 | 1001 | ||
| 953 | if (bio->bi_opf & REQ_PREFLUSH) { | 1002 | if (bio->bi_opf & REQ_PREFLUSH) { |
| 954 | /* Also need to send a flush to the backing device */ | 1003 | /* |
| 955 | struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, | 1004 | * Also need to send a flush to the backing |
| 956 | dc->disk.bio_split); | 1005 | * device. |
| 957 | 1006 | */ | |
| 1007 | struct bio *flush; | ||
| 1008 | |||
| 1009 | flush = bio_alloc_bioset(GFP_NOIO, 0, | ||
| 1010 | dc->disk.bio_split); | ||
| 1011 | if (!flush) { | ||
| 1012 | s->iop.status = BLK_STS_RESOURCE; | ||
| 1013 | goto insert_data; | ||
| 1014 | } | ||
| 958 | bio_copy_dev(flush, bio); | 1015 | bio_copy_dev(flush, bio); |
| 959 | flush->bi_end_io = request_endio; | 1016 | flush->bi_end_io = backing_request_endio; |
| 960 | flush->bi_private = cl; | 1017 | flush->bi_private = cl; |
| 961 | flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; | 1018 | flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; |
| 962 | 1019 | /* I/O request sent to backing device */ | |
| 963 | closure_bio_submit(s->iop.c, flush, cl); | 1020 | closure_bio_submit(s->iop.c, flush, cl); |
| 964 | } | 1021 | } |
| 965 | } else { | 1022 | } else { |
| 966 | s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); | 1023 | s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); |
| 967 | 1024 | /* I/O request sent to backing device */ | |
| 1025 | bio->bi_end_io = backing_request_endio; | ||
| 968 | closure_bio_submit(s->iop.c, bio, cl); | 1026 | closure_bio_submit(s->iop.c, bio, cl); |
| 969 | } | 1027 | } |
| 970 | 1028 | ||
| 1029 | insert_data: | ||
| 971 | closure_call(&s->iop.cl, bch_data_insert, NULL, cl); | 1030 | closure_call(&s->iop.cl, bch_data_insert, NULL, cl); |
| 972 | continue_at(cl, cached_dev_write_complete, NULL); | 1031 | continue_at(cl, cached_dev_write_complete, NULL); |
| 973 | } | 1032 | } |
| @@ -981,6 +1040,7 @@ static void cached_dev_nodata(struct closure *cl) | |||
| 981 | bch_journal_meta(s->iop.c, cl); | 1040 | bch_journal_meta(s->iop.c, cl); |
| 982 | 1041 | ||
| 983 | /* If it's a flush, we send the flush to the backing device too */ | 1042 | /* If it's a flush, we send the flush to the backing device too */ |
| 1043 | bio->bi_end_io = backing_request_endio; | ||
| 984 | closure_bio_submit(s->iop.c, bio, cl); | 1044 | closure_bio_submit(s->iop.c, bio, cl); |
| 985 | 1045 | ||
| 986 | continue_at(cl, cached_dev_bio_complete, NULL); | 1046 | continue_at(cl, cached_dev_bio_complete, NULL); |
| @@ -1078,6 +1138,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, | |||
| 1078 | cached_dev_read(dc, s); | 1138 | cached_dev_read(dc, s); |
| 1079 | } | 1139 | } |
| 1080 | } else | 1140 | } else |
| 1141 | /* I/O request sent to backing device */ | ||
| 1081 | detached_dev_do_request(d, bio); | 1142 | detached_dev_do_request(d, bio); |
| 1082 | 1143 | ||
| 1083 | return BLK_QC_T_NONE; | 1144 | return BLK_QC_T_NONE; |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f1f64853114b..2f8e70aefc90 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
| @@ -273,6 +273,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) | |||
| 273 | bio->bi_private = dc; | 273 | bio->bi_private = dc; |
| 274 | 274 | ||
| 275 | closure_get(cl); | 275 | closure_get(cl); |
| 276 | /* I/O request sent to backing device */ | ||
| 276 | __write_super(&dc->sb, bio); | 277 | __write_super(&dc->sb, bio); |
| 277 | 278 | ||
| 278 | closure_return_with_destructor(cl, bch_write_bdev_super_unlock); | 279 | closure_return_with_destructor(cl, bch_write_bdev_super_unlock); |
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 70092ada68e6..4a9547cdcdc5 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
| @@ -289,6 +289,7 @@ static void write_dirty(struct closure *cl) | |||
| 289 | bio_set_dev(&io->bio, io->dc->bdev); | 289 | bio_set_dev(&io->bio, io->dc->bdev); |
| 290 | io->bio.bi_end_io = dirty_endio; | 290 | io->bio.bi_end_io = dirty_endio; |
| 291 | 291 | ||
| 292 | /* I/O request sent to backing device */ | ||
| 292 | closure_bio_submit(io->dc->disk.c, &io->bio, cl); | 293 | closure_bio_submit(io->dc->disk.c, &io->bio, cl); |
| 293 | } | 294 | } |
| 294 | 295 | ||
