| author | Kent Overstreet <kmo@daterainc.com> | 2013-09-10 20:06:17 -0400 |
| --- | --- | --- |
| committer | Kent Overstreet <kmo@daterainc.com> | 2013-11-11 00:56:01 -0500 |
| commit | cdd972b164be8fc69f6ee8533c5a07b621da74c7 (patch) | |
| tree | 883fb80f115175d813106ca80f4a121497710d8f /drivers/md/bcache/request.c | |
| parent | 84f0db03ea1e024f2a9e6cfcf7ac0323e4f84d3a (diff) | |
bcache: Refactor read request code a bit
More refactoring and renaming: the cached_dev request handlers are renamed from request_*() to cached_dev_*(), and cached_dev_cache_miss() now builds its cache bio in a local variable before storing it in s->op.cache_bio.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c | 71
1 file changed, 35 insertions, 36 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index dbc2ef6e7a35..3b85f33ae4c7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -884,7 +884,7 @@ skip:
 
 /* Process reads */
 
-static void cached_dev_read_complete(struct closure *cl)
+static void cached_dev_cache_miss_done(struct closure *cl)
 {
         struct search *s = container_of(cl, struct search, cl);
 
@@ -902,9 +902,10 @@ static void cached_dev_read_complete(struct closure *cl)
         cached_dev_bio_complete(cl);
 }
 
-static void request_read_error(struct closure *cl)
+static void cached_dev_read_error(struct closure *cl)
 {
         struct search *s = container_of(cl, struct search, cl);
+        struct bio *bio = &s->bio.bio;
         struct bio_vec *bv;
         int i;
 
@@ -928,20 +929,20 @@ static void request_read_error(struct closure *cl)
 
                 /* XXX: invalidate cache */
 
-                closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+                closure_bio_submit(bio, cl, s->d);
         }
 
-        continue_at(cl, cached_dev_read_complete, NULL);
+        continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done(struct closure *cl)
+static void cached_dev_read_done(struct closure *cl)
 {
         struct search *s = container_of(cl, struct search, cl);
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
         /*
-         * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
-         * contains data ready to be inserted into the cache.
+         * We had a cache miss; cache_bio now contains data ready to be inserted
+         * into the cache.
          *
          * First, we copy the data we just read from cache_bio's bounce buffers
         * to the buffers the original bio pointed to:
@@ -971,10 +972,10 @@ static void request_read_done(struct closure *cl)
                 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
         }
 
-        continue_at(cl, cached_dev_read_complete, NULL);
+        continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done_bh(struct closure *cl)
+static void cached_dev_read_done_bh(struct closure *cl)
 {
         struct search *s = container_of(cl, struct search, cl);
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
@@ -983,11 +984,11 @@ static void request_read_done_bh(struct closure *cl)
         trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.bypass);
 
         if (s->error)
-                continue_at_nobarrier(cl, request_read_error, bcache_wq);
+                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
         else if (s->op.cache_bio || verify(dc, &s->bio.bio))
-                continue_at_nobarrier(cl, request_read_done, bcache_wq);
+                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
         else
-                continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+                continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 }
 
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
@@ -996,7 +997,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
         int ret = 0;
         unsigned reada = 0;
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-        struct bio *miss;
+        struct bio *miss, *cache_bio;
 
         if (s->cache_miss || s->op.bypass) {
                 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
@@ -1027,33 +1028,31 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
         /* btree_search_recurse()'s btree iterator is no good anymore */
         ret = -EINTR;
 
-        s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+        cache_bio = bio_alloc_bioset(GFP_NOWAIT,
                         DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
                         dc->disk.bio_split);
-
-        if (!s->op.cache_bio)
+        if (!cache_bio)
                 goto out_submit;
 
-        s->op.cache_bio->bi_sector = miss->bi_sector;
-        s->op.cache_bio->bi_bdev = miss->bi_bdev;
-        s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
+        cache_bio->bi_sector = miss->bi_sector;
+        cache_bio->bi_bdev = miss->bi_bdev;
+        cache_bio->bi_size = s->cache_bio_sectors << 9;
 
-        s->op.cache_bio->bi_end_io = request_endio;
-        s->op.cache_bio->bi_private = &s->cl;
+        cache_bio->bi_end_io = request_endio;
+        cache_bio->bi_private = &s->cl;
 
-        bch_bio_map(s->op.cache_bio, NULL);
-        if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+        bch_bio_map(cache_bio, NULL);
+        if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                 goto out_put;
 
         s->cache_miss = miss;
-        bio_get(s->op.cache_bio);
-
-        closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+        s->op.cache_bio = cache_bio;
+        bio_get(cache_bio);
+        closure_bio_submit(cache_bio, &s->cl, s->d);
 
         return ret;
 out_put:
-        bio_put(s->op.cache_bio);
-        s->op.cache_bio = NULL;
+        bio_put(cache_bio);
 out_submit:
         miss->bi_end_io = request_endio;
         miss->bi_private = &s->cl;
@@ -1061,12 +1060,12 @@ out_submit:
         return ret;
 }
 
-static void request_read(struct cached_dev *dc, struct search *s)
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
         struct closure *cl = &s->cl;
 
         closure_call(&s->op.cl, btree_read_async, NULL, cl);
-        continue_at(cl, request_read_done_bh, NULL);
+        continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
 /* Process writes */
@@ -1080,7 +1079,7 @@ static void cached_dev_write_complete(struct closure *cl)
         cached_dev_bio_complete(cl);
 }
 
-static void request_write(struct cached_dev *dc, struct search *s)
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
         struct closure *cl = &s->cl;
         struct bio *bio = &s->bio.bio;
@@ -1152,7 +1151,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
         continue_at(cl, cached_dev_write_complete, NULL);
 }
 
-static void request_nodata(struct cached_dev *dc, struct search *s)
+static void cached_dev_nodata(struct cached_dev *dc, struct search *s)
 {
         struct closure *cl = &s->cl;
         struct bio *bio = &s->bio.bio;
@@ -1188,14 +1187,14 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
                 trace_bcache_request_start(s, bio);
 
                 if (!bio->bi_size)
-                        request_nodata(dc, s);
+                        cached_dev_nodata(dc, s);
                 else {
                         s->op.bypass = check_should_bypass(dc, s);
 
                         if (rw)
-                                request_write(dc, s);
+                                cached_dev_write(dc, s);
                         else
-                                request_read(dc, s);
+                                cached_dev_read(dc, s);
                 }
         } else {
                 if ((bio->bi_rw & REQ_DISCARD) &&