author		Kent Overstreet <koverstreet@google.com>	2013-06-06 21:15:57 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-07-01 17:43:53 -0400
commit		8e51e414a3c6d92ef2cc41720c67342a8e2c0bf7 (patch)
tree		3155648dff173925a882ee182cd188e246ce9498 /drivers/md/bcache/request.c
parent		47cd2eb0ee05d9b1f8acd4808a1c829d63e93ac1 (diff)
bcache: Use standard utility code
Some of bcache's utility code has made it into the rest of the kernel, so drop the bcache versions.

Bcache used to have a workaround for allocating from a bio set under generic_make_request() (if you allocated more than once, the bios you already allocated would get stuck on current->bio_list when you submitted, and you'd risk deadlock) - bcache would mask out __GFP_WAIT when allocating bios under generic_make_request() so that allocation could fail and it could retry from workqueue. But bio_alloc_bioset() has a workaround now, so we can drop this hack and the associated error handling.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
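The hack being removed is visible in the first hunk below: bch_bio_split() could come back NULL when it was not allowed to block, and the closure rescheduled itself onto bcache_wq to retry from process context. Roughly (code from the hunk, comments mine):

	n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
	if (!n) {
		/* Allocation failed under generic_make_request(); drop the
		 * key reference and retry from workqueue context, where
		 * blocking on the bio_set is safe. */
		__bkey_put(op->c, k);
		continue_at(cl, bch_insert_data_loop, bcache_wq);
	}

Since the upstream "block: Avoid deadlocks with bio allocation by stacking drivers" change (merged for 3.10, if I recall correctly), bio_alloc_bioset() itself punts bios stuck on current->bio_list to a per-bio_set rescuer workqueue, so the NULL return can no longer happen under GFP_NOIO and every caller's retry path is dead code.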
Diffstat (limited to 'drivers/md/bcache/request.c')
 -rw-r--r--  drivers/md/bcache/request.c | 87 +++++----------------
 1 file changed, 18 insertions(+), 69 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index bcdf1f782c3e..b6e74d3c8faf 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -509,10 +509,6 @@ static void bch_insert_data_loop(struct closure *cl)
 			goto err;
 
 		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
-		if (!n) {
-			__bkey_put(op->c, k);
-			continue_at(cl, bch_insert_data_loop, bcache_wq);
-		}
 
 		n->bi_end_io	= bch_insert_data_endio;
 		n->bi_private	= cl;
@@ -821,53 +817,13 @@ static void request_read_done(struct closure *cl)
 	 */
 
 	if (s->op.cache_bio) {
-		struct bio_vec *src, *dst;
-		unsigned src_offset, dst_offset, bytes;
-		void *dst_ptr;
-
 		bio_reset(s->op.cache_bio);
 		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
 		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
 		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
 		bch_bio_map(s->op.cache_bio, NULL);
 
-		src = bio_iovec(s->op.cache_bio);
-		dst = bio_iovec(s->cache_miss);
-		src_offset = src->bv_offset;
-		dst_offset = dst->bv_offset;
-		dst_ptr = kmap(dst->bv_page);
-
-		while (1) {
-			if (dst_offset == dst->bv_offset + dst->bv_len) {
-				kunmap(dst->bv_page);
-				dst++;
-				if (dst == bio_iovec_idx(s->cache_miss,
-						s->cache_miss->bi_vcnt))
-					break;
-
-				dst_offset = dst->bv_offset;
-				dst_ptr = kmap(dst->bv_page);
-			}
-
-			if (src_offset == src->bv_offset + src->bv_len) {
-				src++;
-				if (src == bio_iovec_idx(s->op.cache_bio,
-						 s->op.cache_bio->bi_vcnt))
-					BUG();
-
-				src_offset = src->bv_offset;
-			}
-
-			bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
-				    src->bv_offset + src->bv_len - src_offset);
-
-			memcpy(dst_ptr + dst_offset,
-			       page_address(src->bv_page) + src_offset,
-			       bytes);
-
-			src_offset	+= bytes;
-			dst_offset	+= bytes;
-		}
+		bio_copy_data(s->cache_miss, s->op.cache_bio);
 
 		bio_put(s->cache_miss);
 		s->cache_miss = NULL;
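bio_copy_data() is the block-layer generalization of the loop deleted above: it walks the source and destination bio_vec arrays in lockstep, kmap()ing pages and memcpy()ing min()-sized runs, so the two bios' segment boundaries need not line up. As used here (the argument-role comments are mine, not part of the diff):

	/* bio_copy_data(dst, src): copy the data just read into
	 * s->op.cache_bio back out to the bio that completes the
	 * original cache-miss request. */
	bio_copy_data(s->cache_miss, s->op.cache_bio);

Note the asymmetry the old code's BUG() encoded: the source (cache_bio, which covers the miss plus readahead) may be longer than the destination, never shorter.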
@@ -912,9 +868,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct bio *miss;
 
 	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (!miss)
-		return -EAGAIN;
-
 	if (miss == bio)
 		s->op.lookup_done = true;
 
@@ -933,8 +886,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		reada = min(dc->readahead >> 9,
 			    sectors - bio_sectors(miss));
 
-		if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
-			reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
+		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
+			reada = bdev_sectors(miss->bi_bdev) -
+				bio_end_sector(miss);
 	}
 
 	s->cache_bio_sectors = bio_sectors(miss) + reada;
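bio_end_sector() is the stock helper that the bcache-local bio_end() duplicated. Unless I am misremembering the pre-bi_iter definition, it is simply:

	/* include/linux/bio.h: one past the last sector the bio touches. */
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))

which is exactly the bound the readahead clamp above needs, and what the remaining bio_end() call sites in this file are converted to below.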
@@ -958,7 +912,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_put;
 
 	bch_bio_map(s->op.cache_bio, NULL);
-	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
 
 	s->cache_miss = miss;
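bio_alloc_pages() is likewise the renamed core version of bch_bio_alloc_pages(): allocate one page per bio_vec, unwinding on failure. A sketch of its shape, assuming it still matches the fs/bio.c helper added by this series:

	int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
	{
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, bio, i) {
			bv->bv_page = alloc_page(gfp_mask);
			if (!bv->bv_page) {
				/* Unwind: free whatever we managed to get. */
				while (--bv >= bio->bi_io_vec)
					__free_page(bv->bv_page);
				return -ENOMEM;
			}
		}

		return 0;
	}

__GFP_NOWARN stays in the caller because failure here only means the read is not cached; it is not worth logging.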
@@ -1002,7 +956,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
 	struct bio *bio = &s->bio.bio;
 	struct bkey start, end;
 	start = KEY(dc->disk.id, bio->bi_sector, 0);
-	end = KEY(dc->disk.id, bio_end(bio), 0);
+	end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
 
@@ -1176,7 +1130,7 @@ found:
 	if (i->sequential + bio->bi_size > i->sequential)
 		i->sequential	+= bio->bi_size;
 
-	i->last			 = bio_end(bio);
+	i->last			 = bio_end_sector(bio);
 	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
 	s->task->sequential_io	 = i->sequential;
 
@@ -1294,30 +1248,25 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
 				struct bio *bio, unsigned sectors)
 {
+	struct bio_vec *bv;
+	int i;
+
 	/* Zero fill bio */
 
-	while (bio->bi_idx != bio->bi_vcnt) {
-		struct bio_vec *bv = bio_iovec(bio);
+	bio_for_each_segment(bv, bio, i) {
 		unsigned j = min(bv->bv_len >> 9, sectors);
 
 		void *p = kmap(bv->bv_page);
 		memset(p + bv->bv_offset, 0, j << 9);
 		kunmap(bv->bv_page);
 
-		bv->bv_len	-= j << 9;
-		bv->bv_offset	+= j << 9;
-
-		if (bv->bv_len)
-			return 0;
-
-		bio->bi_sector	+= j;
-		bio->bi_size	-= j << 9;
-
-		bio->bi_idx++;
-		sectors		-= j;
+		sectors	-= j;
 	}
 
-	s->op.lookup_done = true;
+	bio_advance(bio, min(sectors << 9, bio->bi_size));
+
+	if (!bio->bi_size)
+		s->op.lookup_done = true;
 
 	return 0;
 }
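The rewritten flash_dev_cache_miss() leans on two more stock helpers: bio_for_each_segment() iterates bv over the bio's segments from bi_idx onward without mutating the bio, and bio_advance() replaces the hand-rolled bi_sector/bi_size/bi_idx bookkeeping on the left. Roughly, and only as a sketch (the real helper also handles partial bio_vecs and integrity payloads):

	/* Approximate behavior of bio_advance(bio, nbytes) on
	 * pre-bi_iter kernels: */
	static void bio_advance_sketch(struct bio *bio, unsigned nbytes)
	{
		bio->bi_sector += nbytes >> 9;
		bio->bi_size   -= nbytes;

		/* Walk the iterator forward so bio_iovec() points at
		 * the first byte not yet consumed. */
		while (nbytes) {
			struct bio_vec *bv = bio_iovec(bio);
			unsigned n = min(nbytes, bv->bv_len);

			bv->bv_offset += n;
			bv->bv_len    -= n;
			if (!bv->bv_len)
				bio->bi_idx++;
			nbytes -= n;
		}
	}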
@@ -1344,8 +1293,8 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		closure_call(&s->op.cl, btree_read_async, NULL, cl);
 	} else if (bio_has_data(bio) || s->op.skip) {
 		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_sector, 0),
-					&KEY(d->id, bio_end(bio), 0));
+					&KEY(d->id, bio_end_sector(bio), 0));
 
 		s->writeback	= true;
 		s->op.cache_bio	= bio;