author	Kent Overstreet <kent.overstreet@gmail.com>	2013-11-24 02:11:25 -0500
committer	Jens Axboe <axboe@fb.com>	2015-08-13 14:31:40 -0400
commit	749b61dab30736eb95b1ee23738cae90973d4fc3 (patch)
tree	a4523ba849c4d173d71cb4e97f4363f1a11b460d
parent	c66a14d07c136cc35d4dcb84bfc7bc188be24d4c (diff)
bcache: remove driver private bio splitting code
The bcache driver has always accepted arbitrarily large bios and split
them internally.  Now that every driver must accept arbitrarily large
bios this code isn't necessary anymore.

Cc: linux-bcache@vger.kernel.org
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
[dpark: add more description in commit message]
Signed-off-by: Dongsu Park <dpark@posteo.net>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
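For context, the splitting deleted here now happens once in the block layer rather than in each driver. A minimal sketch of that pattern, assuming the blk_queue_split() helper introduced earlier in this series (illustrative only; the function and names below are not part of this patch):

#include <linux/blkdev.h>

/*
 * Illustrative sketch, not part of this patch: a bio-based make_request
 * function that relies on the block layer's blk_queue_split() instead of
 * private splitting code like the bch_generic_make_request() removed below.
 */
static void example_make_request(struct request_queue *q, struct bio *bio)
{
	/*
	 * Split the bio against q's limits; blk_queue_split() resubmits
	 * any remainder through generic_make_request() itself.
	 */
	blk_queue_split(q, &bio, q->bio_split);

	/* ... remap bio->bi_bdev to the backing device here ... */

	generic_make_request(bio);
}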
-rw-r--r--	drivers/md/bcache/bcache.h	|  18
-rw-r--r--	drivers/md/bcache/io.c	| 101
-rw-r--r--	drivers/md/bcache/journal.c	|   4
-rw-r--r--	drivers/md/bcache/request.c	|  16
-rw-r--r--	drivers/md/bcache/super.c	|  32
-rw-r--r--	drivers/md/bcache/util.h	|   5
-rw-r--r--	drivers/md/bcache/writeback.c	|   4
7 files changed, 18 insertions, 162 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 04f7bc28ef83..6b420a55c745 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -243,19 +243,6 @@ struct keybuf {
 	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 };
 
-struct bio_split_pool {
-	struct bio_set		*bio_split;
-	mempool_t		*bio_split_hook;
-};
-
-struct bio_split_hook {
-	struct closure		cl;
-	struct bio_split_pool	*p;
-	struct bio		*bio;
-	bio_end_io_t		*bi_end_io;
-	void			*bi_private;
-};
-
 struct bcache_device {
 	struct closure		cl;
 
@@ -288,8 +275,6 @@ struct bcache_device {
 	int (*cache_miss)(struct btree *, struct search *,
 			  struct bio *, unsigned);
 	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
-
-	struct bio_split_pool	bio_split_hook;
 };
 
 struct io {
@@ -454,8 +439,6 @@ struct cache {
 	atomic_long_t		meta_sectors_written;
 	atomic_long_t		btree_sectors_written;
 	atomic_long_t		sectors_written;
-
-	struct bio_split_pool	bio_split_hook;
 };
 
 struct gc_stat {
@@ -873,7 +856,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
 
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9440df94bc83..86a0bb87124e 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,105 +11,6 @@
 
 #include <linux/blkdev.h>
 
-static unsigned bch_bio_max_sectors(struct bio *bio)
-{
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	struct bio_vec bv;
-	struct bvec_iter iter;
-	unsigned ret = 0, seg = 0;
-
-	if (bio->bi_rw & REQ_DISCARD)
-		return min(bio_sectors(bio), q->limits.max_discard_sectors);
-
-	bio_for_each_segment(bv, bio, iter) {
-		struct bvec_merge_data bvm = {
-			.bi_bdev	= bio->bi_bdev,
-			.bi_sector	= bio->bi_iter.bi_sector,
-			.bi_size	= ret << 9,
-			.bi_rw		= bio->bi_rw,
-		};
-
-		if (seg == min_t(unsigned, BIO_MAX_PAGES,
-				 queue_max_segments(q)))
-			break;
-
-		if (q->merge_bvec_fn &&
-		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-			break;
-
-		seg++;
-		ret += bv.bv_len >> 9;
-	}
-
-	ret = min(ret, queue_max_sectors(q));
-
-	WARN_ON(!ret);
-	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
-
-	return ret;
-}
-
-static void bch_bio_submit_split_done(struct closure *cl)
-{
-	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
-	s->bio->bi_end_io	= s->bi_end_io;
-	s->bio->bi_private	= s->bi_private;
-	bio_endio(s->bio);
-
-	closure_debug_destroy(&s->cl);
-	mempool_free(s, s->p->bio_split_hook);
-}
-
-static void bch_bio_submit_split_endio(struct bio *bio)
-{
-	struct closure *cl = bio->bi_private;
-	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
-	if (bio->bi_error)
-		s->bio->bi_error = bio->bi_error;
-
-	bio_put(bio);
-	closure_put(cl);
-}
-
-void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
-{
-	struct bio_split_hook *s;
-	struct bio *n;
-
-	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
-		goto submit;
-
-	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
-		goto submit;
-
-	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
-	closure_init(&s->cl, NULL);
-
-	s->bio		= bio;
-	s->p		= p;
-	s->bi_end_io	= bio->bi_end_io;
-	s->bi_private	= bio->bi_private;
-	bio_get(bio);
-
-	do {
-		n = bio_next_split(bio, bch_bio_max_sectors(bio),
-				   GFP_NOIO, s->p->bio_split);
-
-		n->bi_end_io	= bch_bio_submit_split_endio;
-		n->bi_private	= &s->cl;
-
-		closure_get(&s->cl);
-		generic_make_request(n);
-	} while (n != bio);
-
-	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
-	return;
-submit:
-	generic_make_request(bio);
-}
-
 /* Bios with headers */
 
 void bch_bbio_free(struct bio *bio, struct cache_set *c)
@@ -139,7 +40,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
 
 	b->submit_time_us = local_clock_us();
-	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
+	closure_bio_submit(bio, bio->bi_private);
 }
 
 void bch_submit_bbio(struct bio *bio, struct cache_set *c,
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index d6a4e16030a6..29eba7219b01 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -61,7 +61,7 @@ reread: left = ca->sb.bucket_size - offset;
 		bio->bi_private = &cl;
 		bch_bio_map(bio, data);
 
-		closure_bio_submit(bio, &cl, ca);
+		closure_bio_submit(bio, &cl);
 		closure_sync(&cl);
 
 		/* This function could be simpler now since we no longer write
@@ -648,7 +648,7 @@ static void journal_write_unlocked(struct closure *cl)
 	spin_unlock(&c->journal.lock);
 
 	while ((bio = bio_list_pop(&list)))
-		closure_bio_submit(bio, cl, c->cache[0]);
+		closure_bio_submit(bio, cl);
 
 	continue_at(cl, journal_write_done, NULL);
 }
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a09b9462ff49..8e9877b04637 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -719,7 +719,7 @@ static void cached_dev_read_error(struct closure *cl)
 
 		/* XXX: invalidate cache */
 
-		closure_bio_submit(bio, cl, s->d);
+		closure_bio_submit(bio, cl);
 	}
 
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -842,7 +842,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->cache_miss	= miss;
 	s->iop.bio	= cache_bio;
 	bio_get(cache_bio);
-	closure_bio_submit(cache_bio, &s->cl, s->d);
+	closure_bio_submit(cache_bio, &s->cl);
 
 	return ret;
 out_put:
@@ -850,7 +850,7 @@ out_put:
 out_submit:
 	miss->bi_end_io	= request_endio;
 	miss->bi_private = &s->cl;
-	closure_bio_submit(miss, &s->cl, s->d);
+	closure_bio_submit(miss, &s->cl);
 	return ret;
 }
 
@@ -915,7 +915,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 
 		if (!(bio->bi_rw & REQ_DISCARD) ||
 		    blk_queue_discard(bdev_get_queue(dc->bdev)))
-			closure_bio_submit(bio, cl, s->d);
+			closure_bio_submit(bio, cl);
 	} else if (s->iop.writeback) {
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
@@ -930,12 +930,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			flush->bi_end_io = request_endio;
 			flush->bi_private = cl;
 
-			closure_bio_submit(flush, cl, s->d);
+			closure_bio_submit(flush, cl);
 		}
 	} else {
 		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
-		closure_bio_submit(bio, cl, s->d);
+		closure_bio_submit(bio, cl);
 	}
 
 	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -951,7 +951,7 @@ static void cached_dev_nodata(struct closure *cl)
 		bch_journal_meta(s->iop.c, cl);
 
 	/* If it's a flush, we send the flush to the backing device too */
-	closure_bio_submit(bio, cl, s->d);
+	closure_bio_submit(bio, cl);
 
 	continue_at(cl, cached_dev_bio_complete, NULL);
 }
@@ -995,7 +995,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
 			bio_endio(bio);
 		else
-			bch_generic_make_request(bio, &d->bio_split_hook);
+			generic_make_request(bio);
 	}
 }
 
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index be01fd3c87f1..679a093a3bf6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -59,29 +59,6 @@ struct workqueue_struct *bcache_wq;
 
 #define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
 
-static void bio_split_pool_free(struct bio_split_pool *p)
-{
-	if (p->bio_split_hook)
-		mempool_destroy(p->bio_split_hook);
-
-	if (p->bio_split)
-		bioset_free(p->bio_split);
-}
-
-static int bio_split_pool_init(struct bio_split_pool *p)
-{
-	p->bio_split = bioset_create(4, 0);
-	if (!p->bio_split)
-		return -ENOMEM;
-
-	p->bio_split_hook = mempool_create_kmalloc_pool(4,
-				sizeof(struct bio_split_hook));
-	if (!p->bio_split_hook)
-		return -ENOMEM;
-
-	return 0;
-}
-
 /* Superblock */
 
 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
@@ -537,7 +514,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 	bio->bi_private = ca;
 	bch_bio_map(bio, ca->disk_buckets);
 
-	closure_bio_submit(bio, &ca->prio, ca);
+	closure_bio_submit(bio, &ca->prio);
 	closure_sync(cl);
 }
 
@@ -757,7 +734,6 @@ static void bcache_device_free(struct bcache_device *d)
 		put_disk(d->disk);
 	}
 
-	bio_split_pool_free(&d->bio_split_hook);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
 	kvfree(d->full_dirty_stripes);
@@ -804,7 +780,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 		return minor;
 
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-	    bio_split_pool_init(&d->bio_split_hook) ||
 	    !(d->disk = alloc_disk(1))) {
 		ida_simple_remove(&bcache_minor, minor);
 		return -ENOMEM;
@@ -1793,8 +1768,6 @@ void bch_cache_release(struct kobject *kobj)
 		ca->set->cache[ca->sb.nr_this_dev] = NULL;
 	}
 
-	bio_split_pool_free(&ca->bio_split_hook);
-
 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
 	kfree(ca->prio_buckets);
 	vfree(ca->buckets);
@@ -1839,8 +1812,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 					  ca->sb.nbuckets)) ||
 	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
 					  2, GFP_KERNEL)) ||
-	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    bio_split_pool_init(&ca->bio_split_hook))
+	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)))
 		return -ENOMEM;
 
 	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1d04c4859c70..cf2cbc211d83 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -4,6 +4,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/errno.h>
+#include <linux/blkdev.h>
 #include <linux/kernel.h>
 #include <linux/llist.h>
 #include <linux/ratelimit.h>
@@ -570,10 +571,10 @@ static inline sector_t bdev_sectors(struct block_device *bdev)
 	return bdev->bd_inode->i_size >> 9;
 }
 
-#define closure_bio_submit(bio, cl, dev)			\
+#define closure_bio_submit(bio, cl)				\
 do {								\
 	closure_get(cl);					\
-	bch_generic_make_request(bio, &(dev)->bio_split_hook);	\
+	generic_make_request(bio);				\
 } while (0)
 
 uint64_t bch_crc64_update(uint64_t, const void *, size_t);
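With the device argument gone, closure_bio_submit() reduces to a closure_get() plus generic_make_request(). A submit site after this patch looks like the sketch below (identifiers taken from the request.c hunk above; shown only to illustrate the simplified macro):

	/* The completion handler (request_endio) drops the closure
	 * reference that closure_bio_submit() takes via closure_get(). */
	miss->bi_end_io	 = request_endio;
	miss->bi_private = &s->cl;
	closure_bio_submit(miss, &s->cl);	/* was: closure_bio_submit(miss, &s->cl, s->d) */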
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b4fc874c30fd..b23f88d9f18c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -188,7 +188,7 @@ static void write_dirty(struct closure *cl)
 	io->bio.bi_bdev		= io->dc->bdev;
 	io->bio.bi_end_io	= dirty_endio;
 
-	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+	closure_bio_submit(&io->bio, cl);
 
 	continue_at(cl, write_dirty_finish, system_wq);
 }
@@ -208,7 +208,7 @@ static void read_dirty_submit(struct closure *cl)
 {
 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
 
-	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+	closure_bio_submit(&io->bio, cl);
 
 	continue_at(cl, write_dirty, system_wq);
 }