author     Kent Overstreet <kmo@daterainc.com>    2013-09-10 21:39:16 -0400
committer  Kent Overstreet <kmo@daterainc.com>    2013-11-11 00:55:59 -0500
commit     e7c590eb63509c5d5f48a390d23aa25f4417ac96
tree       22accb493ac9f36b5bf005d5a19080ab67a96cf2
parent     403b6cdeb1a38d896ffcb1f99ddcfd4e343b5d69
bcache: Convert btree_insert_check_key() to btree_insert_node()
This was the main point of all this refactoring - now,
btree_insert_check_key() won't fail just because the leaf node happened
to be full.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
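For callers, the visible change is the signature: bch_btree_insert_check_key() now takes a struct bkey instead of a struct bio, and returns an int instead of a bool. Because the insert is routed through bch_btree_insert_node(), a full leaf gets split instead of failing the call; the only error left for a caller to handle is -EINTR, meaning the btree traversal must be restarted. Condensed from the request.c hunk below, the new caller contract boils down to:

	/*
	 * Condensed from the request.c hunk below (not extra code from
	 * the patch): 0 = check key inserted, splitting the leaf if it
	 * was full; -EINTR = node changed, restart the traversal.
	 */
	s->op.replace = KEY(s->op.inode, bio->bi_sector +
			    s->cache_bio_sectors, s->cache_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
	if (ret)
		return ret;	/* propagate -EINTR up to retry the search */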
 drivers/md/bcache/bcache.h  |  8
 drivers/md/bcache/btree.c   | 82
 drivers/md/bcache/btree.h   |  6
 drivers/md/bcache/request.c | 55
 4 files changed, 79 insertions(+), 72 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6e836f22f276..10ce0c825fce 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1091,14 +1091,6 @@ do {							\
 	for (b = (ca)->buckets + (ca)->sb.first_bucket;	\
 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
 
-static inline void __bkey_put(struct cache_set *c, struct bkey *k)
-{
-	unsigned i;
-
-	for (i = 0; i < KEY_PTRS(k); i++)
-		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
-}
-
 static inline void cached_dev_put(struct cached_dev *dc)
 {
 	if (atomic_dec_and_test(&dc->count))
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 60d06465d772..08a85327431c 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -118,6 +118,15 @@ void bch_btree_op_init_stack(struct btree_op *op)
 
 /* Btree key manipulation */
 
+void __bkey_put(struct cache_set *c, struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i))
+			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
+}
+
 static void bkey_put(struct cache_set *c, struct bkey *k, int level)
 {
 	if ((level && KEY_OFFSET(k)) || !level)
@@ -1855,8 +1864,6 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 	bool ret = false;
 	unsigned oldsize = bch_count_data(b);
 
-	BUG_ON(!insert_lock(op, b));
-
 	while (!bch_keylist_empty(insert_keys)) {
 		struct bset *i = write_block(b);
 		struct bkey *k = insert_keys->bottom;
@@ -1898,39 +1905,6 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 	return ret;
 }
 
-bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
-				struct bio *bio)
-{
-	bool ret = false;
-	uint64_t btree_ptr = b->key.ptr[0];
-	unsigned long seq = b->seq;
-	BKEY_PADDED(k) tmp;
-
-	rw_unlock(false, b);
-	rw_lock(true, b, b->level);
-
-	if (b->key.ptr[0] != btree_ptr ||
-	    b->seq != seq + 1 ||
-	    should_split(b))
-		goto out;
-
-	op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
-
-	SET_KEY_PTRS(&op->replace, 1);
-	get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
-
-	SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV);
-
-	bkey_copy(&tmp.k, &op->replace);
-
-	BUG_ON(op->type != BTREE_INSERT);
-	BUG_ON(!btree_insert_key(b, op, &tmp.k));
-	ret = true;
-out:
-	downgrade_write(&b->lock);
-	return ret;
-}
-
 static int btree_split(struct btree *b, struct btree_op *op,
 		       struct keylist *insert_keys,
 		       struct keylist *parent_keys)
@@ -2097,6 +2071,44 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 	return ret;
 }
 
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+			       struct bkey *check_key)
+{
+	int ret = -EINTR;
+	uint64_t btree_ptr = b->key.ptr[0];
+	unsigned long seq = b->seq;
+	struct keylist insert;
+	bool upgrade = op->lock == -1;
+
+	bch_keylist_init(&insert);
+
+	if (upgrade) {
+		rw_unlock(false, b);
+		rw_lock(true, b, b->level);
+
+		if (b->key.ptr[0] != btree_ptr ||
+		    b->seq != seq + 1)
+			goto out;
+	}
+
+	SET_KEY_PTRS(check_key, 1);
+	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
+
+	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
+
+	bch_keylist_add(&insert, check_key);
+
+	BUG_ON(op->type != BTREE_INSERT);
+
+	ret = bch_btree_insert_node(b, op, &insert);
+
+	BUG_ON(!ret && !bch_keylist_empty(&insert));
+out:
+	if (upgrade)
+		downgrade_write(&b->lock);
+	return ret;
+}
+
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op)
 {
 	if (bch_keylist_empty(&op->keys))
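The lock-upgrade dance in the new bch_btree_insert_check_key() above is worth spelling out. When the caller only held a read lock (op->lock == -1), the function must trade it for a write lock, and then prove nothing happened while it held no lock at all. A condensed view of the hunk above, with added commentary (the comments are mine, not from the patch; the seq check relies on the write-lock path incrementing b->seq):

	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];	/* identity of the node... */
	unsigned long seq = b->seq;		/* ...and its write-lock count */
	bool upgrade = op->lock == -1;		/* caller held only a read lock */

	if (upgrade) {
		rw_unlock(false, b);		/* drop the read lock */
		rw_lock(true, b, b->level);	/* take the write lock */

		/*
		 * Taking the write lock bumps b->seq, so b->seq == seq + 1
		 * means no other writer slipped in during the window; the
		 * pointer comparison catches the node being freed and
		 * reused outright.  Otherwise bail out with -EINTR.
		 */
		if (b->key.ptr[0] != btree_ptr || b->seq != seq + 1)
			goto out;
	}

	/* ... build the keylist and call bch_btree_insert_node() ... */
out:
	if (upgrade)
		downgrade_write(&b->lock);	/* hand back a read lock */
	return ret;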
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 6d2fb7550706..73bd62105e39 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -216,6 +216,8 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
 	return __bch_btree_iter_init(b, iter, search, b->sets);
 }
 
+void __bkey_put(struct cache_set *c, struct bkey *k);
+
 /* Looping macros */
 
 #define for_each_cached_btree(b, c, iter)			\
@@ -380,8 +382,8 @@ struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
 struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
 				 int, struct btree_op *);
 
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
-				struct bio *);
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+			       struct bkey *);
 int bch_btree_insert(struct btree_op *, struct cache_set *);
 
 int bch_btree_search_recurse(struct btree *, struct btree_op *);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2a7f0dd6abab..9ed334ca484d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -869,35 +869,39 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 			 struct bio *bio, unsigned sectors)
 {
 	int ret = 0;
-	unsigned reada;
+	unsigned reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss;
 
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (miss == bio)
-		s->op.lookup_done = true;
+	if (s->cache_miss || s->op.skip) {
+		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		if (miss == bio)
+			s->op.lookup_done = true;
+		goto out_submit;
+	}
 
-	miss->bi_end_io = request_endio;
-	miss->bi_private = &s->cl;
+	if (!(bio->bi_rw & REQ_RAHEAD) &&
+	    !(bio->bi_rw & REQ_META) &&
+	    s->op.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+		reada = min_t(sector_t, dc->readahead >> 9,
+			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
 
-	if (s->cache_miss || s->op.skip)
-		goto out_submit;
+	s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
-	if (miss != bio ||
-	    (bio->bi_rw & REQ_RAHEAD) ||
-	    (bio->bi_rw & REQ_META) ||
-	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
-		reada = 0;
-	else {
-		reada = min(dc->readahead >> 9,
-			    sectors - bio_sectors(miss));
-
-		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
-			reada = bdev_sectors(miss->bi_bdev) -
-				bio_end_sector(miss);
-	}
+	s->op.replace = KEY(s->op.inode, bio->bi_sector +
+			    s->cache_bio_sectors, s->cache_bio_sectors);
+
+	ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
+	if (ret)
+		return ret;
+
+	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+	if (miss == bio)
+		s->op.lookup_done = true;
+	else
+		/* btree_search_recurse()'s btree iterator is no good anymore */
+		ret = -EINTR;
 
-	s->cache_bio_sectors = bio_sectors(miss) + reada;
 	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
 			dc->disk.bio_split);
@@ -912,11 +916,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->op.cache_bio->bi_end_io = request_endio;
 	s->op.cache_bio->bi_private = &s->cl;
 
-	/* btree_search_recurse()'s btree iterator is no good anymore */
-	ret = -EINTR;
-	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
-		goto out_put;
-
 	bch_bio_map(s->op.cache_bio, NULL);
 	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
@@ -931,6 +930,8 @@ out_put:
 	bio_put(s->op.cache_bio);
 	s->op.cache_bio = NULL;
 out_submit:
+	miss->bi_end_io = request_endio;
+	miss->bi_private = &s->cl;
 	closure_bio_submit(miss, &s->cl, s->d);
 	return ret;
 }
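One detail of the rewritten miss path is the readahead clamp: dc->readahead is a byte count, so the >> 9 converts it to 512-byte sectors, and min_t() caps it so the cache bio never extends past the end of the backing device. A standalone model of that arithmetic (simplified types and a hypothetical helper name for illustration; not kernel code):

	#include <stdint.h>

	typedef uint64_t sector_t;

	/*
	 * Standalone model of the reada computation in
	 * cached_dev_cache_miss(): convert a byte count to 512-byte
	 * sectors, then clamp to the sectors remaining on the backing
	 * device past the end of the bio.
	 */
	static sector_t clamp_readahead(unsigned readahead_bytes,
					sector_t dev_sectors,
					sector_t bio_end)
	{
		sector_t reada = readahead_bytes >> 9;	/* bytes -> sectors */
		sector_t room = dev_sectors - bio_end;	/* space left on disk */

		return reada < room ? reada : room;
	}

	/*
	 * Example: 128 KiB of readahead is 256 sectors, but with only
	 * 100 sectors left on the device,
	 * clamp_readahead(128 << 10, 1000, 900) returns 100.
	 */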