about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
authorKent Overstreet <kmo@daterainc.com>2013-09-10 21:52:54 -0400
committerKent Overstreet <kmo@daterainc.com>2013-11-11 00:56:29 -0500
commit1b207d80d5b986fb305bc899357435d319319513 (patch)
treebf75b0befff3f48606832df0a64ced1c3c3ae91d /drivers/md
parentfaadf0c96547ec8277ad0abd6959f2ef48522f31 (diff)
bcache: Kill op->replace
This is prep work for converting bch_btree_insert to bch_btree_map_leaf_nodes() - we have to convert all its arguments to actual arguments. Bunch of churn, but should be straightforward.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/bcache/btree.c102
-rw-r--r--drivers/md/bcache/btree.h10
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/movinggc.c4
-rw-r--r--drivers/md/bcache/request.c14
-rw-r--r--drivers/md/bcache/request.h2
-rw-r--r--drivers/md/bcache/writeback.c10
7 files changed, 71 insertions, 73 deletions
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 3e0c90130c2e..7a1d8dc19e61 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -89,15 +89,6 @@
89 * Test module load/unload 89 * Test module load/unload
90 */ 90 */
91 91
92static const char * const op_types[] = {
93 "insert", "replace"
94};
95
96static const char *op_type(struct btree_op *op)
97{
98 return op_types[op->type];
99}
100
101enum { 92enum {
102 BTREE_INSERT_STATUS_INSERT, 93 BTREE_INSERT_STATUS_INSERT,
103 BTREE_INSERT_STATUS_BACK_MERGE, 94 BTREE_INSERT_STATUS_BACK_MERGE,
@@ -1699,10 +1690,9 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
1699 bch_bset_fix_lookup_table(b, where); 1690 bch_bset_fix_lookup_table(b, where);
1700} 1691}
1701 1692
1702static bool fix_overlapping_extents(struct btree *b, 1693static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
1703 struct bkey *insert,
1704 struct btree_iter *iter, 1694 struct btree_iter *iter,
1705 struct btree_op *op) 1695 struct bkey *replace_key)
1706{ 1696{
1707 void subtract_dirty(struct bkey *k, uint64_t offset, int sectors) 1697 void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
1708 { 1698 {
@@ -1730,39 +1720,38 @@ static bool fix_overlapping_extents(struct btree *b,
1730 * We might overlap with 0 size extents; we can't skip these 1720 * We might overlap with 0 size extents; we can't skip these
1731 * because if they're in the set we're inserting to we have to 1721 * because if they're in the set we're inserting to we have to
1732 * adjust them so they don't overlap with the key we're 1722 * adjust them so they don't overlap with the key we're
1733 * inserting. But we don't want to check them for BTREE_REPLACE 1723 * inserting. But we don't want to check them for replace
1734 * operations. 1724 * operations.
1735 */ 1725 */
1736 1726
1737 if (op->type == BTREE_REPLACE && 1727 if (replace_key && KEY_SIZE(k)) {
1738 KEY_SIZE(k)) {
1739 /* 1728 /*
1740 * k might have been split since we inserted/found the 1729 * k might have been split since we inserted/found the
1741 * key we're replacing 1730 * key we're replacing
1742 */ 1731 */
1743 unsigned i; 1732 unsigned i;
1744 uint64_t offset = KEY_START(k) - 1733 uint64_t offset = KEY_START(k) -
1745 KEY_START(&op->replace); 1734 KEY_START(replace_key);
1746 1735
1747 /* But it must be a subset of the replace key */ 1736 /* But it must be a subset of the replace key */
1748 if (KEY_START(k) < KEY_START(&op->replace) || 1737 if (KEY_START(k) < KEY_START(replace_key) ||
1749 KEY_OFFSET(k) > KEY_OFFSET(&op->replace)) 1738 KEY_OFFSET(k) > KEY_OFFSET(replace_key))
1750 goto check_failed; 1739 goto check_failed;
1751 1740
1752 /* We didn't find a key that we were supposed to */ 1741 /* We didn't find a key that we were supposed to */
1753 if (KEY_START(k) > KEY_START(insert) + sectors_found) 1742 if (KEY_START(k) > KEY_START(insert) + sectors_found)
1754 goto check_failed; 1743 goto check_failed;
1755 1744
1756 if (KEY_PTRS(&op->replace) != KEY_PTRS(k)) 1745 if (KEY_PTRS(replace_key) != KEY_PTRS(k))
1757 goto check_failed; 1746 goto check_failed;
1758 1747
1759 /* skip past gen */ 1748 /* skip past gen */
1760 offset <<= 8; 1749 offset <<= 8;
1761 1750
1762 BUG_ON(!KEY_PTRS(&op->replace)); 1751 BUG_ON(!KEY_PTRS(replace_key));
1763 1752
1764 for (i = 0; i < KEY_PTRS(&op->replace); i++) 1753 for (i = 0; i < KEY_PTRS(replace_key); i++)
1765 if (k->ptr[i] != op->replace.ptr[i] + offset) 1754 if (k->ptr[i] != replace_key->ptr[i] + offset)
1766 goto check_failed; 1755 goto check_failed;
1767 1756
1768 sectors_found = KEY_OFFSET(k) - KEY_START(insert); 1757 sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1833,9 +1822,8 @@ static bool fix_overlapping_extents(struct btree *b,
1833 } 1822 }
1834 1823
1835check_failed: 1824check_failed:
1836 if (op->type == BTREE_REPLACE) { 1825 if (replace_key) {
1837 if (!sectors_found) { 1826 if (!sectors_found) {
1838 op->insert_collision = true;
1839 return true; 1827 return true;
1840 } else if (sectors_found < KEY_SIZE(insert)) { 1828 } else if (sectors_found < KEY_SIZE(insert)) {
1841 SET_KEY_OFFSET(insert, KEY_OFFSET(insert) - 1829 SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1848,7 +1836,7 @@ check_failed:
1848} 1836}
1849 1837
1850static bool btree_insert_key(struct btree *b, struct btree_op *op, 1838static bool btree_insert_key(struct btree *b, struct btree_op *op,
1851 struct bkey *k) 1839 struct bkey *k, struct bkey *replace_key)
1852{ 1840{
1853 struct bset *i = b->sets[b->nsets].data; 1841 struct bset *i = b->sets[b->nsets].data;
1854 struct bkey *m, *prev; 1842 struct bkey *m, *prev;
@@ -1874,8 +1862,10 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
1874 prev = NULL; 1862 prev = NULL;
1875 m = bch_btree_iter_init(b, &iter, &search); 1863 m = bch_btree_iter_init(b, &iter, &search);
1876 1864
1877 if (fix_overlapping_extents(b, k, &iter, op)) 1865 if (fix_overlapping_extents(b, k, &iter, replace_key)) {
1866 op->insert_collision = true;
1878 return false; 1867 return false;
1868 }
1879 1869
1880 if (KEY_DIRTY(k)) 1870 if (KEY_DIRTY(k))
1881 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), 1871 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
@@ -1903,24 +1893,28 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
1903 if (m != end(i) && 1893 if (m != end(i) &&
1904 bch_bkey_try_merge(b, k, m)) 1894 bch_bkey_try_merge(b, k, m))
1905 goto copy; 1895 goto copy;
1906 } else 1896 } else {
1897 BUG_ON(replace_key);
1907 m = bch_bset_search(b, &b->sets[b->nsets], k); 1898 m = bch_bset_search(b, &b->sets[b->nsets], k);
1899 }
1908 1900
1909insert: shift_keys(b, m, k); 1901insert: shift_keys(b, m, k);
1910copy: bkey_copy(m, k); 1902copy: bkey_copy(m, k);
1911merged: 1903merged:
1912 bch_check_keys(b, "%u for %s", status, op_type(op)); 1904 bch_check_keys(b, "%u for %s", status,
1905 replace_key ? "replace" : "insert");
1913 1906
1914 if (b->level && !KEY_OFFSET(k)) 1907 if (b->level && !KEY_OFFSET(k))
1915 btree_current_write(b)->prio_blocked++; 1908 btree_current_write(b)->prio_blocked++;
1916 1909
1917 trace_bcache_btree_insert_key(b, k, op->type, status); 1910 trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
1918 1911
1919 return true; 1912 return true;
1920} 1913}
1921 1914
1922static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, 1915static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1923 struct keylist *insert_keys) 1916 struct keylist *insert_keys,
1917 struct bkey *replace_key)
1924{ 1918{
1925 bool ret = false; 1919 bool ret = false;
1926 unsigned oldsize = bch_count_data(b); 1920 unsigned oldsize = bch_count_data(b);
@@ -1936,11 +1930,11 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1936 if (bkey_cmp(k, &b->key) <= 0) { 1930 if (bkey_cmp(k, &b->key) <= 0) {
1937 bkey_put(b->c, k, b->level); 1931 bkey_put(b->c, k, b->level);
1938 1932
1939 ret |= btree_insert_key(b, op, k); 1933 ret |= btree_insert_key(b, op, k, replace_key);
1940 bch_keylist_pop_front(insert_keys); 1934 bch_keylist_pop_front(insert_keys);
1941 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { 1935 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1942#if 0 1936#if 0
1943 if (op->type == BTREE_REPLACE) { 1937 if (replace_key) {
1944 bkey_put(b->c, k, b->level); 1938 bkey_put(b->c, k, b->level);
1945 bch_keylist_pop_front(insert_keys); 1939 bch_keylist_pop_front(insert_keys);
1946 op->insert_collision = true; 1940 op->insert_collision = true;
@@ -1953,7 +1947,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1953 bch_cut_back(&b->key, &temp.key); 1947 bch_cut_back(&b->key, &temp.key);
1954 bch_cut_front(&b->key, insert_keys->keys); 1948 bch_cut_front(&b->key, insert_keys->keys);
1955 1949
1956 ret |= btree_insert_key(b, op, &temp.key); 1950 ret |= btree_insert_key(b, op, &temp.key, replace_key);
1957 break; 1951 break;
1958 } else { 1952 } else {
1959 break; 1953 break;
@@ -1968,7 +1962,8 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1968 1962
1969static int btree_split(struct btree *b, struct btree_op *op, 1963static int btree_split(struct btree *b, struct btree_op *op,
1970 struct keylist *insert_keys, 1964 struct keylist *insert_keys,
1971 struct keylist *parent_keys) 1965 struct keylist *parent_keys,
1966 struct bkey *replace_key)
1972{ 1967{
1973 bool split; 1968 bool split;
1974 struct btree *n1, *n2 = NULL, *n3 = NULL; 1969 struct btree *n1, *n2 = NULL, *n3 = NULL;
@@ -1998,7 +1993,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
1998 goto err_free2; 1993 goto err_free2;
1999 } 1994 }
2000 1995
2001 bch_btree_insert_keys(n1, op, insert_keys); 1996 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2002 1997
2003 /* 1998 /*
2004 * Has to be a linear search because we don't have an auxiliary 1999 * Has to be a linear search because we don't have an auxiliary
@@ -2026,7 +2021,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
2026 } else { 2021 } else {
2027 trace_bcache_btree_node_compact(b, n1->sets[0].data->keys); 2022 trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
2028 2023
2029 bch_btree_insert_keys(n1, op, insert_keys); 2024 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2030 } 2025 }
2031 2026
2032 bch_keylist_add(parent_keys, &n1->key); 2027 bch_keylist_add(parent_keys, &n1->key);
@@ -2036,7 +2031,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
2036 /* Depth increases, make a new root */ 2031 /* Depth increases, make a new root */
2037 2032
2038 bkey_copy_key(&n3->key, &MAX_KEY); 2033 bkey_copy_key(&n3->key, &MAX_KEY);
2039 bch_btree_insert_keys(n3, op, parent_keys); 2034 bch_btree_insert_keys(n3, op, parent_keys, NULL);
2040 bch_btree_node_write(n3, &cl); 2035 bch_btree_node_write(n3, &cl);
2041 2036
2042 closure_sync(&cl); 2037 closure_sync(&cl);
@@ -2091,7 +2086,8 @@ err:
2091 2086
2092static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 2087static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2093 struct keylist *insert_keys, 2088 struct keylist *insert_keys,
2094 atomic_t *journal_ref) 2089 atomic_t *journal_ref,
2090 struct bkey *replace_key)
2095{ 2091{
2096 int ret = 0; 2092 int ret = 0;
2097 struct keylist split_keys; 2093 struct keylist split_keys;
@@ -2101,6 +2097,8 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2101 BUG_ON(b->level); 2097 BUG_ON(b->level);
2102 2098
2103 do { 2099 do {
2100 BUG_ON(b->level && replace_key);
2101
2104 if (should_split(b)) { 2102 if (should_split(b)) {
2105 if (current->bio_list) { 2103 if (current->bio_list) {
2106 op->lock = b->c->root->level + 1; 2104 op->lock = b->c->root->level + 1;
@@ -2112,8 +2110,9 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2112 struct btree *parent = b->parent; 2110 struct btree *parent = b->parent;
2113 2111
2114 ret = btree_split(b, op, insert_keys, 2112 ret = btree_split(b, op, insert_keys,
2115 &split_keys); 2113 &split_keys, replace_key);
2116 insert_keys = &split_keys; 2114 insert_keys = &split_keys;
2115 replace_key = NULL;
2117 b = parent; 2116 b = parent;
2118 if (!ret) 2117 if (!ret)
2119 ret = -EINTR; 2118 ret = -EINTR;
@@ -2121,7 +2120,8 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2121 } else { 2120 } else {
2122 BUG_ON(write_block(b) != b->sets[b->nsets].data); 2121 BUG_ON(write_block(b) != b->sets[b->nsets].data);
2123 2122
2124 if (bch_btree_insert_keys(b, op, insert_keys)) { 2123 if (bch_btree_insert_keys(b, op, insert_keys,
2124 replace_key)) {
2125 if (!b->level) { 2125 if (!b->level) {
2126 bch_btree_leaf_dirty(b, journal_ref); 2126 bch_btree_leaf_dirty(b, journal_ref);
2127 } else { 2127 } else {
@@ -2165,9 +2165,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2165 2165
2166 bch_keylist_add(&insert, check_key); 2166 bch_keylist_add(&insert, check_key);
2167 2167
2168 BUG_ON(op->type != BTREE_INSERT); 2168 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2169
2170 ret = bch_btree_insert_node(b, op, &insert, NULL);
2171 2169
2172 BUG_ON(!ret && !bch_keylist_empty(&insert)); 2170 BUG_ON(!ret && !bch_keylist_empty(&insert));
2173out: 2171out:
@@ -2177,7 +2175,8 @@ out:
2177} 2175}
2178 2176
2179static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op, 2177static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
2180 struct keylist *keys, atomic_t *journal_ref) 2178 struct keylist *keys, atomic_t *journal_ref,
2179 struct bkey *replace_key)
2181{ 2180{
2182 if (bch_keylist_empty(keys)) 2181 if (bch_keylist_empty(keys))
2183 return 0; 2182 return 0;
@@ -2194,14 +2193,17 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
2194 return -EIO; 2193 return -EIO;
2195 } 2194 }
2196 2195
2197 return btree(insert_recurse, k, b, op, keys, journal_ref); 2196 return btree(insert_recurse, k, b, op, keys,
2197 journal_ref, replace_key);
2198 } else { 2198 } else {
2199 return bch_btree_insert_node(b, op, keys, journal_ref); 2199 return bch_btree_insert_node(b, op, keys,
2200 journal_ref, replace_key);
2200 } 2201 }
2201} 2202}
2202 2203
2203int bch_btree_insert(struct btree_op *op, struct cache_set *c, 2204int bch_btree_insert(struct btree_op *op, struct cache_set *c,
2204 struct keylist *keys, atomic_t *journal_ref) 2205 struct keylist *keys, atomic_t *journal_ref,
2206 struct bkey *replace_key)
2205{ 2207{
2206 int ret = 0; 2208 int ret = 0;
2207 2209
@@ -2209,7 +2211,8 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
2209 2211
2210 while (!bch_keylist_empty(keys)) { 2212 while (!bch_keylist_empty(keys)) {
2211 op->lock = 0; 2213 op->lock = 0;
2212 ret = btree_root(insert_recurse, c, op, keys, journal_ref); 2214 ret = btree_root(insert_recurse, c, op, keys,
2215 journal_ref, replace_key);
2213 2216
2214 if (ret == -EAGAIN) { 2217 if (ret == -EAGAIN) {
2215 BUG(); 2218 BUG();
@@ -2217,8 +2220,7 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
2217 } else if (ret) { 2220 } else if (ret) {
2218 struct bkey *k; 2221 struct bkey *k;
2219 2222
2220 pr_err("error %i trying to insert key for %s", 2223 pr_err("error %i", ret);
2221 ret, op_type(op));
2222 2224
2223 while ((k = bch_keylist_pop(keys))) 2225 while ((k = bch_keylist_pop(keys)))
2224 bkey_put(c, k, 0); 2226 bkey_put(c, k, 0);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 34ee5359b262..6ff08be3e0c9 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -240,15 +240,7 @@ struct btree_op {
240 /* Btree level at which we start taking write locks */ 240 /* Btree level at which we start taking write locks */
241 short lock; 241 short lock;
242 242
243 /* Btree insertion type */
244 enum {
245 BTREE_INSERT,
246 BTREE_REPLACE
247 } type:8;
248
249 unsigned insert_collision:1; 243 unsigned insert_collision:1;
250
251 BKEY_PADDED(replace);
252}; 244};
253 245
254static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) 246static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
@@ -290,7 +282,7 @@ struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
290int bch_btree_insert_check_key(struct btree *, struct btree_op *, 282int bch_btree_insert_check_key(struct btree *, struct btree_op *,
291 struct bkey *); 283 struct bkey *);
292int bch_btree_insert(struct btree_op *, struct cache_set *, 284int bch_btree_insert(struct btree_op *, struct cache_set *,
293 struct keylist *, atomic_t *); 285 struct keylist *, atomic_t *, struct bkey *);
294 286
295int bch_gc_thread_start(struct cache_set *); 287int bch_gc_thread_start(struct cache_set *);
296size_t bch_btree_gc_finish(struct cache_set *); 288size_t bch_btree_gc_finish(struct cache_set *);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 20e900ad5010..592adf51128f 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -322,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
322 bkey_copy(keylist.top, k); 322 bkey_copy(keylist.top, k);
323 bch_keylist_push(&keylist); 323 bch_keylist_push(&keylist);
324 324
325 ret = bch_btree_insert(&op, s, &keylist, i->pin); 325 ret = bch_btree_insert(&op, s, &keylist, i->pin, NULL);
326 if (ret) 326 if (ret)
327 goto err; 327 goto err;
328 328
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 219356f6159d..c45ba4f21bae 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -105,8 +105,8 @@ static void write_moving(struct closure *cl)
105 s->writeback = KEY_DIRTY(&io->w->key); 105 s->writeback = KEY_DIRTY(&io->w->key);
106 s->csum = KEY_CSUM(&io->w->key); 106 s->csum = KEY_CSUM(&io->w->key);
107 107
108 s->op.type = BTREE_REPLACE; 108 bkey_copy(&s->replace_key, &io->w->key);
109 bkey_copy(&s->op.replace, &io->w->key); 109 s->replace = true;
110 110
111 closure_init(&s->btree, cl); 111 closure_init(&s->btree, cl);
112 bch_data_insert(&s->btree); 112 bch_data_insert(&s->btree);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 5df44fbc9e1d..16a3e16f3ff4 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -217,6 +217,7 @@ static void bch_data_insert_keys(struct closure *cl)
217{ 217{
218 struct search *s = container_of(cl, struct search, btree); 218 struct search *s = container_of(cl, struct search, btree);
219 atomic_t *journal_ref = NULL; 219 atomic_t *journal_ref = NULL;
220 struct bkey *replace_key = s->replace ? &s->replace_key : NULL;
220 221
221 /* 222 /*
222 * If we're looping, might already be waiting on 223 * If we're looping, might already be waiting on
@@ -235,7 +236,8 @@ static void bch_data_insert_keys(struct closure *cl)
235 s->flush_journal 236 s->flush_journal
236 ? &s->cl : NULL); 237 ? &s->cl : NULL);
237 238
238 if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) { 239 if (bch_btree_insert(&s->op, s->c, &s->insert_keys,
240 journal_ref, replace_key)) {
239 s->error = -ENOMEM; 241 s->error = -ENOMEM;
240 s->insert_data_done = true; 242 s->insert_data_done = true;
241 } 243 }
@@ -1056,7 +1058,7 @@ static void cached_dev_read_done(struct closure *cl)
1056 1058
1057 if (s->cache_bio && 1059 if (s->cache_bio &&
1058 !test_bit(CACHE_SET_STOPPING, &s->c->flags)) { 1060 !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
1059 s->op.type = BTREE_REPLACE; 1061 BUG_ON(!s->replace);
1060 closure_call(&s->btree, bch_data_insert, NULL, cl); 1062 closure_call(&s->btree, bch_data_insert, NULL, cl);
1061 } 1063 }
1062 1064
@@ -1101,13 +1103,15 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
1101 1103
1102 s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada); 1104 s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);
1103 1105
1104 s->op.replace = KEY(s->inode, bio->bi_sector + 1106 s->replace_key = KEY(s->inode, bio->bi_sector +
1105 s->cache_bio_sectors, s->cache_bio_sectors); 1107 s->cache_bio_sectors, s->cache_bio_sectors);
1106 1108
1107 ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace); 1109 ret = bch_btree_insert_check_key(b, &s->op, &s->replace_key);
1108 if (ret) 1110 if (ret)
1109 return ret; 1111 return ret;
1110 1112
1113 s->replace = true;
1114
1111 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); 1115 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
1112 1116
1113 /* btree_search_recurse()'s btree iterator is no good anymore */ 1117 /* btree_search_recurse()'s btree iterator is no good anymore */
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index ed578aa53ee2..5ea7a0e6fca0 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -33,6 +33,7 @@ struct search {
33 unsigned flush_journal:1; 33 unsigned flush_journal:1;
34 34
35 unsigned insert_data_done:1; 35 unsigned insert_data_done:1;
36 unsigned replace:1;
36 37
37 uint16_t write_prio; 38 uint16_t write_prio;
38 39
@@ -44,6 +45,7 @@ struct search {
44 45
45 /* Anything past this point won't get zeroed in search_alloc() */ 46 /* Anything past this point won't get zeroed in search_alloc() */
46 struct keylist insert_keys; 47 struct keylist insert_keys;
48 BKEY_PADDED(replace_key);
47}; 49};
48 50
49unsigned bch_get_congested(struct cache_set *); 51unsigned bch_get_congested(struct cache_set *);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d0968e8938f7..346a5341faca 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -146,16 +146,14 @@ static void write_dirty_finish(struct closure *cl)
146 bch_btree_op_init(&op, -1); 146 bch_btree_op_init(&op, -1);
147 bch_keylist_init(&keys); 147 bch_keylist_init(&keys);
148 148
149 op.type = BTREE_REPLACE; 149 bkey_copy(keys.top, &w->key);
150 bkey_copy(&op.replace, &w->key); 150 SET_KEY_DIRTY(keys.top, false);
151 151 bch_keylist_push(&keys);
152 SET_KEY_DIRTY(&w->key, false);
153 bch_keylist_add(&keys, &w->key);
154 152
155 for (i = 0; i < KEY_PTRS(&w->key); i++) 153 for (i = 0; i < KEY_PTRS(&w->key); i++)
156 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); 154 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
157 155
158 bch_btree_insert(&op, dc->disk.c, &keys, NULL); 156 bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key);
159 157
160 if (op.insert_collision) 158 if (op.insert_collision)
161 trace_bcache_writeback_collision(&w->key); 159 trace_bcache_writeback_collision(&w->key);