Diffstat (limited to 'drivers/md')
 drivers/md/bcache/bset.c    |  50
 drivers/md/bcache/bset.h    |  12
 drivers/md/bcache/btree.c   | 246
 drivers/md/bcache/extents.c | 178
 4 files changed, 257 insertions(+), 229 deletions(-)
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 2ff75f3199fa..4a7113236034 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -676,6 +676,8 @@ void bch_bset_build_written_tree(struct btree_keys *b)
 }
 EXPORT_SYMBOL(bch_bset_build_written_tree);
 
+/* Insert */
+
 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
 	struct bset_tree *t;
@@ -791,6 +793,54 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
 }
 EXPORT_SYMBOL(bch_bset_insert);
 
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+			      struct bkey *replace_key)
+{
+	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+	struct bset *i = bset_tree_last(b)->data;
+	struct bkey *m, *prev = NULL;
+	struct btree_iter iter;
+
+	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+				? PRECEDING_KEY(&START_KEY(k))
+				: PRECEDING_KEY(k));
+
+	if (b->ops->insert_fixup(b, k, &iter, replace_key))
+		return status;
+
+	status = BTREE_INSERT_STATUS_INSERT;
+
+	while (m != bset_bkey_last(i) &&
+	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+		prev = m, m = bkey_next(m);
+
+	/* prev is in the tree, if we merge we're done */
+	status = BTREE_INSERT_STATUS_BACK_MERGE;
+	if (prev &&
+	    bch_bkey_try_merge(b, prev, k))
+		goto merged;
+#if 0
+	status = BTREE_INSERT_STATUS_OVERWROTE;
+	if (m != bset_bkey_last(i) &&
+	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+		goto copy;
+#endif
+	status = BTREE_INSERT_STATUS_FRONT_MERGE;
+	if (m != bset_bkey_last(i) &&
+	    bch_bkey_try_merge(b, k, m))
+		goto copy;
+
+	bch_bset_insert(b, m, k);
+copy:	bkey_copy(m, k);
+merged:
+	return status;
+}
+EXPORT_SYMBOL(bch_btree_insert_key);
+
+/* Lookup */
+
 struct bset_search_iter {
 	struct bkey *l, *r;
 };
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 4aa199d03344..759df830bb14 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -189,6 +189,8 @@ struct btree_keys_ops {
 	bool (*sort_cmp)(struct btree_iter_set,
 			 struct btree_iter_set);
 	struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *);
+	bool (*insert_fixup)(struct btree_keys *, struct bkey *,
+			     struct btree_iter *, struct bkey *);
 	bool (*key_invalid)(struct btree_keys *,
 			    const struct bkey *);
 	bool (*key_bad)(struct btree_keys *, const struct bkey *);
@@ -286,6 +288,16 @@ void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
 void bch_bset_build_written_tree(struct btree_keys *);
 void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
 void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+			      struct bkey *);
+
+enum {
+	BTREE_INSERT_STATUS_NO_INSERT = 0,
+	BTREE_INSERT_STATUS_INSERT,
+	BTREE_INSERT_STATUS_BACK_MERGE,
+	BTREE_INSERT_STATUS_OVERWROTE,
+	BTREE_INSERT_STATUS_FRONT_MERGE,
+};
 
 /*
  * Tries to merge l and r: l should be lower than r
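
A note on the new public helper: bch_btree_insert_key() reports what happened through these status codes, and BTREE_INSERT_STATUS_NO_INSERT is the one failure case (the insert_fixup hook refused the key, e.g. a replace that found nothing to replace). A minimal caller sketch, assuming only the declarations above; example_insert is a hypothetical name, not part of the patch (the real caller is btree_insert_key() in btree.c below):

/* Hypothetical caller sketch, not bcache code. */
static bool example_insert(struct btree_keys *keys, struct bkey *k,
			   struct bkey *replace_key)
{
	unsigned status = bch_btree_insert_key(keys, k, replace_key);

	/* Any status other than NO_INSERT means the key went in,
	 * possibly by merging with an existing neighbour. */
	return status != BTREE_INSERT_STATUS_NO_INSERT;
}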
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index b14f34aa927d..463d2800a955 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -24,7 +24,6 @@
 #include "btree.h"
 #include "debug.h"
 #include "extents.h"
-#include "writeback.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -90,13 +89,6 @@
  * Test module load/unload
  */
 
-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
 
@@ -1792,230 +1784,23 @@ err:
 
 /* Btree insertion */
 
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-				    struct btree_iter *iter,
-				    struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+			     struct bkey *replace_key)
 {
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     offset, -sectors);
-	}
-
-	uint64_t old_offset;
-	unsigned old_size, sectors_found = 0;
-
-	while (1) {
-		struct bkey *k = bch_btree_iter_next(iter);
-		if (!k)
-			break;
-
-		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
-			if (KEY_SIZE(k))
-				break;
-			else
-				continue;
-		}
-
-		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-			continue;
-
-		old_offset = KEY_START(k);
-		old_size = KEY_SIZE(k);
-
-		/*
-		 * We might overlap with 0 size extents; we can't skip these
-		 * because if they're in the set we're inserting to we have to
-		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for replace
-		 * operations.
-		 */
-
-		if (replace_key && KEY_SIZE(k)) {
-			/*
-			 * k might have been split since we inserted/found the
-			 * key we're replacing
-			 */
-			unsigned i;
-			uint64_t offset = KEY_START(k) -
-				KEY_START(replace_key);
-
-			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(replace_key) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-				goto check_failed;
-
-			/* We didn't find a key that we were supposed to */
-			if (KEY_START(k) > KEY_START(insert) + sectors_found)
-				goto check_failed;
-
-			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-				goto check_failed;
-
-			/* skip past gen */
-			offset <<= 8;
-
-			BUG_ON(!KEY_PTRS(replace_key));
-
-			for (i = 0; i < KEY_PTRS(replace_key); i++)
-				if (k->ptr[i] != replace_key->ptr[i] + offset)
-					goto check_failed;
-
-			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-		}
-
-		if (bkey_cmp(insert, k) < 0 &&
-		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-			/*
-			 * We overlapped in the middle of an existing key: that
-			 * means we have to split the old key. But we have to do
-			 * slightly different things depending on whether the
-			 * old key has been written out yet.
-			 */
-
-			struct bkey *top;
-
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-			if (bkey_written(&b->keys, k)) {
-				/*
-				 * We insert a new key to cover the top of the
-				 * old key, and the old key is modified in place
-				 * to represent the bottom split.
-				 *
-				 * It's completely arbitrary whether the new key
-				 * is the top or the bottom, but it has to match
-				 * up with what btree_sort_fixup() does - it
-				 * doesn't check for this kind of overlap, it
-				 * depends on us inserting a new key for the top
-				 * here.
-				 */
-				top = bch_bset_search(&b->keys,
-						      bset_tree_last(&b->keys),
-						      insert);
-				bch_bset_insert(&b->keys, top, k);
-			} else {
-				BKEY_PADDED(key) temp;
-				bkey_copy(&temp.key, k);
-				bch_bset_insert(&b->keys, k, &temp.key);
-				top = bkey_next(k);
-			}
-
-			bch_cut_front(insert, top);
-			bch_cut_back(&START_KEY(insert), k);
-			bch_bset_fix_invalidated_key(&b->keys, k);
-			return false;
-		}
-
-		if (bkey_cmp(insert, k) < 0) {
-			bch_cut_front(insert, k);
-		} else {
-			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-				old_offset = KEY_START(insert);
-
-			if (bkey_written(&b->keys, k) &&
-			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-				/*
-				 * Completely overwrote, so we don't have to
-				 * invalidate the binary search tree
-				 */
-				bch_cut_front(k, k);
-			} else {
-				__bch_cut_back(&START_KEY(insert), k);
-				bch_bset_fix_invalidated_key(&b->keys, k);
-			}
-		}
-
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-	}
-
-check_failed:
-	if (replace_key) {
-		if (!sectors_found) {
-			return true;
-		} else if (sectors_found < KEY_SIZE(insert)) {
-			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-				       (KEY_SIZE(insert) - sectors_found));
-			SET_KEY_SIZE(insert, sectors_found);
-		}
-	}
-
-	return false;
-}
-
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k, struct bkey *replace_key)
-{
-	struct bset *i = btree_bset_last(b);
-	struct bkey *m, *prev;
-	unsigned status = BTREE_INSERT_STATUS_INSERT;
+	unsigned status;
 
 	BUG_ON(bkey_cmp(k, &b->key) > 0);
-	BUG_ON(b->level && !KEY_PTRS(k));
-	BUG_ON(!b->level && !KEY_OFFSET(k));
 
-	if (!b->level) {
-		struct btree_iter iter;
+	status = bch_btree_insert_key(&b->keys, k, replace_key);
+	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+		bch_check_keys(&b->keys, "%u for %s", status,
+			       replace_key ? "replace" : "insert");
 
-		/*
-		 * bset_search() returns the first key that is strictly greater
-		 * than the search key - but for back merging, we want to find
-		 * the previous key.
-		 */
-		prev = NULL;
-		m = bch_btree_iter_init(&b->keys, &iter,
-					PRECEDING_KEY(&START_KEY(k)));
-
-		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-			op->insert_collision = true;
-			return false;
-		}
-
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     KEY_START(k), KEY_SIZE(k));
-
-		while (m != bset_bkey_last(i) &&
-		       bkey_cmp(k, &START_KEY(m)) > 0)
-			prev = m, m = bkey_next(m);
-
-		if (key_merging_disabled(b->c))
-			goto insert;
-
-		/* prev is in the tree, if we merge we're done */
-		status = BTREE_INSERT_STATUS_BACK_MERGE;
-		if (prev &&
-		    bch_bkey_try_merge(&b->keys, prev, k))
-			goto merged;
-
-		status = BTREE_INSERT_STATUS_OVERWROTE;
-		if (m != bset_bkey_last(i) &&
-		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-			goto copy;
-
-		status = BTREE_INSERT_STATUS_FRONT_MERGE;
-		if (m != bset_bkey_last(i) &&
-		    bch_bkey_try_merge(&b->keys, k, m))
-			goto copy;
-	} else {
-		BUG_ON(replace_key);
-		m = bch_bset_search(&b->keys, bset_tree_last(&b->keys), k);
-	}
-
-insert:	bch_bset_insert(&b->keys, m, k);
-copy:	bkey_copy(m, k);
-merged:
-	bch_check_keys(&b->keys, "%u for %s", status,
-		       replace_key ? "replace" : "insert");
-
-	if (b->level && !KEY_OFFSET(k))
-		btree_current_write(b)->prio_blocked++;
-
-	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
-
-	return true;
+		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+					      status);
+		return true;
+	} else
+		return false;
 }
 
 static size_t insert_u64s_remaining(struct btree *b)
@@ -2048,7 +1833,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 			if (!b->level)
 				bkey_put(b->c, k);
 
-			ret |= btree_insert_key(b, op, k, replace_key);
+			ret |= btree_insert_key(b, k, replace_key);
 			bch_keylist_pop_front(insert_keys);
 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
 			BKEY_PADDED(key) temp;
@@ -2057,13 +1842,16 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 			bch_cut_back(&b->key, &temp.key);
 			bch_cut_front(&b->key, insert_keys->keys);
 
-			ret |= btree_insert_key(b, op, &temp.key, replace_key);
+			ret |= btree_insert_key(b, &temp.key, replace_key);
 			break;
 		} else {
 			break;
 		}
 	}
 
+	if (!ret)
+		op->insert_collision = true;
+
 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
 	BUG_ON(bch_count_data(&b->keys) < oldsize);
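
With btree_insert_key() no longer taking a btree_op, the collision signal travels through return values instead of being set deep inside fix_overlapping_extents(). A simplified sketch of the new flow for a single key; insert_one_key is a hypothetical stand-in for the keylist loop above (the patch itself checks ret once, after the whole loop):

/* Hypothetical illustration, not bcache code. */
static bool insert_one_key(struct btree *b, struct btree_op *op,
			   struct bkey *k, struct bkey *replace_key)
{
	bool ret = btree_insert_key(b, k, replace_key);

	/* A false return means BTREE_INSERT_STATUS_NO_INSERT: the
	 * extent layer rejected a replace, i.e. a collision. */
	if (!ret)
		op->insert_collision = true;

	return ret;
}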
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index bc1c3eed07e7..d6de3c76a87b 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -222,8 +222,22 @@ static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
 	return false;
 }
 
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+				       struct bkey *insert,
+				       struct btree_iter *iter,
+				       struct bkey *replace_key)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+
+	if (!KEY_OFFSET(insert))
+		btree_current_write(b)->prio_blocked++;
+
+	return false;
+}
+
 const struct btree_keys_ops bch_btree_keys_ops = {
 	.sort_cmp	= bch_key_sort_cmp,
+	.insert_fixup	= bch_btree_ptr_insert_fixup,
 	.key_invalid	= bch_btree_ptr_invalid,
 	.key_bad	= bch_btree_ptr_bad,
 	.key_to_text	= bch_extent_to_text,
@@ -294,6 +308,169 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 	return NULL;
 }
 
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+				    struct bkey *insert,
+				    struct btree_iter *iter,
+				    struct bkey *replace_key)
+{
+	struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
+	{
+		if (KEY_DIRTY(k))
+			bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+						     offset, -sectors);
+	}
+
+	uint64_t old_offset;
+	unsigned old_size, sectors_found = 0;
+
+	BUG_ON(!KEY_OFFSET(insert));
+	BUG_ON(!KEY_SIZE(insert));
+
+	while (1) {
+		struct bkey *k = bch_btree_iter_next(iter);
+		if (!k)
+			break;
+
+		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+			if (KEY_SIZE(k))
+				break;
+			else
+				continue;
+		}
+
+		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+			continue;
+
+		old_offset = KEY_START(k);
+		old_size = KEY_SIZE(k);
+
+		/*
+		 * We might overlap with 0 size extents; we can't skip these
+		 * because if they're in the set we're inserting to we have to
+		 * adjust them so they don't overlap with the key we're
+		 * inserting. But we don't want to check them for replace
+		 * operations.
+		 */
+
+		if (replace_key && KEY_SIZE(k)) {
+			/*
+			 * k might have been split since we inserted/found the
+			 * key we're replacing
+			 */
+			unsigned i;
+			uint64_t offset = KEY_START(k) -
+				KEY_START(replace_key);
+
+			/* But it must be a subset of the replace key */
+			if (KEY_START(k) < KEY_START(replace_key) ||
+			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+				goto check_failed;
+
+			/* We didn't find a key that we were supposed to */
+			if (KEY_START(k) > KEY_START(insert) + sectors_found)
+				goto check_failed;
+
+			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
+				goto check_failed;
+
+			/* skip past gen */
+			offset <<= 8;
+
+			BUG_ON(!KEY_PTRS(replace_key));
+
+			for (i = 0; i < KEY_PTRS(replace_key); i++)
+				if (k->ptr[i] != replace_key->ptr[i] + offset)
+					goto check_failed;
+
+			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+		}
+
+		if (bkey_cmp(insert, k) < 0 &&
+		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+			/*
+			 * We overlapped in the middle of an existing key: that
+			 * means we have to split the old key. But we have to do
+			 * slightly different things depending on whether the
+			 * old key has been written out yet.
+			 */
+
+			struct bkey *top;
+
+			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+
+			if (bkey_written(b, k)) {
+				/*
+				 * We insert a new key to cover the top of the
+				 * old key, and the old key is modified in place
+				 * to represent the bottom split.
+				 *
+				 * It's completely arbitrary whether the new key
+				 * is the top or the bottom, but it has to match
+				 * up with what btree_sort_fixup() does - it
+				 * doesn't check for this kind of overlap, it
+				 * depends on us inserting a new key for the top
+				 * here.
+				 */
+				top = bch_bset_search(b, bset_tree_last(b),
+						      insert);
+				bch_bset_insert(b, top, k);
+			} else {
+				BKEY_PADDED(key) temp;
+				bkey_copy(&temp.key, k);
+				bch_bset_insert(b, k, &temp.key);
+				top = bkey_next(k);
+			}
+
+			bch_cut_front(insert, top);
+			bch_cut_back(&START_KEY(insert), k);
+			bch_bset_fix_invalidated_key(b, k);
+			goto out;
+		}
+
+		if (bkey_cmp(insert, k) < 0) {
+			bch_cut_front(insert, k);
+		} else {
+			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+				old_offset = KEY_START(insert);
+
+			if (bkey_written(b, k) &&
+			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+				/*
+				 * Completely overwrote, so we don't have to
+				 * invalidate the binary search tree
+				 */
+				bch_cut_front(k, k);
+			} else {
+				__bch_cut_back(&START_KEY(insert), k);
+				bch_bset_fix_invalidated_key(b, k);
+			}
+		}
+
+		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+	}
+
+check_failed:
+	if (replace_key) {
+		if (!sectors_found) {
+			return true;
+		} else if (sectors_found < KEY_SIZE(insert)) {
+			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+				       (KEY_SIZE(insert) - sectors_found));
+			SET_KEY_SIZE(insert, sectors_found);
+		}
+	}
+out:
+	if (KEY_DIRTY(insert))
+		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+					     KEY_START(insert),
+					     KEY_SIZE(insert));
+
+	return false;
+}
+
 static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
 {
 	struct btree *b = container_of(bk, struct btree, keys);
@@ -435,6 +612,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
 const struct btree_keys_ops bch_extent_keys_ops = {
 	.sort_cmp	= bch_extent_sort_cmp,
 	.sort_fixup	= bch_extent_sort_fixup,
+	.insert_fixup	= bch_extent_insert_fixup,
 	.key_invalid	= bch_extent_invalid,
 	.key_bad	= bch_extent_bad,
 	.key_merge	= bch_extent_merge,
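
Taken together, the patch makes insertion a generic path in bset.c that dispatches mode-specific work (extent overlap fixup, btree-pointer prio accounting) through btree_keys_ops, so bset.c no longer needs to know about extents at all. A self-contained toy model of that dispatch shape, with illustrative names only (nothing below is bcache code):

#include <stdbool.h>
#include <stdio.h>

struct toy_keys;

struct toy_keys_ops {
	/* returns true to reject the insert, as insert_fixup does */
	bool (*insert_fixup)(struct toy_keys *, int key);
	bool is_extents;
};

struct toy_keys {
	const struct toy_keys_ops *ops;
};

static bool toy_extent_fixup(struct toy_keys *k, int key)
{
	printf("extent fixup for %d\n", key);
	return false;	/* never rejects in this toy */
}

static const struct toy_keys_ops toy_extent_ops = {
	.insert_fixup	= toy_extent_fixup,
	.is_extents	= true,
};

/* Generic code calls through the table, like bch_btree_insert_key(). */
static bool toy_insert(struct toy_keys *k, int key)
{
	if (k->ops->insert_fixup(k, key))
		return false;
	printf("inserted %d\n", key);
	return true;
}

int main(void)
{
	struct toy_keys k = { .ops = &toy_extent_ops };
	return toy_insert(&k, 42) ? 0 : 1;
}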