commit 829a60b9055c319f3656a01eb8cb78b1b86232ef
tree d6e709a97b9fc3274ef8de84cb52c2e3e0078807
parent 89ebb4a28ba9efb5c9b18ba552e784021957b14a
author Kent Overstreet <kmo@daterainc.com> 2013-11-11 20:02:31 -0500
committer Kent Overstreet <kmo@daterainc.com> 2014-01-08 16:05:14 -0500

bcache: Move insert_fixup() to btree_keys_ops

Now handling overlapping extents/keys is a method that's specific to
what the btree node contains.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
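For readers tracing the refactor, here is a minimal sketch of the dispatch
this commit introduces. It is an approximation for illustration, not the
verbatim bcache code: the real struct btree_keys_ops and the generic
bch_btree_insert_key() live in bset.h/bset.c, with the extent variant of
insert_fixup() in extents.c, and they carry more methods than shown here.

/*
 * Sketch only: approximate shape of the companion changes outside btree.c.
 * Member list and signatures are abridged for illustration.
 */
struct btree_keys_ops {
	/*
	 * Returns true on an insert collision; otherwise trims or splits
	 * any existing keys that overlap the key being inserted.
	 */
	bool	(*insert_fixup)(struct btree_keys *b, struct bkey *insert,
				struct btree_iter *iter,
				struct bkey *replace_key);
	/* ... sort, validity-check and merge methods elided ... */
};

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
			      struct bkey *replace_key)
{
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));

	/* Node-type-specific overlap handling (extents vs. btree pointers) */
	if (b->ops->insert_fixup(b, k, &iter, replace_key))
		return BTREE_INSERT_STATUS_NO_INSERT;

	/* ... generic merge/insert logic, returning a BTREE_INSERT_STATUS_*
	 * code on success ... */
	return BTREE_INSERT_STATUS_INSERT;
}

With this in place, btree.c no longer open-codes fix_overlapping_extents();
the generic insert path calls whichever insert_fixup() the node's ops table
provides, and btree_insert_key() below shrinks to a thin wrapper.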
Diffstat (limited to 'drivers/md/bcache/btree.c')
 drivers/md/bcache/btree.c | 246
 1 file changed, 17 insertions(+), 229 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index b14f34aa927d..463d2800a955 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -24,7 +24,6 @@
 #include "btree.h"
 #include "debug.h"
 #include "extents.h"
-#include "writeback.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -90,13 +89,6 @@
  * Test module load/unload
  */
 
-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
 
@@ -1792,230 +1784,23 @@ err:
 
 /* Btree insertion */
 
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-				    struct btree_iter *iter,
-				    struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+			     struct bkey *replace_key)
 {
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     offset, -sectors);
-	}
-
-	uint64_t old_offset;
-	unsigned old_size, sectors_found = 0;
-
-	while (1) {
-		struct bkey *k = bch_btree_iter_next(iter);
-		if (!k)
-			break;
-
-		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
-			if (KEY_SIZE(k))
-				break;
-			else
-				continue;
-		}
-
-		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-			continue;
-
-		old_offset = KEY_START(k);
-		old_size = KEY_SIZE(k);
-
-		/*
-		 * We might overlap with 0 size extents; we can't skip these
-		 * because if they're in the set we're inserting to we have to
-		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for replace
-		 * operations.
-		 */
-
-		if (replace_key && KEY_SIZE(k)) {
-			/*
-			 * k might have been split since we inserted/found the
-			 * key we're replacing
-			 */
-			unsigned i;
-			uint64_t offset = KEY_START(k) -
-				KEY_START(replace_key);
-
-			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(replace_key) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-				goto check_failed;
-
-			/* We didn't find a key that we were supposed to */
-			if (KEY_START(k) > KEY_START(insert) + sectors_found)
-				goto check_failed;
-
-			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-				goto check_failed;
-
-			/* skip past gen */
-			offset <<= 8;
-
-			BUG_ON(!KEY_PTRS(replace_key));
-
-			for (i = 0; i < KEY_PTRS(replace_key); i++)
-				if (k->ptr[i] != replace_key->ptr[i] + offset)
-					goto check_failed;
-
-			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-		}
-
-		if (bkey_cmp(insert, k) < 0 &&
-		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-			/*
-			 * We overlapped in the middle of an existing key: that
-			 * means we have to split the old key. But we have to do
-			 * slightly different things depending on whether the
-			 * old key has been written out yet.
-			 */
-
-			struct bkey *top;
-
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-			if (bkey_written(&b->keys, k)) {
-				/*
-				 * We insert a new key to cover the top of the
-				 * old key, and the old key is modified in place
-				 * to represent the bottom split.
-				 *
-				 * It's completely arbitrary whether the new key
-				 * is the top or the bottom, but it has to match
-				 * up with what btree_sort_fixup() does - it
-				 * doesn't check for this kind of overlap, it
-				 * depends on us inserting a new key for the top
-				 * here.
-				 */
-				top = bch_bset_search(&b->keys,
-						      bset_tree_last(&b->keys),
-						      insert);
-				bch_bset_insert(&b->keys, top, k);
-			} else {
-				BKEY_PADDED(key) temp;
-				bkey_copy(&temp.key, k);
-				bch_bset_insert(&b->keys, k, &temp.key);
-				top = bkey_next(k);
-			}
-
-			bch_cut_front(insert, top);
-			bch_cut_back(&START_KEY(insert), k);
-			bch_bset_fix_invalidated_key(&b->keys, k);
-			return false;
-		}
-
-		if (bkey_cmp(insert, k) < 0) {
-			bch_cut_front(insert, k);
-		} else {
-			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-				old_offset = KEY_START(insert);
-
-			if (bkey_written(&b->keys, k) &&
-			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-				/*
-				 * Completely overwrote, so we don't have to
-				 * invalidate the binary search tree
-				 */
-				bch_cut_front(k, k);
-			} else {
-				__bch_cut_back(&START_KEY(insert), k);
-				bch_bset_fix_invalidated_key(&b->keys, k);
-			}
-		}
-
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-	}
-
-check_failed:
-	if (replace_key) {
-		if (!sectors_found) {
-			return true;
-		} else if (sectors_found < KEY_SIZE(insert)) {
-			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-				       (KEY_SIZE(insert) - sectors_found));
-			SET_KEY_SIZE(insert, sectors_found);
-		}
-	}
-
-	return false;
-}
-
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k, struct bkey *replace_key)
-{
-	struct bset *i = btree_bset_last(b);
-	struct bkey *m, *prev;
-	unsigned status = BTREE_INSERT_STATUS_INSERT;
+	unsigned status;
 
 	BUG_ON(bkey_cmp(k, &b->key) > 0);
-	BUG_ON(b->level && !KEY_PTRS(k));
-	BUG_ON(!b->level && !KEY_OFFSET(k));
 
-	if (!b->level) {
-		struct btree_iter iter;
+	status = bch_btree_insert_key(&b->keys, k, replace_key);
+	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+		bch_check_keys(&b->keys, "%u for %s", status,
+			       replace_key ? "replace" : "insert");
 
-		/*
-		 * bset_search() returns the first key that is strictly greater
-		 * than the search key - but for back merging, we want to find
-		 * the previous key.
-		 */
-		prev = NULL;
-		m = bch_btree_iter_init(&b->keys, &iter,
-					PRECEDING_KEY(&START_KEY(k)));
-
-		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-			op->insert_collision = true;
-			return false;
-		}
-
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     KEY_START(k), KEY_SIZE(k));
-
-		while (m != bset_bkey_last(i) &&
-		       bkey_cmp(k, &START_KEY(m)) > 0)
-			prev = m, m = bkey_next(m);
-
-		if (key_merging_disabled(b->c))
-			goto insert;
-
-		/* prev is in the tree, if we merge we're done */
-		status = BTREE_INSERT_STATUS_BACK_MERGE;
-		if (prev &&
-		    bch_bkey_try_merge(&b->keys, prev, k))
-			goto merged;
-
-		status = BTREE_INSERT_STATUS_OVERWROTE;
-		if (m != bset_bkey_last(i) &&
-		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-			goto copy;
-
-		status = BTREE_INSERT_STATUS_FRONT_MERGE;
-		if (m != bset_bkey_last(i) &&
-		    bch_bkey_try_merge(&b->keys, k, m))
-			goto copy;
-	} else {
-		BUG_ON(replace_key);
-		m = bch_bset_search(&b->keys, bset_tree_last(&b->keys), k);
-	}
-
-insert:	bch_bset_insert(&b->keys, m, k);
-copy:	bkey_copy(m, k);
-merged:
-	bch_check_keys(&b->keys, "%u for %s", status,
-		       replace_key ? "replace" : "insert");
-
-	if (b->level && !KEY_OFFSET(k))
-		btree_current_write(b)->prio_blocked++;
-
-	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
-
-	return true;
+		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+					      status);
+		return true;
+	} else
+		return false;
 }
 
 static size_t insert_u64s_remaining(struct btree *b)
@@ -2048,7 +1833,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 			if (!b->level)
 				bkey_put(b->c, k);
 
-			ret |= btree_insert_key(b, op, k, replace_key);
+			ret |= btree_insert_key(b, k, replace_key);
 			bch_keylist_pop_front(insert_keys);
 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
 			BKEY_PADDED(key) temp;
@@ -2057,13 +1842,16 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 			bch_cut_back(&b->key, &temp.key);
 			bch_cut_front(&b->key, insert_keys->keys);
 
-			ret |= btree_insert_key(b, op, &temp.key, replace_key);
+			ret |= btree_insert_key(b, &temp.key, replace_key);
 			break;
 		} else {
 			break;
 		}
 	}
 
+	if (!ret)
+		op->insert_collision = true;
+
 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
 	BUG_ON(bch_count_data(&b->keys) < oldsize);
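
A closing note on the status codes: the enum deleted from btree.c in the
second hunk moves to a shared header so the generic code can return it, and
the comparison against BTREE_INSERT_STATUS_NO_INSERT above implies a new
"nothing inserted" value. Assuming it lands in bset.h alongside the other
btree_keys declarations, the shared definition plausibly reads:

/* Assumed shared definition; btree.c's private copy above is deleted. */
enum {
	BTREE_INSERT_STATUS_NO_INSERT,	/* new: key was not inserted */
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

This is also why bch_btree_insert_keys() now sets op->insert_collision
itself: with no struct btree_op threaded through btree_insert_key(), a pass
over the keylist that inserts nothing is the collision signal.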