author	Qu Wenruo <wqu@suse.com>	2018-09-27 02:42:31 -0400
committer	David Sterba <dsterba@suse.com>	2018-10-15 11:23:36 -0400
commit	ea49f3e73c4b7252c1569906c1b2cd54605af3c9
tree	37a5e6c58bbeb69f30698ebd243399ebb64f2a0e
parent	25982561db7f43f29305704f9f24ff36ea7d5671
btrfs: qgroup: Introduce function to find all new tree blocks of reloc tree
Introduce a new function, qgroup_trace_new_subtree_blocks(), to iterate
all new tree blocks in a reloc tree. This allows qgroup to skip
unrelated tree blocks during balance, which should greatly speed up
balance when quota is enabled.

The function qgroup_trace_new_subtree_blocks() itself only cares about
new tree blocks in the reloc tree. Its main work is:

1) Read out tree blocks according to parent pointers

2) Do a recursive depth-first search
   Call the same function on each child tree block, with the search
   level set to the current level - 1, and skip all children whose
   generation is smaller than @last_snapshot.

3) Call qgroup_trace_extent_swap() to trace tree blocks

So although the parameter list refers to the source file tree, those
parameters are not used here at all; they are only passed through to
qgroup_trace_extent_swap(). Thus, apart from the tree block reading
code, the core is short and is all about the recursive depth-first
search.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
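To make the control flow concrete, here is a minimal userspace sketch of the same generation-aware depth-first search, run over the diagram from the new comment block in the patch below. The toy_node type and the trace_block()/trace_new_blocks() helpers are hypothetical stand-ins invented for illustration, not kernel API; the real function walks extent buffers through a btrfs_path and reports blocks via qgroup_trace_extent_swap().

    #include <stdio.h>

    #define TOY_MAX_CHILDREN 4

    struct toy_node {
    	unsigned long long gen;	/* generation of this tree block */
    	int level;		/* 0 == leaf */
    	int nr_children;
    	struct toy_node *children[TOY_MAX_CHILDREN];
    };

    /* Stand-in for qgroup_trace_extent_swap(): just report the block. */
    static void trace_block(const struct toy_node *node)
    {
    	printf("trace block: level=%d gen=%llu\n", node->level, node->gen);
    }

    /*
     * Trace @node, then recurse into every child whose generation is
     * >= @last_snapshot.  Older subtrees are pruned entirely, since the
     * tree block swap cannot touch them.
     */
    static void trace_new_blocks(const struct toy_node *node,
    			     unsigned long long last_snapshot)
    {
    	int i;

    	trace_block(node);
    	if (node->level == 0)
    		return;
    	for (i = 0; i < node->nr_children; i++) {
    		if (node->children[i]->gen < last_snapshot)
    			continue;	/* old block, skip whole subtree */
    		trace_new_blocks(node->children[i], last_snapshot);
    	}
    }

    int main(void)
    {
    	/* Mirror the diagram: NN blocks have gen 100, OO(c) has gen 50. */
    	struct toy_node c = { .gen = 50,  .level = 0 };
    	struct toy_node d = { .gen = 100, .level = 0 };
    	struct toy_node b = { .gen = 100, .level = 1, .nr_children = 2,
    			      .children = { &c, &d } };

    	trace_new_blocks(&b, 100);	/* traces NN(b) and NN(d) only */
    	return 0;
    }

Run against the subtree rooted at NN(b) with last_snapshot = 100, this traces NN(b) and NN(d) and never visits OO(c), which is exactly the pruning that makes balance with quotas cheaper.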
 fs/btrfs/qgroup.c | 135 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 135 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 7086353153f3..0b49575698da 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1874,6 +1874,141 @@ out:
 	return ret;
 }
 
+/*
+ * Helper function to do recursive generation-aware depth-first search, to
+ * locate all new tree blocks in a subtree of reloc tree.
+ *
+ * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
+ *         reloc tree
+ * L2         NN (a)
+ *          /     \
+ * L1    OO        NN (b)
+ *      /  \      /  \
+ * L0  OO  OO    OO  NN
+ *                (c) (d)
+ * If we pass:
+ * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
+ * @cur_level = 1
+ * @root_level = 1
+ *
+ * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
+ * the above tree blocks along with their counterparts in the file tree.
+ * During the search, old tree block OO(c) will be skipped as tree block swap
+ * won't affect OO(c).
+ */
+static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
+					   struct extent_buffer *src_eb,
+					   struct btrfs_path *dst_path,
+					   int cur_level, int root_level,
+					   u64 last_snapshot)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct extent_buffer *eb;
+	bool need_cleanup = false;
+	int ret = 0;
+	int i;
+
+	/* Level sanity check */
+	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
+	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
+	    root_level < cur_level) {
+		btrfs_err_rl(fs_info,
+			"%s: bad levels, cur_level=%d root_level=%d",
+			__func__, cur_level, root_level);
+		return -EUCLEAN;
+	}
+
+	/* Read the tree block if needed */
+	if (dst_path->nodes[cur_level] == NULL) {
+		struct btrfs_key first_key;
+		int parent_slot;
+		u64 child_gen;
+		u64 child_bytenr;
+
+		/*
+		 * dst_path->nodes[root_level] must be initialized before
+		 * calling this function.
+		 */
+		if (cur_level == root_level) {
+			btrfs_err_rl(fs_info,
+	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
+				     __func__, root_level, root_level, cur_level);
+			return -EUCLEAN;
+		}
+
+		/*
+		 * We need to get child blockptr/gen from parent before we can
+		 * read it.
+		 */
+		eb = dst_path->nodes[cur_level + 1];
+		parent_slot = dst_path->slots[cur_level + 1];
+		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+		btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
+
+		/* This node is old, no need to trace */
+		if (child_gen < last_snapshot)
+			goto out;
+
+		eb = read_tree_block(fs_info, child_bytenr, child_gen,
+				     cur_level, &first_key);
+		if (IS_ERR(eb)) {
+			ret = PTR_ERR(eb);
+			goto out;
+		} else if (!extent_buffer_uptodate(eb)) {
+			free_extent_buffer(eb);
+			ret = -EIO;
+			goto out;
+		}
+
+		dst_path->nodes[cur_level] = eb;
+		dst_path->slots[cur_level] = 0;
+
+		btrfs_tree_read_lock(eb);
+		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+		need_cleanup = true;
+	}
+
+	/* Now record this tree block and its counterpart for qgroups */
+	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
+				       root_level);
+	if (ret < 0)
+		goto cleanup;
+
+	eb = dst_path->nodes[cur_level];
+
+	if (cur_level > 0) {
+		/* Iterate all child tree blocks */
+		for (i = 0; i < btrfs_header_nritems(eb); i++) {
+			/* Skip old tree blocks as they won't be swapped */
+			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
+				continue;
+			dst_path->slots[cur_level] = i;
+
+			/* Recursive call (at most 7 times) */
+			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
+					dst_path, cur_level - 1, root_level,
+					last_snapshot);
+			if (ret < 0)
+				goto cleanup;
+		}
+	}
+
+cleanup:
+	if (need_cleanup) {
+		/* Clean up */
+		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
+				     dst_path->locks[cur_level]);
+		free_extent_buffer(dst_path->nodes[cur_level]);
+		dst_path->nodes[cur_level] = NULL;
+		dst_path->slots[cur_level] = 0;
+		dst_path->locks[cur_level] = 0;
+	}
+out:
+	return ret;
+}
+
 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
 			       struct extent_buffer *root_eb,
 			       u64 root_gen, int root_level)