author    Jan Schmidt <list.btrfs@jan-o-sch.net>    2013-05-28 11:47:24 -0400
committer Josef Bacik <jbacik@fusionio.com>         2013-06-14 11:30:10 -0400
commit    b382a324b60f4923e9fc8e11f023e4f493c51318
tree      4f93e6d8a6671abbd240df83ee49f972e212bc4b /fs/btrfs/qgroup.c
parent    eb1716af887375f1e2099f69bb89dfc5bd169bfa
Btrfs: fix qgroup rescan resume on mount
When called during mount, we cannot start the rescan worker thread until
open_ctree is done. This commit restructures the qgroup rescan internals to
enable a clean deferral of the rescan resume operation.

First of all, the struct qgroup_rescan is removed, saving us a malloc and
some initialization synchronization problems. Its only element (the worker
struct) now lives within fs_info just as the rest of the rescan code.

Then setting up a rescan worker is split into several reusable stages.
Currently we have three different rescan startup scenarios:

	(A) rescan ioctl
	(B) rescan resume by mount
	(C) rescan by quota enable

Each case needs its own combination of the four following steps:

	(1) set the progress [A, C: zero; B: state of umount]
	(2) commit the transaction [A]
	(3) set the counters [A, C: zero; B: state of umount]
	(4) start worker [A, B, C]

qgroup_rescan_init does step (1). There's no extra function added to commit a
transaction, we've got that already. qgroup_rescan_zero_tracking does step (3).
Step (4) is nothing more than a call to the generic btrfs_queue_worker.

We also get rid of a double check for the rescan progress during
btrfs_qgroup_account_ref, which is no longer required due to having step (2)
from the list above.

As a side effect, this commit prepares to move the rescan start code from
btrfs_run_qgroups (which is run during commit) to a less time critical
section.

Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
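For orientation, a minimal sketch (not part of the patch; the control flow is
illustrative only, the function names are the ones introduced in the diff
below) of how the startup scenarios combine these steps:

	/* (A) rescan ioctl and (C) quota enable: progress and counters start at zero */
	ret = qgroup_rescan_init(fs_info, 0, 1);		/* step (1) */
	if (!ret) {
		/* step (2) is done in the ioctl path only, as an ordinary
		 * transaction commit before the counters are cleared */
		qgroup_rescan_zero_tracking(fs_info);		/* step (3) */
		btrfs_queue_worker(&fs_info->qgroup_rescan_workers,	/* step (4) */
				   &fs_info->qgroup_rescan_work);
	}

	/*
	 * (B) rescan resume by mount: step (1) happens while the qgroup status
	 * item is read (progress = state of umount), steps (2) and (3) are not
	 * needed, and step (4) is deferred until open_ctree is done, via
	 * btrfs_qgroup_rescan_resume().
	 */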
Diffstat (limited to 'fs/btrfs/qgroup.c')
-rw-r--r--   fs/btrfs/qgroup.c   190
1 file changed, 121 insertions(+), 69 deletions(-)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c6ce64276869..1280eff8af56 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -98,13 +98,10 @@ struct btrfs_qgroup_list {
 	struct btrfs_qgroup *member;
 };
 
-struct qgroup_rescan {
-	struct btrfs_work work;
-	struct btrfs_fs_info *fs_info;
-};
-
-static void qgroup_rescan_start(struct btrfs_fs_info *fs_info,
-				struct qgroup_rescan *qscan);
+static int
+qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
+		   int init_flags);
+static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
 
 /* must be called with qgroup_ioctl_lock held */
 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
@@ -255,6 +252,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 	int slot;
 	int ret = 0;
 	u64 flags = 0;
+	u64 rescan_progress = 0;
 
 	if (!fs_info->quota_enabled)
 		return 0;
@@ -312,20 +310,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 			}
 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
 									  ptr);
-			fs_info->qgroup_rescan_progress.objectid =
-					btrfs_qgroup_status_rescan(l, ptr);
-			if (fs_info->qgroup_flags &
-			    BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
-				struct qgroup_rescan *qscan =
-					kmalloc(sizeof(*qscan), GFP_NOFS);
-				if (!qscan) {
-					ret = -ENOMEM;
-					goto out;
-				}
-				fs_info->qgroup_rescan_progress.type = 0;
-				fs_info->qgroup_rescan_progress.offset = 0;
-				qgroup_rescan_start(fs_info, qscan);
-			}
+			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
 			goto next1;
 		}
 
@@ -427,12 +412,16 @@ out:
 	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
 		fs_info->quota_enabled = 0;
 		fs_info->pending_quota_state = 0;
+	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
+		   ret >= 0) {
+		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
 	}
 	btrfs_free_path(path);
 
 	if (ret < 0) {
 		ulist_free(fs_info->qgroup_ulist);
 		fs_info->qgroup_ulist = NULL;
+		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 	}
 
 	return ret < 0 ? ret : 0;
@@ -1449,14 +1438,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	if (ret < 0)
 		return ret;
 
-	mutex_lock(&fs_info->qgroup_rescan_lock);
 	spin_lock(&fs_info->qgroup_lock);
-	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
-		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
-			ret = 0;
-			goto unlock;
-		}
-	}
 
 	quota_root = fs_info->quota_root;
 	if (!quota_root)
@@ -1496,7 +1478,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 
 unlock:
 	spin_unlock(&fs_info->qgroup_lock);
-	mutex_unlock(&fs_info->qgroup_rescan_lock);
 	ulist_free(roots);
 
 	return ret;
@@ -1544,9 +1525,12 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 
 	if (!ret && start_rescan_worker) {
-		ret = btrfs_qgroup_rescan(fs_info);
-		if (ret)
-			pr_err("btrfs: start rescan quota failed: %d\n", ret);
+		ret = qgroup_rescan_init(fs_info, 0, 1);
+		if (!ret) {
+			qgroup_rescan_zero_tracking(fs_info);
+			btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
+					   &fs_info->qgroup_rescan_work);
+		}
 		ret = 0;
 	}
 
@@ -1880,12 +1864,11 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
  * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
  */
 static int
-qgroup_rescan_leaf(struct qgroup_rescan *qscan, struct btrfs_path *path,
+qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		   struct btrfs_trans_handle *trans, struct ulist *tmp,
 		   struct extent_buffer *scratch_leaf)
 {
 	struct btrfs_key found;
-	struct btrfs_fs_info *fs_info = qscan->fs_info;
 	struct ulist *roots = NULL;
 	struct ulist_node *unode;
 	struct ulist_iterator uiter;
@@ -2013,11 +1996,10 @@ out:
 
 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
-	struct qgroup_rescan *qscan = container_of(work, struct qgroup_rescan,
-						   work);
+	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
+						     qgroup_rescan_work);
 	struct btrfs_path *path;
 	struct btrfs_trans_handle *trans = NULL;
-	struct btrfs_fs_info *fs_info = qscan->fs_info;
 	struct ulist *tmp = NULL;
 	struct extent_buffer *scratch_leaf = NULL;
 	int err = -ENOMEM;
@@ -2042,7 +2024,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 		if (!fs_info->quota_enabled) {
 			err = -EINTR;
 		} else {
-			err = qgroup_rescan_leaf(qscan, path, trans,
+			err = qgroup_rescan_leaf(fs_info, path, trans,
 						 tmp, scratch_leaf);
 		}
 		if (err > 0)
@@ -2055,7 +2037,6 @@ out:
 	kfree(scratch_leaf);
 	ulist_free(tmp);
 	btrfs_free_path(path);
-	kfree(qscan);
 
 	mutex_lock(&fs_info->qgroup_rescan_lock);
 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
@@ -2078,46 +2059,70 @@ out:
 	complete_all(&fs_info->qgroup_rescan_completion);
 }
 
-static void
-qgroup_rescan_start(struct btrfs_fs_info *fs_info, struct qgroup_rescan *qscan)
-{
-	memset(&qscan->work, 0, sizeof(qscan->work));
-	qscan->work.func = btrfs_qgroup_rescan_worker;
-	qscan->fs_info = fs_info;
-
-	pr_info("btrfs: qgroup scan started\n");
-	btrfs_queue_worker(&fs_info->qgroup_rescan_workers, &qscan->work);
-}
-
-int
-btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
+/*
+ * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
+ * memory required for the rescan context.
+ */
+static int
+qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
+		   int init_flags)
 {
 	int ret = 0;
-	struct rb_node *n;
-	struct btrfs_qgroup *qgroup;
-	struct qgroup_rescan *qscan = kmalloc(sizeof(*qscan), GFP_NOFS);
 
-	if (!qscan)
-		return -ENOMEM;
+	if (!init_flags &&
+	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
+	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
+		ret = -EINVAL;
+		goto err;
+	}
 
 	mutex_lock(&fs_info->qgroup_rescan_lock);
 	spin_lock(&fs_info->qgroup_lock);
-	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
-		ret = -EINPROGRESS;
-	else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
-		ret = -EINVAL;
-	if (ret) {
-		spin_unlock(&fs_info->qgroup_lock);
-		mutex_unlock(&fs_info->qgroup_rescan_lock);
-		kfree(qscan);
-		return ret;
+
+	if (init_flags) {
+		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+			ret = -EINPROGRESS;
+		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+			ret = -EINVAL;
+
+		if (ret) {
+			spin_unlock(&fs_info->qgroup_lock);
+			mutex_unlock(&fs_info->qgroup_rescan_lock);
+			goto err;
+		}
+
+		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 	}
 
-	fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 	memset(&fs_info->qgroup_rescan_progress, 0,
 		sizeof(fs_info->qgroup_rescan_progress));
+	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
+
+	spin_unlock(&fs_info->qgroup_lock);
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
+
 	init_completion(&fs_info->qgroup_rescan_completion);
 
+	memset(&fs_info->qgroup_rescan_work, 0,
+	       sizeof(fs_info->qgroup_rescan_work));
+	fs_info->qgroup_rescan_work.func = btrfs_qgroup_rescan_worker;
+
+	if (ret) {
+err:
+		pr_info("btrfs: qgroup_rescan_init failed with %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
+{
+	struct rb_node *n;
+	struct btrfs_qgroup *qgroup;
+
+	spin_lock(&fs_info->qgroup_lock);
 	/* clear all current qgroup tracking information */
 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
@@ -2127,9 +2132,44 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
 		qgroup->excl_cmpr = 0;
 	}
 	spin_unlock(&fs_info->qgroup_lock);
-	mutex_unlock(&fs_info->qgroup_rescan_lock);
+}
 
-	qgroup_rescan_start(fs_info, qscan);
+int
+btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
+{
+	int ret = 0;
+	struct btrfs_trans_handle *trans;
+
+	ret = qgroup_rescan_init(fs_info, 0, 1);
+	if (ret)
+		return ret;
+
+	/*
+	 * We have set the rescan_progress to 0, which means no more
+	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
+	 * However, btrfs_qgroup_account_ref may be right after its call
+	 * to btrfs_find_all_roots, in which case it would still do the
+	 * accounting.
+	 * To solve this, we're committing the transaction, which will
+	 * ensure we run all delayed refs and only after that, we are
+	 * going to clear all tracking information for a clean start.
+	 */
+
+	trans = btrfs_join_transaction(fs_info->fs_root);
+	if (IS_ERR(trans)) {
+		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+		return PTR_ERR(trans);
+	}
+	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
+	if (ret) {
+		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+		return ret;
+	}
+
+	qgroup_rescan_zero_tracking(fs_info);
+
+	btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
+			   &fs_info->qgroup_rescan_work);
 
 	return 0;
 }
@@ -2151,3 +2191,15 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
 
 	return ret;
 }
+
+/*
+ * this is only called from open_ctree where we're still single threaded, thus
+ * locking is omitted here.
+ */
+void
+btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
+{
+	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+		btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
+				   &fs_info->qgroup_rescan_work);
+}
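The open_ctree side of the deferral is outside this file's diff. A rough
sketch, assuming the hook sits near the end of open_ctree() in
fs/btrfs/disk-io.c (the exact call site is an assumption, not shown in this
patch):

	/* open_ctree(): qgroup config already read, worker threads running,
	 * so queueing the deferred rescan worker is now safe (assumed call site) */
	btrfs_qgroup_rescan_resume(fs_info);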