Diffstat (limited to 'fs/btrfs/send.c')
-rw-r--r-- | fs/btrfs/send.c | 821 |
1 file changed, 557 insertions(+), 264 deletions(-)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9dde9717c1b9..9b6da9d55f9a 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -51,15 +51,18 @@ struct fs_path { | |||
51 | struct { | 51 | struct { |
52 | char *start; | 52 | char *start; |
53 | char *end; | 53 | char *end; |
54 | char *prepared; | ||
55 | 54 | ||
56 | char *buf; | 55 | char *buf; |
57 | int buf_len; | 56 | unsigned short buf_len:15; |
58 | unsigned int reversed:1; | 57 | unsigned short reversed:1; |
59 | unsigned int virtual_mem:1; | ||
60 | char inline_buf[]; | 58 | char inline_buf[]; |
61 | }; | 59 | }; |
62 | char pad[PAGE_SIZE]; | 60 | /* |
61 | * Average path length does not exceed 200 bytes, we'll have | ||
62 | * better packing in the slab and higher chance to satisfy | ||
63 | * an allocation later during send. | ||
64 | */ | ||
65 | char pad[256]; | ||
63 | }; | 66 | }; |
64 | }; | 67 | }; |
65 | #define FS_PATH_INLINE_SIZE \ | 68 | #define FS_PATH_INLINE_SIZE \ |
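The new fs_path layout drops the separate prepared pointer and the virtual_mem flag, packs buf_len and reversed into one unsigned short, and caps the whole structure at 256 bytes instead of PAGE_SIZE, so the inline buffer still covers typical paths while slab packing improves. A minimal userspace sketch of the union trick (GCC/C11 anonymous members; illustrative, not the kernel's exact definition):

#include <stdio.h>
#include <stddef.h>

struct fs_path {
	union {
		struct {
			char *start;
			char *end;
			char *buf;
			unsigned short buf_len:15;	/* capacity of buf */
			unsigned short reversed:1;	/* path built back-to-front */
			char inline_buf[];		/* shares space with pad[] */
		};
		char pad[256];	/* caps sizeof(struct fs_path) at 256 bytes */
	};
};

/* inline capacity = whatever of the 256-byte pad the header does not use */
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))

int main(void)
{
	printf("sizeof(struct fs_path) = %zu\n", sizeof(struct fs_path));
	printf("inline buffer capacity = %zu\n", FS_PATH_INLINE_SIZE);
	return 0;
}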
@@ -109,6 +112,7 @@ struct send_ctx { | |||
109 | int cur_inode_deleted; | 112 | int cur_inode_deleted; |
110 | u64 cur_inode_size; | 113 | u64 cur_inode_size; |
111 | u64 cur_inode_mode; | 114 | u64 cur_inode_mode; |
115 | u64 cur_inode_rdev; | ||
112 | u64 cur_inode_last_extent; | 116 | u64 cur_inode_last_extent; |
113 | 117 | ||
114 | u64 send_progress; | 118 | u64 send_progress; |
@@ -120,6 +124,8 @@ struct send_ctx { | |||
120 | struct list_head name_cache_list; | 124 | struct list_head name_cache_list; |
121 | int name_cache_size; | 125 | int name_cache_size; |
122 | 126 | ||
127 | struct file_ra_state ra; | ||
128 | |||
123 | char *read_buf; | 129 | char *read_buf; |
124 | 130 | ||
125 | /* | 131 | /* |
@@ -175,6 +181,47 @@ struct send_ctx { | |||
175 | * own move/rename can be performed. | 181 | * own move/rename can be performed. |
176 | */ | 182 | */ |
177 | struct rb_root waiting_dir_moves; | 183 | struct rb_root waiting_dir_moves; |
184 | |||
185 | /* | ||
186 | * A directory that is going to be rm'ed might have a child directory | ||
187 | * which is in the pending directory moves index above. In this case, | ||
188 | * the directory can only be removed after the move/rename of its child | ||
189 | * is performed. Example: | ||
190 | * | ||
191 | * Parent snapshot: | ||
192 | * | ||
193 | * . (ino 256) | ||
194 | * |-- a/ (ino 257) | ||
195 | * |-- b/ (ino 258) | ||
196 | * |-- c/ (ino 259) | ||
197 | * | |-- x/ (ino 260) | ||
198 | * | | ||
199 | * |-- y/ (ino 261) | ||
200 | * | ||
201 | * Send snapshot: | ||
202 | * | ||
203 | * . (ino 256) | ||
204 | * |-- a/ (ino 257) | ||
205 | * |-- b/ (ino 258) | ||
206 | * |-- YY/ (ino 261) | ||
207 | * |-- x/ (ino 260) | ||
208 | * | ||
209 | * Sequence of steps that lead to the send snapshot: | ||
210 | * rm -f /a/b/c/foo.txt | ||
211 | * mv /a/b/y /a/b/YY | ||
212 | * mv /a/b/c/x /a/b/YY | ||
213 | * rmdir /a/b/c | ||
214 | * | ||
215 | * When the child is processed, its move/rename is delayed until its | ||
216 | * parent is processed (as explained above), but all other operations | ||
217 | * like update utimes, chown, chgrp, etc, are performed and the paths | ||
218 | * that it uses for those operations must use the orphanized name of | ||
219 | * its parent (the directory we're going to rm later), so we need to | ||
220 | * memorize that name. | ||
221 | * | ||
222 | * Indexed by the inode number of the directory to be deleted. | ||
223 | */ | ||
224 | struct rb_root orphan_dirs; | ||
178 | }; | 225 | }; |
179 | 226 | ||
180 | struct pending_dir_move { | 227 | struct pending_dir_move { |
@@ -189,6 +236,18 @@ struct pending_dir_move { | |||
189 | struct waiting_dir_move { | 236 | struct waiting_dir_move { |
190 | struct rb_node node; | 237 | struct rb_node node; |
191 | u64 ino; | 238 | u64 ino; |
239 | /* | ||
240 | * There might be some directory that could not be removed because it | ||
241 | * was waiting for this directory inode to be moved first. Therefore | ||
242 | * after this directory is moved, we can try to rmdir the ino rmdir_ino. | ||
243 | */ | ||
244 | u64 rmdir_ino; | ||
245 | }; | ||
246 | |||
247 | struct orphan_dir_info { | ||
248 | struct rb_node node; | ||
249 | u64 ino; | ||
250 | u64 gen; | ||
192 | }; | 251 | }; |
193 | 252 | ||
194 | struct name_cache_entry { | 253 | struct name_cache_entry { |
@@ -214,6 +273,11 @@ struct name_cache_entry { | |||
214 | 273 | ||
215 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); | 274 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); |
216 | 275 | ||
276 | static struct waiting_dir_move * | ||
277 | get_waiting_dir_move(struct send_ctx *sctx, u64 ino); | ||
278 | |||
279 | static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino); | ||
280 | |||
217 | static int need_send_hole(struct send_ctx *sctx) | 281 | static int need_send_hole(struct send_ctx *sctx) |
218 | { | 282 | { |
219 | return (sctx->parent_root && !sctx->cur_inode_new && | 283 | return (sctx->parent_root && !sctx->cur_inode_new && |
@@ -242,7 +306,6 @@ static struct fs_path *fs_path_alloc(void) | |||
242 | if (!p) | 306 | if (!p) |
243 | return NULL; | 307 | return NULL; |
244 | p->reversed = 0; | 308 | p->reversed = 0; |
245 | p->virtual_mem = 0; | ||
246 | p->buf = p->inline_buf; | 309 | p->buf = p->inline_buf; |
247 | p->buf_len = FS_PATH_INLINE_SIZE; | 310 | p->buf_len = FS_PATH_INLINE_SIZE; |
248 | fs_path_reset(p); | 311 | fs_path_reset(p); |
@@ -265,12 +328,8 @@ static void fs_path_free(struct fs_path *p) | |||
265 | { | 328 | { |
266 | if (!p) | 329 | if (!p) |
267 | return; | 330 | return; |
268 | if (p->buf != p->inline_buf) { | 331 | if (p->buf != p->inline_buf) |
269 | if (p->virtual_mem) | 332 | kfree(p->buf); |
270 | vfree(p->buf); | ||
271 | else | ||
272 | kfree(p->buf); | ||
273 | } | ||
274 | kfree(p); | 333 | kfree(p); |
275 | } | 334 | } |
276 | 335 | ||
@@ -292,40 +351,23 @@ static int fs_path_ensure_buf(struct fs_path *p, int len) | |||
292 | 351 | ||
293 | path_len = p->end - p->start; | 352 | path_len = p->end - p->start; |
294 | old_buf_len = p->buf_len; | 353 | old_buf_len = p->buf_len; |
295 | len = PAGE_ALIGN(len); | 354 | |
296 | 355 | /* | |
297 | if (p->buf == p->inline_buf) { | 356 | * First time the inline_buf does not suffice |
298 | tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN); | 357 | */ |
299 | if (!tmp_buf) { | 358 | if (p->buf == p->inline_buf) |
300 | tmp_buf = vmalloc(len); | 359 | tmp_buf = kmalloc(len, GFP_NOFS); |
301 | if (!tmp_buf) | 360 | else |
302 | return -ENOMEM; | 361 | tmp_buf = krealloc(p->buf, len, GFP_NOFS); |
303 | p->virtual_mem = 1; | 362 | if (!tmp_buf) |
304 | } | 363 | return -ENOMEM; |
305 | memcpy(tmp_buf, p->buf, p->buf_len); | 364 | p->buf = tmp_buf; |
306 | p->buf = tmp_buf; | 365 | /* |
307 | p->buf_len = len; | 366 | * The real size of the buffer is bigger, this will let the fast path |
308 | } else { | 367 | * happen most of the time |
309 | if (p->virtual_mem) { | 368 | */ |
310 | tmp_buf = vmalloc(len); | 369 | p->buf_len = ksize(p->buf); |
311 | if (!tmp_buf) | 370 | |
312 | return -ENOMEM; | ||
313 | memcpy(tmp_buf, p->buf, p->buf_len); | ||
314 | vfree(p->buf); | ||
315 | } else { | ||
316 | tmp_buf = krealloc(p->buf, len, GFP_NOFS); | ||
317 | if (!tmp_buf) { | ||
318 | tmp_buf = vmalloc(len); | ||
319 | if (!tmp_buf) | ||
320 | return -ENOMEM; | ||
321 | memcpy(tmp_buf, p->buf, p->buf_len); | ||
322 | kfree(p->buf); | ||
323 | p->virtual_mem = 1; | ||
324 | } | ||
325 | } | ||
326 | p->buf = tmp_buf; | ||
327 | p->buf_len = len; | ||
328 | } | ||
329 | if (p->reversed) { | 371 | if (p->reversed) { |
330 | tmp_buf = p->buf + old_buf_len - path_len - 1; | 372 | tmp_buf = p->buf + old_buf_len - path_len - 1; |
331 | p->end = p->buf + p->buf_len - 1; | 373 | p->end = p->buf + p->buf_len - 1; |
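The rewritten fs_path_ensure_buf() replaces the kmalloc/vmalloc fallback dance with plain kmalloc()/krealloc() and then records ksize() of the result: the slab often hands back more than was requested, and remembering the real capacity lets later appends skip reallocation entirely. A rough userspace analogy, with glibc's malloc_usable_size() standing in for ksize() (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>	/* malloc_usable_size(), glibc-specific */

struct buf {
	char *data;
	size_t cap;	/* usable capacity, may exceed what was requested */
};

static int buf_ensure(struct buf *b, size_t need)
{
	char *tmp;

	if (need <= b->cap)
		return 0;	/* fast path, thanks to the slack recorded below */

	tmp = realloc(b->data, need);
	if (!tmp)
		return -1;
	b->data = tmp;
	/* record the real allocation size, as ksize() does in the kernel */
	b->cap = malloc_usable_size(tmp);
	return 0;
}

int main(void)
{
	struct buf b = { NULL, 0 };

	if (buf_ensure(&b, 100) == 0)
		printf("asked for 100 bytes, usable capacity %zu\n", b.cap);
	free(b.data);
	return 0;
}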
@@ -338,7 +380,8 @@ static int fs_path_ensure_buf(struct fs_path *p, int len) | |||
338 | return 0; | 380 | return 0; |
339 | } | 381 | } |
340 | 382 | ||
341 | static int fs_path_prepare_for_add(struct fs_path *p, int name_len) | 383 | static int fs_path_prepare_for_add(struct fs_path *p, int name_len, |
384 | char **prepared) | ||
342 | { | 385 | { |
343 | int ret; | 386 | int ret; |
344 | int new_len; | 387 | int new_len; |
@@ -354,11 +397,11 @@ static int fs_path_prepare_for_add(struct fs_path *p, int name_len) | |||
354 | if (p->start != p->end) | 397 | if (p->start != p->end) |
355 | *--p->start = '/'; | 398 | *--p->start = '/'; |
356 | p->start -= name_len; | 399 | p->start -= name_len; |
357 | p->prepared = p->start; | 400 | *prepared = p->start; |
358 | } else { | 401 | } else { |
359 | if (p->start != p->end) | 402 | if (p->start != p->end) |
360 | *p->end++ = '/'; | 403 | *p->end++ = '/'; |
361 | p->prepared = p->end; | 404 | *prepared = p->end; |
362 | p->end += name_len; | 405 | p->end += name_len; |
363 | *p->end = 0; | 406 | *p->end = 0; |
364 | } | 407 | } |
@@ -370,12 +413,12 @@ out: | |||
370 | static int fs_path_add(struct fs_path *p, const char *name, int name_len) | 413 | static int fs_path_add(struct fs_path *p, const char *name, int name_len) |
371 | { | 414 | { |
372 | int ret; | 415 | int ret; |
416 | char *prepared; | ||
373 | 417 | ||
374 | ret = fs_path_prepare_for_add(p, name_len); | 418 | ret = fs_path_prepare_for_add(p, name_len, &prepared); |
375 | if (ret < 0) | 419 | if (ret < 0) |
376 | goto out; | 420 | goto out; |
377 | memcpy(p->prepared, name, name_len); | 421 | memcpy(prepared, name, name_len); |
378 | p->prepared = NULL; | ||
379 | 422 | ||
380 | out: | 423 | out: |
381 | return ret; | 424 | return ret; |
@@ -384,12 +427,12 @@ out: | |||
384 | static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) | 427 | static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) |
385 | { | 428 | { |
386 | int ret; | 429 | int ret; |
430 | char *prepared; | ||
387 | 431 | ||
388 | ret = fs_path_prepare_for_add(p, p2->end - p2->start); | 432 | ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared); |
389 | if (ret < 0) | 433 | if (ret < 0) |
390 | goto out; | 434 | goto out; |
391 | memcpy(p->prepared, p2->start, p2->end - p2->start); | 435 | memcpy(prepared, p2->start, p2->end - p2->start); |
392 | p->prepared = NULL; | ||
393 | 436 | ||
394 | out: | 437 | out: |
395 | return ret; | 438 | return ret; |
@@ -400,13 +443,13 @@ static int fs_path_add_from_extent_buffer(struct fs_path *p, | |||
400 | unsigned long off, int len) | 443 | unsigned long off, int len) |
401 | { | 444 | { |
402 | int ret; | 445 | int ret; |
446 | char *prepared; | ||
403 | 447 | ||
404 | ret = fs_path_prepare_for_add(p, len); | 448 | ret = fs_path_prepare_for_add(p, len, &prepared); |
405 | if (ret < 0) | 449 | if (ret < 0) |
406 | goto out; | 450 | goto out; |
407 | 451 | ||
408 | read_extent_buffer(eb, p->prepared, off, len); | 452 | read_extent_buffer(eb, prepared, off, len); |
409 | p->prepared = NULL; | ||
410 | 453 | ||
411 | out: | 454 | out: |
412 | return ret; | 455 | return ret; |
@@ -915,9 +958,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
915 | struct btrfs_dir_item *di; | 958 | struct btrfs_dir_item *di; |
916 | struct btrfs_key di_key; | 959 | struct btrfs_key di_key; |
917 | char *buf = NULL; | 960 | char *buf = NULL; |
918 | char *buf2 = NULL; | 961 | const int buf_len = PATH_MAX; |
919 | int buf_len; | ||
920 | int buf_virtual = 0; | ||
921 | u32 name_len; | 962 | u32 name_len; |
922 | u32 data_len; | 963 | u32 data_len; |
923 | u32 cur; | 964 | u32 cur; |
@@ -927,7 +968,6 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
927 | int num; | 968 | int num; |
928 | u8 type; | 969 | u8 type; |
929 | 970 | ||
930 | buf_len = PAGE_SIZE; | ||
931 | buf = kmalloc(buf_len, GFP_NOFS); | 971 | buf = kmalloc(buf_len, GFP_NOFS); |
932 | if (!buf) { | 972 | if (!buf) { |
933 | ret = -ENOMEM; | 973 | ret = -ENOMEM; |
@@ -949,30 +989,12 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
949 | type = btrfs_dir_type(eb, di); | 989 | type = btrfs_dir_type(eb, di); |
950 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | 990 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); |
951 | 991 | ||
992 | /* | ||
993 | * Path too long | ||
994 | */ | ||
952 | if (name_len + data_len > buf_len) { | 995 | if (name_len + data_len > buf_len) { |
953 | buf_len = PAGE_ALIGN(name_len + data_len); | 996 | ret = -ENAMETOOLONG; |
954 | if (buf_virtual) { | 997 | goto out; |
955 | buf2 = vmalloc(buf_len); | ||
956 | if (!buf2) { | ||
957 | ret = -ENOMEM; | ||
958 | goto out; | ||
959 | } | ||
960 | vfree(buf); | ||
961 | } else { | ||
962 | buf2 = krealloc(buf, buf_len, GFP_NOFS); | ||
963 | if (!buf2) { | ||
964 | buf2 = vmalloc(buf_len); | ||
965 | if (!buf2) { | ||
966 | ret = -ENOMEM; | ||
967 | goto out; | ||
968 | } | ||
969 | kfree(buf); | ||
970 | buf_virtual = 1; | ||
971 | } | ||
972 | } | ||
973 | |||
974 | buf = buf2; | ||
975 | buf2 = NULL; | ||
976 | } | 998 | } |
977 | 999 | ||
978 | read_extent_buffer(eb, buf, (unsigned long)(di + 1), | 1000 | read_extent_buffer(eb, buf, (unsigned long)(di + 1), |
@@ -995,10 +1017,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
995 | } | 1017 | } |
996 | 1018 | ||
997 | out: | 1019 | out: |
998 | if (buf_virtual) | 1020 | kfree(buf); |
999 | vfree(buf); | ||
1000 | else | ||
1001 | kfree(buf); | ||
1002 | return ret; | 1021 | return ret; |
1003 | } | 1022 | } |
1004 | 1023 | ||
@@ -1292,8 +1311,6 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
1292 | extent_item_pos = logical - found_key.objectid; | 1311 | extent_item_pos = logical - found_key.objectid; |
1293 | else | 1312 | else |
1294 | extent_item_pos = 0; | 1313 | extent_item_pos = 0; |
1295 | |||
1296 | extent_item_pos = logical - found_key.objectid; | ||
1297 | ret = iterate_extent_inodes(sctx->send_root->fs_info, | 1314 | ret = iterate_extent_inodes(sctx->send_root->fs_info, |
1298 | found_key.objectid, extent_item_pos, 1, | 1315 | found_key.objectid, extent_item_pos, 1, |
1299 | __iterate_backrefs, backref_ctx); | 1316 | __iterate_backrefs, backref_ctx); |
@@ -1418,11 +1435,7 @@ static int gen_unique_name(struct send_ctx *sctx, | |||
1418 | while (1) { | 1435 | while (1) { |
1419 | len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", | 1436 | len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", |
1420 | ino, gen, idx); | 1437 | ino, gen, idx); |
1421 | if (len >= sizeof(tmp)) { | 1438 | ASSERT(len < sizeof(tmp)); |
1422 | /* should really not happen */ | ||
1423 | ret = -EOVERFLOW; | ||
1424 | goto out; | ||
1425 | } | ||
1426 | 1439 | ||
1427 | di = btrfs_lookup_dir_item(NULL, sctx->send_root, | 1440 | di = btrfs_lookup_dir_item(NULL, sctx->send_root, |
1428 | path, BTRFS_FIRST_FREE_OBJECTID, | 1441 | path, BTRFS_FIRST_FREE_OBJECTID, |
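gen_unique_name() formats orphan names as o<ino>-<gen>-<idx>; with three u64 values the longest possible string is fixed and known at build time, so the runtime overflow branch is demoted to an ASSERT. A standalone check of that bound (the 64-byte buffer size is an assumption here, the diff does not show the declaration):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <assert.h>

int main(void)
{
	char tmp[64];	/* assumed size of the kernel's local buffer */
	uint64_t ino = UINT64_MAX, gen = UINT64_MAX, idx = UINT64_MAX;

	/* worst case: "o" + three 20-digit numbers + two dashes = 63 chars */
	int len = snprintf(tmp, sizeof(tmp), "o%" PRIu64 "-%" PRIu64 "-%" PRIu64,
			   ino, gen, idx);

	assert(len < (int)sizeof(tmp));	/* mirrors ASSERT(len < sizeof(tmp)) */
	printf("longest orphan name: %d chars (%s)\n", len, tmp);
	return 0;
}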
@@ -1898,13 +1911,20 @@ static void name_cache_delete(struct send_ctx *sctx, | |||
1898 | 1911 | ||
1899 | nce_head = radix_tree_lookup(&sctx->name_cache, | 1912 | nce_head = radix_tree_lookup(&sctx->name_cache, |
1900 | (unsigned long)nce->ino); | 1913 | (unsigned long)nce->ino); |
1901 | BUG_ON(!nce_head); | 1914 | if (!nce_head) { |
1915 | btrfs_err(sctx->send_root->fs_info, | ||
1916 | "name_cache_delete lookup failed ino %llu cache size %d, leaking memory", | ||
1917 | nce->ino, sctx->name_cache_size); | ||
1918 | } | ||
1902 | 1919 | ||
1903 | list_del(&nce->radix_list); | 1920 | list_del(&nce->radix_list); |
1904 | list_del(&nce->list); | 1921 | list_del(&nce->list); |
1905 | sctx->name_cache_size--; | 1922 | sctx->name_cache_size--; |
1906 | 1923 | ||
1907 | if (list_empty(nce_head)) { | 1924 | /* |
1925 | * We may not get to the final release of nce_head if the lookup fails | ||
1926 | */ | ||
1927 | if (nce_head && list_empty(nce_head)) { | ||
1908 | radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); | 1928 | radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); |
1909 | kfree(nce_head); | 1929 | kfree(nce_head); |
1910 | } | 1930 | } |
@@ -1977,7 +1997,6 @@ static void name_cache_free(struct send_ctx *sctx) | |||
1977 | */ | 1997 | */ |
1978 | static int __get_cur_name_and_parent(struct send_ctx *sctx, | 1998 | static int __get_cur_name_and_parent(struct send_ctx *sctx, |
1979 | u64 ino, u64 gen, | 1999 | u64 ino, u64 gen, |
1980 | int skip_name_cache, | ||
1981 | u64 *parent_ino, | 2000 | u64 *parent_ino, |
1982 | u64 *parent_gen, | 2001 | u64 *parent_gen, |
1983 | struct fs_path *dest) | 2002 | struct fs_path *dest) |
@@ -1987,8 +2006,6 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
1987 | struct btrfs_path *path = NULL; | 2006 | struct btrfs_path *path = NULL; |
1988 | struct name_cache_entry *nce = NULL; | 2007 | struct name_cache_entry *nce = NULL; |
1989 | 2008 | ||
1990 | if (skip_name_cache) | ||
1991 | goto get_ref; | ||
1992 | /* | 2009 | /* |
1993 | * First check if we already did a call to this function with the same | 2010 | * First check if we already did a call to this function with the same |
1994 | * ino/gen. If yes, check if the cache entry is still up-to-date. If yes | 2011 | * ino/gen. If yes, check if the cache entry is still up-to-date. If yes |
@@ -2033,12 +2050,11 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
2033 | goto out_cache; | 2050 | goto out_cache; |
2034 | } | 2051 | } |
2035 | 2052 | ||
2036 | get_ref: | ||
2037 | /* | 2053 | /* |
2038 | * Depending on whether the inode was already processed or not, use | 2054 | * Depending on whether the inode was already processed or not, use |
2039 | * send_root or parent_root for ref lookup. | 2055 | * send_root or parent_root for ref lookup. |
2040 | */ | 2056 | */ |
2041 | if (ino < sctx->send_progress && !skip_name_cache) | 2057 | if (ino < sctx->send_progress) |
2042 | ret = get_first_ref(sctx->send_root, ino, | 2058 | ret = get_first_ref(sctx->send_root, ino, |
2043 | parent_ino, parent_gen, dest); | 2059 | parent_ino, parent_gen, dest); |
2044 | else | 2060 | else |
@@ -2062,8 +2078,6 @@ get_ref: | |||
2062 | goto out; | 2078 | goto out; |
2063 | ret = 1; | 2079 | ret = 1; |
2064 | } | 2080 | } |
2065 | if (skip_name_cache) | ||
2066 | goto out; | ||
2067 | 2081 | ||
2068 | out_cache: | 2082 | out_cache: |
2069 | /* | 2083 | /* |
@@ -2131,9 +2145,6 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, | |||
2131 | u64 parent_inode = 0; | 2145 | u64 parent_inode = 0; |
2132 | u64 parent_gen = 0; | 2146 | u64 parent_gen = 0; |
2133 | int stop = 0; | 2147 | int stop = 0; |
2134 | u64 start_ino = ino; | ||
2135 | u64 start_gen = gen; | ||
2136 | int skip_name_cache = 0; | ||
2137 | 2148 | ||
2138 | name = fs_path_alloc(); | 2149 | name = fs_path_alloc(); |
2139 | if (!name) { | 2150 | if (!name) { |
@@ -2141,31 +2152,33 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, | |||
2141 | goto out; | 2152 | goto out; |
2142 | } | 2153 | } |
2143 | 2154 | ||
2144 | if (is_waiting_for_move(sctx, ino)) | ||
2145 | skip_name_cache = 1; | ||
2146 | |||
2147 | again: | ||
2148 | dest->reversed = 1; | 2155 | dest->reversed = 1; |
2149 | fs_path_reset(dest); | 2156 | fs_path_reset(dest); |
2150 | 2157 | ||
2151 | while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { | 2158 | while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { |
2152 | fs_path_reset(name); | 2159 | fs_path_reset(name); |
2153 | 2160 | ||
2154 | ret = __get_cur_name_and_parent(sctx, ino, gen, skip_name_cache, | 2161 | if (is_waiting_for_rm(sctx, ino)) { |
2155 | &parent_inode, &parent_gen, name); | 2162 | ret = gen_unique_name(sctx, ino, gen, name); |
2163 | if (ret < 0) | ||
2164 | goto out; | ||
2165 | ret = fs_path_add_path(dest, name); | ||
2166 | break; | ||
2167 | } | ||
2168 | |||
2169 | if (is_waiting_for_move(sctx, ino)) { | ||
2170 | ret = get_first_ref(sctx->parent_root, ino, | ||
2171 | &parent_inode, &parent_gen, name); | ||
2172 | } else { | ||
2173 | ret = __get_cur_name_and_parent(sctx, ino, gen, | ||
2174 | &parent_inode, | ||
2175 | &parent_gen, name); | ||
2176 | if (ret) | ||
2177 | stop = 1; | ||
2178 | } | ||
2179 | |||
2156 | if (ret < 0) | 2180 | if (ret < 0) |
2157 | goto out; | 2181 | goto out; |
2158 | if (ret) | ||
2159 | stop = 1; | ||
2160 | |||
2161 | if (!skip_name_cache && | ||
2162 | is_waiting_for_move(sctx, parent_inode)) { | ||
2163 | ino = start_ino; | ||
2164 | gen = start_gen; | ||
2165 | stop = 0; | ||
2166 | skip_name_cache = 1; | ||
2167 | goto again; | ||
2168 | } | ||
2169 | 2182 | ||
2170 | ret = fs_path_add_path(dest, name); | 2183 | ret = fs_path_add_path(dest, name); |
2171 | if (ret < 0) | 2184 | if (ret < 0) |
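get_cur_path() resolves an inode's path by walking parent by parent up to BTRFS_FIRST_FREE_OBJECTID, so components arrive child-first; with reversed set, fs_path_prepare_for_add() makes room at the front and each name is prepended from the end of the buffer. A small self-contained sketch of that prepend-from-the-end idea (fixed buffer, no reallocation or bounds checking):

#include <stdio.h>
#include <string.h>

#define BUF_LEN 256

struct rpath {
	char buf[BUF_LEN];
	char *start;	/* moves left as components are prepended */
};

static void rpath_init(struct rpath *p)
{
	p->start = p->buf + BUF_LEN - 1;
	*p->start = '\0';
}

/* prepend one component, adding a '/' separator if the path is non-empty */
static void rpath_prepend(struct rpath *p, const char *name)
{
	size_t len = strlen(name);

	if (*p->start != '\0')
		*--p->start = '/';
	p->start -= len;
	memcpy(p->start, name, len);
}

int main(void)
{
	struct rpath p;

	rpath_init(&p);
	/* components discovered child-first while walking towards the root */
	rpath_prepend(&p, "x");
	rpath_prepend(&p, "c");
	rpath_prepend(&p, "b");
	rpath_prepend(&p, "a");
	printf("%s\n", p.start);	/* prints a/b/c/x */
	return 0;
}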
@@ -2429,10 +2442,16 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino); | |||
2429 | if (!p) | 2442 | if (!p) |
2430 | return -ENOMEM; | 2443 | return -ENOMEM; |
2431 | 2444 | ||
2432 | ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL, | 2445 | if (ino != sctx->cur_ino) { |
2433 | NULL, &rdev); | 2446 | ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, |
2434 | if (ret < 0) | 2447 | NULL, NULL, &rdev); |
2435 | goto out; | 2448 | if (ret < 0) |
2449 | goto out; | ||
2450 | } else { | ||
2451 | gen = sctx->cur_inode_gen; | ||
2452 | mode = sctx->cur_inode_mode; | ||
2453 | rdev = sctx->cur_inode_rdev; | ||
2454 | } | ||
2436 | 2455 | ||
2437 | if (S_ISREG(mode)) { | 2456 | if (S_ISREG(mode)) { |
2438 | cmd = BTRFS_SEND_C_MKFILE; | 2457 | cmd = BTRFS_SEND_C_MKFILE; |
@@ -2512,17 +2531,26 @@ static int did_create_dir(struct send_ctx *sctx, u64 dir) | |||
2512 | key.objectid = dir; | 2531 | key.objectid = dir; |
2513 | key.type = BTRFS_DIR_INDEX_KEY; | 2532 | key.type = BTRFS_DIR_INDEX_KEY; |
2514 | key.offset = 0; | 2533 | key.offset = 0; |
2534 | ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); | ||
2535 | if (ret < 0) | ||
2536 | goto out; | ||
2537 | |||
2515 | while (1) { | 2538 | while (1) { |
2516 | ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, | 2539 | eb = path->nodes[0]; |
2517 | 1, 0); | 2540 | slot = path->slots[0]; |
2518 | if (ret < 0) | 2541 | if (slot >= btrfs_header_nritems(eb)) { |
2519 | goto out; | 2542 | ret = btrfs_next_leaf(sctx->send_root, path); |
2520 | if (!ret) { | 2543 | if (ret < 0) { |
2521 | eb = path->nodes[0]; | 2544 | goto out; |
2522 | slot = path->slots[0]; | 2545 | } else if (ret > 0) { |
2523 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 2546 | ret = 0; |
2547 | break; | ||
2548 | } | ||
2549 | continue; | ||
2524 | } | 2550 | } |
2525 | if (ret || found_key.objectid != key.objectid || | 2551 | |
2552 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
2553 | if (found_key.objectid != key.objectid || | ||
2526 | found_key.type != key.type) { | 2554 | found_key.type != key.type) { |
2527 | ret = 0; | 2555 | ret = 0; |
2528 | goto out; | 2556 | goto out; |
@@ -2537,8 +2565,7 @@ static int did_create_dir(struct send_ctx *sctx, u64 dir) | |||
2537 | goto out; | 2565 | goto out; |
2538 | } | 2566 | } |
2539 | 2567 | ||
2540 | key.offset = found_key.offset + 1; | 2568 | path->slots[0]++; |
2541 | btrfs_release_path(path); | ||
2542 | } | 2569 | } |
2543 | 2570 | ||
2544 | out: | 2571 | out: |
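did_create_dir() here, and can_rmdir(), process_all_refs() and process_all_new_xattrs() below, all switch from re-running btrfs_search_slot_for_read() for every item to a single btrfs_search_slot() followed by advancing path->slots[0] and calling btrfs_next_leaf() when a leaf is exhausted. The shape of that loop, as a self-contained sketch over a hypothetical two-level array standing in for btrfs leaves:

#include <stdio.h>

/* stand-ins for btrfs leaves: each "leaf" holds a few item keys */
static const int leaves[3][4] = {
	{ 10, 11, 12, 13 },
	{ 14, 15, 16, 17 },
	{ 18, 19, 20, 21 },
};

int main(void)
{
	int leaf = 0;	/* path->nodes[0] */
	int slot = 0;	/* path->slots[0] */

	/* one "search" positions us at the first item; then just walk */
	while (1) {
		if (slot >= 4) {		/* leaf exhausted ... */
			if (++leaf >= 3)	/* ... btrfs_next_leaf() */
				break;		/* ret > 0: no more leaves */
			slot = 0;
			continue;
		}
		printf("item key %d\n", leaves[leaf][slot]);
		slot++;				/* path->slots[0]++ */
	}
	return 0;
}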
@@ -2590,7 +2617,7 @@ struct recorded_ref { | |||
2590 | * everything mixed. So we first record all refs and later process them. | 2617 | * everything mixed. So we first record all refs and later process them. |
2591 | * This function is a helper to record one ref. | 2618 | * This function is a helper to record one ref. |
2592 | */ | 2619 | */ |
2593 | static int record_ref(struct list_head *head, u64 dir, | 2620 | static int __record_ref(struct list_head *head, u64 dir, |
2594 | u64 dir_gen, struct fs_path *path) | 2621 | u64 dir_gen, struct fs_path *path) |
2595 | { | 2622 | { |
2596 | struct recorded_ref *ref; | 2623 | struct recorded_ref *ref; |
@@ -2676,12 +2703,78 @@ out: | |||
2676 | return ret; | 2703 | return ret; |
2677 | } | 2704 | } |
2678 | 2705 | ||
2706 | static struct orphan_dir_info * | ||
2707 | add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) | ||
2708 | { | ||
2709 | struct rb_node **p = &sctx->orphan_dirs.rb_node; | ||
2710 | struct rb_node *parent = NULL; | ||
2711 | struct orphan_dir_info *entry, *odi; | ||
2712 | |||
2713 | odi = kmalloc(sizeof(*odi), GFP_NOFS); | ||
2714 | if (!odi) | ||
2715 | return ERR_PTR(-ENOMEM); | ||
2716 | odi->ino = dir_ino; | ||
2717 | odi->gen = 0; | ||
2718 | |||
2719 | while (*p) { | ||
2720 | parent = *p; | ||
2721 | entry = rb_entry(parent, struct orphan_dir_info, node); | ||
2722 | if (dir_ino < entry->ino) { | ||
2723 | p = &(*p)->rb_left; | ||
2724 | } else if (dir_ino > entry->ino) { | ||
2725 | p = &(*p)->rb_right; | ||
2726 | } else { | ||
2727 | kfree(odi); | ||
2728 | return entry; | ||
2729 | } | ||
2730 | } | ||
2731 | |||
2732 | rb_link_node(&odi->node, parent, p); | ||
2733 | rb_insert_color(&odi->node, &sctx->orphan_dirs); | ||
2734 | return odi; | ||
2735 | } | ||
2736 | |||
2737 | static struct orphan_dir_info * | ||
2738 | get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) | ||
2739 | { | ||
2740 | struct rb_node *n = sctx->orphan_dirs.rb_node; | ||
2741 | struct orphan_dir_info *entry; | ||
2742 | |||
2743 | while (n) { | ||
2744 | entry = rb_entry(n, struct orphan_dir_info, node); | ||
2745 | if (dir_ino < entry->ino) | ||
2746 | n = n->rb_left; | ||
2747 | else if (dir_ino > entry->ino) | ||
2748 | n = n->rb_right; | ||
2749 | else | ||
2750 | return entry; | ||
2751 | } | ||
2752 | return NULL; | ||
2753 | } | ||
2754 | |||
2755 | static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) | ||
2756 | { | ||
2757 | struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); | ||
2758 | |||
2759 | return odi != NULL; | ||
2760 | } | ||
2761 | |||
2762 | static void free_orphan_dir_info(struct send_ctx *sctx, | ||
2763 | struct orphan_dir_info *odi) | ||
2764 | { | ||
2765 | if (!odi) | ||
2766 | return; | ||
2767 | rb_erase(&odi->node, &sctx->orphan_dirs); | ||
2768 | kfree(odi); | ||
2769 | } | ||
2770 | |||
2679 | /* | 2771 | /* |
2680 | * Returns 1 if a directory can be removed at this point in time. | 2772 | * Returns 1 if a directory can be removed at this point in time. |
2681 | * We check this by iterating all dir items and checking if the inode behind | 2773 | * We check this by iterating all dir items and checking if the inode behind |
2682 | * the dir item was already processed. | 2774 | * the dir item was already processed. |
2683 | */ | 2775 | */ |
2684 | static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | 2776 | static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, |
2777 | u64 send_progress) | ||
2685 | { | 2778 | { |
2686 | int ret = 0; | 2779 | int ret = 0; |
2687 | struct btrfs_root *root = sctx->parent_root; | 2780 | struct btrfs_root *root = sctx->parent_root; |
@@ -2704,31 +2797,52 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | |||
2704 | key.objectid = dir; | 2797 | key.objectid = dir; |
2705 | key.type = BTRFS_DIR_INDEX_KEY; | 2798 | key.type = BTRFS_DIR_INDEX_KEY; |
2706 | key.offset = 0; | 2799 | key.offset = 0; |
2800 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
2801 | if (ret < 0) | ||
2802 | goto out; | ||
2707 | 2803 | ||
2708 | while (1) { | 2804 | while (1) { |
2709 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 2805 | struct waiting_dir_move *dm; |
2710 | if (ret < 0) | 2806 | |
2711 | goto out; | 2807 | if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { |
2712 | if (!ret) { | 2808 | ret = btrfs_next_leaf(root, path); |
2713 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 2809 | if (ret < 0) |
2714 | path->slots[0]); | 2810 | goto out; |
2811 | else if (ret > 0) | ||
2812 | break; | ||
2813 | continue; | ||
2715 | } | 2814 | } |
2716 | if (ret || found_key.objectid != key.objectid || | 2815 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, |
2717 | found_key.type != key.type) { | 2816 | path->slots[0]); |
2817 | if (found_key.objectid != key.objectid || | ||
2818 | found_key.type != key.type) | ||
2718 | break; | 2819 | break; |
2719 | } | ||
2720 | 2820 | ||
2721 | di = btrfs_item_ptr(path->nodes[0], path->slots[0], | 2821 | di = btrfs_item_ptr(path->nodes[0], path->slots[0], |
2722 | struct btrfs_dir_item); | 2822 | struct btrfs_dir_item); |
2723 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); | 2823 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); |
2724 | 2824 | ||
2825 | dm = get_waiting_dir_move(sctx, loc.objectid); | ||
2826 | if (dm) { | ||
2827 | struct orphan_dir_info *odi; | ||
2828 | |||
2829 | odi = add_orphan_dir_info(sctx, dir); | ||
2830 | if (IS_ERR(odi)) { | ||
2831 | ret = PTR_ERR(odi); | ||
2832 | goto out; | ||
2833 | } | ||
2834 | odi->gen = dir_gen; | ||
2835 | dm->rmdir_ino = dir; | ||
2836 | ret = 0; | ||
2837 | goto out; | ||
2838 | } | ||
2839 | |||
2725 | if (loc.objectid > send_progress) { | 2840 | if (loc.objectid > send_progress) { |
2726 | ret = 0; | 2841 | ret = 0; |
2727 | goto out; | 2842 | goto out; |
2728 | } | 2843 | } |
2729 | 2844 | ||
2730 | btrfs_release_path(path); | 2845 | path->slots[0]++; |
2731 | key.offset = found_key.offset + 1; | ||
2732 | } | 2846 | } |
2733 | 2847 | ||
2734 | ret = 1; | 2848 | ret = 1; |
@@ -2740,19 +2854,9 @@ out: | |||
2740 | 2854 | ||
2741 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) | 2855 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) |
2742 | { | 2856 | { |
2743 | struct rb_node *n = sctx->waiting_dir_moves.rb_node; | 2857 | struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); |
2744 | struct waiting_dir_move *entry; | ||
2745 | 2858 | ||
2746 | while (n) { | 2859 | return entry != NULL; |
2747 | entry = rb_entry(n, struct waiting_dir_move, node); | ||
2748 | if (ino < entry->ino) | ||
2749 | n = n->rb_left; | ||
2750 | else if (ino > entry->ino) | ||
2751 | n = n->rb_right; | ||
2752 | else | ||
2753 | return 1; | ||
2754 | } | ||
2755 | return 0; | ||
2756 | } | 2860 | } |
2757 | 2861 | ||
2758 | static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) | 2862 | static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) |
@@ -2765,6 +2869,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) | |||
2765 | if (!dm) | 2869 | if (!dm) |
2766 | return -ENOMEM; | 2870 | return -ENOMEM; |
2767 | dm->ino = ino; | 2871 | dm->ino = ino; |
2872 | dm->rmdir_ino = 0; | ||
2768 | 2873 | ||
2769 | while (*p) { | 2874 | while (*p) { |
2770 | parent = *p; | 2875 | parent = *p; |
@@ -2784,31 +2889,41 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) | |||
2784 | return 0; | 2889 | return 0; |
2785 | } | 2890 | } |
2786 | 2891 | ||
2787 | static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino) | 2892 | static struct waiting_dir_move * |
2893 | get_waiting_dir_move(struct send_ctx *sctx, u64 ino) | ||
2788 | { | 2894 | { |
2789 | struct rb_node *n = sctx->waiting_dir_moves.rb_node; | 2895 | struct rb_node *n = sctx->waiting_dir_moves.rb_node; |
2790 | struct waiting_dir_move *entry; | 2896 | struct waiting_dir_move *entry; |
2791 | 2897 | ||
2792 | while (n) { | 2898 | while (n) { |
2793 | entry = rb_entry(n, struct waiting_dir_move, node); | 2899 | entry = rb_entry(n, struct waiting_dir_move, node); |
2794 | if (ino < entry->ino) { | 2900 | if (ino < entry->ino) |
2795 | n = n->rb_left; | 2901 | n = n->rb_left; |
2796 | } else if (ino > entry->ino) { | 2902 | else if (ino > entry->ino) |
2797 | n = n->rb_right; | 2903 | n = n->rb_right; |
2798 | } else { | 2904 | else |
2799 | rb_erase(&entry->node, &sctx->waiting_dir_moves); | 2905 | return entry; |
2800 | kfree(entry); | ||
2801 | return 0; | ||
2802 | } | ||
2803 | } | 2906 | } |
2804 | return -ENOENT; | 2907 | return NULL; |
2908 | } | ||
2909 | |||
2910 | static void free_waiting_dir_move(struct send_ctx *sctx, | ||
2911 | struct waiting_dir_move *dm) | ||
2912 | { | ||
2913 | if (!dm) | ||
2914 | return; | ||
2915 | rb_erase(&dm->node, &sctx->waiting_dir_moves); | ||
2916 | kfree(dm); | ||
2805 | } | 2917 | } |
2806 | 2918 | ||
2807 | static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) | 2919 | static int add_pending_dir_move(struct send_ctx *sctx, |
2920 | u64 ino, | ||
2921 | u64 ino_gen, | ||
2922 | u64 parent_ino) | ||
2808 | { | 2923 | { |
2809 | struct rb_node **p = &sctx->pending_dir_moves.rb_node; | 2924 | struct rb_node **p = &sctx->pending_dir_moves.rb_node; |
2810 | struct rb_node *parent = NULL; | 2925 | struct rb_node *parent = NULL; |
2811 | struct pending_dir_move *entry, *pm; | 2926 | struct pending_dir_move *entry = NULL, *pm; |
2812 | struct recorded_ref *cur; | 2927 | struct recorded_ref *cur; |
2813 | int exists = 0; | 2928 | int exists = 0; |
2814 | int ret; | 2929 | int ret; |
@@ -2817,8 +2932,8 @@ static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) | |||
2817 | if (!pm) | 2932 | if (!pm) |
2818 | return -ENOMEM; | 2933 | return -ENOMEM; |
2819 | pm->parent_ino = parent_ino; | 2934 | pm->parent_ino = parent_ino; |
2820 | pm->ino = sctx->cur_ino; | 2935 | pm->ino = ino; |
2821 | pm->gen = sctx->cur_inode_gen; | 2936 | pm->gen = ino_gen; |
2822 | INIT_LIST_HEAD(&pm->list); | 2937 | INIT_LIST_HEAD(&pm->list); |
2823 | INIT_LIST_HEAD(&pm->update_refs); | 2938 | INIT_LIST_HEAD(&pm->update_refs); |
2824 | RB_CLEAR_NODE(&pm->node); | 2939 | RB_CLEAR_NODE(&pm->node); |
@@ -2888,19 +3003,52 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2888 | { | 3003 | { |
2889 | struct fs_path *from_path = NULL; | 3004 | struct fs_path *from_path = NULL; |
2890 | struct fs_path *to_path = NULL; | 3005 | struct fs_path *to_path = NULL; |
3006 | struct fs_path *name = NULL; | ||
2891 | u64 orig_progress = sctx->send_progress; | 3007 | u64 orig_progress = sctx->send_progress; |
2892 | struct recorded_ref *cur; | 3008 | struct recorded_ref *cur; |
3009 | u64 parent_ino, parent_gen; | ||
3010 | struct waiting_dir_move *dm = NULL; | ||
3011 | u64 rmdir_ino = 0; | ||
2893 | int ret; | 3012 | int ret; |
2894 | 3013 | ||
3014 | name = fs_path_alloc(); | ||
2895 | from_path = fs_path_alloc(); | 3015 | from_path = fs_path_alloc(); |
2896 | if (!from_path) | 3016 | if (!name || !from_path) { |
2897 | return -ENOMEM; | 3017 | ret = -ENOMEM; |
3018 | goto out; | ||
3019 | } | ||
2898 | 3020 | ||
2899 | sctx->send_progress = pm->ino; | 3021 | dm = get_waiting_dir_move(sctx, pm->ino); |
2900 | ret = get_cur_path(sctx, pm->ino, pm->gen, from_path); | 3022 | ASSERT(dm); |
3023 | rmdir_ino = dm->rmdir_ino; | ||
3024 | free_waiting_dir_move(sctx, dm); | ||
3025 | |||
3026 | ret = get_first_ref(sctx->parent_root, pm->ino, | ||
3027 | &parent_ino, &parent_gen, name); | ||
2901 | if (ret < 0) | 3028 | if (ret < 0) |
2902 | goto out; | 3029 | goto out; |
2903 | 3030 | ||
3031 | if (parent_ino == sctx->cur_ino) { | ||
3032 | /* child only renamed, not moved */ | ||
3033 | ASSERT(parent_gen == sctx->cur_inode_gen); | ||
3034 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, | ||
3035 | from_path); | ||
3036 | if (ret < 0) | ||
3037 | goto out; | ||
3038 | ret = fs_path_add_path(from_path, name); | ||
3039 | if (ret < 0) | ||
3040 | goto out; | ||
3041 | } else { | ||
3042 | /* child moved and maybe renamed too */ | ||
3043 | sctx->send_progress = pm->ino; | ||
3044 | ret = get_cur_path(sctx, pm->ino, pm->gen, from_path); | ||
3045 | if (ret < 0) | ||
3046 | goto out; | ||
3047 | } | ||
3048 | |||
3049 | fs_path_free(name); | ||
3050 | name = NULL; | ||
3051 | |||
2904 | to_path = fs_path_alloc(); | 3052 | to_path = fs_path_alloc(); |
2905 | if (!to_path) { | 3053 | if (!to_path) { |
2906 | ret = -ENOMEM; | 3054 | ret = -ENOMEM; |
@@ -2908,9 +3056,6 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2908 | } | 3056 | } |
2909 | 3057 | ||
2910 | sctx->send_progress = sctx->cur_ino + 1; | 3058 | sctx->send_progress = sctx->cur_ino + 1; |
2911 | ret = del_waiting_dir_move(sctx, pm->ino); | ||
2912 | ASSERT(ret == 0); | ||
2913 | |||
2914 | ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); | 3059 | ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); |
2915 | if (ret < 0) | 3060 | if (ret < 0) |
2916 | goto out; | 3061 | goto out; |
@@ -2919,6 +3064,35 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2919 | if (ret < 0) | 3064 | if (ret < 0) |
2920 | goto out; | 3065 | goto out; |
2921 | 3066 | ||
3067 | if (rmdir_ino) { | ||
3068 | struct orphan_dir_info *odi; | ||
3069 | |||
3070 | odi = get_orphan_dir_info(sctx, rmdir_ino); | ||
3071 | if (!odi) { | ||
3072 | /* already deleted */ | ||
3073 | goto finish; | ||
3074 | } | ||
3075 | ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1); | ||
3076 | if (ret < 0) | ||
3077 | goto out; | ||
3078 | if (!ret) | ||
3079 | goto finish; | ||
3080 | |||
3081 | name = fs_path_alloc(); | ||
3082 | if (!name) { | ||
3083 | ret = -ENOMEM; | ||
3084 | goto out; | ||
3085 | } | ||
3086 | ret = get_cur_path(sctx, rmdir_ino, odi->gen, name); | ||
3087 | if (ret < 0) | ||
3088 | goto out; | ||
3089 | ret = send_rmdir(sctx, name); | ||
3090 | if (ret < 0) | ||
3091 | goto out; | ||
3092 | free_orphan_dir_info(sctx, odi); | ||
3093 | } | ||
3094 | |||
3095 | finish: | ||
2922 | ret = send_utimes(sctx, pm->ino, pm->gen); | 3096 | ret = send_utimes(sctx, pm->ino, pm->gen); |
2923 | if (ret < 0) | 3097 | if (ret < 0) |
2924 | goto out; | 3098 | goto out; |
@@ -2928,12 +3102,15 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2928 | * and old parent(s). | 3102 | * and old parent(s). |
2929 | */ | 3103 | */ |
2930 | list_for_each_entry(cur, &pm->update_refs, list) { | 3104 | list_for_each_entry(cur, &pm->update_refs, list) { |
3105 | if (cur->dir == rmdir_ino) | ||
3106 | continue; | ||
2931 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); | 3107 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); |
2932 | if (ret < 0) | 3108 | if (ret < 0) |
2933 | goto out; | 3109 | goto out; |
2934 | } | 3110 | } |
2935 | 3111 | ||
2936 | out: | 3112 | out: |
3113 | fs_path_free(name); | ||
2937 | fs_path_free(from_path); | 3114 | fs_path_free(from_path); |
2938 | fs_path_free(to_path); | 3115 | fs_path_free(to_path); |
2939 | sctx->send_progress = orig_progress; | 3116 | sctx->send_progress = orig_progress; |
@@ -3005,17 +3182,19 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3005 | int ret; | 3182 | int ret; |
3006 | u64 ino = parent_ref->dir; | 3183 | u64 ino = parent_ref->dir; |
3007 | u64 parent_ino_before, parent_ino_after; | 3184 | u64 parent_ino_before, parent_ino_after; |
3008 | u64 new_gen, old_gen; | 3185 | u64 old_gen; |
3009 | struct fs_path *path_before = NULL; | 3186 | struct fs_path *path_before = NULL; |
3010 | struct fs_path *path_after = NULL; | 3187 | struct fs_path *path_after = NULL; |
3011 | int len1, len2; | 3188 | int len1, len2; |
3012 | 3189 | int register_upper_dirs; | |
3013 | if (parent_ref->dir <= sctx->cur_ino) | 3190 | u64 gen; |
3014 | return 0; | ||
3015 | 3191 | ||
3016 | if (is_waiting_for_move(sctx, ino)) | 3192 | if (is_waiting_for_move(sctx, ino)) |
3017 | return 1; | 3193 | return 1; |
3018 | 3194 | ||
3195 | if (parent_ref->dir <= sctx->cur_ino) | ||
3196 | return 0; | ||
3197 | |||
3019 | ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen, | 3198 | ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen, |
3020 | NULL, NULL, NULL, NULL); | 3199 | NULL, NULL, NULL, NULL); |
3021 | if (ret == -ENOENT) | 3200 | if (ret == -ENOENT) |
@@ -3023,12 +3202,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3023 | else if (ret < 0) | 3202 | else if (ret < 0) |
3024 | return ret; | 3203 | return ret; |
3025 | 3204 | ||
3026 | ret = get_inode_info(sctx->send_root, ino, NULL, &new_gen, | 3205 | if (parent_ref->dir_gen != old_gen) |
3027 | NULL, NULL, NULL, NULL); | ||
3028 | if (ret < 0) | ||
3029 | return ret; | ||
3030 | |||
3031 | if (new_gen != old_gen) | ||
3032 | return 0; | 3206 | return 0; |
3033 | 3207 | ||
3034 | path_before = fs_path_alloc(); | 3208 | path_before = fs_path_alloc(); |
@@ -3051,7 +3225,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3051 | } | 3225 | } |
3052 | 3226 | ||
3053 | ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, | 3227 | ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, |
3054 | NULL, path_after); | 3228 | &gen, path_after); |
3055 | if (ret == -ENOENT) { | 3229 | if (ret == -ENOENT) { |
3056 | ret = 0; | 3230 | ret = 0; |
3057 | goto out; | 3231 | goto out; |
@@ -3061,13 +3235,67 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3061 | 3235 | ||
3062 | len1 = fs_path_len(path_before); | 3236 | len1 = fs_path_len(path_before); |
3063 | len2 = fs_path_len(path_after); | 3237 | len2 = fs_path_len(path_after); |
3064 | if ((parent_ino_before != parent_ino_after) && (len1 != len2 || | 3238 | if (parent_ino_before != parent_ino_after || len1 != len2 || |
3065 | memcmp(path_before->start, path_after->start, len1))) { | 3239 | memcmp(path_before->start, path_after->start, len1)) { |
3066 | ret = 1; | 3240 | ret = 1; |
3067 | goto out; | 3241 | goto out; |
3068 | } | 3242 | } |
3069 | ret = 0; | 3243 | ret = 0; |
3070 | 3244 | ||
3245 | /* | ||
3246 | * Ok, our new most direct ancestor has a higher inode number but | ||
3247 | * wasn't moved/renamed. So maybe some of the new ancestors higher in | ||
3248 | * the hierarchy have a higher inode number too *and* were renamed | ||
3249 | * or moved - in this case we need to wait for the ancestor's rename | ||
3250 | * or move operation before we can do the move/rename for the current | ||
3251 | * inode. | ||
3252 | */ | ||
3253 | register_upper_dirs = 0; | ||
3254 | ino = parent_ino_after; | ||
3255 | again: | ||
3256 | while ((ret == 0 || register_upper_dirs) && ino > sctx->cur_ino) { | ||
3257 | u64 parent_gen; | ||
3258 | |||
3259 | fs_path_reset(path_before); | ||
3260 | fs_path_reset(path_after); | ||
3261 | |||
3262 | ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, | ||
3263 | &parent_gen, path_after); | ||
3264 | if (ret < 0) | ||
3265 | goto out; | ||
3266 | ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, | ||
3267 | NULL, path_before); | ||
3268 | if (ret == -ENOENT) { | ||
3269 | ret = 0; | ||
3270 | break; | ||
3271 | } else if (ret < 0) { | ||
3272 | goto out; | ||
3273 | } | ||
3274 | |||
3275 | len1 = fs_path_len(path_before); | ||
3276 | len2 = fs_path_len(path_after); | ||
3277 | if (parent_ino_before != parent_ino_after || len1 != len2 || | ||
3278 | memcmp(path_before->start, path_after->start, len1)) { | ||
3279 | ret = 1; | ||
3280 | if (register_upper_dirs) { | ||
3281 | break; | ||
3282 | } else { | ||
3283 | register_upper_dirs = 1; | ||
3284 | ino = parent_ref->dir; | ||
3285 | gen = parent_ref->dir_gen; | ||
3286 | goto again; | ||
3287 | } | ||
3288 | } else if (register_upper_dirs) { | ||
3289 | ret = add_pending_dir_move(sctx, ino, gen, | ||
3290 | parent_ino_after); | ||
3291 | if (ret < 0 && ret != -EEXIST) | ||
3292 | goto out; | ||
3293 | } | ||
3294 | |||
3295 | ino = parent_ino_after; | ||
3296 | gen = parent_gen; | ||
3297 | } | ||
3298 | |||
3071 | out: | 3299 | out: |
3072 | fs_path_free(path_before); | 3300 | fs_path_free(path_before); |
3073 | fs_path_free(path_after); | 3301 | fs_path_free(path_after); |
@@ -3089,6 +3317,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) | |||
3089 | u64 ow_gen; | 3317 | u64 ow_gen; |
3090 | int did_overwrite = 0; | 3318 | int did_overwrite = 0; |
3091 | int is_orphan = 0; | 3319 | int is_orphan = 0; |
3320 | u64 last_dir_ino_rm = 0; | ||
3092 | 3321 | ||
3093 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | 3322 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); |
3094 | 3323 | ||
@@ -3227,9 +3456,14 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3227 | * dirs, we always have one new and one deleted | 3456 | * dirs, we always have one new and one deleted |
3228 | * ref. The deleted ref is ignored later. | 3457 | * ref. The deleted ref is ignored later. |
3229 | */ | 3458 | */ |
3230 | if (wait_for_parent_move(sctx, cur)) { | 3459 | ret = wait_for_parent_move(sctx, cur); |
3460 | if (ret < 0) | ||
3461 | goto out; | ||
3462 | if (ret) { | ||
3231 | ret = add_pending_dir_move(sctx, | 3463 | ret = add_pending_dir_move(sctx, |
3232 | cur->dir); | 3464 | sctx->cur_ino, |
3465 | sctx->cur_inode_gen, | ||
3466 | cur->dir); | ||
3233 | *pending_move = 1; | 3467 | *pending_move = 1; |
3234 | } else { | 3468 | } else { |
3235 | ret = send_rename(sctx, valid_path, | 3469 | ret = send_rename(sctx, valid_path, |
@@ -3259,7 +3493,8 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3259 | * later, we do this check again and rmdir it then if possible. | 3493 | * later, we do this check again and rmdir it then if possible. |
3260 | * See the use of check_dirs for more details. | 3494 | * See the use of check_dirs for more details. |
3261 | */ | 3495 | */ |
3262 | ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino); | 3496 | ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen, |
3497 | sctx->cur_ino); | ||
3263 | if (ret < 0) | 3498 | if (ret < 0) |
3264 | goto out; | 3499 | goto out; |
3265 | if (ret) { | 3500 | if (ret) { |
@@ -3350,8 +3585,10 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3350 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); | 3585 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); |
3351 | if (ret < 0) | 3586 | if (ret < 0) |
3352 | goto out; | 3587 | goto out; |
3353 | } else if (ret == inode_state_did_delete) { | 3588 | } else if (ret == inode_state_did_delete && |
3354 | ret = can_rmdir(sctx, cur->dir, sctx->cur_ino); | 3589 | cur->dir != last_dir_ino_rm) { |
3590 | ret = can_rmdir(sctx, cur->dir, cur->dir_gen, | ||
3591 | sctx->cur_ino); | ||
3355 | if (ret < 0) | 3592 | if (ret < 0) |
3356 | goto out; | 3593 | goto out; |
3357 | if (ret) { | 3594 | if (ret) { |
@@ -3362,6 +3599,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3362 | ret = send_rmdir(sctx, valid_path); | 3599 | ret = send_rmdir(sctx, valid_path); |
3363 | if (ret < 0) | 3600 | if (ret < 0) |
3364 | goto out; | 3601 | goto out; |
3602 | last_dir_ino_rm = cur->dir; | ||
3365 | } | 3603 | } |
3366 | } | 3604 | } |
3367 | } | 3605 | } |
@@ -3375,9 +3613,8 @@ out: | |||
3375 | return ret; | 3613 | return ret; |
3376 | } | 3614 | } |
3377 | 3615 | ||
3378 | static int __record_new_ref(int num, u64 dir, int index, | 3616 | static int record_ref(struct btrfs_root *root, int num, u64 dir, int index, |
3379 | struct fs_path *name, | 3617 | struct fs_path *name, void *ctx, struct list_head *refs) |
3380 | void *ctx) | ||
3381 | { | 3618 | { |
3382 | int ret = 0; | 3619 | int ret = 0; |
3383 | struct send_ctx *sctx = ctx; | 3620 | struct send_ctx *sctx = ctx; |
@@ -3388,7 +3625,7 @@ static int __record_new_ref(int num, u64 dir, int index, | |||
3388 | if (!p) | 3625 | if (!p) |
3389 | return -ENOMEM; | 3626 | return -ENOMEM; |
3390 | 3627 | ||
3391 | ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, | 3628 | ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL, |
3392 | NULL, NULL); | 3629 | NULL, NULL); |
3393 | if (ret < 0) | 3630 | if (ret < 0) |
3394 | goto out; | 3631 | goto out; |
@@ -3400,7 +3637,7 @@ static int __record_new_ref(int num, u64 dir, int index, | |||
3400 | if (ret < 0) | 3637 | if (ret < 0) |
3401 | goto out; | 3638 | goto out; |
3402 | 3639 | ||
3403 | ret = record_ref(&sctx->new_refs, dir, gen, p); | 3640 | ret = __record_ref(refs, dir, gen, p); |
3404 | 3641 | ||
3405 | out: | 3642 | out: |
3406 | if (ret) | 3643 | if (ret) |
@@ -3408,37 +3645,23 @@ out: | |||
3408 | return ret; | 3645 | return ret; |
3409 | } | 3646 | } |
3410 | 3647 | ||
3648 | static int __record_new_ref(int num, u64 dir, int index, | ||
3649 | struct fs_path *name, | ||
3650 | void *ctx) | ||
3651 | { | ||
3652 | struct send_ctx *sctx = ctx; | ||
3653 | return record_ref(sctx->send_root, num, dir, index, name, | ||
3654 | ctx, &sctx->new_refs); | ||
3655 | } | ||
3656 | |||
3657 | |||
3411 | static int __record_deleted_ref(int num, u64 dir, int index, | 3658 | static int __record_deleted_ref(int num, u64 dir, int index, |
3412 | struct fs_path *name, | 3659 | struct fs_path *name, |
3413 | void *ctx) | 3660 | void *ctx) |
3414 | { | 3661 | { |
3415 | int ret = 0; | ||
3416 | struct send_ctx *sctx = ctx; | 3662 | struct send_ctx *sctx = ctx; |
3417 | struct fs_path *p; | 3663 | return record_ref(sctx->parent_root, num, dir, index, name, |
3418 | u64 gen; | 3664 | ctx, &sctx->deleted_refs); |
3419 | |||
3420 | p = fs_path_alloc(); | ||
3421 | if (!p) | ||
3422 | return -ENOMEM; | ||
3423 | |||
3424 | ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, | ||
3425 | NULL, NULL); | ||
3426 | if (ret < 0) | ||
3427 | goto out; | ||
3428 | |||
3429 | ret = get_cur_path(sctx, dir, gen, p); | ||
3430 | if (ret < 0) | ||
3431 | goto out; | ||
3432 | ret = fs_path_add_path(p, name); | ||
3433 | if (ret < 0) | ||
3434 | goto out; | ||
3435 | |||
3436 | ret = record_ref(&sctx->deleted_refs, dir, gen, p); | ||
3437 | |||
3438 | out: | ||
3439 | if (ret) | ||
3440 | fs_path_free(p); | ||
3441 | return ret; | ||
3442 | } | 3665 | } |
3443 | 3666 | ||
3444 | static int record_new_ref(struct send_ctx *sctx) | 3667 | static int record_new_ref(struct send_ctx *sctx) |
@@ -3619,21 +3842,31 @@ static int process_all_refs(struct send_ctx *sctx, | |||
3619 | root = sctx->parent_root; | 3842 | root = sctx->parent_root; |
3620 | cb = __record_deleted_ref; | 3843 | cb = __record_deleted_ref; |
3621 | } else { | 3844 | } else { |
3622 | BUG(); | 3845 | btrfs_err(sctx->send_root->fs_info, |
3846 | "Wrong command %d in process_all_refs", cmd); | ||
3847 | ret = -EINVAL; | ||
3848 | goto out; | ||
3623 | } | 3849 | } |
3624 | 3850 | ||
3625 | key.objectid = sctx->cmp_key->objectid; | 3851 | key.objectid = sctx->cmp_key->objectid; |
3626 | key.type = BTRFS_INODE_REF_KEY; | 3852 | key.type = BTRFS_INODE_REF_KEY; |
3627 | key.offset = 0; | 3853 | key.offset = 0; |
3628 | while (1) { | 3854 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
3629 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 3855 | if (ret < 0) |
3630 | if (ret < 0) | 3856 | goto out; |
3631 | goto out; | ||
3632 | if (ret) | ||
3633 | break; | ||
3634 | 3857 | ||
3858 | while (1) { | ||
3635 | eb = path->nodes[0]; | 3859 | eb = path->nodes[0]; |
3636 | slot = path->slots[0]; | 3860 | slot = path->slots[0]; |
3861 | if (slot >= btrfs_header_nritems(eb)) { | ||
3862 | ret = btrfs_next_leaf(root, path); | ||
3863 | if (ret < 0) | ||
3864 | goto out; | ||
3865 | else if (ret > 0) | ||
3866 | break; | ||
3867 | continue; | ||
3868 | } | ||
3869 | |||
3637 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 3870 | btrfs_item_key_to_cpu(eb, &found_key, slot); |
3638 | 3871 | ||
3639 | if (found_key.objectid != key.objectid || | 3872 | if (found_key.objectid != key.objectid || |
@@ -3642,11 +3875,10 @@ static int process_all_refs(struct send_ctx *sctx, | |||
3642 | break; | 3875 | break; |
3643 | 3876 | ||
3644 | ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); | 3877 | ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); |
3645 | btrfs_release_path(path); | ||
3646 | if (ret < 0) | 3878 | if (ret < 0) |
3647 | goto out; | 3879 | goto out; |
3648 | 3880 | ||
3649 | key.offset = found_key.offset + 1; | 3881 | path->slots[0]++; |
3650 | } | 3882 | } |
3651 | btrfs_release_path(path); | 3883 | btrfs_release_path(path); |
3652 | 3884 | ||
@@ -3927,19 +4159,25 @@ static int process_all_new_xattrs(struct send_ctx *sctx) | |||
3927 | key.objectid = sctx->cmp_key->objectid; | 4159 | key.objectid = sctx->cmp_key->objectid; |
3928 | key.type = BTRFS_XATTR_ITEM_KEY; | 4160 | key.type = BTRFS_XATTR_ITEM_KEY; |
3929 | key.offset = 0; | 4161 | key.offset = 0; |
3930 | while (1) { | 4162 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
3931 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 4163 | if (ret < 0) |
3932 | if (ret < 0) | 4164 | goto out; |
3933 | goto out; | ||
3934 | if (ret) { | ||
3935 | ret = 0; | ||
3936 | goto out; | ||
3937 | } | ||
3938 | 4165 | ||
4166 | while (1) { | ||
3939 | eb = path->nodes[0]; | 4167 | eb = path->nodes[0]; |
3940 | slot = path->slots[0]; | 4168 | slot = path->slots[0]; |
3941 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 4169 | if (slot >= btrfs_header_nritems(eb)) { |
4170 | ret = btrfs_next_leaf(root, path); | ||
4171 | if (ret < 0) { | ||
4172 | goto out; | ||
4173 | } else if (ret > 0) { | ||
4174 | ret = 0; | ||
4175 | break; | ||
4176 | } | ||
4177 | continue; | ||
4178 | } | ||
3942 | 4179 | ||
4180 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
3943 | if (found_key.objectid != key.objectid || | 4181 | if (found_key.objectid != key.objectid || |
3944 | found_key.type != key.type) { | 4182 | found_key.type != key.type) { |
3945 | ret = 0; | 4183 | ret = 0; |
@@ -3951,8 +4189,7 @@ static int process_all_new_xattrs(struct send_ctx *sctx) | |||
3951 | if (ret < 0) | 4189 | if (ret < 0) |
3952 | goto out; | 4190 | goto out; |
3953 | 4191 | ||
3954 | btrfs_release_path(path); | 4192 | path->slots[0]++; |
3955 | key.offset = found_key.offset + 1; | ||
3956 | } | 4193 | } |
3957 | 4194 | ||
3958 | out: | 4195 | out: |
@@ -3991,6 +4228,13 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) | |||
3991 | goto out; | 4228 | goto out; |
3992 | 4229 | ||
3993 | last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; | 4230 | last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; |
4231 | |||
4232 | /* initial readahead */ | ||
4233 | memset(&sctx->ra, 0, sizeof(struct file_ra_state)); | ||
4234 | file_ra_state_init(&sctx->ra, inode->i_mapping); | ||
4235 | btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index, | ||
4236 | last_index - index + 1); | ||
4237 | |||
3994 | while (index <= last_index) { | 4238 | while (index <= last_index) { |
3995 | unsigned cur_len = min_t(unsigned, len, | 4239 | unsigned cur_len = min_t(unsigned, len, |
3996 | PAGE_CACHE_SIZE - pg_offset); | 4240 | PAGE_CACHE_SIZE - pg_offset); |
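fill_read_buf() now initializes a file_ra_state and kicks off readahead with btrfs_force_ra() for the whole range before the page-by-page copy loop, rather than paying a cache miss per page. The closest userspace analogue is posix_fadvise(POSIX_FADV_WILLNEED); a minimal sketch (the file name is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* example file */
	char buf[4096];
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ask the kernel to start readahead for the range we will copy */
	posix_fadvise(fd, 0, sizeof(buf), POSIX_FADV_WILLNEED);

	n = read(fd, buf, sizeof(buf));
	if (n >= 0)
		printf("read %zd bytes after readahead hint\n", n);
	close(fd);
	return 0;
}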
@@ -4763,18 +5007,19 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | |||
4763 | ret = apply_children_dir_moves(sctx); | 5007 | ret = apply_children_dir_moves(sctx); |
4764 | if (ret) | 5008 | if (ret) |
4765 | goto out; | 5009 | goto out; |
5010 | /* | ||
5011 | * Need to send that every time, no matter if it actually | ||
5012 | * changed between the two trees as we have done changes to | ||
5013 | * the inode before. If our inode is a directory and it's | ||
5014 | * waiting to be moved/renamed, we will send its utimes when | ||
5015 | * it's moved/renamed, therefore we don't need to do it here. | ||
5016 | */ | ||
5017 | sctx->send_progress = sctx->cur_ino + 1; | ||
5018 | ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); | ||
5019 | if (ret < 0) | ||
5020 | goto out; | ||
4766 | } | 5021 | } |
4767 | 5022 | ||
4768 | /* | ||
4769 | * Need to send that every time, no matter if it actually | ||
4770 | * changed between the two trees as we have done changes to | ||
4771 | * the inode before. | ||
4772 | */ | ||
4773 | sctx->send_progress = sctx->cur_ino + 1; | ||
4774 | ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); | ||
4775 | if (ret < 0) | ||
4776 | goto out; | ||
4777 | |||
4778 | out: | 5023 | out: |
4779 | return ret; | 5024 | return ret; |
4780 | } | 5025 | } |
@@ -4840,6 +5085,8 @@ static int changed_inode(struct send_ctx *sctx, | |||
4840 | sctx->left_path->nodes[0], left_ii); | 5085 | sctx->left_path->nodes[0], left_ii); |
4841 | sctx->cur_inode_mode = btrfs_inode_mode( | 5086 | sctx->cur_inode_mode = btrfs_inode_mode( |
4842 | sctx->left_path->nodes[0], left_ii); | 5087 | sctx->left_path->nodes[0], left_ii); |
5088 | sctx->cur_inode_rdev = btrfs_inode_rdev( | ||
5089 | sctx->left_path->nodes[0], left_ii); | ||
4843 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | 5090 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) |
4844 | ret = send_create_inode_if_needed(sctx); | 5091 | ret = send_create_inode_if_needed(sctx); |
4845 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { | 5092 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { |
@@ -4884,6 +5131,8 @@ static int changed_inode(struct send_ctx *sctx, | |||
4884 | sctx->left_path->nodes[0], left_ii); | 5131 | sctx->left_path->nodes[0], left_ii); |
4885 | sctx->cur_inode_mode = btrfs_inode_mode( | 5132 | sctx->cur_inode_mode = btrfs_inode_mode( |
4886 | sctx->left_path->nodes[0], left_ii); | 5133 | sctx->left_path->nodes[0], left_ii); |
5134 | sctx->cur_inode_rdev = btrfs_inode_rdev( | ||
5135 | sctx->left_path->nodes[0], left_ii); | ||
4887 | ret = send_create_inode_if_needed(sctx); | 5136 | ret = send_create_inode_if_needed(sctx); |
4888 | if (ret < 0) | 5137 | if (ret < 0) |
4889 | goto out; | 5138 | goto out; |
@@ -5118,6 +5367,7 @@ out: | |||
5118 | static int full_send_tree(struct send_ctx *sctx) | 5367 | static int full_send_tree(struct send_ctx *sctx) |
5119 | { | 5368 | { |
5120 | int ret; | 5369 | int ret; |
5370 | struct btrfs_trans_handle *trans = NULL; | ||
5121 | struct btrfs_root *send_root = sctx->send_root; | 5371 | struct btrfs_root *send_root = sctx->send_root; |
5122 | struct btrfs_key key; | 5372 | struct btrfs_key key; |
5123 | struct btrfs_key found_key; | 5373 | struct btrfs_key found_key; |
@@ -5139,6 +5389,19 @@ static int full_send_tree(struct send_ctx *sctx) | |||
5139 | key.type = BTRFS_INODE_ITEM_KEY; | 5389 | key.type = BTRFS_INODE_ITEM_KEY; |
5140 | key.offset = 0; | 5390 | key.offset = 0; |
5141 | 5391 | ||
5392 | join_trans: | ||
5393 | /* | ||
5394 | * We need to make sure the transaction does not get committed | ||
5395 | * while we do anything on commit roots. Join a transaction to prevent | ||
5396 | * this. | ||
5397 | */ | ||
5398 | trans = btrfs_join_transaction(send_root); | ||
5399 | if (IS_ERR(trans)) { | ||
5400 | ret = PTR_ERR(trans); | ||
5401 | trans = NULL; | ||
5402 | goto out; | ||
5403 | } | ||
5404 | |||
5142 | /* | 5405 | /* |
5143 | * Make sure the tree has not changed after re-joining. We detect this | 5406 | * Make sure the tree has not changed after re-joining. We detect this |
5144 | * by comparing start_ctransid and ctransid. They should always match. | 5407 | * by comparing start_ctransid and ctransid. They should always match. |
@@ -5162,6 +5425,19 @@ static int full_send_tree(struct send_ctx *sctx) | |||
5162 | goto out_finish; | 5425 | goto out_finish; |
5163 | 5426 | ||
5164 | while (1) { | 5427 | while (1) { |
5428 | /* | ||
5429 | * When someone wants to commit while we iterate, end the | ||
5430 | * joined transaction and rejoin. | ||
5431 | */ | ||
5432 | if (btrfs_should_end_transaction(trans, send_root)) { | ||
5433 | ret = btrfs_end_transaction(trans, send_root); | ||
5434 | trans = NULL; | ||
5435 | if (ret < 0) | ||
5436 | goto out; | ||
5437 | btrfs_release_path(path); | ||
5438 | goto join_trans; | ||
5439 | } | ||
5440 | |||
5165 | eb = path->nodes[0]; | 5441 | eb = path->nodes[0]; |
5166 | slot = path->slots[0]; | 5442 | slot = path->slots[0]; |
5167 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 5443 | btrfs_item_key_to_cpu(eb, &found_key, slot); |
@@ -5189,6 +5465,12 @@ out_finish: | |||
5189 | 5465 | ||
5190 | out: | 5466 | out: |
5191 | btrfs_free_path(path); | 5467 | btrfs_free_path(path); |
5468 | if (trans) { | ||
5469 | if (!ret) | ||
5470 | ret = btrfs_end_transaction(trans, send_root); | ||
5471 | else | ||
5472 | btrfs_end_transaction(trans, send_root); | ||
5473 | } | ||
5192 | return ret; | 5474 | return ret; |
5193 | } | 5475 | } |
5194 | 5476 | ||
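full_send_tree() now scans the commit root inside a joined transaction, and whenever btrfs_should_end_transaction() reports that another task wants to commit it releases the path, ends the transaction and rejoins at join_trans before resuming. The same hold / yield-when-contended / re-acquire shape in generic userspace form (a pthread mutex and an atomic flag standing in for the transaction; all names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int commit_wanted;	/* ~ btrfs_should_end_transaction() */

static void scan_items(int nr_items)
{
	int i = 0;

	pthread_mutex_lock(&lock);		/* ~ btrfs_join_transaction() */
	while (i < nr_items) {
		if (atomic_load(&commit_wanted)) {
			/* yield so the committer can make progress */
			pthread_mutex_unlock(&lock);	/* ~ btrfs_end_transaction() */
			pthread_mutex_lock(&lock);	/* ~ rejoin */
		}
		/* process item i under the joined "transaction" */
		i++;
	}
	pthread_mutex_unlock(&lock);
	printf("scanned %d items\n", i);
}

int main(void)
{
	scan_items(1000);
	return 0;
}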
@@ -5340,6 +5622,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) | |||
5340 | 5622 | ||
5341 | sctx->pending_dir_moves = RB_ROOT; | 5623 | sctx->pending_dir_moves = RB_ROOT; |
5342 | sctx->waiting_dir_moves = RB_ROOT; | 5624 | sctx->waiting_dir_moves = RB_ROOT; |
5625 | sctx->orphan_dirs = RB_ROOT; | ||
5343 | 5626 | ||
5344 | sctx->clone_roots = vzalloc(sizeof(struct clone_root) * | 5627 | sctx->clone_roots = vzalloc(sizeof(struct clone_root) * |
5345 | (arg->clone_sources_count + 1)); | 5628 | (arg->clone_sources_count + 1)); |
@@ -5477,6 +5760,16 @@ out: | |||
5477 | kfree(dm); | 5760 | kfree(dm); |
5478 | } | 5761 | } |
5479 | 5762 | ||
5763 | WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs)); | ||
5764 | while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) { | ||
5765 | struct rb_node *n; | ||
5766 | struct orphan_dir_info *odi; | ||
5767 | |||
5768 | n = rb_first(&sctx->orphan_dirs); | ||
5769 | odi = rb_entry(n, struct orphan_dir_info, node); | ||
5770 | free_orphan_dir_info(sctx, odi); | ||
5771 | } | ||
5772 | |||
5480 | if (sort_clone_roots) { | 5773 | if (sort_clone_roots) { |
5481 | for (i = 0; i < sctx->clone_roots_cnt; i++) | 5774 | for (i = 0; i < sctx->clone_roots_cnt; i++) |
5482 | btrfs_root_dec_send_in_progress( | 5775 | btrfs_root_dec_send_in_progress( |