Diffstat (limited to 'fs/btrfs/send.c')
-rw-r--r-- | fs/btrfs/send.c | 879
1 file changed, 572 insertions(+), 307 deletions(-)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9dde9717c1b9..fd38b5053479 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -51,15 +51,18 @@ struct fs_path { | |||
51 | struct { | 51 | struct { |
52 | char *start; | 52 | char *start; |
53 | char *end; | 53 | char *end; |
54 | char *prepared; | ||
55 | 54 | ||
56 | char *buf; | 55 | char *buf; |
57 | int buf_len; | 56 | unsigned short buf_len:15; |
58 | unsigned int reversed:1; | 57 | unsigned short reversed:1; |
59 | unsigned int virtual_mem:1; | ||
60 | char inline_buf[]; | 58 | char inline_buf[]; |
61 | }; | 59 | }; |
62 | char pad[PAGE_SIZE]; | 60 | /* |
61 | * Average path length does not exceed 200 bytes, so we get | ||
62 | * better packing in the slab and a higher chance to satisfy | ||
63 | * an allocation later during send. | ||
64 | */ | ||
65 | char pad[256]; | ||
63 | }; | 66 | }; |
64 | }; | 67 | }; |
65 | #define FS_PATH_INLINE_SIZE \ | 68 | #define FS_PATH_INLINE_SIZE \ |
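The hunk above shrinks struct fs_path: the prepared and virtual_mem members go away, buf_len becomes a 15-bit bitfield, and the union is padded to 256 bytes instead of PAGE_SIZE. A minimal standalone sketch of the resulting layout, with compile-time checks that are editorial assumptions rather than part of the patch, shows why those sizes are enough once paths are capped at PATH_MAX later in fs_path_ensure_buf():

	/*
	 * Illustrative userspace C11 sketch, not the kernel struct; field names
	 * follow the patch, the asserts are assumptions added for clarity.
	 */
	#include <limits.h>	/* PATH_MAX */
	#include <assert.h>

	struct fs_path_sketch {
		union {
			struct {
				char *start;
				char *end;
				char *buf;
				unsigned short buf_len:15;	/* up to 32767 bytes */
				unsigned short reversed:1;
				/* char inline_buf[]; sits here in the real struct */
			};
			char pad[256];	/* bounds the inline buffer, packs well in the slab */
		};
	};

	static_assert(sizeof(struct fs_path_sketch) == 256,
		      "whole object stays at 256 bytes");
	static_assert(PATH_MAX < (1 << 15),
		      "a 15-bit buf_len covers the PATH_MAX cap");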
@@ -109,6 +112,7 @@ struct send_ctx { | |||
109 | int cur_inode_deleted; | 112 | int cur_inode_deleted; |
110 | u64 cur_inode_size; | 113 | u64 cur_inode_size; |
111 | u64 cur_inode_mode; | 114 | u64 cur_inode_mode; |
115 | u64 cur_inode_rdev; | ||
112 | u64 cur_inode_last_extent; | 116 | u64 cur_inode_last_extent; |
113 | 117 | ||
114 | u64 send_progress; | 118 | u64 send_progress; |
@@ -120,6 +124,8 @@ struct send_ctx { | |||
120 | struct list_head name_cache_list; | 124 | struct list_head name_cache_list; |
121 | int name_cache_size; | 125 | int name_cache_size; |
122 | 126 | ||
127 | struct file_ra_state ra; | ||
128 | |||
123 | char *read_buf; | 129 | char *read_buf; |
124 | 130 | ||
125 | /* | 131 | /* |
@@ -175,6 +181,47 @@ struct send_ctx { | |||
175 | * own move/rename can be performed. | 181 | * own move/rename can be performed. |
176 | */ | 182 | */ |
177 | struct rb_root waiting_dir_moves; | 183 | struct rb_root waiting_dir_moves; |
184 | |||
185 | /* | ||
186 | * A directory that is going to be rm'ed might have a child directory | ||
187 | * which is in the pending directory moves index above. In this case, | ||
188 | * the directory can only be removed after the move/rename of its child | ||
189 | * is performed. Example: | ||
190 | * | ||
191 | * Parent snapshot: | ||
192 | * | ||
193 | * . (ino 256) | ||
194 | * |-- a/ (ino 257) | ||
195 | * |-- b/ (ino 258) | ||
196 | * |-- c/ (ino 259) | ||
197 | * | |-- x/ (ino 260) | ||
198 | * | | ||
199 | * |-- y/ (ino 261) | ||
200 | * | ||
201 | * Send snapshot: | ||
202 | * | ||
203 | * . (ino 256) | ||
204 | * |-- a/ (ino 257) | ||
205 | * |-- b/ (ino 258) | ||
206 | * |-- YY/ (ino 261) | ||
207 | * |-- x/ (ino 260) | ||
208 | * | ||
209 | * Sequence of steps that lead to the send snapshot: | ||
210 | * rm -f /a/b/c/foo.txt | ||
211 | * mv /a/b/y /a/b/YY | ||
212 | * mv /a/b/c/x /a/b/YY | ||
213 | * rmdir /a/b/c | ||
214 | * | ||
215 | * When the child is processed, its move/rename is delayed until its | ||
216 | * parent is processed (as explained above), but all other operations | ||
217 | * like update utimes, chown, chgrp, etc, are performed and the paths | ||
218 | * that it uses for those operations must use the orphanized name of | ||
219 | * its parent (the directory we're going to rm later), so we need to | ||
220 | * memorize that name. | ||
221 | * | ||
222 | * Indexed by the inode number of the directory to be deleted. | ||
223 | */ | ||
224 | struct rb_root orphan_dirs; | ||
178 | }; | 225 | }; |
179 | 226 | ||
180 | struct pending_dir_move { | 227 | struct pending_dir_move { |
@@ -189,6 +236,18 @@ struct pending_dir_move { | |||
189 | struct waiting_dir_move { | 236 | struct waiting_dir_move { |
190 | struct rb_node node; | 237 | struct rb_node node; |
191 | u64 ino; | 238 | u64 ino; |
239 | /* | ||
240 | * There might be some directory that could not be removed because it | ||
241 | * was waiting for this directory inode to be moved first. Therefore | ||
242 | * after this directory is moved, we can try to rmdir the ino rmdir_ino. | ||
243 | */ | ||
244 | u64 rmdir_ino; | ||
245 | }; | ||
246 | |||
247 | struct orphan_dir_info { | ||
248 | struct rb_node node; | ||
249 | u64 ino; | ||
250 | u64 gen; | ||
192 | }; | 251 | }; |
193 | 252 | ||
194 | struct name_cache_entry { | 253 | struct name_cache_entry { |
@@ -214,6 +273,11 @@ struct name_cache_entry { | |||
214 | 273 | ||
215 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); | 274 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); |
216 | 275 | ||
276 | static struct waiting_dir_move * | ||
277 | get_waiting_dir_move(struct send_ctx *sctx, u64 ino); | ||
278 | |||
279 | static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino); | ||
280 | |||
217 | static int need_send_hole(struct send_ctx *sctx) | 281 | static int need_send_hole(struct send_ctx *sctx) |
218 | { | 282 | { |
219 | return (sctx->parent_root && !sctx->cur_inode_new && | 283 | return (sctx->parent_root && !sctx->cur_inode_new && |
@@ -242,7 +306,6 @@ static struct fs_path *fs_path_alloc(void) | |||
242 | if (!p) | 306 | if (!p) |
243 | return NULL; | 307 | return NULL; |
244 | p->reversed = 0; | 308 | p->reversed = 0; |
245 | p->virtual_mem = 0; | ||
246 | p->buf = p->inline_buf; | 309 | p->buf = p->inline_buf; |
247 | p->buf_len = FS_PATH_INLINE_SIZE; | 310 | p->buf_len = FS_PATH_INLINE_SIZE; |
248 | fs_path_reset(p); | 311 | fs_path_reset(p); |
@@ -265,12 +328,8 @@ static void fs_path_free(struct fs_path *p) | |||
265 | { | 328 | { |
266 | if (!p) | 329 | if (!p) |
267 | return; | 330 | return; |
268 | if (p->buf != p->inline_buf) { | 331 | if (p->buf != p->inline_buf) |
269 | if (p->virtual_mem) | 332 | kfree(p->buf); |
270 | vfree(p->buf); | ||
271 | else | ||
272 | kfree(p->buf); | ||
273 | } | ||
274 | kfree(p); | 333 | kfree(p); |
275 | } | 334 | } |
276 | 335 | ||
@@ -290,42 +349,30 @@ static int fs_path_ensure_buf(struct fs_path *p, int len) | |||
290 | if (p->buf_len >= len) | 349 | if (p->buf_len >= len) |
291 | return 0; | 350 | return 0; |
292 | 351 | ||
352 | if (len > PATH_MAX) { | ||
353 | WARN_ON(1); | ||
354 | return -ENOMEM; | ||
355 | } | ||
356 | |||
293 | path_len = p->end - p->start; | 357 | path_len = p->end - p->start; |
294 | old_buf_len = p->buf_len; | 358 | old_buf_len = p->buf_len; |
295 | len = PAGE_ALIGN(len); | 359 | |
296 | 360 | /* | |
297 | if (p->buf == p->inline_buf) { | 361 | * First time the inline_buf does not suffice |
298 | tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN); | 362 | */ |
299 | if (!tmp_buf) { | 363 | if (p->buf == p->inline_buf) |
300 | tmp_buf = vmalloc(len); | 364 | tmp_buf = kmalloc(len, GFP_NOFS); |
301 | if (!tmp_buf) | 365 | else |
302 | return -ENOMEM; | 366 | tmp_buf = krealloc(p->buf, len, GFP_NOFS); |
303 | p->virtual_mem = 1; | 367 | if (!tmp_buf) |
304 | } | 368 | return -ENOMEM; |
305 | memcpy(tmp_buf, p->buf, p->buf_len); | 369 | p->buf = tmp_buf; |
306 | p->buf = tmp_buf; | 370 | /* |
307 | p->buf_len = len; | 371 | * The real size of the buffer is bigger, which will let the fast path |
308 | } else { | 372 | * happen most of the time |
309 | if (p->virtual_mem) { | 373 | */ |
310 | tmp_buf = vmalloc(len); | 374 | p->buf_len = ksize(p->buf); |
311 | if (!tmp_buf) | 375 | |
312 | return -ENOMEM; | ||
313 | memcpy(tmp_buf, p->buf, p->buf_len); | ||
314 | vfree(p->buf); | ||
315 | } else { | ||
316 | tmp_buf = krealloc(p->buf, len, GFP_NOFS); | ||
317 | if (!tmp_buf) { | ||
318 | tmp_buf = vmalloc(len); | ||
319 | if (!tmp_buf) | ||
320 | return -ENOMEM; | ||
321 | memcpy(tmp_buf, p->buf, p->buf_len); | ||
322 | kfree(p->buf); | ||
323 | p->virtual_mem = 1; | ||
324 | } | ||
325 | } | ||
326 | p->buf = tmp_buf; | ||
327 | p->buf_len = len; | ||
328 | } | ||
329 | if (p->reversed) { | 376 | if (p->reversed) { |
330 | tmp_buf = p->buf + old_buf_len - path_len - 1; | 377 | tmp_buf = p->buf + old_buf_len - path_len - 1; |
331 | p->end = p->buf + p->buf_len - 1; | 378 | p->end = p->buf + p->buf_len - 1; |
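The rewritten fs_path_ensure_buf() above removes the vmalloc fallback (paths are now rejected beyond PATH_MAX) and stores ksize() of the slab object instead of the requested length, so most later growth requests return early through the p->buf_len >= len check. A hedged, kernel-style sketch of that pattern in isolation; the helper name is made up and the inline-buffer special case is left out:

	/*
	 * Sketch only: grow a heap buffer with krealloc() and remember the real
	 * slab object size so slightly larger future requests allocate nothing.
	 */
	static int grow_buf(char **buf, unsigned short *buf_len, int len)
	{
		char *tmp;

		if (*buf_len >= len)
			return 0;			/* fast path */
		if (len > PATH_MAX) {
			WARN_ON(1);			/* paths are bounded */
			return -ENOMEM;
		}
		tmp = krealloc(*buf, len, GFP_NOFS);	/* a NULL *buf acts like kmalloc */
		if (!tmp)
			return -ENOMEM;
		*buf = tmp;
		*buf_len = ksize(*buf);			/* real size, >= len */
		return 0;
	}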
@@ -338,7 +385,8 @@ static int fs_path_ensure_buf(struct fs_path *p, int len) | |||
338 | return 0; | 385 | return 0; |
339 | } | 386 | } |
340 | 387 | ||
341 | static int fs_path_prepare_for_add(struct fs_path *p, int name_len) | 388 | static int fs_path_prepare_for_add(struct fs_path *p, int name_len, |
389 | char **prepared) | ||
342 | { | 390 | { |
343 | int ret; | 391 | int ret; |
344 | int new_len; | 392 | int new_len; |
@@ -354,11 +402,11 @@ static int fs_path_prepare_for_add(struct fs_path *p, int name_len) | |||
354 | if (p->start != p->end) | 402 | if (p->start != p->end) |
355 | *--p->start = '/'; | 403 | *--p->start = '/'; |
356 | p->start -= name_len; | 404 | p->start -= name_len; |
357 | p->prepared = p->start; | 405 | *prepared = p->start; |
358 | } else { | 406 | } else { |
359 | if (p->start != p->end) | 407 | if (p->start != p->end) |
360 | *p->end++ = '/'; | 408 | *p->end++ = '/'; |
361 | p->prepared = p->end; | 409 | *prepared = p->end; |
362 | p->end += name_len; | 410 | p->end += name_len; |
363 | *p->end = 0; | 411 | *p->end = 0; |
364 | } | 412 | } |
@@ -370,12 +418,12 @@ out: | |||
370 | static int fs_path_add(struct fs_path *p, const char *name, int name_len) | 418 | static int fs_path_add(struct fs_path *p, const char *name, int name_len) |
371 | { | 419 | { |
372 | int ret; | 420 | int ret; |
421 | char *prepared; | ||
373 | 422 | ||
374 | ret = fs_path_prepare_for_add(p, name_len); | 423 | ret = fs_path_prepare_for_add(p, name_len, &prepared); |
375 | if (ret < 0) | 424 | if (ret < 0) |
376 | goto out; | 425 | goto out; |
377 | memcpy(p->prepared, name, name_len); | 426 | memcpy(prepared, name, name_len); |
378 | p->prepared = NULL; | ||
379 | 427 | ||
380 | out: | 428 | out: |
381 | return ret; | 429 | return ret; |
@@ -384,12 +432,12 @@ out: | |||
384 | static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) | 432 | static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) |
385 | { | 433 | { |
386 | int ret; | 434 | int ret; |
435 | char *prepared; | ||
387 | 436 | ||
388 | ret = fs_path_prepare_for_add(p, p2->end - p2->start); | 437 | ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared); |
389 | if (ret < 0) | 438 | if (ret < 0) |
390 | goto out; | 439 | goto out; |
391 | memcpy(p->prepared, p2->start, p2->end - p2->start); | 440 | memcpy(prepared, p2->start, p2->end - p2->start); |
392 | p->prepared = NULL; | ||
393 | 441 | ||
394 | out: | 442 | out: |
395 | return ret; | 443 | return ret; |
@@ -400,13 +448,13 @@ static int fs_path_add_from_extent_buffer(struct fs_path *p, | |||
400 | unsigned long off, int len) | 448 | unsigned long off, int len) |
401 | { | 449 | { |
402 | int ret; | 450 | int ret; |
451 | char *prepared; | ||
403 | 452 | ||
404 | ret = fs_path_prepare_for_add(p, len); | 453 | ret = fs_path_prepare_for_add(p, len, &prepared); |
405 | if (ret < 0) | 454 | if (ret < 0) |
406 | goto out; | 455 | goto out; |
407 | 456 | ||
408 | read_extent_buffer(eb, p->prepared, off, len); | 457 | read_extent_buffer(eb, prepared, off, len); |
409 | p->prepared = NULL; | ||
410 | 458 | ||
411 | out: | 459 | out: |
412 | return ret; | 460 | return ret; |
@@ -450,6 +498,7 @@ static struct btrfs_path *alloc_path_for_send(void) | |||
450 | return NULL; | 498 | return NULL; |
451 | path->search_commit_root = 1; | 499 | path->search_commit_root = 1; |
452 | path->skip_locking = 1; | 500 | path->skip_locking = 1; |
501 | path->need_commit_sem = 1; | ||
453 | return path; | 502 | return path; |
454 | } | 503 | } |
455 | 504 | ||
@@ -728,29 +777,22 @@ out: | |||
728 | /* | 777 | /* |
729 | * Helper function to retrieve some fields from an inode item. | 778 | * Helper function to retrieve some fields from an inode item. |
730 | */ | 779 | */ |
731 | static int get_inode_info(struct btrfs_root *root, | 780 | static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path, |
732 | u64 ino, u64 *size, u64 *gen, | 781 | u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid, |
733 | u64 *mode, u64 *uid, u64 *gid, | 782 | u64 *gid, u64 *rdev) |
734 | u64 *rdev) | ||
735 | { | 783 | { |
736 | int ret; | 784 | int ret; |
737 | struct btrfs_inode_item *ii; | 785 | struct btrfs_inode_item *ii; |
738 | struct btrfs_key key; | 786 | struct btrfs_key key; |
739 | struct btrfs_path *path; | ||
740 | |||
741 | path = alloc_path_for_send(); | ||
742 | if (!path) | ||
743 | return -ENOMEM; | ||
744 | 787 | ||
745 | key.objectid = ino; | 788 | key.objectid = ino; |
746 | key.type = BTRFS_INODE_ITEM_KEY; | 789 | key.type = BTRFS_INODE_ITEM_KEY; |
747 | key.offset = 0; | 790 | key.offset = 0; |
748 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 791 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
749 | if (ret < 0) | ||
750 | goto out; | ||
751 | if (ret) { | 792 | if (ret) { |
752 | ret = -ENOENT; | 793 | if (ret > 0) |
753 | goto out; | 794 | ret = -ENOENT; |
795 | return ret; | ||
754 | } | 796 | } |
755 | 797 | ||
756 | ii = btrfs_item_ptr(path->nodes[0], path->slots[0], | 798 | ii = btrfs_item_ptr(path->nodes[0], path->slots[0], |
@@ -768,7 +810,22 @@ static int get_inode_info(struct btrfs_root *root, | |||
768 | if (rdev) | 810 | if (rdev) |
769 | *rdev = btrfs_inode_rdev(path->nodes[0], ii); | 811 | *rdev = btrfs_inode_rdev(path->nodes[0], ii); |
770 | 812 | ||
771 | out: | 813 | return ret; |
814 | } | ||
815 | |||
816 | static int get_inode_info(struct btrfs_root *root, | ||
817 | u64 ino, u64 *size, u64 *gen, | ||
818 | u64 *mode, u64 *uid, u64 *gid, | ||
819 | u64 *rdev) | ||
820 | { | ||
821 | struct btrfs_path *path; | ||
822 | int ret; | ||
823 | |||
824 | path = alloc_path_for_send(); | ||
825 | if (!path) | ||
826 | return -ENOMEM; | ||
827 | ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid, | ||
828 | rdev); | ||
772 | btrfs_free_path(path); | 829 | btrfs_free_path(path); |
773 | return ret; | 830 | return ret; |
774 | } | 831 | } |
@@ -915,9 +972,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
915 | struct btrfs_dir_item *di; | 972 | struct btrfs_dir_item *di; |
916 | struct btrfs_key di_key; | 973 | struct btrfs_key di_key; |
917 | char *buf = NULL; | 974 | char *buf = NULL; |
918 | char *buf2 = NULL; | 975 | const int buf_len = PATH_MAX; |
919 | int buf_len; | ||
920 | int buf_virtual = 0; | ||
921 | u32 name_len; | 976 | u32 name_len; |
922 | u32 data_len; | 977 | u32 data_len; |
923 | u32 cur; | 978 | u32 cur; |
@@ -927,7 +982,6 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
927 | int num; | 982 | int num; |
928 | u8 type; | 983 | u8 type; |
929 | 984 | ||
930 | buf_len = PAGE_SIZE; | ||
931 | buf = kmalloc(buf_len, GFP_NOFS); | 985 | buf = kmalloc(buf_len, GFP_NOFS); |
932 | if (!buf) { | 986 | if (!buf) { |
933 | ret = -ENOMEM; | 987 | ret = -ENOMEM; |
@@ -949,30 +1003,12 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
949 | type = btrfs_dir_type(eb, di); | 1003 | type = btrfs_dir_type(eb, di); |
950 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | 1004 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); |
951 | 1005 | ||
1006 | /* | ||
1007 | * Path too long | ||
1008 | */ | ||
952 | if (name_len + data_len > buf_len) { | 1009 | if (name_len + data_len > buf_len) { |
953 | buf_len = PAGE_ALIGN(name_len + data_len); | 1010 | ret = -ENAMETOOLONG; |
954 | if (buf_virtual) { | 1011 | goto out; |
955 | buf2 = vmalloc(buf_len); | ||
956 | if (!buf2) { | ||
957 | ret = -ENOMEM; | ||
958 | goto out; | ||
959 | } | ||
960 | vfree(buf); | ||
961 | } else { | ||
962 | buf2 = krealloc(buf, buf_len, GFP_NOFS); | ||
963 | if (!buf2) { | ||
964 | buf2 = vmalloc(buf_len); | ||
965 | if (!buf2) { | ||
966 | ret = -ENOMEM; | ||
967 | goto out; | ||
968 | } | ||
969 | kfree(buf); | ||
970 | buf_virtual = 1; | ||
971 | } | ||
972 | } | ||
973 | |||
974 | buf = buf2; | ||
975 | buf2 = NULL; | ||
976 | } | 1012 | } |
977 | 1013 | ||
978 | read_extent_buffer(eb, buf, (unsigned long)(di + 1), | 1014 | read_extent_buffer(eb, buf, (unsigned long)(di + 1), |
@@ -995,10 +1031,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, | |||
995 | } | 1031 | } |
996 | 1032 | ||
997 | out: | 1033 | out: |
998 | if (buf_virtual) | 1034 | kfree(buf); |
999 | vfree(buf); | ||
1000 | else | ||
1001 | kfree(buf); | ||
1002 | return ret; | 1035 | return ret; |
1003 | } | 1036 | } |
1004 | 1037 | ||
@@ -1066,6 +1099,7 @@ out: | |||
1066 | struct backref_ctx { | 1099 | struct backref_ctx { |
1067 | struct send_ctx *sctx; | 1100 | struct send_ctx *sctx; |
1068 | 1101 | ||
1102 | struct btrfs_path *path; | ||
1069 | /* number of total found references */ | 1103 | /* number of total found references */ |
1070 | u64 found; | 1104 | u64 found; |
1071 | 1105 | ||
@@ -1136,8 +1170,9 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | |||
1136 | * There are inodes that have extents that lie behind its i_size. Don't | 1170 | * There are inodes that have extents that lie behind its i_size. Don't |
1137 | * accept clones from these extents. | 1171 | * accept clones from these extents. |
1138 | */ | 1172 | */ |
1139 | ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL, | 1173 | ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL, |
1140 | NULL); | 1174 | NULL, NULL, NULL); |
1175 | btrfs_release_path(bctx->path); | ||
1141 | if (ret < 0) | 1176 | if (ret < 0) |
1142 | return ret; | 1177 | return ret; |
1143 | 1178 | ||
@@ -1216,12 +1251,17 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
1216 | if (!tmp_path) | 1251 | if (!tmp_path) |
1217 | return -ENOMEM; | 1252 | return -ENOMEM; |
1218 | 1253 | ||
1254 | /* We only use this path under the commit sem */ | ||
1255 | tmp_path->need_commit_sem = 0; | ||
1256 | |||
1219 | backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS); | 1257 | backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS); |
1220 | if (!backref_ctx) { | 1258 | if (!backref_ctx) { |
1221 | ret = -ENOMEM; | 1259 | ret = -ENOMEM; |
1222 | goto out; | 1260 | goto out; |
1223 | } | 1261 | } |
1224 | 1262 | ||
1263 | backref_ctx->path = tmp_path; | ||
1264 | |||
1225 | if (data_offset >= ino_size) { | 1265 | if (data_offset >= ino_size) { |
1226 | /* | 1266 | /* |
1227 | * There may be extents that lie behind the file's size. | 1267 | * There may be extents that lie behind the file's size. |
@@ -1249,8 +1289,10 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
1249 | } | 1289 | } |
1250 | logical = disk_byte + btrfs_file_extent_offset(eb, fi); | 1290 | logical = disk_byte + btrfs_file_extent_offset(eb, fi); |
1251 | 1291 | ||
1292 | down_read(&sctx->send_root->fs_info->commit_root_sem); | ||
1252 | ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path, | 1293 | ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path, |
1253 | &found_key, &flags); | 1294 | &found_key, &flags); |
1295 | up_read(&sctx->send_root->fs_info->commit_root_sem); | ||
1254 | btrfs_release_path(tmp_path); | 1296 | btrfs_release_path(tmp_path); |
1255 | 1297 | ||
1256 | if (ret < 0) | 1298 | if (ret < 0) |
@@ -1292,8 +1334,6 @@ static int find_extent_clone(struct send_ctx *sctx, | |||
1292 | extent_item_pos = logical - found_key.objectid; | 1334 | extent_item_pos = logical - found_key.objectid; |
1293 | else | 1335 | else |
1294 | extent_item_pos = 0; | 1336 | extent_item_pos = 0; |
1295 | |||
1296 | extent_item_pos = logical - found_key.objectid; | ||
1297 | ret = iterate_extent_inodes(sctx->send_root->fs_info, | 1337 | ret = iterate_extent_inodes(sctx->send_root->fs_info, |
1298 | found_key.objectid, extent_item_pos, 1, | 1338 | found_key.objectid, extent_item_pos, 1, |
1299 | __iterate_backrefs, backref_ctx); | 1339 | __iterate_backrefs, backref_ctx); |
@@ -1418,11 +1458,7 @@ static int gen_unique_name(struct send_ctx *sctx, | |||
1418 | while (1) { | 1458 | while (1) { |
1419 | len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", | 1459 | len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", |
1420 | ino, gen, idx); | 1460 | ino, gen, idx); |
1421 | if (len >= sizeof(tmp)) { | 1461 | ASSERT(len < sizeof(tmp)); |
1422 | /* should really not happen */ | ||
1423 | ret = -EOVERFLOW; | ||
1424 | goto out; | ||
1425 | } | ||
1426 | 1462 | ||
1427 | di = btrfs_lookup_dir_item(NULL, sctx->send_root, | 1463 | di = btrfs_lookup_dir_item(NULL, sctx->send_root, |
1428 | path, BTRFS_FIRST_FREE_OBJECTID, | 1464 | path, BTRFS_FIRST_FREE_OBJECTID, |
@@ -1632,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino, | |||
1632 | goto out; | 1668 | goto out; |
1633 | } | 1669 | } |
1634 | 1670 | ||
1635 | if (key.type == BTRFS_INODE_REF_KEY) { | 1671 | if (found_key.type == BTRFS_INODE_REF_KEY) { |
1636 | struct btrfs_inode_ref *iref; | 1672 | struct btrfs_inode_ref *iref; |
1637 | iref = btrfs_item_ptr(path->nodes[0], path->slots[0], | 1673 | iref = btrfs_item_ptr(path->nodes[0], path->slots[0], |
1638 | struct btrfs_inode_ref); | 1674 | struct btrfs_inode_ref); |
@@ -1898,13 +1934,20 @@ static void name_cache_delete(struct send_ctx *sctx, | |||
1898 | 1934 | ||
1899 | nce_head = radix_tree_lookup(&sctx->name_cache, | 1935 | nce_head = radix_tree_lookup(&sctx->name_cache, |
1900 | (unsigned long)nce->ino); | 1936 | (unsigned long)nce->ino); |
1901 | BUG_ON(!nce_head); | 1937 | if (!nce_head) { |
1938 | btrfs_err(sctx->send_root->fs_info, | ||
1939 | "name_cache_delete lookup failed ino %llu cache size %d, leaking memory", | ||
1940 | nce->ino, sctx->name_cache_size); | ||
1941 | } | ||
1902 | 1942 | ||
1903 | list_del(&nce->radix_list); | 1943 | list_del(&nce->radix_list); |
1904 | list_del(&nce->list); | 1944 | list_del(&nce->list); |
1905 | sctx->name_cache_size--; | 1945 | sctx->name_cache_size--; |
1906 | 1946 | ||
1907 | if (list_empty(nce_head)) { | 1947 | /* |
1948 | * We may not get to the final release of nce_head if the lookup fails | ||
1949 | */ | ||
1950 | if (nce_head && list_empty(nce_head)) { | ||
1908 | radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); | 1951 | radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); |
1909 | kfree(nce_head); | 1952 | kfree(nce_head); |
1910 | } | 1953 | } |
@@ -1977,7 +2020,6 @@ static void name_cache_free(struct send_ctx *sctx) | |||
1977 | */ | 2020 | */ |
1978 | static int __get_cur_name_and_parent(struct send_ctx *sctx, | 2021 | static int __get_cur_name_and_parent(struct send_ctx *sctx, |
1979 | u64 ino, u64 gen, | 2022 | u64 ino, u64 gen, |
1980 | int skip_name_cache, | ||
1981 | u64 *parent_ino, | 2023 | u64 *parent_ino, |
1982 | u64 *parent_gen, | 2024 | u64 *parent_gen, |
1983 | struct fs_path *dest) | 2025 | struct fs_path *dest) |
@@ -1987,8 +2029,6 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
1987 | struct btrfs_path *path = NULL; | 2029 | struct btrfs_path *path = NULL; |
1988 | struct name_cache_entry *nce = NULL; | 2030 | struct name_cache_entry *nce = NULL; |
1989 | 2031 | ||
1990 | if (skip_name_cache) | ||
1991 | goto get_ref; | ||
1992 | /* | 2032 | /* |
1993 | * First check if we already did a call to this function with the same | 2033 | * First check if we already did a call to this function with the same |
1994 | * ino/gen. If yes, check if the cache entry is still up-to-date. If yes | 2034 | * ino/gen. If yes, check if the cache entry is still up-to-date. If yes |
@@ -2033,12 +2073,11 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx, | |||
2033 | goto out_cache; | 2073 | goto out_cache; |
2034 | } | 2074 | } |
2035 | 2075 | ||
2036 | get_ref: | ||
2037 | /* | 2076 | /* |
2038 | * Depending on whether the inode was already processed or not, use | 2077 | * Depending on whether the inode was already processed or not, use |
2039 | * send_root or parent_root for ref lookup. | 2078 | * send_root or parent_root for ref lookup. |
2040 | */ | 2079 | */ |
2041 | if (ino < sctx->send_progress && !skip_name_cache) | 2080 | if (ino < sctx->send_progress) |
2042 | ret = get_first_ref(sctx->send_root, ino, | 2081 | ret = get_first_ref(sctx->send_root, ino, |
2043 | parent_ino, parent_gen, dest); | 2082 | parent_ino, parent_gen, dest); |
2044 | else | 2083 | else |
@@ -2062,8 +2101,6 @@ get_ref: | |||
2062 | goto out; | 2101 | goto out; |
2063 | ret = 1; | 2102 | ret = 1; |
2064 | } | 2103 | } |
2065 | if (skip_name_cache) | ||
2066 | goto out; | ||
2067 | 2104 | ||
2068 | out_cache: | 2105 | out_cache: |
2069 | /* | 2106 | /* |
@@ -2131,9 +2168,6 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, | |||
2131 | u64 parent_inode = 0; | 2168 | u64 parent_inode = 0; |
2132 | u64 parent_gen = 0; | 2169 | u64 parent_gen = 0; |
2133 | int stop = 0; | 2170 | int stop = 0; |
2134 | u64 start_ino = ino; | ||
2135 | u64 start_gen = gen; | ||
2136 | int skip_name_cache = 0; | ||
2137 | 2171 | ||
2138 | name = fs_path_alloc(); | 2172 | name = fs_path_alloc(); |
2139 | if (!name) { | 2173 | if (!name) { |
@@ -2141,31 +2175,33 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, | |||
2141 | goto out; | 2175 | goto out; |
2142 | } | 2176 | } |
2143 | 2177 | ||
2144 | if (is_waiting_for_move(sctx, ino)) | ||
2145 | skip_name_cache = 1; | ||
2146 | |||
2147 | again: | ||
2148 | dest->reversed = 1; | 2178 | dest->reversed = 1; |
2149 | fs_path_reset(dest); | 2179 | fs_path_reset(dest); |
2150 | 2180 | ||
2151 | while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { | 2181 | while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { |
2152 | fs_path_reset(name); | 2182 | fs_path_reset(name); |
2153 | 2183 | ||
2154 | ret = __get_cur_name_and_parent(sctx, ino, gen, skip_name_cache, | 2184 | if (is_waiting_for_rm(sctx, ino)) { |
2155 | &parent_inode, &parent_gen, name); | 2185 | ret = gen_unique_name(sctx, ino, gen, name); |
2186 | if (ret < 0) | ||
2187 | goto out; | ||
2188 | ret = fs_path_add_path(dest, name); | ||
2189 | break; | ||
2190 | } | ||
2191 | |||
2192 | if (is_waiting_for_move(sctx, ino)) { | ||
2193 | ret = get_first_ref(sctx->parent_root, ino, | ||
2194 | &parent_inode, &parent_gen, name); | ||
2195 | } else { | ||
2196 | ret = __get_cur_name_and_parent(sctx, ino, gen, | ||
2197 | &parent_inode, | ||
2198 | &parent_gen, name); | ||
2199 | if (ret) | ||
2200 | stop = 1; | ||
2201 | } | ||
2202 | |||
2156 | if (ret < 0) | 2203 | if (ret < 0) |
2157 | goto out; | 2204 | goto out; |
2158 | if (ret) | ||
2159 | stop = 1; | ||
2160 | |||
2161 | if (!skip_name_cache && | ||
2162 | is_waiting_for_move(sctx, parent_inode)) { | ||
2163 | ino = start_ino; | ||
2164 | gen = start_gen; | ||
2165 | stop = 0; | ||
2166 | skip_name_cache = 1; | ||
2167 | goto again; | ||
2168 | } | ||
2169 | 2205 | ||
2170 | ret = fs_path_add_path(dest, name); | 2206 | ret = fs_path_add_path(dest, name); |
2171 | if (ret < 0) | 2207 | if (ret < 0) |
@@ -2429,10 +2465,16 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino); | |||
2429 | if (!p) | 2465 | if (!p) |
2430 | return -ENOMEM; | 2466 | return -ENOMEM; |
2431 | 2467 | ||
2432 | ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL, | 2468 | if (ino != sctx->cur_ino) { |
2433 | NULL, &rdev); | 2469 | ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, |
2434 | if (ret < 0) | 2470 | NULL, NULL, &rdev); |
2435 | goto out; | 2471 | if (ret < 0) |
2472 | goto out; | ||
2473 | } else { | ||
2474 | gen = sctx->cur_inode_gen; | ||
2475 | mode = sctx->cur_inode_mode; | ||
2476 | rdev = sctx->cur_inode_rdev; | ||
2477 | } | ||
2436 | 2478 | ||
2437 | if (S_ISREG(mode)) { | 2479 | if (S_ISREG(mode)) { |
2438 | cmd = BTRFS_SEND_C_MKFILE; | 2480 | cmd = BTRFS_SEND_C_MKFILE; |
@@ -2512,17 +2554,26 @@ static int did_create_dir(struct send_ctx *sctx, u64 dir) | |||
2512 | key.objectid = dir; | 2554 | key.objectid = dir; |
2513 | key.type = BTRFS_DIR_INDEX_KEY; | 2555 | key.type = BTRFS_DIR_INDEX_KEY; |
2514 | key.offset = 0; | 2556 | key.offset = 0; |
2557 | ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); | ||
2558 | if (ret < 0) | ||
2559 | goto out; | ||
2560 | |||
2515 | while (1) { | 2561 | while (1) { |
2516 | ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, | 2562 | eb = path->nodes[0]; |
2517 | 1, 0); | 2563 | slot = path->slots[0]; |
2518 | if (ret < 0) | 2564 | if (slot >= btrfs_header_nritems(eb)) { |
2519 | goto out; | 2565 | ret = btrfs_next_leaf(sctx->send_root, path); |
2520 | if (!ret) { | 2566 | if (ret < 0) { |
2521 | eb = path->nodes[0]; | 2567 | goto out; |
2522 | slot = path->slots[0]; | 2568 | } else if (ret > 0) { |
2523 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 2569 | ret = 0; |
2570 | break; | ||
2571 | } | ||
2572 | continue; | ||
2524 | } | 2573 | } |
2525 | if (ret || found_key.objectid != key.objectid || | 2574 | |
2575 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
2576 | if (found_key.objectid != key.objectid || | ||
2526 | found_key.type != key.type) { | 2577 | found_key.type != key.type) { |
2527 | ret = 0; | 2578 | ret = 0; |
2528 | goto out; | 2579 | goto out; |
@@ -2537,8 +2588,7 @@ static int did_create_dir(struct send_ctx *sctx, u64 dir) | |||
2537 | goto out; | 2588 | goto out; |
2538 | } | 2589 | } |
2539 | 2590 | ||
2540 | key.offset = found_key.offset + 1; | 2591 | path->slots[0]++; |
2541 | btrfs_release_path(path); | ||
2542 | } | 2592 | } |
2543 | 2593 | ||
2544 | out: | 2594 | out: |
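did_create_dir() above, and further down can_rmdir(), process_all_refs() and process_all_new_xattrs(), all switch from issuing btrfs_search_slot_for_read() once per item to a single btrfs_search_slot() followed by walking slots within the leaf. The shared shape of that loop, shown here as a schematic rather than a copy of any one function (item handling elided):

	/* Schematic of the iteration pattern the patch converges on. */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);	/* advance to the next leaf */
			if (ret < 0)
				goto out;
			else if (ret > 0)			/* no more leaves */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid || found_key.type != key.type)
			break;					/* left the key range */
		/* ... process the item at (eb, slot) ... */
		path->slots[0]++;				/* next item in the same leaf */
	}
	btrfs_release_path(path);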
@@ -2590,7 +2640,7 @@ struct recorded_ref { | |||
2590 | * everything mixed. So we first record all refs and later process them. | 2640 | * everything mixed. So we first record all refs and later process them. |
2591 | * This function is a helper to record one ref. | 2641 | * This function is a helper to record one ref. |
2592 | */ | 2642 | */ |
2593 | static int record_ref(struct list_head *head, u64 dir, | 2643 | static int __record_ref(struct list_head *head, u64 dir, |
2594 | u64 dir_gen, struct fs_path *path) | 2644 | u64 dir_gen, struct fs_path *path) |
2595 | { | 2645 | { |
2596 | struct recorded_ref *ref; | 2646 | struct recorded_ref *ref; |
@@ -2676,12 +2726,78 @@ out: | |||
2676 | return ret; | 2726 | return ret; |
2677 | } | 2727 | } |
2678 | 2728 | ||
2729 | static struct orphan_dir_info * | ||
2730 | add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) | ||
2731 | { | ||
2732 | struct rb_node **p = &sctx->orphan_dirs.rb_node; | ||
2733 | struct rb_node *parent = NULL; | ||
2734 | struct orphan_dir_info *entry, *odi; | ||
2735 | |||
2736 | odi = kmalloc(sizeof(*odi), GFP_NOFS); | ||
2737 | if (!odi) | ||
2738 | return ERR_PTR(-ENOMEM); | ||
2739 | odi->ino = dir_ino; | ||
2740 | odi->gen = 0; | ||
2741 | |||
2742 | while (*p) { | ||
2743 | parent = *p; | ||
2744 | entry = rb_entry(parent, struct orphan_dir_info, node); | ||
2745 | if (dir_ino < entry->ino) { | ||
2746 | p = &(*p)->rb_left; | ||
2747 | } else if (dir_ino > entry->ino) { | ||
2748 | p = &(*p)->rb_right; | ||
2749 | } else { | ||
2750 | kfree(odi); | ||
2751 | return entry; | ||
2752 | } | ||
2753 | } | ||
2754 | |||
2755 | rb_link_node(&odi->node, parent, p); | ||
2756 | rb_insert_color(&odi->node, &sctx->orphan_dirs); | ||
2757 | return odi; | ||
2758 | } | ||
2759 | |||
2760 | static struct orphan_dir_info * | ||
2761 | get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) | ||
2762 | { | ||
2763 | struct rb_node *n = sctx->orphan_dirs.rb_node; | ||
2764 | struct orphan_dir_info *entry; | ||
2765 | |||
2766 | while (n) { | ||
2767 | entry = rb_entry(n, struct orphan_dir_info, node); | ||
2768 | if (dir_ino < entry->ino) | ||
2769 | n = n->rb_left; | ||
2770 | else if (dir_ino > entry->ino) | ||
2771 | n = n->rb_right; | ||
2772 | else | ||
2773 | return entry; | ||
2774 | } | ||
2775 | return NULL; | ||
2776 | } | ||
2777 | |||
2778 | static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) | ||
2779 | { | ||
2780 | struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); | ||
2781 | |||
2782 | return odi != NULL; | ||
2783 | } | ||
2784 | |||
2785 | static void free_orphan_dir_info(struct send_ctx *sctx, | ||
2786 | struct orphan_dir_info *odi) | ||
2787 | { | ||
2788 | if (!odi) | ||
2789 | return; | ||
2790 | rb_erase(&odi->node, &sctx->orphan_dirs); | ||
2791 | kfree(odi); | ||
2792 | } | ||
2793 | |||
2679 | /* | 2794 | /* |
2680 | * Returns 1 if a directory can be removed at this point in time. | 2795 | * Returns 1 if a directory can be removed at this point in time. |
2681 | * We check this by iterating all dir items and checking if the inode behind | 2796 | * We check this by iterating all dir items and checking if the inode behind |
2682 | * the dir item was already processed. | 2797 | * the dir item was already processed. |
2683 | */ | 2798 | */ |
2684 | static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | 2799 | static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, |
2800 | u64 send_progress) | ||
2685 | { | 2801 | { |
2686 | int ret = 0; | 2802 | int ret = 0; |
2687 | struct btrfs_root *root = sctx->parent_root; | 2803 | struct btrfs_root *root = sctx->parent_root; |
@@ -2704,31 +2820,52 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | |||
2704 | key.objectid = dir; | 2820 | key.objectid = dir; |
2705 | key.type = BTRFS_DIR_INDEX_KEY; | 2821 | key.type = BTRFS_DIR_INDEX_KEY; |
2706 | key.offset = 0; | 2822 | key.offset = 0; |
2823 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
2824 | if (ret < 0) | ||
2825 | goto out; | ||
2707 | 2826 | ||
2708 | while (1) { | 2827 | while (1) { |
2709 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 2828 | struct waiting_dir_move *dm; |
2710 | if (ret < 0) | 2829 | |
2711 | goto out; | 2830 | if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { |
2712 | if (!ret) { | 2831 | ret = btrfs_next_leaf(root, path); |
2713 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 2832 | if (ret < 0) |
2714 | path->slots[0]); | 2833 | goto out; |
2834 | else if (ret > 0) | ||
2835 | break; | ||
2836 | continue; | ||
2715 | } | 2837 | } |
2716 | if (ret || found_key.objectid != key.objectid || | 2838 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, |
2717 | found_key.type != key.type) { | 2839 | path->slots[0]); |
2840 | if (found_key.objectid != key.objectid || | ||
2841 | found_key.type != key.type) | ||
2718 | break; | 2842 | break; |
2719 | } | ||
2720 | 2843 | ||
2721 | di = btrfs_item_ptr(path->nodes[0], path->slots[0], | 2844 | di = btrfs_item_ptr(path->nodes[0], path->slots[0], |
2722 | struct btrfs_dir_item); | 2845 | struct btrfs_dir_item); |
2723 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); | 2846 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); |
2724 | 2847 | ||
2848 | dm = get_waiting_dir_move(sctx, loc.objectid); | ||
2849 | if (dm) { | ||
2850 | struct orphan_dir_info *odi; | ||
2851 | |||
2852 | odi = add_orphan_dir_info(sctx, dir); | ||
2853 | if (IS_ERR(odi)) { | ||
2854 | ret = PTR_ERR(odi); | ||
2855 | goto out; | ||
2856 | } | ||
2857 | odi->gen = dir_gen; | ||
2858 | dm->rmdir_ino = dir; | ||
2859 | ret = 0; | ||
2860 | goto out; | ||
2861 | } | ||
2862 | |||
2725 | if (loc.objectid > send_progress) { | 2863 | if (loc.objectid > send_progress) { |
2726 | ret = 0; | 2864 | ret = 0; |
2727 | goto out; | 2865 | goto out; |
2728 | } | 2866 | } |
2729 | 2867 | ||
2730 | btrfs_release_path(path); | 2868 | path->slots[0]++; |
2731 | key.offset = found_key.offset + 1; | ||
2732 | } | 2869 | } |
2733 | 2870 | ||
2734 | ret = 1; | 2871 | ret = 1; |
@@ -2740,19 +2877,9 @@ out: | |||
2740 | 2877 | ||
2741 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) | 2878 | static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) |
2742 | { | 2879 | { |
2743 | struct rb_node *n = sctx->waiting_dir_moves.rb_node; | 2880 | struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); |
2744 | struct waiting_dir_move *entry; | ||
2745 | 2881 | ||
2746 | while (n) { | 2882 | return entry != NULL; |
2747 | entry = rb_entry(n, struct waiting_dir_move, node); | ||
2748 | if (ino < entry->ino) | ||
2749 | n = n->rb_left; | ||
2750 | else if (ino > entry->ino) | ||
2751 | n = n->rb_right; | ||
2752 | else | ||
2753 | return 1; | ||
2754 | } | ||
2755 | return 0; | ||
2756 | } | 2883 | } |
2757 | 2884 | ||
2758 | static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) | 2885 | static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) |
@@ -2765,6 +2892,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) | |||
2765 | if (!dm) | 2892 | if (!dm) |
2766 | return -ENOMEM; | 2893 | return -ENOMEM; |
2767 | dm->ino = ino; | 2894 | dm->ino = ino; |
2895 | dm->rmdir_ino = 0; | ||
2768 | 2896 | ||
2769 | while (*p) { | 2897 | while (*p) { |
2770 | parent = *p; | 2898 | parent = *p; |
@@ -2784,31 +2912,41 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) | |||
2784 | return 0; | 2912 | return 0; |
2785 | } | 2913 | } |
2786 | 2914 | ||
2787 | static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino) | 2915 | static struct waiting_dir_move * |
2916 | get_waiting_dir_move(struct send_ctx *sctx, u64 ino) | ||
2788 | { | 2917 | { |
2789 | struct rb_node *n = sctx->waiting_dir_moves.rb_node; | 2918 | struct rb_node *n = sctx->waiting_dir_moves.rb_node; |
2790 | struct waiting_dir_move *entry; | 2919 | struct waiting_dir_move *entry; |
2791 | 2920 | ||
2792 | while (n) { | 2921 | while (n) { |
2793 | entry = rb_entry(n, struct waiting_dir_move, node); | 2922 | entry = rb_entry(n, struct waiting_dir_move, node); |
2794 | if (ino < entry->ino) { | 2923 | if (ino < entry->ino) |
2795 | n = n->rb_left; | 2924 | n = n->rb_left; |
2796 | } else if (ino > entry->ino) { | 2925 | else if (ino > entry->ino) |
2797 | n = n->rb_right; | 2926 | n = n->rb_right; |
2798 | } else { | 2927 | else |
2799 | rb_erase(&entry->node, &sctx->waiting_dir_moves); | 2928 | return entry; |
2800 | kfree(entry); | ||
2801 | return 0; | ||
2802 | } | ||
2803 | } | 2929 | } |
2804 | return -ENOENT; | 2930 | return NULL; |
2805 | } | 2931 | } |
2806 | 2932 | ||
2807 | static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) | 2933 | static void free_waiting_dir_move(struct send_ctx *sctx, |
2934 | struct waiting_dir_move *dm) | ||
2935 | { | ||
2936 | if (!dm) | ||
2937 | return; | ||
2938 | rb_erase(&dm->node, &sctx->waiting_dir_moves); | ||
2939 | kfree(dm); | ||
2940 | } | ||
2941 | |||
2942 | static int add_pending_dir_move(struct send_ctx *sctx, | ||
2943 | u64 ino, | ||
2944 | u64 ino_gen, | ||
2945 | u64 parent_ino) | ||
2808 | { | 2946 | { |
2809 | struct rb_node **p = &sctx->pending_dir_moves.rb_node; | 2947 | struct rb_node **p = &sctx->pending_dir_moves.rb_node; |
2810 | struct rb_node *parent = NULL; | 2948 | struct rb_node *parent = NULL; |
2811 | struct pending_dir_move *entry, *pm; | 2949 | struct pending_dir_move *entry = NULL, *pm; |
2812 | struct recorded_ref *cur; | 2950 | struct recorded_ref *cur; |
2813 | int exists = 0; | 2951 | int exists = 0; |
2814 | int ret; | 2952 | int ret; |
@@ -2817,8 +2955,8 @@ static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) | |||
2817 | if (!pm) | 2955 | if (!pm) |
2818 | return -ENOMEM; | 2956 | return -ENOMEM; |
2819 | pm->parent_ino = parent_ino; | 2957 | pm->parent_ino = parent_ino; |
2820 | pm->ino = sctx->cur_ino; | 2958 | pm->ino = ino; |
2821 | pm->gen = sctx->cur_inode_gen; | 2959 | pm->gen = ino_gen; |
2822 | INIT_LIST_HEAD(&pm->list); | 2960 | INIT_LIST_HEAD(&pm->list); |
2823 | INIT_LIST_HEAD(&pm->update_refs); | 2961 | INIT_LIST_HEAD(&pm->update_refs); |
2824 | RB_CLEAR_NODE(&pm->node); | 2962 | RB_CLEAR_NODE(&pm->node); |
@@ -2888,19 +3026,52 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2888 | { | 3026 | { |
2889 | struct fs_path *from_path = NULL; | 3027 | struct fs_path *from_path = NULL; |
2890 | struct fs_path *to_path = NULL; | 3028 | struct fs_path *to_path = NULL; |
3029 | struct fs_path *name = NULL; | ||
2891 | u64 orig_progress = sctx->send_progress; | 3030 | u64 orig_progress = sctx->send_progress; |
2892 | struct recorded_ref *cur; | 3031 | struct recorded_ref *cur; |
3032 | u64 parent_ino, parent_gen; | ||
3033 | struct waiting_dir_move *dm = NULL; | ||
3034 | u64 rmdir_ino = 0; | ||
2893 | int ret; | 3035 | int ret; |
2894 | 3036 | ||
3037 | name = fs_path_alloc(); | ||
2895 | from_path = fs_path_alloc(); | 3038 | from_path = fs_path_alloc(); |
2896 | if (!from_path) | 3039 | if (!name || !from_path) { |
2897 | return -ENOMEM; | 3040 | ret = -ENOMEM; |
3041 | goto out; | ||
3042 | } | ||
3043 | |||
3044 | dm = get_waiting_dir_move(sctx, pm->ino); | ||
3045 | ASSERT(dm); | ||
3046 | rmdir_ino = dm->rmdir_ino; | ||
3047 | free_waiting_dir_move(sctx, dm); | ||
2898 | 3048 | ||
2899 | sctx->send_progress = pm->ino; | 3049 | ret = get_first_ref(sctx->parent_root, pm->ino, |
2900 | ret = get_cur_path(sctx, pm->ino, pm->gen, from_path); | 3050 | &parent_ino, &parent_gen, name); |
2901 | if (ret < 0) | 3051 | if (ret < 0) |
2902 | goto out; | 3052 | goto out; |
2903 | 3053 | ||
3054 | if (parent_ino == sctx->cur_ino) { | ||
3055 | /* child only renamed, not moved */ | ||
3056 | ASSERT(parent_gen == sctx->cur_inode_gen); | ||
3057 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, | ||
3058 | from_path); | ||
3059 | if (ret < 0) | ||
3060 | goto out; | ||
3061 | ret = fs_path_add_path(from_path, name); | ||
3062 | if (ret < 0) | ||
3063 | goto out; | ||
3064 | } else { | ||
3065 | /* child moved and maybe renamed too */ | ||
3066 | sctx->send_progress = pm->ino; | ||
3067 | ret = get_cur_path(sctx, pm->ino, pm->gen, from_path); | ||
3068 | if (ret < 0) | ||
3069 | goto out; | ||
3070 | } | ||
3071 | |||
3072 | fs_path_free(name); | ||
3073 | name = NULL; | ||
3074 | |||
2904 | to_path = fs_path_alloc(); | 3075 | to_path = fs_path_alloc(); |
2905 | if (!to_path) { | 3076 | if (!to_path) { |
2906 | ret = -ENOMEM; | 3077 | ret = -ENOMEM; |
@@ -2908,9 +3079,6 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2908 | } | 3079 | } |
2909 | 3080 | ||
2910 | sctx->send_progress = sctx->cur_ino + 1; | 3081 | sctx->send_progress = sctx->cur_ino + 1; |
2911 | ret = del_waiting_dir_move(sctx, pm->ino); | ||
2912 | ASSERT(ret == 0); | ||
2913 | |||
2914 | ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); | 3082 | ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); |
2915 | if (ret < 0) | 3083 | if (ret < 0) |
2916 | goto out; | 3084 | goto out; |
@@ -2919,6 +3087,35 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2919 | if (ret < 0) | 3087 | if (ret < 0) |
2920 | goto out; | 3088 | goto out; |
2921 | 3089 | ||
3090 | if (rmdir_ino) { | ||
3091 | struct orphan_dir_info *odi; | ||
3092 | |||
3093 | odi = get_orphan_dir_info(sctx, rmdir_ino); | ||
3094 | if (!odi) { | ||
3095 | /* already deleted */ | ||
3096 | goto finish; | ||
3097 | } | ||
3098 | ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1); | ||
3099 | if (ret < 0) | ||
3100 | goto out; | ||
3101 | if (!ret) | ||
3102 | goto finish; | ||
3103 | |||
3104 | name = fs_path_alloc(); | ||
3105 | if (!name) { | ||
3106 | ret = -ENOMEM; | ||
3107 | goto out; | ||
3108 | } | ||
3109 | ret = get_cur_path(sctx, rmdir_ino, odi->gen, name); | ||
3110 | if (ret < 0) | ||
3111 | goto out; | ||
3112 | ret = send_rmdir(sctx, name); | ||
3113 | if (ret < 0) | ||
3114 | goto out; | ||
3115 | free_orphan_dir_info(sctx, odi); | ||
3116 | } | ||
3117 | |||
3118 | finish: | ||
2922 | ret = send_utimes(sctx, pm->ino, pm->gen); | 3119 | ret = send_utimes(sctx, pm->ino, pm->gen); |
2923 | if (ret < 0) | 3120 | if (ret < 0) |
2924 | goto out; | 3121 | goto out; |
@@ -2928,12 +3125,15 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
2928 | * and old parent(s). | 3125 | * and old parent(s). |
2929 | */ | 3126 | */ |
2930 | list_for_each_entry(cur, &pm->update_refs, list) { | 3127 | list_for_each_entry(cur, &pm->update_refs, list) { |
3128 | if (cur->dir == rmdir_ino) | ||
3129 | continue; | ||
2931 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); | 3130 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); |
2932 | if (ret < 0) | 3131 | if (ret < 0) |
2933 | goto out; | 3132 | goto out; |
2934 | } | 3133 | } |
2935 | 3134 | ||
2936 | out: | 3135 | out: |
3136 | fs_path_free(name); | ||
2937 | fs_path_free(from_path); | 3137 | fs_path_free(from_path); |
2938 | fs_path_free(to_path); | 3138 | fs_path_free(to_path); |
2939 | sctx->send_progress = orig_progress; | 3139 | sctx->send_progress = orig_progress; |
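The can_rmdir() and apply_dir_move() changes above cooperate: when a directory cannot be deleted yet because one of its children is still waiting to be moved, can_rmdir() records the directory in the orphan_dirs tree and stamps the child's waiting_dir_move entry with rmdir_ino; after apply_dir_move() performs that move it re-runs can_rmdir() and sends the deferred rmdir. A condensed sketch of the blocking side; the helper names are the patch's own, the wrapper function is illustrative only:

	/*
	 * Illustrative condensation of the "remember the blocker, retry later"
	 * logic used by can_rmdir(); error handling trimmed.
	 */
	static int note_rmdir_blocker(struct send_ctx *sctx, u64 dir, u64 dir_gen,
				      u64 child_ino)
	{
		struct waiting_dir_move *dm = get_waiting_dir_move(sctx, child_ino);
		struct orphan_dir_info *odi;

		if (!dm)
			return 1;			/* nothing blocks the rmdir */

		odi = add_orphan_dir_info(sctx, dir);	/* indexed by dir inode number */
		if (IS_ERR(odi))
			return PTR_ERR(odi);
		odi->gen = dir_gen;
		dm->rmdir_ino = dir;			/* apply_dir_move() retries later */
		return 0;				/* rmdir must wait */
	}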
@@ -3005,17 +3205,19 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3005 | int ret; | 3205 | int ret; |
3006 | u64 ino = parent_ref->dir; | 3206 | u64 ino = parent_ref->dir; |
3007 | u64 parent_ino_before, parent_ino_after; | 3207 | u64 parent_ino_before, parent_ino_after; |
3008 | u64 new_gen, old_gen; | 3208 | u64 old_gen; |
3009 | struct fs_path *path_before = NULL; | 3209 | struct fs_path *path_before = NULL; |
3010 | struct fs_path *path_after = NULL; | 3210 | struct fs_path *path_after = NULL; |
3011 | int len1, len2; | 3211 | int len1, len2; |
3012 | 3212 | int register_upper_dirs; | |
3013 | if (parent_ref->dir <= sctx->cur_ino) | 3213 | u64 gen; |
3014 | return 0; | ||
3015 | 3214 | ||
3016 | if (is_waiting_for_move(sctx, ino)) | 3215 | if (is_waiting_for_move(sctx, ino)) |
3017 | return 1; | 3216 | return 1; |
3018 | 3217 | ||
3218 | if (parent_ref->dir <= sctx->cur_ino) | ||
3219 | return 0; | ||
3220 | |||
3019 | ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen, | 3221 | ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen, |
3020 | NULL, NULL, NULL, NULL); | 3222 | NULL, NULL, NULL, NULL); |
3021 | if (ret == -ENOENT) | 3223 | if (ret == -ENOENT) |
@@ -3023,12 +3225,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3023 | else if (ret < 0) | 3225 | else if (ret < 0) |
3024 | return ret; | 3226 | return ret; |
3025 | 3227 | ||
3026 | ret = get_inode_info(sctx->send_root, ino, NULL, &new_gen, | 3228 | if (parent_ref->dir_gen != old_gen) |
3027 | NULL, NULL, NULL, NULL); | ||
3028 | if (ret < 0) | ||
3029 | return ret; | ||
3030 | |||
3031 | if (new_gen != old_gen) | ||
3032 | return 0; | 3229 | return 0; |
3033 | 3230 | ||
3034 | path_before = fs_path_alloc(); | 3231 | path_before = fs_path_alloc(); |
@@ -3051,7 +3248,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3051 | } | 3248 | } |
3052 | 3249 | ||
3053 | ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, | 3250 | ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, |
3054 | NULL, path_after); | 3251 | &gen, path_after); |
3055 | if (ret == -ENOENT) { | 3252 | if (ret == -ENOENT) { |
3056 | ret = 0; | 3253 | ret = 0; |
3057 | goto out; | 3254 | goto out; |
@@ -3061,13 +3258,67 @@ static int wait_for_parent_move(struct send_ctx *sctx, | |||
3061 | 3258 | ||
3062 | len1 = fs_path_len(path_before); | 3259 | len1 = fs_path_len(path_before); |
3063 | len2 = fs_path_len(path_after); | 3260 | len2 = fs_path_len(path_after); |
3064 | if ((parent_ino_before != parent_ino_after) && (len1 != len2 || | 3261 | if (parent_ino_before != parent_ino_after || len1 != len2 || |
3065 | memcmp(path_before->start, path_after->start, len1))) { | 3262 | memcmp(path_before->start, path_after->start, len1)) { |
3066 | ret = 1; | 3263 | ret = 1; |
3067 | goto out; | 3264 | goto out; |
3068 | } | 3265 | } |
3069 | ret = 0; | 3266 | ret = 0; |
3070 | 3267 | ||
3268 | /* | ||
3269 | * Ok, our new most direct ancestor has a higher inode number but | ||
3270 | * wasn't moved/renamed. So maybe some of the new ancestors higher in | ||
3271 | * the hierarchy have a higher inode number too *and* were renamed | ||
3272 | * or moved - in this case we need to wait for the ancestor's rename | ||
3273 | * or move operation before we can do the move/rename for the current | ||
3274 | * inode. | ||
3275 | */ | ||
3276 | register_upper_dirs = 0; | ||
3277 | ino = parent_ino_after; | ||
3278 | again: | ||
3279 | while ((ret == 0 || register_upper_dirs) && ino > sctx->cur_ino) { | ||
3280 | u64 parent_gen; | ||
3281 | |||
3282 | fs_path_reset(path_before); | ||
3283 | fs_path_reset(path_after); | ||
3284 | |||
3285 | ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, | ||
3286 | &parent_gen, path_after); | ||
3287 | if (ret < 0) | ||
3288 | goto out; | ||
3289 | ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, | ||
3290 | NULL, path_before); | ||
3291 | if (ret == -ENOENT) { | ||
3292 | ret = 0; | ||
3293 | break; | ||
3294 | } else if (ret < 0) { | ||
3295 | goto out; | ||
3296 | } | ||
3297 | |||
3298 | len1 = fs_path_len(path_before); | ||
3299 | len2 = fs_path_len(path_after); | ||
3300 | if (parent_ino_before != parent_ino_after || len1 != len2 || | ||
3301 | memcmp(path_before->start, path_after->start, len1)) { | ||
3302 | ret = 1; | ||
3303 | if (register_upper_dirs) { | ||
3304 | break; | ||
3305 | } else { | ||
3306 | register_upper_dirs = 1; | ||
3307 | ino = parent_ref->dir; | ||
3308 | gen = parent_ref->dir_gen; | ||
3309 | goto again; | ||
3310 | } | ||
3311 | } else if (register_upper_dirs) { | ||
3312 | ret = add_pending_dir_move(sctx, ino, gen, | ||
3313 | parent_ino_after); | ||
3314 | if (ret < 0 && ret != -EEXIST) | ||
3315 | goto out; | ||
3316 | } | ||
3317 | |||
3318 | ino = parent_ino_after; | ||
3319 | gen = parent_gen; | ||
3320 | } | ||
3321 | |||
3071 | out: | 3322 | out: |
3072 | fs_path_free(path_before); | 3323 | fs_path_free(path_before); |
3073 | fs_path_free(path_after); | 3324 | fs_path_free(path_after); |
@@ -3089,6 +3340,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) | |||
3089 | u64 ow_gen; | 3340 | u64 ow_gen; |
3090 | int did_overwrite = 0; | 3341 | int did_overwrite = 0; |
3091 | int is_orphan = 0; | 3342 | int is_orphan = 0; |
3343 | u64 last_dir_ino_rm = 0; | ||
3092 | 3344 | ||
3093 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | 3345 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); |
3094 | 3346 | ||
@@ -3227,9 +3479,14 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3227 | * dirs, we always have one new and one deleted | 3479 | * dirs, we always have one new and one deleted |
3228 | * ref. The deleted ref is ignored later. | 3480 | * ref. The deleted ref is ignored later. |
3229 | */ | 3481 | */ |
3230 | if (wait_for_parent_move(sctx, cur)) { | 3482 | ret = wait_for_parent_move(sctx, cur); |
3483 | if (ret < 0) | ||
3484 | goto out; | ||
3485 | if (ret) { | ||
3231 | ret = add_pending_dir_move(sctx, | 3486 | ret = add_pending_dir_move(sctx, |
3232 | cur->dir); | 3487 | sctx->cur_ino, |
3488 | sctx->cur_inode_gen, | ||
3489 | cur->dir); | ||
3233 | *pending_move = 1; | 3490 | *pending_move = 1; |
3234 | } else { | 3491 | } else { |
3235 | ret = send_rename(sctx, valid_path, | 3492 | ret = send_rename(sctx, valid_path, |
@@ -3259,7 +3516,8 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3259 | * later, we do this check again and rmdir it then if possible. | 3516 | * later, we do this check again and rmdir it then if possible. |
3260 | * See the use of check_dirs for more details. | 3517 | * See the use of check_dirs for more details. |
3261 | */ | 3518 | */ |
3262 | ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino); | 3519 | ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen, |
3520 | sctx->cur_ino); | ||
3263 | if (ret < 0) | 3521 | if (ret < 0) |
3264 | goto out; | 3522 | goto out; |
3265 | if (ret) { | 3523 | if (ret) { |
@@ -3350,8 +3608,10 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3350 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); | 3608 | ret = send_utimes(sctx, cur->dir, cur->dir_gen); |
3351 | if (ret < 0) | 3609 | if (ret < 0) |
3352 | goto out; | 3610 | goto out; |
3353 | } else if (ret == inode_state_did_delete) { | 3611 | } else if (ret == inode_state_did_delete && |
3354 | ret = can_rmdir(sctx, cur->dir, sctx->cur_ino); | 3612 | cur->dir != last_dir_ino_rm) { |
3613 | ret = can_rmdir(sctx, cur->dir, cur->dir_gen, | ||
3614 | sctx->cur_ino); | ||
3355 | if (ret < 0) | 3615 | if (ret < 0) |
3356 | goto out; | 3616 | goto out; |
3357 | if (ret) { | 3617 | if (ret) { |
@@ -3362,6 +3622,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3362 | ret = send_rmdir(sctx, valid_path); | 3622 | ret = send_rmdir(sctx, valid_path); |
3363 | if (ret < 0) | 3623 | if (ret < 0) |
3364 | goto out; | 3624 | goto out; |
3625 | last_dir_ino_rm = cur->dir; | ||
3365 | } | 3626 | } |
3366 | } | 3627 | } |
3367 | } | 3628 | } |
@@ -3375,9 +3636,8 @@ out: | |||
3375 | return ret; | 3636 | return ret; |
3376 | } | 3637 | } |
3377 | 3638 | ||
3378 | static int __record_new_ref(int num, u64 dir, int index, | 3639 | static int record_ref(struct btrfs_root *root, int num, u64 dir, int index, |
3379 | struct fs_path *name, | 3640 | struct fs_path *name, void *ctx, struct list_head *refs) |
3380 | void *ctx) | ||
3381 | { | 3641 | { |
3382 | int ret = 0; | 3642 | int ret = 0; |
3383 | struct send_ctx *sctx = ctx; | 3643 | struct send_ctx *sctx = ctx; |
@@ -3388,7 +3648,7 @@ static int __record_new_ref(int num, u64 dir, int index, | |||
3388 | if (!p) | 3648 | if (!p) |
3389 | return -ENOMEM; | 3649 | return -ENOMEM; |
3390 | 3650 | ||
3391 | ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, | 3651 | ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL, |
3392 | NULL, NULL); | 3652 | NULL, NULL); |
3393 | if (ret < 0) | 3653 | if (ret < 0) |
3394 | goto out; | 3654 | goto out; |
@@ -3400,7 +3660,7 @@ static int __record_new_ref(int num, u64 dir, int index, | |||
3400 | if (ret < 0) | 3660 | if (ret < 0) |
3401 | goto out; | 3661 | goto out; |
3402 | 3662 | ||
3403 | ret = record_ref(&sctx->new_refs, dir, gen, p); | 3663 | ret = __record_ref(refs, dir, gen, p); |
3404 | 3664 | ||
3405 | out: | 3665 | out: |
3406 | if (ret) | 3666 | if (ret) |
@@ -3408,37 +3668,23 @@ out: | |||
3408 | return ret; | 3668 | return ret; |
3409 | } | 3669 | } |
3410 | 3670 | ||
3671 | static int __record_new_ref(int num, u64 dir, int index, | ||
3672 | struct fs_path *name, | ||
3673 | void *ctx) | ||
3674 | { | ||
3675 | struct send_ctx *sctx = ctx; | ||
3676 | return record_ref(sctx->send_root, num, dir, index, name, | ||
3677 | ctx, &sctx->new_refs); | ||
3678 | } | ||
3679 | |||
3680 | |||
3411 | static int __record_deleted_ref(int num, u64 dir, int index, | 3681 | static int __record_deleted_ref(int num, u64 dir, int index, |
3412 | struct fs_path *name, | 3682 | struct fs_path *name, |
3413 | void *ctx) | 3683 | void *ctx) |
3414 | { | 3684 | { |
3415 | int ret = 0; | ||
3416 | struct send_ctx *sctx = ctx; | 3685 | struct send_ctx *sctx = ctx; |
3417 | struct fs_path *p; | 3686 | return record_ref(sctx->parent_root, num, dir, index, name, |
3418 | u64 gen; | 3687 | ctx, &sctx->deleted_refs); |
3419 | |||
3420 | p = fs_path_alloc(); | ||
3421 | if (!p) | ||
3422 | return -ENOMEM; | ||
3423 | |||
3424 | ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, | ||
3425 | NULL, NULL); | ||
3426 | if (ret < 0) | ||
3427 | goto out; | ||
3428 | |||
3429 | ret = get_cur_path(sctx, dir, gen, p); | ||
3430 | if (ret < 0) | ||
3431 | goto out; | ||
3432 | ret = fs_path_add_path(p, name); | ||
3433 | if (ret < 0) | ||
3434 | goto out; | ||
3435 | |||
3436 | ret = record_ref(&sctx->deleted_refs, dir, gen, p); | ||
3437 | |||
3438 | out: | ||
3439 | if (ret) | ||
3440 | fs_path_free(p); | ||
3441 | return ret; | ||
3442 | } | 3688 | } |
3443 | 3689 | ||
3444 | static int record_new_ref(struct send_ctx *sctx) | 3690 | static int record_new_ref(struct send_ctx *sctx) |
@@ -3619,21 +3865,31 @@ static int process_all_refs(struct send_ctx *sctx, | |||
3619 | root = sctx->parent_root; | 3865 | root = sctx->parent_root; |
3620 | cb = __record_deleted_ref; | 3866 | cb = __record_deleted_ref; |
3621 | } else { | 3867 | } else { |
3622 | BUG(); | 3868 | btrfs_err(sctx->send_root->fs_info, |
3869 | "Wrong command %d in process_all_refs", cmd); | ||
3870 | ret = -EINVAL; | ||
3871 | goto out; | ||
3623 | } | 3872 | } |
3624 | 3873 | ||
3625 | key.objectid = sctx->cmp_key->objectid; | 3874 | key.objectid = sctx->cmp_key->objectid; |
3626 | key.type = BTRFS_INODE_REF_KEY; | 3875 | key.type = BTRFS_INODE_REF_KEY; |
3627 | key.offset = 0; | 3876 | key.offset = 0; |
3628 | while (1) { | 3877 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
3629 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 3878 | if (ret < 0) |
3630 | if (ret < 0) | 3879 | goto out; |
3631 | goto out; | ||
3632 | if (ret) | ||
3633 | break; | ||
3634 | 3880 | ||
3881 | while (1) { | ||
3635 | eb = path->nodes[0]; | 3882 | eb = path->nodes[0]; |
3636 | slot = path->slots[0]; | 3883 | slot = path->slots[0]; |
3884 | if (slot >= btrfs_header_nritems(eb)) { | ||
3885 | ret = btrfs_next_leaf(root, path); | ||
3886 | if (ret < 0) | ||
3887 | goto out; | ||
3888 | else if (ret > 0) | ||
3889 | break; | ||
3890 | continue; | ||
3891 | } | ||
3892 | |||
3637 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 3893 | btrfs_item_key_to_cpu(eb, &found_key, slot); |
3638 | 3894 | ||
3639 | if (found_key.objectid != key.objectid || | 3895 | if (found_key.objectid != key.objectid || |
@@ -3642,11 +3898,10 @@ static int process_all_refs(struct send_ctx *sctx, | |||
3642 | break; | 3898 | break; |
3643 | 3899 | ||
3644 | ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); | 3900 | ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); |
3645 | btrfs_release_path(path); | ||
3646 | if (ret < 0) | 3901 | if (ret < 0) |
3647 | goto out; | 3902 | goto out; |
3648 | 3903 | ||
3649 | key.offset = found_key.offset + 1; | 3904 | path->slots[0]++; |
3650 | } | 3905 | } |
3651 | btrfs_release_path(path); | 3906 | btrfs_release_path(path); |
3652 | 3907 | ||
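[Editor's note] The rewritten process_all_refs() loop above replaces one btrfs_search_slot_for_read() per inode ref with a single initial search followed by in-leaf iteration. Stripped of the record-keeping, the traversal pattern the hunk switches to is the skeleton below, distilled from the added lines; the termination check is simplified here and the error labels are abbreviated.

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			/* walked off the current leaf, advance to the next one */
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;		/* no more leaves */
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)	/* simplified termination check */
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;	/* next item in the same leaf, no re-search */
	}
	btrfs_release_path(path);

The process_all_new_xattrs() hunk further down applies the same transformation.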
@@ -3927,19 +4182,25 @@ static int process_all_new_xattrs(struct send_ctx *sctx) | |||
3927 | key.objectid = sctx->cmp_key->objectid; | 4182 | key.objectid = sctx->cmp_key->objectid; |
3928 | key.type = BTRFS_XATTR_ITEM_KEY; | 4183 | key.type = BTRFS_XATTR_ITEM_KEY; |
3929 | key.offset = 0; | 4184 | key.offset = 0; |
3930 | while (1) { | 4185 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
3931 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 4186 | if (ret < 0) |
3932 | if (ret < 0) | 4187 | goto out; |
3933 | goto out; | ||
3934 | if (ret) { | ||
3935 | ret = 0; | ||
3936 | goto out; | ||
3937 | } | ||
3938 | 4188 | ||
4189 | while (1) { | ||
3939 | eb = path->nodes[0]; | 4190 | eb = path->nodes[0]; |
3940 | slot = path->slots[0]; | 4191 | slot = path->slots[0]; |
3941 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 4192 | if (slot >= btrfs_header_nritems(eb)) { |
4193 | ret = btrfs_next_leaf(root, path); | ||
4194 | if (ret < 0) { | ||
4195 | goto out; | ||
4196 | } else if (ret > 0) { | ||
4197 | ret = 0; | ||
4198 | break; | ||
4199 | } | ||
4200 | continue; | ||
4201 | } | ||
3942 | 4202 | ||
4203 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
3943 | if (found_key.objectid != key.objectid || | 4204 | if (found_key.objectid != key.objectid || |
3944 | found_key.type != key.type) { | 4205 | found_key.type != key.type) { |
3945 | ret = 0; | 4206 | ret = 0; |
@@ -3951,8 +4212,7 @@ static int process_all_new_xattrs(struct send_ctx *sctx) | |||
3951 | if (ret < 0) | 4212 | if (ret < 0) |
3952 | goto out; | 4213 | goto out; |
3953 | 4214 | ||
3954 | btrfs_release_path(path); | 4215 | path->slots[0]++; |
3955 | key.offset = found_key.offset + 1; | ||
3956 | } | 4216 | } |
3957 | 4217 | ||
3958 | out: | 4218 | out: |
@@ -3991,6 +4251,13 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) | |||
3991 | goto out; | 4251 | goto out; |
3992 | 4252 | ||
3993 | last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; | 4253 | last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; |
4254 | |||
4255 | /* initial readahead */ | ||
4256 | memset(&sctx->ra, 0, sizeof(struct file_ra_state)); | ||
4257 | file_ra_state_init(&sctx->ra, inode->i_mapping); | ||
4258 | btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index, | ||
4259 | last_index - index + 1); | ||
4260 | |||
3994 | while (index <= last_index) { | 4261 | while (index <= last_index) { |
3995 | unsigned cur_len = min_t(unsigned, len, | 4262 | unsigned cur_len = min_t(unsigned, len, |
3996 | PAGE_CACHE_SIZE - pg_offset); | 4263 | PAGE_CACHE_SIZE - pg_offset); |
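[Editor's note] The hunk above primes readahead for exactly the pages fill_read_buf() is about to touch, using the index/last_index values derived from offset and len by shifting. A stand-alone illustration of that arithmetic, assuming 4 KiB pages (shift of 12, the common x86 configuration) and hypothetical offset/len values:

	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */

	int main(void)
	{
		unsigned long long offset = 5000;	/* hypothetical read offset */
		unsigned long long len = 10000;		/* hypothetical read length */

		unsigned long long index = offset >> PAGE_SHIFT;
		unsigned long long last_index = (offset + len - 1) >> PAGE_SHIFT;

		/* the readahead window covers last_index - index + 1 pages */
		printf("pages %llu..%llu (%llu pages)\n",
		       index, last_index, last_index - index + 1);
		return 0;
	}

For these values the read spans bytes 5000..14999, so pages 1..3 are read ahead in one batch instead of being faulted in one by one inside the copy loop.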
@@ -4174,6 +4441,9 @@ static int send_hole(struct send_ctx *sctx, u64 end) | |||
4174 | p = fs_path_alloc(); | 4441 | p = fs_path_alloc(); |
4175 | if (!p) | 4442 | if (!p) |
4176 | return -ENOMEM; | 4443 | return -ENOMEM; |
4444 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | ||
4445 | if (ret < 0) | ||
4446 | goto tlv_put_failure; | ||
4177 | memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE); | 4447 | memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE); |
4178 | while (offset < end) { | 4448 | while (offset < end) { |
4179 | len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE); | 4449 | len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE); |
@@ -4181,9 +4451,6 @@ static int send_hole(struct send_ctx *sctx, u64 end) | |||
4181 | ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); | 4451 | ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); |
4182 | if (ret < 0) | 4452 | if (ret < 0) |
4183 | break; | 4453 | break; |
4184 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | ||
4185 | if (ret < 0) | ||
4186 | break; | ||
4187 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 4454 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); |
4188 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | 4455 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); |
4189 | TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len); | 4456 | TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len); |
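[Editor's note] In send_hole() the inode's path cannot change between the consecutive WRITE commands, so the hunk above computes it once before the loop instead of once per chunk. A condensed view of the post-patch shape, reconstructed from the added and remaining lines; the tail of the loop body is not visible in this hunk and is elided:

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		/* the same pre-computed path is reused for every chunk of zeroes */
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		/* ... send the command and advance offset (not shown in this hunk) ... */
	}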
@@ -4724,7 +4991,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | |||
4724 | 4991 | ||
4725 | if (S_ISREG(sctx->cur_inode_mode)) { | 4992 | if (S_ISREG(sctx->cur_inode_mode)) { |
4726 | if (need_send_hole(sctx)) { | 4993 | if (need_send_hole(sctx)) { |
4727 | if (sctx->cur_inode_last_extent == (u64)-1) { | 4994 | if (sctx->cur_inode_last_extent == (u64)-1 || |
4995 | sctx->cur_inode_last_extent < | ||
4996 | sctx->cur_inode_size) { | ||
4728 | ret = get_last_extent(sctx, (u64)-1); | 4997 | ret = get_last_extent(sctx, (u64)-1); |
4729 | if (ret) | 4998 | if (ret) |
4730 | goto out; | 4999 | goto out; |
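[Editor's note] The widened condition above re-runs get_last_extent() not only when no extent was recorded at all (the (u64)-1 case) but also when the recorded last extent ends before i_size, which appears to cover files that end in a hole. A toy, user-space illustration with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* hypothetical inode state at finish time */
		uint64_t cur_inode_size = 1024 * 1024;		/* i_size: 1 MiB */
		uint64_t cur_inode_last_extent = 768 * 1024;	/* extents stop at 768 KiB */

		/* the widened test: also refresh when the file ends in a hole */
		if (cur_inode_last_extent == UINT64_MAX ||
		    cur_inode_last_extent < cur_inode_size)
			printf("trailing hole of %llu bytes still needs to be emitted\n",
			       (unsigned long long)(cur_inode_size - cur_inode_last_extent));
		return 0;
	}

With only the (u64)-1 test, the trailing 256 KiB in this example would never be considered by the hole-sending logic.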
@@ -4763,18 +5032,19 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | |||
4763 | ret = apply_children_dir_moves(sctx); | 5032 | ret = apply_children_dir_moves(sctx); |
4764 | if (ret) | 5033 | if (ret) |
4765 | goto out; | 5034 | goto out; |
5035 | /* | ||
5036 | * Need to send that every time, no matter if it actually | ||
5037 | * changed between the two trees as we have done changes to | ||
5038 | * the inode before. If our inode is a directory and it's | ||
5039 | * waiting to be moved/renamed, we will send its utimes when | ||
5040 | * it's moved/renamed, therefore we don't need to do it here. | ||
5041 | */ | ||
5042 | sctx->send_progress = sctx->cur_ino + 1; | ||
5043 | ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); | ||
5044 | if (ret < 0) | ||
5045 | goto out; | ||
4766 | } | 5046 | } |
4767 | 5047 | ||
4768 | /* | ||
4769 | * Need to send that every time, no matter if it actually | ||
4770 | * changed between the two trees as we have done changes to | ||
4771 | * the inode before. | ||
4772 | */ | ||
4773 | sctx->send_progress = sctx->cur_ino + 1; | ||
4774 | ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); | ||
4775 | if (ret < 0) | ||
4776 | goto out; | ||
4777 | |||
4778 | out: | 5048 | out: |
4779 | return ret; | 5049 | return ret; |
4780 | } | 5050 | } |
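[Editor's note] The hunk above moves the send_progress update and send_utimes() call from the end of finish_inode_if_needed() into the preceding block, matching the new comment: a directory that is still waiting to be moved/renamed gets its utimes when the move is performed. A sketch of the resulting control flow; the condition guarding this block is not visible in the hunk and is assumed here to be the "not waiting for a move/rename" check the comment refers to.

	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {	/* assumed guard */
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;

		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}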
@@ -4840,6 +5110,8 @@ static int changed_inode(struct send_ctx *sctx, | |||
4840 | sctx->left_path->nodes[0], left_ii); | 5110 | sctx->left_path->nodes[0], left_ii); |
4841 | sctx->cur_inode_mode = btrfs_inode_mode( | 5111 | sctx->cur_inode_mode = btrfs_inode_mode( |
4842 | sctx->left_path->nodes[0], left_ii); | 5112 | sctx->left_path->nodes[0], left_ii); |
5113 | sctx->cur_inode_rdev = btrfs_inode_rdev( | ||
5114 | sctx->left_path->nodes[0], left_ii); | ||
4843 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | 5115 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) |
4844 | ret = send_create_inode_if_needed(sctx); | 5116 | ret = send_create_inode_if_needed(sctx); |
4845 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { | 5117 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { |
@@ -4884,6 +5156,8 @@ static int changed_inode(struct send_ctx *sctx, | |||
4884 | sctx->left_path->nodes[0], left_ii); | 5156 | sctx->left_path->nodes[0], left_ii); |
4885 | sctx->cur_inode_mode = btrfs_inode_mode( | 5157 | sctx->cur_inode_mode = btrfs_inode_mode( |
4886 | sctx->left_path->nodes[0], left_ii); | 5158 | sctx->left_path->nodes[0], left_ii); |
5159 | sctx->cur_inode_rdev = btrfs_inode_rdev( | ||
5160 | sctx->left_path->nodes[0], left_ii); | ||
4887 | ret = send_create_inode_if_needed(sctx); | 5161 | ret = send_create_inode_if_needed(sctx); |
4888 | if (ret < 0) | 5162 | if (ret < 0) |
4889 | goto out; | 5163 | goto out; |
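[Editor's note] Both changed_inode() branches above now latch btrfs_inode_rdev() into sctx->cur_inode_rdev alongside size and mode. A plausible consumer, which is an assumption since the corresponding hunk is not part of this excerpt, is the inode-creation path: for device nodes it could emit the cached value instead of looking the inode up again, roughly as in this hypothetical sketch:

	/* hypothetical sketch; the real send_create_inode hunk is not shown here */
	if (S_ISCHR(sctx->cur_inode_mode) || S_ISBLK(sctx->cur_inode_mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV,
			    new_encode_dev(sctx->cur_inode_rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, sctx->cur_inode_mode);
	}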
@@ -5124,37 +5398,15 @@ static int full_send_tree(struct send_ctx *sctx) | |||
5124 | struct btrfs_path *path; | 5398 | struct btrfs_path *path; |
5125 | struct extent_buffer *eb; | 5399 | struct extent_buffer *eb; |
5126 | int slot; | 5400 | int slot; |
5127 | u64 start_ctransid; | ||
5128 | u64 ctransid; | ||
5129 | 5401 | ||
5130 | path = alloc_path_for_send(); | 5402 | path = alloc_path_for_send(); |
5131 | if (!path) | 5403 | if (!path) |
5132 | return -ENOMEM; | 5404 | return -ENOMEM; |
5133 | 5405 | ||
5134 | spin_lock(&send_root->root_item_lock); | ||
5135 | start_ctransid = btrfs_root_ctransid(&send_root->root_item); | ||
5136 | spin_unlock(&send_root->root_item_lock); | ||
5137 | |||
5138 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; | 5406 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; |
5139 | key.type = BTRFS_INODE_ITEM_KEY; | 5407 | key.type = BTRFS_INODE_ITEM_KEY; |
5140 | key.offset = 0; | 5408 | key.offset = 0; |
5141 | 5409 | ||
5142 | /* | ||
5143 | * Make sure the tree has not changed after re-joining. We detect this | ||
5144 | * by comparing start_ctransid and ctransid. They should always match. | ||
5145 | */ | ||
5146 | spin_lock(&send_root->root_item_lock); | ||
5147 | ctransid = btrfs_root_ctransid(&send_root->root_item); | ||
5148 | spin_unlock(&send_root->root_item_lock); | ||
5149 | |||
5150 | if (ctransid != start_ctransid) { | ||
5151 | WARN(1, KERN_WARNING "BTRFS: the root that you're trying to " | ||
5152 | "send was modified in between. This is " | ||
5153 | "probably a bug.\n"); | ||
5154 | ret = -EIO; | ||
5155 | goto out; | ||
5156 | } | ||
5157 | |||
5158 | ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); | 5410 | ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); |
5159 | if (ret < 0) | 5411 | if (ret < 0) |
5160 | goto out; | 5412 | goto out; |
@@ -5340,6 +5592,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) | |||
5340 | 5592 | ||
5341 | sctx->pending_dir_moves = RB_ROOT; | 5593 | sctx->pending_dir_moves = RB_ROOT; |
5342 | sctx->waiting_dir_moves = RB_ROOT; | 5594 | sctx->waiting_dir_moves = RB_ROOT; |
5595 | sctx->orphan_dirs = RB_ROOT; | ||
5343 | 5596 | ||
5344 | sctx->clone_roots = vzalloc(sizeof(struct clone_root) * | 5597 | sctx->clone_roots = vzalloc(sizeof(struct clone_root) * |
5345 | (arg->clone_sources_count + 1)); | 5598 | (arg->clone_sources_count + 1)); |
@@ -5435,7 +5688,9 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) | |||
5435 | NULL); | 5688 | NULL); |
5436 | sort_clone_roots = 1; | 5689 | sort_clone_roots = 1; |
5437 | 5690 | ||
5691 | current->journal_info = (void *)BTRFS_SEND_TRANS_STUB; | ||
5438 | ret = send_subvol(sctx); | 5692 | ret = send_subvol(sctx); |
5693 | current->journal_info = NULL; | ||
5439 | if (ret < 0) | 5694 | if (ret < 0) |
5440 | goto out; | 5695 | goto out; |
5441 | 5696 | ||
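[Editor's note] send_subvol() is now bracketed by setting current->journal_info to a stub value and clearing it afterwards, which reads as a per-task marker so that code reached underneath can tell it is running on behalf of send; the consumers of BTRFS_SEND_TRANS_STUB are outside this excerpt, so that reading is an interpretation. As a generic, user-space illustration of the per-task sentinel pattern only — this is not btrfs code:

	#include <stdio.h>

	/* stand-in for current->journal_info; one slot per thread */
	static __thread void *journal_info;

	#define SEND_TRANS_STUB ((void *)1)	/* hypothetical sentinel value */

	static void start_transaction(void)
	{
		if (journal_info == SEND_TRANS_STUB) {
			/* callee detects the send context and takes a special path */
			printf("called under send, taking the special path\n");
			return;
		}
		printf("normal transaction start\n");
	}

	static void send_subvol(void)
	{
		start_transaction();	/* something deep in the call chain */
	}

	int main(void)
	{
		journal_info = SEND_TRANS_STUB;
		send_subvol();
		journal_info = NULL;

		start_transaction();	/* outside the bracket: normal behaviour */
		return 0;
	}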
@@ -5477,6 +5732,16 @@ out: | |||
5477 | kfree(dm); | 5732 | kfree(dm); |
5478 | } | 5733 | } |
5479 | 5734 | ||
5735 | WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs)); | ||
5736 | while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) { | ||
5737 | struct rb_node *n; | ||
5738 | struct orphan_dir_info *odi; | ||
5739 | |||
5740 | n = rb_first(&sctx->orphan_dirs); | ||
5741 | odi = rb_entry(n, struct orphan_dir_info, node); | ||
5742 | free_orphan_dir_info(sctx, odi); | ||
5743 | } | ||
5744 | |||
5480 | if (sort_clone_roots) { | 5745 | if (sort_clone_roots) { |
5481 | for (i = 0; i < sctx->clone_roots_cnt; i++) | 5746 | for (i = 0; i < sctx->clone_roots_cnt; i++) |
5482 | btrfs_root_dec_send_in_progress( | 5747 | btrfs_root_dec_send_in_progress( |