author    | Alexander Block <ablock84@googlemail.com> | 2012-07-25 17:19:24 -0400
committer | Alexander Block <ablock84@googlemail.com> | 2012-07-25 17:30:19 -0400
commit    | 31db9f7c23fbf7e95026143f79645de6507b583b (patch)
tree      | a3e416143bd08d8daca762f85a6d260e49c56c61 /fs/btrfs/send.c
parent    | 7069830a9e381e33d44ded45095f764844c71d24 (diff)
Btrfs: introduce BTRFS_IOC_SEND for btrfs send/receive
This patch introduces the BTRFS_IOC_SEND ioctl that is
required for send. It allows btrfs-progs to implement
full and incremental sends. Patches for btrfs-progs will
follow.
Signed-off-by: Alexander Block <ablock84@googlemail.com>
Reviewed-by: David Sterba <dave@jikos.cz>
Reviewed-by: Arne Jansen <sensille@gmx.net>
Reviewed-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
Reviewed-by: Alex Lyakas <alex.bolshoy.btrfs@gmail.com>
Diffstat (limited to 'fs/btrfs/send.c')
-rw-r--r-- | fs/btrfs/send.c | 4570
1 files changed, 4570 insertions, 0 deletions
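The new ioctl is driven entirely from user space: btrfs-progs opens the read-only snapshot to be sent, passes a file descriptor that the kernel writes the send stream to, and optionally supplies a parent root and clone sources for incremental sends. The following is a minimal sketch of a full send; the struct layout and ioctl number mirror the companion ioctl.h change in this series and are assumptions here, not part of this file.

```c
/*
 * Hypothetical minimal driver for BTRFS_IOC_SEND (full send, no parent,
 * no clone sources). Struct layout and ioctl number are assumed to match
 * the ioctl.h change that accompanies this patch.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct btrfs_ioctl_send_args {
	int64_t send_fd;		/* in: fd the send stream is written to */
	uint64_t clone_sources_count;	/* in: number of entries in clone_sources */
	uint64_t *clone_sources;	/* in: array of clone source root ids */
	uint64_t parent_root;		/* in: root id of the parent snapshot, 0 = full send */
	uint64_t flags;			/* in: currently unused, must be 0 */
	uint64_t reserved[4];		/* in: must be 0 */
};

#define BTRFS_IOCTL_MAGIC	0x94
#define BTRFS_IOC_SEND		_IOW(BTRFS_IOCTL_MAGIC, 38, struct btrfs_ioctl_send_args)

int main(int argc, char **argv)
{
	struct btrfs_ioctl_send_args args = { 0 };
	int subvol_fd, out_fd, ret;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <ro-snapshot> <outfile>\n", argv[0]);
		return 1;
	}

	/* fd of the read-only snapshot the ioctl is issued on */
	subvol_fd = open(argv[1], O_RDONLY);
	/* fd the kernel writes the serialized send stream to */
	out_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (subvol_fd < 0 || out_fd < 0) {
		perror("open");
		return 1;
	}

	args.send_fd = out_fd;
	args.parent_root = 0;		/* no parent: full send */
	args.clone_sources = NULL;
	args.clone_sources_count = 0;

	ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
	if (ret < 0)
		perror("BTRFS_IOC_SEND");

	close(out_fd);
	close(subvol_fd);
	return ret < 0;
}
```

For an incremental send, btrfs-progs would additionally set parent_root to the root id of the parent snapshot and typically list it (and other related read-only snapshots) in clone_sources.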
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
new file mode 100644
index 000000000000..5394cb75012a
--- /dev/null
+++ b/fs/btrfs/send.c
@@ -0,0 +1,4570 @@
1 | /* | ||
2 | * Copyright (C) 2012 Alexander Block. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public | ||
6 | * License v2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public | ||
14 | * License along with this program; if not, write to the | ||
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
16 | * Boston, MA 021110-1307, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/bsearch.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/file.h> | ||
22 | #include <linux/sort.h> | ||
23 | #include <linux/mount.h> | ||
24 | #include <linux/xattr.h> | ||
25 | #include <linux/posix_acl_xattr.h> | ||
26 | #include <linux/radix-tree.h> | ||
27 | #include <linux/crc32c.h> | ||
28 | |||
29 | #include "send.h" | ||
30 | #include "backref.h" | ||
31 | #include "locking.h" | ||
32 | #include "disk-io.h" | ||
33 | #include "btrfs_inode.h" | ||
34 | #include "transaction.h" | ||
35 | |||
36 | static int g_verbose = 0; | ||
37 | |||
38 | #define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__) | ||
39 | |||
40 | /* | ||
41 | * A fs_path is a helper to dynamically build path names with unknown size. | ||
42 | * It reallocates the internal buffer on demand. | ||
43 | * It allows fast adding of path elements on the right side (normal path) and | ||
44 | * fast adding to the left side (reversed path). A reversed path can also be | ||
45 | * unreversed if needed. | ||
46 | */ | ||
47 | struct fs_path { | ||
48 | union { | ||
49 | struct { | ||
50 | char *start; | ||
51 | char *end; | ||
52 | char *prepared; | ||
53 | |||
54 | char *buf; | ||
55 | int buf_len; | ||
56 | int reversed:1; | ||
57 | int virtual_mem:1; | ||
58 | char inline_buf[]; | ||
59 | }; | ||
60 | char pad[PAGE_SIZE]; | ||
61 | }; | ||
62 | }; | ||
63 | #define FS_PATH_INLINE_SIZE \ | ||
64 | (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf)) | ||
65 | |||
66 | |||
67 | /* reused for each extent */ | ||
68 | struct clone_root { | ||
69 | struct btrfs_root *root; | ||
70 | u64 ino; | ||
71 | u64 offset; | ||
72 | |||
73 | u64 found_refs; | ||
74 | }; | ||
75 | |||
76 | #define SEND_CTX_MAX_NAME_CACHE_SIZE 128 | ||
77 | #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2) | ||
78 | |||
79 | struct send_ctx { | ||
80 | struct file *send_filp; | ||
81 | loff_t send_off; | ||
82 | char *send_buf; | ||
83 | u32 send_size; | ||
84 | u32 send_max_size; | ||
85 | u64 total_send_size; | ||
86 | u64 cmd_send_size[BTRFS_SEND_C_MAX + 1]; | ||
87 | |||
88 | struct vfsmount *mnt; | ||
89 | |||
90 | struct btrfs_root *send_root; | ||
91 | struct btrfs_root *parent_root; | ||
92 | struct clone_root *clone_roots; | ||
93 | int clone_roots_cnt; | ||
94 | |||
95 | /* current state of the compare_tree call */ | ||
96 | struct btrfs_path *left_path; | ||
97 | struct btrfs_path *right_path; | ||
98 | struct btrfs_key *cmp_key; | ||
99 | |||
100 | /* | ||
101 | * info about the currently processed inode. In case of deleted inodes, | ||
102 | * these are the values from the deleted inode. | ||
103 | */ | ||
104 | u64 cur_ino; | ||
105 | u64 cur_inode_gen; | ||
106 | int cur_inode_new; | ||
107 | int cur_inode_new_gen; | ||
108 | int cur_inode_deleted; | ||
109 | int cur_inode_first_ref_orphan; | ||
110 | u64 cur_inode_size; | ||
111 | u64 cur_inode_mode; | ||
112 | |||
113 | u64 send_progress; | ||
114 | |||
115 | struct list_head new_refs; | ||
116 | struct list_head deleted_refs; | ||
117 | |||
118 | struct radix_tree_root name_cache; | ||
119 | struct list_head name_cache_list; | ||
120 | int name_cache_size; | ||
121 | |||
122 | struct file *cur_inode_filp; | ||
123 | char *read_buf; | ||
124 | }; | ||
125 | |||
126 | struct name_cache_entry { | ||
127 | struct list_head list; | ||
128 | struct list_head use_list; | ||
129 | u64 ino; | ||
130 | u64 gen; | ||
131 | u64 parent_ino; | ||
132 | u64 parent_gen; | ||
133 | int ret; | ||
134 | int need_later_update; | ||
135 | int name_len; | ||
136 | char name[]; | ||
137 | }; | ||
138 | |||
139 | static void fs_path_reset(struct fs_path *p) | ||
140 | { | ||
141 | if (p->reversed) { | ||
142 | p->start = p->buf + p->buf_len - 1; | ||
143 | p->end = p->start; | ||
144 | *p->start = 0; | ||
145 | } else { | ||
146 | p->start = p->buf; | ||
147 | p->end = p->start; | ||
148 | *p->start = 0; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | static struct fs_path *fs_path_alloc(struct send_ctx *sctx) | ||
153 | { | ||
154 | struct fs_path *p; | ||
155 | |||
156 | p = kmalloc(sizeof(*p), GFP_NOFS); | ||
157 | if (!p) | ||
158 | return NULL; | ||
159 | p->reversed = 0; | ||
160 | p->virtual_mem = 0; | ||
161 | p->buf = p->inline_buf; | ||
162 | p->buf_len = FS_PATH_INLINE_SIZE; | ||
163 | fs_path_reset(p); | ||
164 | return p; | ||
165 | } | ||
166 | |||
167 | static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx) | ||
168 | { | ||
169 | struct fs_path *p; | ||
170 | |||
171 | p = fs_path_alloc(sctx); | ||
172 | if (!p) | ||
173 | return NULL; | ||
174 | p->reversed = 1; | ||
175 | fs_path_reset(p); | ||
176 | return p; | ||
177 | } | ||
178 | |||
179 | static void fs_path_free(struct send_ctx *sctx, struct fs_path *p) | ||
180 | { | ||
181 | if (!p) | ||
182 | return; | ||
183 | if (p->buf != p->inline_buf) { | ||
184 | if (p->virtual_mem) | ||
185 | vfree(p->buf); | ||
186 | else | ||
187 | kfree(p->buf); | ||
188 | } | ||
189 | kfree(p); | ||
190 | } | ||
191 | |||
192 | static int fs_path_len(struct fs_path *p) | ||
193 | { | ||
194 | return p->end - p->start; | ||
195 | } | ||
196 | |||
197 | static int fs_path_ensure_buf(struct fs_path *p, int len) | ||
198 | { | ||
199 | char *tmp_buf; | ||
200 | int path_len; | ||
201 | int old_buf_len; | ||
202 | |||
203 | len++; | ||
204 | |||
205 | if (p->buf_len >= len) | ||
206 | return 0; | ||
207 | |||
208 | path_len = p->end - p->start; | ||
209 | old_buf_len = p->buf_len; | ||
210 | len = PAGE_ALIGN(len); | ||
211 | |||
212 | if (p->buf == p->inline_buf) { | ||
213 | tmp_buf = kmalloc(len, GFP_NOFS); | ||
214 | if (!tmp_buf) { | ||
215 | tmp_buf = vmalloc(len); | ||
216 | if (!tmp_buf) | ||
217 | return -ENOMEM; | ||
218 | p->virtual_mem = 1; | ||
219 | } | ||
220 | memcpy(tmp_buf, p->buf, p->buf_len); | ||
221 | p->buf = tmp_buf; | ||
222 | p->buf_len = len; | ||
223 | } else { | ||
224 | if (p->virtual_mem) { | ||
225 | tmp_buf = vmalloc(len); | ||
226 | if (!tmp_buf) | ||
227 | return -ENOMEM; | ||
228 | memcpy(tmp_buf, p->buf, p->buf_len); | ||
229 | vfree(p->buf); | ||
230 | } else { | ||
231 | tmp_buf = krealloc(p->buf, len, GFP_NOFS); | ||
232 | if (!tmp_buf) { | ||
233 | tmp_buf = vmalloc(len); | ||
234 | if (!tmp_buf) | ||
235 | return -ENOMEM; | ||
236 | memcpy(tmp_buf, p->buf, p->buf_len); | ||
237 | kfree(p->buf); | ||
238 | p->virtual_mem = 1; | ||
239 | } | ||
240 | } | ||
241 | p->buf = tmp_buf; | ||
242 | p->buf_len = len; | ||
243 | } | ||
244 | if (p->reversed) { | ||
245 | tmp_buf = p->buf + old_buf_len - path_len - 1; | ||
246 | p->end = p->buf + p->buf_len - 1; | ||
247 | p->start = p->end - path_len; | ||
248 | memmove(p->start, tmp_buf, path_len + 1); | ||
249 | } else { | ||
250 | p->start = p->buf; | ||
251 | p->end = p->start + path_len; | ||
252 | } | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | static int fs_path_prepare_for_add(struct fs_path *p, int name_len) | ||
257 | { | ||
258 | int ret; | ||
259 | int new_len; | ||
260 | |||
261 | new_len = p->end - p->start + name_len; | ||
262 | if (p->start != p->end) | ||
263 | new_len++; | ||
264 | ret = fs_path_ensure_buf(p, new_len); | ||
265 | if (ret < 0) | ||
266 | goto out; | ||
267 | |||
268 | if (p->reversed) { | ||
269 | if (p->start != p->end) | ||
270 | *--p->start = '/'; | ||
271 | p->start -= name_len; | ||
272 | p->prepared = p->start; | ||
273 | } else { | ||
274 | if (p->start != p->end) | ||
275 | *p->end++ = '/'; | ||
276 | p->prepared = p->end; | ||
277 | p->end += name_len; | ||
278 | *p->end = 0; | ||
279 | } | ||
280 | |||
281 | out: | ||
282 | return ret; | ||
283 | } | ||
284 | |||
285 | static int fs_path_add(struct fs_path *p, const char *name, int name_len) | ||
286 | { | ||
287 | int ret; | ||
288 | |||
289 | ret = fs_path_prepare_for_add(p, name_len); | ||
290 | if (ret < 0) | ||
291 | goto out; | ||
292 | memcpy(p->prepared, name, name_len); | ||
293 | p->prepared = NULL; | ||
294 | |||
295 | out: | ||
296 | return ret; | ||
297 | } | ||
298 | |||
299 | static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) | ||
300 | { | ||
301 | int ret; | ||
302 | |||
303 | ret = fs_path_prepare_for_add(p, p2->end - p2->start); | ||
304 | if (ret < 0) | ||
305 | goto out; | ||
306 | memcpy(p->prepared, p2->start, p2->end - p2->start); | ||
307 | p->prepared = NULL; | ||
308 | |||
309 | out: | ||
310 | return ret; | ||
311 | } | ||
312 | |||
313 | static int fs_path_add_from_extent_buffer(struct fs_path *p, | ||
314 | struct extent_buffer *eb, | ||
315 | unsigned long off, int len) | ||
316 | { | ||
317 | int ret; | ||
318 | |||
319 | ret = fs_path_prepare_for_add(p, len); | ||
320 | if (ret < 0) | ||
321 | goto out; | ||
322 | |||
323 | read_extent_buffer(eb, p->prepared, off, len); | ||
324 | p->prepared = NULL; | ||
325 | |||
326 | out: | ||
327 | return ret; | ||
328 | } | ||
329 | |||
330 | static void fs_path_remove(struct fs_path *p) | ||
331 | { | ||
332 | BUG_ON(p->reversed); | ||
333 | while (p->start != p->end && *p->end != '/') | ||
334 | p->end--; | ||
335 | *p->end = 0; | ||
336 | } | ||
337 | |||
338 | static int fs_path_copy(struct fs_path *p, struct fs_path *from) | ||
339 | { | ||
340 | int ret; | ||
341 | |||
342 | p->reversed = from->reversed; | ||
343 | fs_path_reset(p); | ||
344 | |||
345 | ret = fs_path_add_path(p, from); | ||
346 | |||
347 | return ret; | ||
348 | } | ||
349 | |||
350 | |||
351 | static void fs_path_unreverse(struct fs_path *p) | ||
352 | { | ||
353 | char *tmp; | ||
354 | int len; | ||
355 | |||
356 | if (!p->reversed) | ||
357 | return; | ||
358 | |||
359 | tmp = p->start; | ||
360 | len = p->end - p->start; | ||
361 | p->start = p->buf; | ||
362 | p->end = p->start + len; | ||
363 | memmove(p->start, tmp, len + 1); | ||
364 | p->reversed = 0; | ||
365 | } | ||
366 | |||
367 | static struct btrfs_path *alloc_path_for_send(void) | ||
368 | { | ||
369 | struct btrfs_path *path; | ||
370 | |||
371 | path = btrfs_alloc_path(); | ||
372 | if (!path) | ||
373 | return NULL; | ||
374 | path->search_commit_root = 1; | ||
375 | path->skip_locking = 1; | ||
376 | return path; | ||
377 | } | ||
378 | |||
379 | static int write_buf(struct send_ctx *sctx, const void *buf, u32 len) | ||
380 | { | ||
381 | int ret; | ||
382 | mm_segment_t old_fs; | ||
383 | u32 pos = 0; | ||
384 | |||
385 | old_fs = get_fs(); | ||
386 | set_fs(KERNEL_DS); | ||
387 | |||
388 | while (pos < len) { | ||
389 | ret = vfs_write(sctx->send_filp, (char *)buf + pos, len - pos, | ||
390 | &sctx->send_off); | ||
391 | /* TODO handle that correctly */ | ||
392 | /*if (ret == -ERESTARTSYS) { | ||
393 | continue; | ||
394 | }*/ | ||
395 | if (ret < 0) | ||
396 | goto out; | ||
397 | if (ret == 0) { | ||
398 | ret = -EIO; | ||
399 | goto out; | ||
400 | } | ||
401 | pos += ret; | ||
402 | } | ||
403 | |||
404 | ret = 0; | ||
405 | |||
406 | out: | ||
407 | set_fs(old_fs); | ||
408 | return ret; | ||
409 | } | ||
410 | |||
411 | static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len) | ||
412 | { | ||
413 | struct btrfs_tlv_header *hdr; | ||
414 | int total_len = sizeof(*hdr) + len; | ||
415 | int left = sctx->send_max_size - sctx->send_size; | ||
416 | |||
417 | if (unlikely(left < total_len)) | ||
418 | return -EOVERFLOW; | ||
419 | |||
420 | hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size); | ||
421 | hdr->tlv_type = cpu_to_le16(attr); | ||
422 | hdr->tlv_len = cpu_to_le16(len); | ||
423 | memcpy(hdr + 1, data, len); | ||
424 | sctx->send_size += total_len; | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | #if 0 | ||
430 | static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value) | ||
431 | { | ||
432 | return tlv_put(sctx, attr, &value, sizeof(value)); | ||
433 | } | ||
434 | |||
435 | static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value) | ||
436 | { | ||
437 | __le16 tmp = cpu_to_le16(value); | ||
438 | return tlv_put(sctx, attr, &tmp, sizeof(tmp)); | ||
439 | } | ||
440 | |||
441 | static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value) | ||
442 | { | ||
443 | __le32 tmp = cpu_to_le32(value); | ||
444 | return tlv_put(sctx, attr, &tmp, sizeof(tmp)); | ||
445 | } | ||
446 | #endif | ||
447 | |||
448 | static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value) | ||
449 | { | ||
450 | __le64 tmp = cpu_to_le64(value); | ||
451 | return tlv_put(sctx, attr, &tmp, sizeof(tmp)); | ||
452 | } | ||
453 | |||
454 | static int tlv_put_string(struct send_ctx *sctx, u16 attr, | ||
455 | const char *str, int len) | ||
456 | { | ||
457 | if (len == -1) | ||
458 | len = strlen(str); | ||
459 | return tlv_put(sctx, attr, str, len); | ||
460 | } | ||
461 | |||
462 | static int tlv_put_uuid(struct send_ctx *sctx, u16 attr, | ||
463 | const u8 *uuid) | ||
464 | { | ||
465 | return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE); | ||
466 | } | ||
467 | |||
468 | #if 0 | ||
469 | static int tlv_put_timespec(struct send_ctx *sctx, u16 attr, | ||
470 | struct timespec *ts) | ||
471 | { | ||
472 | struct btrfs_timespec bts; | ||
473 | bts.sec = cpu_to_le64(ts->tv_sec); | ||
474 | bts.nsec = cpu_to_le32(ts->tv_nsec); | ||
475 | return tlv_put(sctx, attr, &bts, sizeof(bts)); | ||
476 | } | ||
477 | #endif | ||
478 | |||
479 | static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr, | ||
480 | struct extent_buffer *eb, | ||
481 | struct btrfs_timespec *ts) | ||
482 | { | ||
483 | struct btrfs_timespec bts; | ||
484 | read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts)); | ||
485 | return tlv_put(sctx, attr, &bts, sizeof(bts)); | ||
486 | } | ||
487 | |||
488 | |||
489 | #define TLV_PUT(sctx, attrtype, attrlen, data) \ | ||
490 | do { \ | ||
491 | ret = tlv_put(sctx, attrtype, attrlen, data); \ | ||
492 | if (ret < 0) \ | ||
493 | goto tlv_put_failure; \ | ||
494 | } while (0) | ||
495 | |||
496 | #define TLV_PUT_INT(sctx, attrtype, bits, value) \ | ||
497 | do { \ | ||
498 | ret = tlv_put_u##bits(sctx, attrtype, value); \ | ||
499 | if (ret < 0) \ | ||
500 | goto tlv_put_failure; \ | ||
501 | } while (0) | ||
502 | |||
503 | #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data) | ||
504 | #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data) | ||
505 | #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data) | ||
506 | #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data) | ||
507 | #define TLV_PUT_STRING(sctx, attrtype, str, len) \ | ||
508 | do { \ | ||
509 | ret = tlv_put_string(sctx, attrtype, str, len); \ | ||
510 | if (ret < 0) \ | ||
511 | goto tlv_put_failure; \ | ||
512 | } while (0) | ||
513 | #define TLV_PUT_PATH(sctx, attrtype, p) \ | ||
514 | do { \ | ||
515 | ret = tlv_put_string(sctx, attrtype, p->start, \ | ||
516 | p->end - p->start); \ | ||
517 | if (ret < 0) \ | ||
518 | goto tlv_put_failure; \ | ||
519 | } while(0) | ||
520 | #define TLV_PUT_UUID(sctx, attrtype, uuid) \ | ||
521 | do { \ | ||
522 | ret = tlv_put_uuid(sctx, attrtype, uuid); \ | ||
523 | if (ret < 0) \ | ||
524 | goto tlv_put_failure; \ | ||
525 | } while (0) | ||
526 | #define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \ | ||
527 | do { \ | ||
528 | ret = tlv_put_timespec(sctx, attrtype, ts); \ | ||
529 | if (ret < 0) \ | ||
530 | goto tlv_put_failure; \ | ||
531 | } while (0) | ||
532 | #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \ | ||
533 | do { \ | ||
534 | ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \ | ||
535 | if (ret < 0) \ | ||
536 | goto tlv_put_failure; \ | ||
537 | } while (0) | ||
538 | |||
539 | static int send_header(struct send_ctx *sctx) | ||
540 | { | ||
541 | struct btrfs_stream_header hdr; | ||
542 | |||
543 | strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); | ||
544 | hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION); | ||
545 | |||
546 | return write_buf(sctx, &hdr, sizeof(hdr)); | ||
547 | } | ||
548 | |||
549 | /* | ||
550 | * For each command/item we want to send to userspace, we call this function. | ||
551 | */ | ||
552 | static int begin_cmd(struct send_ctx *sctx, int cmd) | ||
553 | { | ||
554 | struct btrfs_cmd_header *hdr; | ||
555 | |||
556 | if (!sctx->send_buf) { | ||
557 | WARN_ON(1); | ||
558 | return -EINVAL; | ||
559 | } | ||
560 | |||
561 | BUG_ON(sctx->send_size); | ||
562 | |||
563 | sctx->send_size += sizeof(*hdr); | ||
564 | hdr = (struct btrfs_cmd_header *)sctx->send_buf; | ||
565 | hdr->cmd = cpu_to_le16(cmd); | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static int send_cmd(struct send_ctx *sctx) | ||
571 | { | ||
572 | int ret; | ||
573 | struct btrfs_cmd_header *hdr; | ||
574 | u32 crc; | ||
575 | |||
576 | hdr = (struct btrfs_cmd_header *)sctx->send_buf; | ||
577 | hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr)); | ||
578 | hdr->crc = 0; | ||
579 | |||
580 | crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); | ||
581 | hdr->crc = cpu_to_le32(crc); | ||
582 | |||
583 | ret = write_buf(sctx, sctx->send_buf, sctx->send_size); | ||
584 | |||
585 | sctx->total_send_size += sctx->send_size; | ||
586 | sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size; | ||
587 | sctx->send_size = 0; | ||
588 | |||
589 | return ret; | ||
590 | } | ||
591 | |||
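As built by send_header(), begin_cmd() and send_cmd() above, the stream written to send_fd is framed as a fixed stream header followed by a sequence of commands, each carrying TLV-encoded attributes and a crc32c computed over the whole command with the crc field zeroed. A rough sketch of that layout follows; the struct names match the accesses in this file, but their definitions live in send.h, so treat the exact field types here as assumptions.

```c
/*
 * On-the-wire layout of the send stream as produced above (sketch).
 * Field names follow the accesses in this file; types are assumptions.
 */

/* once, at the start of the stream (send_header) */
struct btrfs_stream_header {
	char magic[sizeof(BTRFS_SEND_STREAM_MAGIC)];	/* magic string incl. NUL */
	__le32 version;					/* BTRFS_SEND_STREAM_VERSION */
} __attribute__((__packed__));

/* one per command (begin_cmd/send_cmd), followed by its attribute data */
struct btrfs_cmd_header {
	__le32 len;	/* length of the attribute data, header excluded */
	__le16 cmd;	/* BTRFS_SEND_C_* */
	__le32 crc;	/* crc32c over header + data, computed with crc = 0 */
} __attribute__((__packed__));

/* one per attribute inside a command (tlv_put) */
struct btrfs_tlv_header {
	__le16 tlv_type;	/* BTRFS_SEND_A_* */
	__le16 tlv_len;		/* length of the value, header excluded */
	/* value bytes follow */
} __attribute__((__packed__));
```

Note that a whole command is staged in sctx->send_buf before it is written out, so all attributes added via the TLV_PUT* macros must fit into send_max_size; otherwise tlv_put() fails with -EOVERFLOW.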
592 | /* | ||
593 | * Sends a move instruction to user space | ||
594 | */ | ||
595 | static int send_rename(struct send_ctx *sctx, | ||
596 | struct fs_path *from, struct fs_path *to) | ||
597 | { | ||
598 | int ret; | ||
599 | |||
600 | verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start); | ||
601 | |||
602 | ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); | ||
603 | if (ret < 0) | ||
604 | goto out; | ||
605 | |||
606 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from); | ||
607 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to); | ||
608 | |||
609 | ret = send_cmd(sctx); | ||
610 | |||
611 | tlv_put_failure: | ||
612 | out: | ||
613 | return ret; | ||
614 | } | ||
615 | |||
616 | /* | ||
617 | * Sends a link instruction to user space | ||
618 | */ | ||
619 | static int send_link(struct send_ctx *sctx, | ||
620 | struct fs_path *path, struct fs_path *lnk) | ||
621 | { | ||
622 | int ret; | ||
623 | |||
624 | verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start); | ||
625 | |||
626 | ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); | ||
627 | if (ret < 0) | ||
628 | goto out; | ||
629 | |||
630 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | ||
631 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk); | ||
632 | |||
633 | ret = send_cmd(sctx); | ||
634 | |||
635 | tlv_put_failure: | ||
636 | out: | ||
637 | return ret; | ||
638 | } | ||
639 | |||
640 | /* | ||
641 | * Sends an unlink instruction to user space | ||
642 | */ | ||
643 | static int send_unlink(struct send_ctx *sctx, struct fs_path *path) | ||
644 | { | ||
645 | int ret; | ||
646 | |||
647 | verbose_printk("btrfs: send_unlink %s\n", path->start); | ||
648 | |||
649 | ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); | ||
650 | if (ret < 0) | ||
651 | goto out; | ||
652 | |||
653 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | ||
654 | |||
655 | ret = send_cmd(sctx); | ||
656 | |||
657 | tlv_put_failure: | ||
658 | out: | ||
659 | return ret; | ||
660 | } | ||
661 | |||
662 | /* | ||
663 | * Sends a rmdir instruction to user space | ||
664 | */ | ||
665 | static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) | ||
666 | { | ||
667 | int ret; | ||
668 | |||
669 | verbose_printk("btrfs: send_rmdir %s\n", path->start); | ||
670 | |||
671 | ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); | ||
672 | if (ret < 0) | ||
673 | goto out; | ||
674 | |||
675 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | ||
676 | |||
677 | ret = send_cmd(sctx); | ||
678 | |||
679 | tlv_put_failure: | ||
680 | out: | ||
681 | return ret; | ||
682 | } | ||
683 | |||
684 | /* | ||
685 | * Helper function to retrieve some fields from an inode item. | ||
686 | */ | ||
687 | static int get_inode_info(struct btrfs_root *root, | ||
688 | u64 ino, u64 *size, u64 *gen, | ||
689 | u64 *mode, u64 *uid, u64 *gid) | ||
690 | { | ||
691 | int ret; | ||
692 | struct btrfs_inode_item *ii; | ||
693 | struct btrfs_key key; | ||
694 | struct btrfs_path *path; | ||
695 | |||
696 | path = alloc_path_for_send(); | ||
697 | if (!path) | ||
698 | return -ENOMEM; | ||
699 | |||
700 | key.objectid = ino; | ||
701 | key.type = BTRFS_INODE_ITEM_KEY; | ||
702 | key.offset = 0; | ||
703 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
704 | if (ret < 0) | ||
705 | goto out; | ||
706 | if (ret) { | ||
707 | ret = -ENOENT; | ||
708 | goto out; | ||
709 | } | ||
710 | |||
711 | ii = btrfs_item_ptr(path->nodes[0], path->slots[0], | ||
712 | struct btrfs_inode_item); | ||
713 | if (size) | ||
714 | *size = btrfs_inode_size(path->nodes[0], ii); | ||
715 | if (gen) | ||
716 | *gen = btrfs_inode_generation(path->nodes[0], ii); | ||
717 | if (mode) | ||
718 | *mode = btrfs_inode_mode(path->nodes[0], ii); | ||
719 | if (uid) | ||
720 | *uid = btrfs_inode_uid(path->nodes[0], ii); | ||
721 | if (gid) | ||
722 | *gid = btrfs_inode_gid(path->nodes[0], ii); | ||
723 | |||
724 | out: | ||
725 | btrfs_free_path(path); | ||
726 | return ret; | ||
727 | } | ||
728 | |||
729 | typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index, | ||
730 | struct fs_path *p, | ||
731 | void *ctx); | ||
732 | |||
733 | /* | ||
734 | * Helper function to iterate the entries in ONE btrfs_inode_ref. | ||
735 | * The iterate callback may return a non-zero value to stop iteration. This can | ||
736 | * be a negative value for error codes or 1 to simply stop it. | ||
737 | * | ||
738 | * path must point to the INODE_REF when called. | ||
739 | */ | ||
740 | static int iterate_inode_ref(struct send_ctx *sctx, | ||
741 | struct btrfs_root *root, struct btrfs_path *path, | ||
742 | struct btrfs_key *found_key, int resolve, | ||
743 | iterate_inode_ref_t iterate, void *ctx) | ||
744 | { | ||
745 | struct extent_buffer *eb; | ||
746 | struct btrfs_item *item; | ||
747 | struct btrfs_inode_ref *iref; | ||
748 | struct btrfs_path *tmp_path; | ||
749 | struct fs_path *p; | ||
750 | u32 cur; | ||
751 | u32 len; | ||
752 | u32 total; | ||
753 | int slot; | ||
754 | u32 name_len; | ||
755 | char *start; | ||
756 | int ret = 0; | ||
757 | int num; | ||
758 | int index; | ||
759 | |||
760 | p = fs_path_alloc_reversed(sctx); | ||
761 | if (!p) | ||
762 | return -ENOMEM; | ||
763 | |||
764 | tmp_path = alloc_path_for_send(); | ||
765 | if (!tmp_path) { | ||
766 | fs_path_free(sctx, p); | ||
767 | return -ENOMEM; | ||
768 | } | ||
769 | |||
770 | eb = path->nodes[0]; | ||
771 | slot = path->slots[0]; | ||
772 | item = btrfs_item_nr(eb, slot); | ||
773 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); | ||
774 | cur = 0; | ||
775 | len = 0; | ||
776 | total = btrfs_item_size(eb, item); | ||
777 | |||
778 | num = 0; | ||
779 | while (cur < total) { | ||
780 | fs_path_reset(p); | ||
781 | |||
782 | name_len = btrfs_inode_ref_name_len(eb, iref); | ||
783 | index = btrfs_inode_ref_index(eb, iref); | ||
784 | if (resolve) { | ||
785 | start = btrfs_iref_to_path(root, tmp_path, iref, eb, | ||
786 | found_key->offset, p->buf, | ||
787 | p->buf_len); | ||
788 | if (IS_ERR(start)) { | ||
789 | ret = PTR_ERR(start); | ||
790 | goto out; | ||
791 | } | ||
792 | if (start < p->buf) { | ||
793 | /* overflow, try again with a larger buffer */ | ||
794 | ret = fs_path_ensure_buf(p, | ||
795 | p->buf_len + p->buf - start); | ||
796 | if (ret < 0) | ||
797 | goto out; | ||
798 | start = btrfs_iref_to_path(root, tmp_path, iref, | ||
799 | eb, found_key->offset, p->buf, | ||
800 | p->buf_len); | ||
801 | if (IS_ERR(start)) { | ||
802 | ret = PTR_ERR(start); | ||
803 | goto out; | ||
804 | } | ||
805 | BUG_ON(start < p->buf); | ||
806 | } | ||
807 | p->start = start; | ||
808 | } else { | ||
809 | ret = fs_path_add_from_extent_buffer(p, eb, | ||
810 | (unsigned long)(iref + 1), name_len); | ||
811 | if (ret < 0) | ||
812 | goto out; | ||
813 | } | ||
814 | |||
815 | |||
816 | len = sizeof(*iref) + name_len; | ||
817 | iref = (struct btrfs_inode_ref *)((char *)iref + len); | ||
818 | cur += len; | ||
819 | |||
820 | ret = iterate(num, found_key->offset, index, p, ctx); | ||
821 | if (ret) | ||
822 | goto out; | ||
823 | |||
824 | num++; | ||
825 | } | ||
826 | |||
827 | out: | ||
828 | btrfs_free_path(tmp_path); | ||
829 | fs_path_free(sctx, p); | ||
830 | return ret; | ||
831 | } | ||
832 | |||
833 | typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key, | ||
834 | const char *name, int name_len, | ||
835 | const char *data, int data_len, | ||
836 | u8 type, void *ctx); | ||
837 | |||
838 | /* | ||
839 | * Helper function to iterate the entries in ONE btrfs_dir_item. | ||
840 | * The iterate callback may return a non-zero value to stop iteration. This can | ||
841 | * be a negative value for error codes or 1 to simply stop it. | ||
842 | * | ||
843 | * path must point to the dir item when called. | ||
844 | */ | ||
845 | static int iterate_dir_item(struct send_ctx *sctx, | ||
846 | struct btrfs_root *root, struct btrfs_path *path, | ||
847 | struct btrfs_key *found_key, | ||
848 | iterate_dir_item_t iterate, void *ctx) | ||
849 | { | ||
850 | int ret = 0; | ||
851 | struct extent_buffer *eb; | ||
852 | struct btrfs_item *item; | ||
853 | struct btrfs_dir_item *di; | ||
854 | struct btrfs_path *tmp_path = NULL; | ||
855 | struct btrfs_key di_key; | ||
856 | char *buf = NULL; | ||
857 | char *buf2 = NULL; | ||
858 | int buf_len; | ||
859 | int buf_virtual = 0; | ||
860 | u32 name_len; | ||
861 | u32 data_len; | ||
862 | u32 cur; | ||
863 | u32 len; | ||
864 | u32 total; | ||
865 | int slot; | ||
866 | int num; | ||
867 | u8 type; | ||
868 | |||
869 | buf_len = PAGE_SIZE; | ||
870 | buf = kmalloc(buf_len, GFP_NOFS); | ||
871 | if (!buf) { | ||
872 | ret = -ENOMEM; | ||
873 | goto out; | ||
874 | } | ||
875 | |||
876 | tmp_path = alloc_path_for_send(); | ||
877 | if (!tmp_path) { | ||
878 | ret = -ENOMEM; | ||
879 | goto out; | ||
880 | } | ||
881 | |||
882 | eb = path->nodes[0]; | ||
883 | slot = path->slots[0]; | ||
884 | item = btrfs_item_nr(eb, slot); | ||
885 | di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); | ||
886 | cur = 0; | ||
887 | len = 0; | ||
888 | total = btrfs_item_size(eb, item); | ||
889 | |||
890 | num = 0; | ||
891 | while (cur < total) { | ||
892 | name_len = btrfs_dir_name_len(eb, di); | ||
893 | data_len = btrfs_dir_data_len(eb, di); | ||
894 | type = btrfs_dir_type(eb, di); | ||
895 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | ||
896 | |||
897 | if (name_len + data_len > buf_len) { | ||
898 | buf_len = PAGE_ALIGN(name_len + data_len); | ||
899 | if (buf_virtual) { | ||
900 | buf2 = vmalloc(buf_len); | ||
901 | if (!buf2) { | ||
902 | ret = -ENOMEM; | ||
903 | goto out; | ||
904 | } | ||
905 | vfree(buf); | ||
906 | } else { | ||
907 | buf2 = krealloc(buf, buf_len, GFP_NOFS); | ||
908 | if (!buf2) { | ||
909 | buf2 = vmalloc(buf_len); | ||
910 | if (!buf2) { | ||
911 | ret = -ENOMEM; | ||
912 | goto out; | ||
913 | } | ||
914 | kfree(buf); | ||
915 | buf_virtual = 1; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | buf = buf2; | ||
920 | buf2 = NULL; | ||
921 | } | ||
922 | |||
923 | read_extent_buffer(eb, buf, (unsigned long)(di + 1), | ||
924 | name_len + data_len); | ||
925 | |||
926 | len = sizeof(*di) + name_len + data_len; | ||
927 | di = (struct btrfs_dir_item *)((char *)di + len); | ||
928 | cur += len; | ||
929 | |||
930 | ret = iterate(num, &di_key, buf, name_len, buf + name_len, | ||
931 | data_len, type, ctx); | ||
932 | if (ret < 0) | ||
933 | goto out; | ||
934 | if (ret) { | ||
935 | ret = 0; | ||
936 | goto out; | ||
937 | } | ||
938 | |||
939 | num++; | ||
940 | } | ||
941 | |||
942 | out: | ||
943 | btrfs_free_path(tmp_path); | ||
944 | if (buf_virtual) | ||
945 | vfree(buf); | ||
946 | else | ||
947 | kfree(buf); | ||
948 | return ret; | ||
949 | } | ||
950 | |||
951 | static int __copy_first_ref(int num, u64 dir, int index, | ||
952 | struct fs_path *p, void *ctx) | ||
953 | { | ||
954 | int ret; | ||
955 | struct fs_path *pt = ctx; | ||
956 | |||
957 | ret = fs_path_copy(pt, p); | ||
958 | if (ret < 0) | ||
959 | return ret; | ||
960 | |||
961 | /* we want the first only */ | ||
962 | return 1; | ||
963 | } | ||
964 | |||
965 | /* | ||
966 | * Retrieve the first path of an inode. If an inode has more than one | ||
967 | * ref/hardlink, this is ignored. | ||
968 | */ | ||
969 | static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root, | ||
970 | u64 ino, struct fs_path *path) | ||
971 | { | ||
972 | int ret; | ||
973 | struct btrfs_key key, found_key; | ||
974 | struct btrfs_path *p; | ||
975 | |||
976 | p = alloc_path_for_send(); | ||
977 | if (!p) | ||
978 | return -ENOMEM; | ||
979 | |||
980 | fs_path_reset(path); | ||
981 | |||
982 | key.objectid = ino; | ||
983 | key.type = BTRFS_INODE_REF_KEY; | ||
984 | key.offset = 0; | ||
985 | |||
986 | ret = btrfs_search_slot_for_read(root, &key, p, 1, 0); | ||
987 | if (ret < 0) | ||
988 | goto out; | ||
989 | if (ret) { | ||
990 | ret = 1; | ||
991 | goto out; | ||
992 | } | ||
993 | btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); | ||
994 | if (found_key.objectid != ino || | ||
995 | found_key.type != BTRFS_INODE_REF_KEY) { | ||
996 | ret = -ENOENT; | ||
997 | goto out; | ||
998 | } | ||
999 | |||
1000 | ret = iterate_inode_ref(sctx, root, p, &found_key, 1, | ||
1001 | __copy_first_ref, path); | ||
1002 | if (ret < 0) | ||
1003 | goto out; | ||
1004 | ret = 0; | ||
1005 | |||
1006 | out: | ||
1007 | btrfs_free_path(p); | ||
1008 | return ret; | ||
1009 | } | ||
1010 | |||
1011 | struct backref_ctx { | ||
1012 | struct send_ctx *sctx; | ||
1013 | |||
1014 | /* number of total found references */ | ||
1015 | u64 found; | ||
1016 | |||
1017 | /* | ||
1018 | * Used for clones found in send_root. Clones found behind cur_objectid | ||
1019 | * and cur_offset are not considered allowed clones. | ||
1020 | */ | ||
1021 | u64 cur_objectid; | ||
1022 | u64 cur_offset; | ||
1023 | |||
1024 | /* may be truncated in case it's the last extent in a file */ | ||
1025 | u64 extent_len; | ||
1026 | |||
1027 | /* Just to check for bugs in backref resolving */ | ||
1028 | int found_in_send_root; | ||
1029 | }; | ||
1030 | |||
1031 | static int __clone_root_cmp_bsearch(const void *key, const void *elt) | ||
1032 | { | ||
1033 | u64 root = (u64)key; | ||
1034 | struct clone_root *cr = (struct clone_root *)elt; | ||
1035 | |||
1036 | if (root < cr->root->objectid) | ||
1037 | return -1; | ||
1038 | if (root > cr->root->objectid) | ||
1039 | return 1; | ||
1040 | return 0; | ||
1041 | } | ||
1042 | |||
1043 | static int __clone_root_cmp_sort(const void *e1, const void *e2) | ||
1044 | { | ||
1045 | struct clone_root *cr1 = (struct clone_root *)e1; | ||
1046 | struct clone_root *cr2 = (struct clone_root *)e2; | ||
1047 | |||
1048 | if (cr1->root->objectid < cr2->root->objectid) | ||
1049 | return -1; | ||
1050 | if (cr1->root->objectid > cr2->root->objectid) | ||
1051 | return 1; | ||
1052 | return 0; | ||
1053 | } | ||
1054 | |||
1055 | /* | ||
1056 | * Called for every backref that is found for the current extent. | ||
1057 | */ | ||
1058 | static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | ||
1059 | { | ||
1060 | struct backref_ctx *bctx = ctx_; | ||
1061 | struct clone_root *found; | ||
1062 | int ret; | ||
1063 | u64 i_size; | ||
1064 | |||
1065 | /* First check if the root is in the list of accepted clone sources */ | ||
1066 | found = bsearch((void *)root, bctx->sctx->clone_roots, | ||
1067 | bctx->sctx->clone_roots_cnt, | ||
1068 | sizeof(struct clone_root), | ||
1069 | __clone_root_cmp_bsearch); | ||
1070 | if (!found) | ||
1071 | return 0; | ||
1072 | |||
1073 | if (found->root == bctx->sctx->send_root && | ||
1074 | ino == bctx->cur_objectid && | ||
1075 | offset == bctx->cur_offset) { | ||
1076 | bctx->found_in_send_root = 1; | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * There are inodes that have extents that lie behind their i_size. Don't | ||
1081 | * accept clones from these extents. | ||
1082 | */ | ||
1083 | ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL); | ||
1084 | if (ret < 0) | ||
1085 | return ret; | ||
1086 | |||
1087 | if (offset + bctx->extent_len > i_size) | ||
1088 | return 0; | ||
1089 | |||
1090 | /* | ||
1091 | * Make sure we don't consider clones from send_root that are | ||
1092 | * behind the current inode/offset. | ||
1093 | */ | ||
1094 | if (found->root == bctx->sctx->send_root) { | ||
1095 | /* | ||
1096 | * TODO for the moment we don't accept clones from the inode | ||
1097 | * that is currently being sent. We may change this when | ||
1098 | * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same | ||
1099 | * file. | ||
1100 | */ | ||
1101 | if (ino >= bctx->cur_objectid) | ||
1102 | return 0; | ||
1103 | /*if (ino > ctx->cur_objectid) | ||
1104 | return 0; | ||
1105 | if (offset + ctx->extent_len > ctx->cur_offset) | ||
1106 | return 0;*/ | ||
1107 | |||
1108 | bctx->found++; | ||
1109 | found->found_refs++; | ||
1110 | found->ino = ino; | ||
1111 | found->offset = offset; | ||
1112 | return 0; | ||
1113 | } | ||
1114 | |||
1115 | bctx->found++; | ||
1116 | found->found_refs++; | ||
1117 | if (ino < found->ino) { | ||
1118 | found->ino = ino; | ||
1119 | found->offset = offset; | ||
1120 | } else if (found->ino == ino) { | ||
1121 | /* | ||
1122 | * same extent found more than once in the same file. | ||
1123 | */ | ||
1124 | if (found->offset > offset + bctx->extent_len) | ||
1125 | found->offset = offset; | ||
1126 | } | ||
1127 | |||
1128 | return 0; | ||
1129 | } | ||
1130 | |||
1131 | /* | ||
1132 | * path must point to the extent item when called. | ||
1133 | */ | ||
1134 | static int find_extent_clone(struct send_ctx *sctx, | ||
1135 | struct btrfs_path *path, | ||
1136 | u64 ino, u64 data_offset, | ||
1137 | u64 ino_size, | ||
1138 | struct clone_root **found) | ||
1139 | { | ||
1140 | int ret; | ||
1141 | int extent_type; | ||
1142 | u64 logical; | ||
1143 | u64 num_bytes; | ||
1144 | u64 extent_item_pos; | ||
1145 | struct btrfs_file_extent_item *fi; | ||
1146 | struct extent_buffer *eb = path->nodes[0]; | ||
1147 | struct backref_ctx backref_ctx; | ||
1148 | struct clone_root *cur_clone_root; | ||
1149 | struct btrfs_key found_key; | ||
1150 | struct btrfs_path *tmp_path; | ||
1151 | u32 i; | ||
1152 | |||
1153 | tmp_path = alloc_path_for_send(); | ||
1154 | if (!tmp_path) | ||
1155 | return -ENOMEM; | ||
1156 | |||
1157 | if (data_offset >= ino_size) { | ||
1158 | /* | ||
1159 | * There may be extents that lie behind the file's size. | ||
1160 | * I at least had this in combination with snapshotting while | ||
1161 | * writing large files. | ||
1162 | */ | ||
1163 | ret = 0; | ||
1164 | goto out; | ||
1165 | } | ||
1166 | |||
1167 | fi = btrfs_item_ptr(eb, path->slots[0], | ||
1168 | struct btrfs_file_extent_item); | ||
1169 | extent_type = btrfs_file_extent_type(eb, fi); | ||
1170 | if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | ||
1171 | ret = -ENOENT; | ||
1172 | goto out; | ||
1173 | } | ||
1174 | |||
1175 | num_bytes = btrfs_file_extent_num_bytes(eb, fi); | ||
1176 | logical = btrfs_file_extent_disk_bytenr(eb, fi); | ||
1177 | if (logical == 0) { | ||
1178 | ret = -ENOENT; | ||
1179 | goto out; | ||
1180 | } | ||
1181 | logical += btrfs_file_extent_offset(eb, fi); | ||
1182 | |||
1183 | ret = extent_from_logical(sctx->send_root->fs_info, | ||
1184 | logical, tmp_path, &found_key); | ||
1185 | btrfs_release_path(tmp_path); | ||
1186 | |||
1187 | if (ret < 0) | ||
1188 | goto out; | ||
1189 | if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | ||
1190 | ret = -EIO; | ||
1191 | goto out; | ||
1192 | } | ||
1193 | |||
1194 | /* | ||
1195 | * Setup the clone roots. | ||
1196 | */ | ||
1197 | for (i = 0; i < sctx->clone_roots_cnt; i++) { | ||
1198 | cur_clone_root = sctx->clone_roots + i; | ||
1199 | cur_clone_root->ino = (u64)-1; | ||
1200 | cur_clone_root->offset = 0; | ||
1201 | cur_clone_root->found_refs = 0; | ||
1202 | } | ||
1203 | |||
1204 | backref_ctx.sctx = sctx; | ||
1205 | backref_ctx.found = 0; | ||
1206 | backref_ctx.cur_objectid = ino; | ||
1207 | backref_ctx.cur_offset = data_offset; | ||
1208 | backref_ctx.found_in_send_root = 0; | ||
1209 | backref_ctx.extent_len = num_bytes; | ||
1210 | |||
1211 | /* | ||
1212 | * The last extent of a file may be too large due to page alignment. | ||
1213 | * We need to adjust extent_len in this case so that the checks in | ||
1214 | * __iterate_backrefs work. | ||
1215 | */ | ||
1216 | if (data_offset + num_bytes >= ino_size) | ||
1217 | backref_ctx.extent_len = ino_size - data_offset; | ||
1218 | |||
1219 | /* | ||
1220 | * Now collect all backrefs. | ||
1221 | */ | ||
1222 | extent_item_pos = logical - found_key.objectid; | ||
1223 | ret = iterate_extent_inodes(sctx->send_root->fs_info, | ||
1224 | found_key.objectid, extent_item_pos, 1, | ||
1225 | __iterate_backrefs, &backref_ctx); | ||
1226 | if (ret < 0) | ||
1227 | goto out; | ||
1228 | |||
1229 | if (!backref_ctx.found_in_send_root) { | ||
1230 | /* found a bug in backref code? */ | ||
1231 | ret = -EIO; | ||
1232 | printk(KERN_ERR "btrfs: ERROR did not find backref in " | ||
1233 | "send_root. inode=%llu, offset=%llu, " | ||
1234 | "logical=%llu\n", | ||
1235 | ino, data_offset, logical); | ||
1236 | goto out; | ||
1237 | } | ||
1238 | |||
1239 | verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " | ||
1240 | "ino=%llu, " | ||
1241 | "num_bytes=%llu, logical=%llu\n", | ||
1242 | data_offset, ino, num_bytes, logical); | ||
1243 | |||
1244 | if (!backref_ctx.found) | ||
1245 | verbose_printk("btrfs: no clones found\n"); | ||
1246 | |||
1247 | cur_clone_root = NULL; | ||
1248 | for (i = 0; i < sctx->clone_roots_cnt; i++) { | ||
1249 | if (sctx->clone_roots[i].found_refs) { | ||
1250 | if (!cur_clone_root) | ||
1251 | cur_clone_root = sctx->clone_roots + i; | ||
1252 | else if (sctx->clone_roots[i].root == sctx->send_root) | ||
1253 | /* prefer clones from send_root over others */ | ||
1254 | cur_clone_root = sctx->clone_roots + i; | ||
1255 | break; | ||
1256 | } | ||
1257 | |||
1258 | } | ||
1259 | |||
1260 | if (cur_clone_root) { | ||
1261 | *found = cur_clone_root; | ||
1262 | ret = 0; | ||
1263 | } else { | ||
1264 | ret = -ENOENT; | ||
1265 | } | ||
1266 | |||
1267 | out: | ||
1268 | btrfs_free_path(tmp_path); | ||
1269 | return ret; | ||
1270 | } | ||
1271 | |||
1272 | static int read_symlink(struct send_ctx *sctx, | ||
1273 | struct btrfs_root *root, | ||
1274 | u64 ino, | ||
1275 | struct fs_path *dest) | ||
1276 | { | ||
1277 | int ret; | ||
1278 | struct btrfs_path *path; | ||
1279 | struct btrfs_key key; | ||
1280 | struct btrfs_file_extent_item *ei; | ||
1281 | u8 type; | ||
1282 | u8 compression; | ||
1283 | unsigned long off; | ||
1284 | int len; | ||
1285 | |||
1286 | path = alloc_path_for_send(); | ||
1287 | if (!path) | ||
1288 | return -ENOMEM; | ||
1289 | |||
1290 | key.objectid = ino; | ||
1291 | key.type = BTRFS_EXTENT_DATA_KEY; | ||
1292 | key.offset = 0; | ||
1293 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
1294 | if (ret < 0) | ||
1295 | goto out; | ||
1296 | BUG_ON(ret); | ||
1297 | |||
1298 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | ||
1299 | struct btrfs_file_extent_item); | ||
1300 | type = btrfs_file_extent_type(path->nodes[0], ei); | ||
1301 | compression = btrfs_file_extent_compression(path->nodes[0], ei); | ||
1302 | BUG_ON(type != BTRFS_FILE_EXTENT_INLINE); | ||
1303 | BUG_ON(compression); | ||
1304 | |||
1305 | off = btrfs_file_extent_inline_start(ei); | ||
1306 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | ||
1307 | |||
1308 | ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); | ||
1309 | if (ret < 0) | ||
1310 | goto out; | ||
1311 | |||
1312 | out: | ||
1313 | btrfs_free_path(path); | ||
1314 | return ret; | ||
1315 | } | ||
1316 | |||
1317 | /* | ||
1318 | * Helper function to generate a file name that is unique in the root of | ||
1319 | * send_root and parent_root. This is used to generate names for orphan inodes. | ||
1320 | */ | ||
1321 | static int gen_unique_name(struct send_ctx *sctx, | ||
1322 | u64 ino, u64 gen, | ||
1323 | struct fs_path *dest) | ||
1324 | { | ||
1325 | int ret = 0; | ||
1326 | struct btrfs_path *path; | ||
1327 | struct btrfs_dir_item *di; | ||
1328 | char tmp[64]; | ||
1329 | int len; | ||
1330 | u64 idx = 0; | ||
1331 | |||
1332 | path = alloc_path_for_send(); | ||
1333 | if (!path) | ||
1334 | return -ENOMEM; | ||
1335 | |||
1336 | while (1) { | ||
1337 | len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu", | ||
1338 | ino, gen, idx); | ||
1339 | if (len >= sizeof(tmp)) { | ||
1340 | /* should really not happen */ | ||
1341 | ret = -EOVERFLOW; | ||
1342 | goto out; | ||
1343 | } | ||
1344 | |||
1345 | di = btrfs_lookup_dir_item(NULL, sctx->send_root, | ||
1346 | path, BTRFS_FIRST_FREE_OBJECTID, | ||
1347 | tmp, strlen(tmp), 0); | ||
1348 | btrfs_release_path(path); | ||
1349 | if (IS_ERR(di)) { | ||
1350 | ret = PTR_ERR(di); | ||
1351 | goto out; | ||
1352 | } | ||
1353 | if (di) { | ||
1354 | /* not unique, try again */ | ||
1355 | idx++; | ||
1356 | continue; | ||
1357 | } | ||
1358 | |||
1359 | if (!sctx->parent_root) { | ||
1360 | /* unique */ | ||
1361 | ret = 0; | ||
1362 | break; | ||
1363 | } | ||
1364 | |||
1365 | di = btrfs_lookup_dir_item(NULL, sctx->parent_root, | ||
1366 | path, BTRFS_FIRST_FREE_OBJECTID, | ||
1367 | tmp, strlen(tmp), 0); | ||
1368 | btrfs_release_path(path); | ||
1369 | if (IS_ERR(di)) { | ||
1370 | ret = PTR_ERR(di); | ||
1371 | goto out; | ||
1372 | } | ||
1373 | if (di) { | ||
1374 | /* not unique, try again */ | ||
1375 | idx++; | ||
1376 | continue; | ||
1377 | } | ||
1378 | /* unique */ | ||
1379 | break; | ||
1380 | } | ||
1381 | |||
1382 | ret = fs_path_add(dest, tmp, strlen(tmp)); | ||
1383 | |||
1384 | out: | ||
1385 | btrfs_free_path(path); | ||
1386 | return ret; | ||
1387 | } | ||
1388 | |||
1389 | enum inode_state { | ||
1390 | inode_state_no_change, | ||
1391 | inode_state_will_create, | ||
1392 | inode_state_did_create, | ||
1393 | inode_state_will_delete, | ||
1394 | inode_state_did_delete, | ||
1395 | }; | ||
1396 | |||
1397 | static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen) | ||
1398 | { | ||
1399 | int ret; | ||
1400 | int left_ret; | ||
1401 | int right_ret; | ||
1402 | u64 left_gen; | ||
1403 | u64 right_gen; | ||
1404 | |||
1405 | ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL, | ||
1406 | NULL); | ||
1407 | if (ret < 0 && ret != -ENOENT) | ||
1408 | goto out; | ||
1409 | left_ret = ret; | ||
1410 | |||
1411 | if (!sctx->parent_root) { | ||
1412 | right_ret = -ENOENT; | ||
1413 | } else { | ||
1414 | ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen, | ||
1415 | NULL, NULL, NULL); | ||
1416 | if (ret < 0 && ret != -ENOENT) | ||
1417 | goto out; | ||
1418 | right_ret = ret; | ||
1419 | } | ||
1420 | |||
1421 | if (!left_ret && !right_ret) { | ||
1422 | if (left_gen == gen && right_gen == gen) | ||
1423 | ret = inode_state_no_change; | ||
1424 | else if (left_gen == gen) { | ||
1425 | if (ino < sctx->send_progress) | ||
1426 | ret = inode_state_did_create; | ||
1427 | else | ||
1428 | ret = inode_state_will_create; | ||
1429 | } else if (right_gen == gen) { | ||
1430 | if (ino < sctx->send_progress) | ||
1431 | ret = inode_state_did_delete; | ||
1432 | else | ||
1433 | ret = inode_state_will_delete; | ||
1434 | } else { | ||
1435 | ret = -ENOENT; | ||
1436 | } | ||
1437 | } else if (!left_ret) { | ||
1438 | if (left_gen == gen) { | ||
1439 | if (ino < sctx->send_progress) | ||
1440 | ret = inode_state_did_create; | ||
1441 | else | ||
1442 | ret = inode_state_will_create; | ||
1443 | } else { | ||
1444 | ret = -ENOENT; | ||
1445 | } | ||
1446 | } else if (!right_ret) { | ||
1447 | if (right_gen == gen) { | ||
1448 | if (ino < sctx->send_progress) | ||
1449 | ret = inode_state_did_delete; | ||
1450 | else | ||
1451 | ret = inode_state_will_delete; | ||
1452 | } else { | ||
1453 | ret = -ENOENT; | ||
1454 | } | ||
1455 | } else { | ||
1456 | ret = -ENOENT; | ||
1457 | } | ||
1458 | |||
1459 | out: | ||
1460 | return ret; | ||
1461 | } | ||
1462 | |||
1463 | static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) | ||
1464 | { | ||
1465 | int ret; | ||
1466 | |||
1467 | ret = get_cur_inode_state(sctx, ino, gen); | ||
1468 | if (ret < 0) | ||
1469 | goto out; | ||
1470 | |||
1471 | if (ret == inode_state_no_change || | ||
1472 | ret == inode_state_did_create || | ||
1473 | ret == inode_state_will_delete) | ||
1474 | ret = 1; | ||
1475 | else | ||
1476 | ret = 0; | ||
1477 | |||
1478 | out: | ||
1479 | return ret; | ||
1480 | } | ||
1481 | |||
1482 | /* | ||
1483 | * Helper function to lookup a dir item in a dir. | ||
1484 | */ | ||
1485 | static int lookup_dir_item_inode(struct btrfs_root *root, | ||
1486 | u64 dir, const char *name, int name_len, | ||
1487 | u64 *found_inode, | ||
1488 | u8 *found_type) | ||
1489 | { | ||
1490 | int ret = 0; | ||
1491 | struct btrfs_dir_item *di; | ||
1492 | struct btrfs_key key; | ||
1493 | struct btrfs_path *path; | ||
1494 | |||
1495 | path = alloc_path_for_send(); | ||
1496 | if (!path) | ||
1497 | return -ENOMEM; | ||
1498 | |||
1499 | di = btrfs_lookup_dir_item(NULL, root, path, | ||
1500 | dir, name, name_len, 0); | ||
1501 | if (!di) { | ||
1502 | ret = -ENOENT; | ||
1503 | goto out; | ||
1504 | } | ||
1505 | if (IS_ERR(di)) { | ||
1506 | ret = PTR_ERR(di); | ||
1507 | goto out; | ||
1508 | } | ||
1509 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); | ||
1510 | *found_inode = key.objectid; | ||
1511 | *found_type = btrfs_dir_type(path->nodes[0], di); | ||
1512 | |||
1513 | out: | ||
1514 | btrfs_free_path(path); | ||
1515 | return ret; | ||
1516 | } | ||
1517 | |||
1518 | static int get_first_ref(struct send_ctx *sctx, | ||
1519 | struct btrfs_root *root, u64 ino, | ||
1520 | u64 *dir, u64 *dir_gen, struct fs_path *name) | ||
1521 | { | ||
1522 | int ret; | ||
1523 | struct btrfs_key key; | ||
1524 | struct btrfs_key found_key; | ||
1525 | struct btrfs_path *path; | ||
1526 | struct btrfs_inode_ref *iref; | ||
1527 | int len; | ||
1528 | |||
1529 | path = alloc_path_for_send(); | ||
1530 | if (!path) | ||
1531 | return -ENOMEM; | ||
1532 | |||
1533 | key.objectid = ino; | ||
1534 | key.type = BTRFS_INODE_REF_KEY; | ||
1535 | key.offset = 0; | ||
1536 | |||
1537 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | ||
1538 | if (ret < 0) | ||
1539 | goto out; | ||
1540 | if (!ret) | ||
1541 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | ||
1542 | path->slots[0]); | ||
1543 | if (ret || found_key.objectid != key.objectid || | ||
1544 | found_key.type != key.type) { | ||
1545 | ret = -ENOENT; | ||
1546 | goto out; | ||
1547 | } | ||
1548 | |||
1549 | iref = btrfs_item_ptr(path->nodes[0], path->slots[0], | ||
1550 | struct btrfs_inode_ref); | ||
1551 | len = btrfs_inode_ref_name_len(path->nodes[0], iref); | ||
1552 | ret = fs_path_add_from_extent_buffer(name, path->nodes[0], | ||
1553 | (unsigned long)(iref + 1), len); | ||
1554 | if (ret < 0) | ||
1555 | goto out; | ||
1556 | btrfs_release_path(path); | ||
1557 | |||
1558 | ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL, | ||
1559 | NULL); | ||
1560 | if (ret < 0) | ||
1561 | goto out; | ||
1562 | |||
1563 | *dir = found_key.offset; | ||
1564 | |||
1565 | out: | ||
1566 | btrfs_free_path(path); | ||
1567 | return ret; | ||
1568 | } | ||
1569 | |||
1570 | static int is_first_ref(struct send_ctx *sctx, | ||
1571 | struct btrfs_root *root, | ||
1572 | u64 ino, u64 dir, | ||
1573 | const char *name, int name_len) | ||
1574 | { | ||
1575 | int ret; | ||
1576 | struct fs_path *tmp_name; | ||
1577 | u64 tmp_dir; | ||
1578 | u64 tmp_dir_gen; | ||
1579 | |||
1580 | tmp_name = fs_path_alloc(sctx); | ||
1581 | if (!tmp_name) | ||
1582 | return -ENOMEM; | ||
1583 | |||
1584 | ret = get_first_ref(sctx, root, ino, &tmp_dir, &tmp_dir_gen, tmp_name); | ||
1585 | if (ret < 0) | ||
1586 | goto out; | ||
1587 | |||
1588 | if (name_len != fs_path_len(tmp_name)) { | ||
1589 | ret = 0; | ||
1590 | goto out; | ||
1591 | } | ||
1592 | |||
1593 | ret = memcmp(tmp_name->start, name, name_len); | ||
1594 | if (ret) | ||
1595 | ret = 0; | ||
1596 | else | ||
1597 | ret = 1; | ||
1598 | |||
1599 | out: | ||
1600 | fs_path_free(sctx, tmp_name); | ||
1601 | return ret; | ||
1602 | } | ||
1603 | |||
1604 | static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, | ||
1605 | const char *name, int name_len, | ||
1606 | u64 *who_ino, u64 *who_gen) | ||
1607 | { | ||
1608 | int ret = 0; | ||
1609 | u64 other_inode = 0; | ||
1610 | u8 other_type = 0; | ||
1611 | |||
1612 | if (!sctx->parent_root) | ||
1613 | goto out; | ||
1614 | |||
1615 | ret = is_inode_existent(sctx, dir, dir_gen); | ||
1616 | if (ret <= 0) | ||
1617 | goto out; | ||
1618 | |||
1619 | ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len, | ||
1620 | &other_inode, &other_type); | ||
1621 | if (ret < 0 && ret != -ENOENT) | ||
1622 | goto out; | ||
1623 | if (ret) { | ||
1624 | ret = 0; | ||
1625 | goto out; | ||
1626 | } | ||
1627 | |||
1628 | if (other_inode > sctx->send_progress) { | ||
1629 | ret = get_inode_info(sctx->parent_root, other_inode, NULL, | ||
1630 | who_gen, NULL, NULL, NULL); | ||
1631 | if (ret < 0) | ||
1632 | goto out; | ||
1633 | |||
1634 | ret = 1; | ||
1635 | *who_ino = other_inode; | ||
1636 | } else { | ||
1637 | ret = 0; | ||
1638 | } | ||
1639 | |||
1640 | out: | ||
1641 | return ret; | ||
1642 | } | ||
1643 | |||
1644 | static int did_overwrite_ref(struct send_ctx *sctx, | ||
1645 | u64 dir, u64 dir_gen, | ||
1646 | u64 ino, u64 ino_gen, | ||
1647 | const char *name, int name_len) | ||
1648 | { | ||
1649 | int ret = 0; | ||
1650 | u64 gen; | ||
1651 | u64 ow_inode; | ||
1652 | u8 other_type; | ||
1653 | |||
1654 | if (!sctx->parent_root) | ||
1655 | goto out; | ||
1656 | |||
1657 | ret = is_inode_existent(sctx, dir, dir_gen); | ||
1658 | if (ret <= 0) | ||
1659 | goto out; | ||
1660 | |||
1661 | /* check if the ref was overwritten by another ref */ | ||
1662 | ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, | ||
1663 | &ow_inode, &other_type); | ||
1664 | if (ret < 0 && ret != -ENOENT) | ||
1665 | goto out; | ||
1666 | if (ret) { | ||
1667 | /* was never and will never be overwritten */ | ||
1668 | ret = 0; | ||
1669 | goto out; | ||
1670 | } | ||
1671 | |||
1672 | ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, | ||
1673 | NULL); | ||
1674 | if (ret < 0) | ||
1675 | goto out; | ||
1676 | |||
1677 | if (ow_inode == ino && gen == ino_gen) { | ||
1678 | ret = 0; | ||
1679 | goto out; | ||
1680 | } | ||
1681 | |||
1682 | /* we know that it is or will be overwritten. check this now */ | ||
1683 | if (ow_inode < sctx->send_progress) | ||
1684 | ret = 1; | ||
1685 | else | ||
1686 | ret = 0; | ||
1687 | |||
1688 | out: | ||
1689 | return ret; | ||
1690 | } | ||
1691 | |||
1692 | static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) | ||
1693 | { | ||
1694 | int ret = 0; | ||
1695 | struct fs_path *name = NULL; | ||
1696 | u64 dir; | ||
1697 | u64 dir_gen; | ||
1698 | |||
1699 | if (!sctx->parent_root) | ||
1700 | goto out; | ||
1701 | |||
1702 | name = fs_path_alloc(sctx); | ||
1703 | if (!name) | ||
1704 | return -ENOMEM; | ||
1705 | |||
1706 | ret = get_first_ref(sctx, sctx->parent_root, ino, &dir, &dir_gen, name); | ||
1707 | if (ret < 0) | ||
1708 | goto out; | ||
1709 | |||
1710 | ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, | ||
1711 | name->start, fs_path_len(name)); | ||
1712 | if (ret < 0) | ||
1713 | goto out; | ||
1714 | |||
1715 | out: | ||
1716 | fs_path_free(sctx, name); | ||
1717 | return ret; | ||
1718 | } | ||
1719 | |||
1720 | static int name_cache_insert(struct send_ctx *sctx, | ||
1721 | struct name_cache_entry *nce) | ||
1722 | { | ||
1723 | int ret = 0; | ||
1724 | struct name_cache_entry **ncea; | ||
1725 | |||
1726 | ncea = radix_tree_lookup(&sctx->name_cache, nce->ino); | ||
1727 | if (ncea) { | ||
1728 | if (!ncea[0]) | ||
1729 | ncea[0] = nce; | ||
1730 | else if (!ncea[1]) | ||
1731 | ncea[1] = nce; | ||
1732 | else | ||
1733 | BUG(); | ||
1734 | } else { | ||
1735 | ncea = kmalloc(sizeof(void *) * 2, GFP_NOFS); | ||
1736 | if (!ncea) | ||
1737 | return -ENOMEM; | ||
1738 | |||
1739 | ncea[0] = nce; | ||
1740 | ncea[1] = NULL; | ||
1741 | ret = radix_tree_insert(&sctx->name_cache, nce->ino, ncea); | ||
1742 | if (ret < 0) | ||
1743 | return ret; | ||
1744 | } | ||
1745 | list_add_tail(&nce->list, &sctx->name_cache_list); | ||
1746 | sctx->name_cache_size++; | ||
1747 | |||
1748 | return ret; | ||
1749 | } | ||
1750 | |||
1751 | static void name_cache_delete(struct send_ctx *sctx, | ||
1752 | struct name_cache_entry *nce) | ||
1753 | { | ||
1754 | struct name_cache_entry **ncea; | ||
1755 | |||
1756 | ncea = radix_tree_lookup(&sctx->name_cache, nce->ino); | ||
1757 | BUG_ON(!ncea); | ||
1758 | |||
1759 | if (ncea[0] == nce) | ||
1760 | ncea[0] = NULL; | ||
1761 | else if (ncea[1] == nce) | ||
1762 | ncea[1] = NULL; | ||
1763 | else | ||
1764 | BUG(); | ||
1765 | |||
1766 | if (!ncea[0] && !ncea[1]) { | ||
1767 | radix_tree_delete(&sctx->name_cache, nce->ino); | ||
1768 | kfree(ncea); | ||
1769 | } | ||
1770 | |||
1771 | list_del(&nce->list); | ||
1772 | |||
1773 | sctx->name_cache_size--; | ||
1774 | } | ||
1775 | |||
1776 | static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, | ||
1777 | u64 ino, u64 gen) | ||
1778 | { | ||
1779 | struct name_cache_entry **ncea; | ||
1780 | |||
1781 | ncea = radix_tree_lookup(&sctx->name_cache, ino); | ||
1782 | if (!ncea) | ||
1783 | return NULL; | ||
1784 | |||
1785 | if (ncea[0] && ncea[0]->gen == gen) | ||
1786 | return ncea[0]; | ||
1787 | else if (ncea[1] && ncea[1]->gen == gen) | ||
1788 | return ncea[1]; | ||
1789 | return NULL; | ||
1790 | } | ||
1791 | |||
1792 | static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) | ||
1793 | { | ||
1794 | list_del(&nce->list); | ||
1795 | list_add_tail(&nce->list, &sctx->name_cache_list); | ||
1796 | } | ||
1797 | |||
1798 | static void name_cache_clean_unused(struct send_ctx *sctx) | ||
1799 | { | ||
1800 | struct name_cache_entry *nce; | ||
1801 | |||
1802 | if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE) | ||
1803 | return; | ||
1804 | |||
1805 | while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) { | ||
1806 | nce = list_entry(sctx->name_cache_list.next, | ||
1807 | struct name_cache_entry, list); | ||
1808 | name_cache_delete(sctx, nce); | ||
1809 | kfree(nce); | ||
1810 | } | ||
1811 | } | ||
1812 | |||
1813 | static void name_cache_free(struct send_ctx *sctx) | ||
1814 | { | ||
1815 | struct name_cache_entry *nce; | ||
1816 | struct name_cache_entry *tmp; | ||
1817 | |||
1818 | list_for_each_entry_safe(nce, tmp, &sctx->name_cache_list, list) { | ||
1819 | name_cache_delete(sctx, nce); | ||
1820 | } | ||
1821 | } | ||
1822 | |||
1823 | static int __get_cur_name_and_parent(struct send_ctx *sctx, | ||
1824 | u64 ino, u64 gen, | ||
1825 | u64 *parent_ino, | ||
1826 | u64 *parent_gen, | ||
1827 | struct fs_path *dest) | ||
1828 | { | ||
1829 | int ret; | ||
1830 | int nce_ret; | ||
1831 | struct btrfs_path *path = NULL; | ||
1832 | struct name_cache_entry *nce = NULL; | ||
1833 | |||
1834 | nce = name_cache_search(sctx, ino, gen); | ||
1835 | if (nce) { | ||
1836 | if (ino < sctx->send_progress && nce->need_later_update) { | ||
1837 | name_cache_delete(sctx, nce); | ||
1838 | kfree(nce); | ||
1839 | nce = NULL; | ||
1840 | } else { | ||
1841 | name_cache_used(sctx, nce); | ||
1842 | *parent_ino = nce->parent_ino; | ||
1843 | *parent_gen = nce->parent_gen; | ||
1844 | ret = fs_path_add(dest, nce->name, nce->name_len); | ||
1845 | if (ret < 0) | ||
1846 | goto out; | ||
1847 | ret = nce->ret; | ||
1848 | goto out; | ||
1849 | } | ||
1850 | } | ||
1851 | |||
1852 | path = alloc_path_for_send(); | ||
1853 | if (!path) | ||
1854 | return -ENOMEM; | ||
1855 | |||
1856 | ret = is_inode_existent(sctx, ino, gen); | ||
1857 | if (ret < 0) | ||
1858 | goto out; | ||
1859 | |||
1860 | if (!ret) { | ||
1861 | ret = gen_unique_name(sctx, ino, gen, dest); | ||
1862 | if (ret < 0) | ||
1863 | goto out; | ||
1864 | ret = 1; | ||
1865 | goto out_cache; | ||
1866 | } | ||
1867 | |||
1868 | if (ino < sctx->send_progress) | ||
1869 | ret = get_first_ref(sctx, sctx->send_root, ino, | ||
1870 | parent_ino, parent_gen, dest); | ||
1871 | else | ||
1872 | ret = get_first_ref(sctx, sctx->parent_root, ino, | ||
1873 | parent_ino, parent_gen, dest); | ||
1874 | if (ret < 0) | ||
1875 | goto out; | ||
1876 | |||
1877 | ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, | ||
1878 | dest->start, dest->end - dest->start); | ||
1879 | if (ret < 0) | ||
1880 | goto out; | ||
1881 | if (ret) { | ||
1882 | fs_path_reset(dest); | ||
1883 | ret = gen_unique_name(sctx, ino, gen, dest); | ||
1884 | if (ret < 0) | ||
1885 | goto out; | ||
1886 | ret = 1; | ||
1887 | } | ||
1888 | |||
1889 | out_cache: | ||
1890 | nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS); | ||
1891 | if (!nce) { | ||
1892 | ret = -ENOMEM; | ||
1893 | goto out; | ||
1894 | } | ||
1895 | |||
1896 | nce->ino = ino; | ||
1897 | nce->gen = gen; | ||
1898 | nce->parent_ino = *parent_ino; | ||
1899 | nce->parent_gen = *parent_gen; | ||
1900 | nce->name_len = fs_path_len(dest); | ||
1901 | nce->ret = ret; | ||
1902 | strcpy(nce->name, dest->start); | ||
1903 | memset(&nce->use_list, 0, sizeof(nce->use_list)); | ||
1904 | |||
1905 | if (ino < sctx->send_progress) | ||
1906 | nce->need_later_update = 0; | ||
1907 | else | ||
1908 | nce->need_later_update = 1; | ||
1909 | |||
1910 | nce_ret = name_cache_insert(sctx, nce); | ||
1911 | if (nce_ret < 0) | ||
1912 | ret = nce_ret; | ||
1913 | name_cache_clean_unused(sctx); | ||
1914 | |||
1915 | out: | ||
1916 | btrfs_free_path(path); | ||
1917 | return ret; | ||
1918 | } | ||
1919 | |||
1920 | /* | ||
1921 | * Magic happens here. This function returns the first ref to an inode as it | ||
1922 | * would look like while receiving the stream at this point in time. | ||
1923 | * We walk the path up to the root. For every inode in between, we check if it | ||
1924 | * was already processed/sent. If yes, we continue with the parent as found | ||
1925 | * in send_root. If not, we continue with the parent as found in parent_root. | ||
1926 | * If we encounter an inode that was deleted at this point in time, we use the | ||
1927 | * inode's "orphan" name instead of the real name and stop. Same with new inodes | ||
1928 | * that were not created yet and overwritten inodes/refs. | ||
1929 | * | ||
1930 | * When do we have orphan inodes: | ||
1931 | * 1. When an inode is freshly created and thus no valid refs are available yet | ||
1932 | * 2. When a directory lost all its refs (deleted) but still has dir items | ||
1933 | * inside which were not processed yet (pending for move/delete). If anyone | ||
1934 | * tried to get the path to the dir items, it would get a path inside that | ||
1935 | * orphan directory. | ||
1936 | * 3. When an inode is moved around or gets new links, it may overwrite the ref | ||
1937 | * of an unprocessed inode. If in that case the first ref would be | ||
1938 | * overwritten, the overwritten inode gets "orphanized". Later when we | ||
1939 | * process this overwritten inode, it is restored at a new place by moving | ||
1940 | * the orphan inode. | ||
1941 | * | ||
1942 | * sctx->send_progress tells this function at which point in time receiving | ||
1943 | * would be. | ||
1944 | */ | ||
1945 | static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, | ||
1946 | struct fs_path *dest) | ||
1947 | { | ||
1948 | int ret = 0; | ||
1949 | struct fs_path *name = NULL; | ||
1950 | u64 parent_inode = 0; | ||
1951 | u64 parent_gen = 0; | ||
1952 | int stop = 0; | ||
1953 | |||
1954 | name = fs_path_alloc(sctx); | ||
1955 | if (!name) { | ||
1956 | ret = -ENOMEM; | ||
1957 | goto out; | ||
1958 | } | ||
1959 | |||
1960 | dest->reversed = 1; | ||
1961 | fs_path_reset(dest); | ||
1962 | |||
1963 | while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { | ||
1964 | fs_path_reset(name); | ||
1965 | |||
1966 | ret = __get_cur_name_and_parent(sctx, ino, gen, | ||
1967 | &parent_inode, &parent_gen, name); | ||
1968 | if (ret < 0) | ||
1969 | goto out; | ||
1970 | if (ret) | ||
1971 | stop = 1; | ||
1972 | |||
1973 | ret = fs_path_add_path(dest, name); | ||
1974 | if (ret < 0) | ||
1975 | goto out; | ||
1976 | |||
1977 | ino = parent_inode; | ||
1978 | gen = parent_gen; | ||
1979 | } | ||
1980 | |||
1981 | out: | ||
1982 | fs_path_free(sctx, name); | ||
1983 | if (!ret) | ||
1984 | fs_path_unreverse(dest); | ||
1985 | return ret; | ||
1986 | } | ||
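/*
 * A rough worked example of the walk above: for a fully processed inode at
 * "a/b/c", the loop yields "c", then "b", then "a"; each name is added to
 * the reversed path and fs_path_unreverse() finally turns it into "a/b/c".
 * If an ancestor is not valid yet at the current send_progress, its unique
 * orphan name is used instead and the walk stops at that point.
 */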
1987 | |||
1988 | /* | ||
1989 | * Called for regular files when sending extents data. Opens a struct file | ||
1990 | * to read from the file. | ||
1991 | */ | ||
1992 | static int open_cur_inode_file(struct send_ctx *sctx) | ||
1993 | { | ||
1994 | int ret = 0; | ||
1995 | struct btrfs_key key; | ||
1996 | struct vfsmount *mnt; | ||
1997 | struct inode *inode; | ||
1998 | struct dentry *dentry; | ||
1999 | struct file *filp; | ||
2000 | int new = 0; | ||
2001 | |||
2002 | if (sctx->cur_inode_filp) | ||
2003 | goto out; | ||
2004 | |||
2005 | key.objectid = sctx->cur_ino; | ||
2006 | key.type = BTRFS_INODE_ITEM_KEY; | ||
2007 | key.offset = 0; | ||
2008 | |||
2009 | inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root, | ||
2010 | &new); | ||
2011 | if (IS_ERR(inode)) { | ||
2012 | ret = PTR_ERR(inode); | ||
2013 | goto out; | ||
2014 | } | ||
2015 | |||
2016 | dentry = d_obtain_alias(inode); | ||
2017 | inode = NULL; | ||
2018 | if (IS_ERR(dentry)) { | ||
2019 | ret = PTR_ERR(dentry); | ||
2020 | goto out; | ||
2021 | } | ||
2022 | |||
2023 | mnt = mntget(sctx->mnt); | ||
2024 | filp = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE, current_cred()); | ||
2025 | dentry = NULL; | ||
2026 | mnt = NULL; | ||
2027 | if (IS_ERR(filp)) { | ||
2028 | ret = PTR_ERR(filp); | ||
2029 | goto out; | ||
2030 | } | ||
2031 | sctx->cur_inode_filp = filp; | ||
2032 | |||
2033 | out: | ||
2034 | /* | ||
2035 | * no dput/mntput/iput is required here, as every vfs op | ||
2036 | * cleans up by itself on failure | ||
2037 | */ | ||
2038 | return ret; | ||
2039 | } | ||
2040 | |||
2041 | /* | ||
2042 | * Closes the struct file that was created in open_cur_inode_file | ||
2043 | */ | ||
2044 | static int close_cur_inode_file(struct send_ctx *sctx) | ||
2045 | { | ||
2046 | int ret = 0; | ||
2047 | |||
2048 | if (!sctx->cur_inode_filp) | ||
2049 | goto out; | ||
2050 | |||
2051 | ret = filp_close(sctx->cur_inode_filp, NULL); | ||
2052 | sctx->cur_inode_filp = NULL; | ||
2053 | |||
2054 | out: | ||
2055 | return ret; | ||
2056 | } | ||
2057 | |||
2058 | /* | ||
2059 | * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace | ||
2060 | */ | ||
2061 | static int send_subvol_begin(struct send_ctx *sctx) | ||
2062 | { | ||
2063 | int ret; | ||
2064 | struct btrfs_root *send_root = sctx->send_root; | ||
2065 | struct btrfs_root *parent_root = sctx->parent_root; | ||
2066 | struct btrfs_path *path; | ||
2067 | struct btrfs_key key; | ||
2068 | struct btrfs_root_ref *ref; | ||
2069 | struct extent_buffer *leaf; | ||
2070 | char *name = NULL; | ||
2071 | int namelen; | ||
2072 | |||
2073 | path = alloc_path_for_send(); | ||
2074 | if (!path) | ||
2075 | return -ENOMEM; | ||
2076 | |||
2077 | name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS); | ||
2078 | if (!name) { | ||
2079 | btrfs_free_path(path); | ||
2080 | return -ENOMEM; | ||
2081 | } | ||
2082 | |||
2083 | key.objectid = send_root->objectid; | ||
2084 | key.type = BTRFS_ROOT_BACKREF_KEY; | ||
2085 | key.offset = 0; | ||
2086 | |||
2087 | ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, | ||
2088 | &key, path, 1, 0); | ||
2089 | if (ret < 0) | ||
2090 | goto out; | ||
2091 | if (ret) { | ||
2092 | ret = -ENOENT; | ||
2093 | goto out; | ||
2094 | } | ||
2095 | |||
2096 | leaf = path->nodes[0]; | ||
2097 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | ||
2098 | if (key.type != BTRFS_ROOT_BACKREF_KEY || | ||
2099 | key.objectid != send_root->objectid) { | ||
2100 | ret = -ENOENT; | ||
2101 | goto out; | ||
2102 | } | ||
2103 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | ||
2104 | namelen = btrfs_root_ref_name_len(leaf, ref); | ||
2105 | read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); | ||
2106 | btrfs_release_path(path); | ||
2107 | |||
2108 | if (ret < 0) | ||
2109 | goto out; | ||
2110 | |||
2111 | if (parent_root) { | ||
2112 | ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); | ||
2113 | if (ret < 0) | ||
2114 | goto out; | ||
2115 | } else { | ||
2116 | ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); | ||
2117 | if (ret < 0) | ||
2118 | goto out; | ||
2119 | } | ||
2120 | |||
2121 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); | ||
2122 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, | ||
2123 | sctx->send_root->root_item.uuid); | ||
2124 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, | ||
2125 | sctx->send_root->root_item.ctransid); | ||
2126 | if (parent_root) { | ||
2127 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, | ||
2128 | sctx->parent_root->root_item.uuid); | ||
2129 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, | ||
2130 | sctx->parent_root->root_item.ctransid); | ||
2131 | } | ||
2132 | |||
2133 | ret = send_cmd(sctx); | ||
2134 | |||
2135 | tlv_put_failure: | ||
2136 | out: | ||
2137 | btrfs_free_path(path); | ||
2138 | kfree(name); | ||
2139 | return ret; | ||
2140 | } | ||
2141 | |||
2142 | static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) | ||
2143 | { | ||
2144 | int ret = 0; | ||
2145 | struct fs_path *p; | ||
2146 | |||
2147 | verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size); | ||
2148 | |||
2149 | p = fs_path_alloc(sctx); | ||
2150 | if (!p) | ||
2151 | return -ENOMEM; | ||
2152 | |||
2153 | ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); | ||
2154 | if (ret < 0) | ||
2155 | goto out; | ||
2156 | |||
2157 | ret = get_cur_path(sctx, ino, gen, p); | ||
2158 | if (ret < 0) | ||
2159 | goto out; | ||
2160 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | ||
2161 | TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); | ||
2162 | |||
2163 | ret = send_cmd(sctx); | ||
2164 | |||
2165 | tlv_put_failure: | ||
2166 | out: | ||
2167 | fs_path_free(sctx, p); | ||
2168 | return ret; | ||
2169 | } | ||
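/*
 * send_truncate() above and the chmod/chown/utimes helpers below all follow
 * the same pattern: begin_cmd() starts a new command, the TLV_PUT_* macros
 * append typed attributes (jumping to tlv_put_failure on error) and
 * send_cmd() presumably finalizes the header and streams the command to the
 * send target.
 */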
2170 | |||
2171 | static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) | ||
2172 | { | ||
2173 | int ret = 0; | ||
2174 | struct fs_path *p; | ||
2175 | |||
2176 | verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode); | ||
2177 | |||
2178 | p = fs_path_alloc(sctx); | ||
2179 | if (!p) | ||
2180 | return -ENOMEM; | ||
2181 | |||
2182 | ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); | ||
2183 | if (ret < 0) | ||
2184 | goto out; | ||
2185 | |||
2186 | ret = get_cur_path(sctx, ino, gen, p); | ||
2187 | if (ret < 0) | ||
2188 | goto out; | ||
2189 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | ||
2190 | TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); | ||
2191 | |||
2192 | ret = send_cmd(sctx); | ||
2193 | |||
2194 | tlv_put_failure: | ||
2195 | out: | ||
2196 | fs_path_free(sctx, p); | ||
2197 | return ret; | ||
2198 | } | ||
2199 | |||
2200 | static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) | ||
2201 | { | ||
2202 | int ret = 0; | ||
2203 | struct fs_path *p; | ||
2204 | |||
2205 | verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid); | ||
2206 | |||
2207 | p = fs_path_alloc(sctx); | ||
2208 | if (!p) | ||
2209 | return -ENOMEM; | ||
2210 | |||
2211 | ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); | ||
2212 | if (ret < 0) | ||
2213 | goto out; | ||
2214 | |||
2215 | ret = get_cur_path(sctx, ino, gen, p); | ||
2216 | if (ret < 0) | ||
2217 | goto out; | ||
2218 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | ||
2219 | TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); | ||
2220 | TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); | ||
2221 | |||
2222 | ret = send_cmd(sctx); | ||
2223 | |||
2224 | tlv_put_failure: | ||
2225 | out: | ||
2226 | fs_path_free(sctx, p); | ||
2227 | return ret; | ||
2228 | } | ||
2229 | |||
2230 | static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) | ||
2231 | { | ||
2232 | int ret = 0; | ||
2233 | struct fs_path *p = NULL; | ||
2234 | struct btrfs_inode_item *ii; | ||
2235 | struct btrfs_path *path = NULL; | ||
2236 | struct extent_buffer *eb; | ||
2237 | struct btrfs_key key; | ||
2238 | int slot; | ||
2239 | |||
2240 | verbose_printk("btrfs: send_utimes %llu\n", ino); | ||
2241 | |||
2242 | p = fs_path_alloc(sctx); | ||
2243 | if (!p) | ||
2244 | return -ENOMEM; | ||
2245 | |||
2246 | path = alloc_path_for_send(); | ||
2247 | if (!path) { | ||
2248 | ret = -ENOMEM; | ||
2249 | goto out; | ||
2250 | } | ||
2251 | |||
2252 | key.objectid = ino; | ||
2253 | key.type = BTRFS_INODE_ITEM_KEY; | ||
2254 | key.offset = 0; | ||
2255 | ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); | ||
2256 | if (ret < 0) | ||
2257 | goto out; | ||
2258 | |||
2259 | eb = path->nodes[0]; | ||
2260 | slot = path->slots[0]; | ||
2261 | ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); | ||
2262 | |||
2263 | ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); | ||
2264 | if (ret < 0) | ||
2265 | goto out; | ||
2266 | |||
2267 | ret = get_cur_path(sctx, ino, gen, p); | ||
2268 | if (ret < 0) | ||
2269 | goto out; | ||
2270 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | ||
2271 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, | ||
2272 | btrfs_inode_atime(ii)); | ||
2273 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, | ||
2274 | btrfs_inode_mtime(ii)); | ||
2275 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, | ||
2276 | btrfs_inode_ctime(ii)); | ||
2277 | /* TODO otime? */ | ||
2278 | |||
2279 | ret = send_cmd(sctx); | ||
2280 | |||
2281 | tlv_put_failure: | ||
2282 | out: | ||
2283 | fs_path_free(sctx, p); | ||
2284 | btrfs_free_path(path); | ||
2285 | return ret; | ||
2286 | } | ||
2287 | |||
2288 | /* | ||
2289 | * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have | ||
2290 | * a valid path yet because the refs have not been processed yet. So the inode | ||
2291 | * is created as an orphan. | ||
2292 | */ | ||
2293 | static int send_create_inode(struct send_ctx *sctx, struct btrfs_path *path, | ||
2294 | struct btrfs_key *key) | ||
2295 | { | ||
2296 | int ret = 0; | ||
2297 | struct extent_buffer *eb = path->nodes[0]; | ||
2298 | struct btrfs_inode_item *ii; | ||
2299 | struct fs_path *p; | ||
2300 | int slot = path->slots[0]; | ||
2301 | int cmd; | ||
2302 | u64 mode; | ||
2303 | |||
2304 | verbose_printk("btrfs: send_create_inode %llu\n", sctx->cur_ino); | ||
2305 | |||
2306 | p = fs_path_alloc(sctx); | ||
2307 | if (!p) | ||
2308 | return -ENOMEM; | ||
2309 | |||
2310 | ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); | ||
2311 | mode = btrfs_inode_mode(eb, ii); | ||
2312 | |||
2313 | if (S_ISREG(mode)) | ||
2314 | cmd = BTRFS_SEND_C_MKFILE; | ||
2315 | else if (S_ISDIR(mode)) | ||
2316 | cmd = BTRFS_SEND_C_MKDIR; | ||
2317 | else if (S_ISLNK(mode)) | ||
2318 | cmd = BTRFS_SEND_C_SYMLINK; | ||
2319 | else if (S_ISCHR(mode) || S_ISBLK(mode)) | ||
2320 | cmd = BTRFS_SEND_C_MKNOD; | ||
2321 | else if (S_ISFIFO(mode)) | ||
2322 | cmd = BTRFS_SEND_C_MKFIFO; | ||
2323 | else if (S_ISSOCK(mode)) | ||
2324 | cmd = BTRFS_SEND_C_MKSOCK; | ||
2325 | else { | ||
2326 | printk(KERN_WARNING "btrfs: unexpected inode type %o\n", | ||
2327 | (int)(mode & S_IFMT)); | ||
2328 | ret = -ENOTSUPP; | ||
2329 | goto out; | ||
2330 | } | ||
2331 | |||
2332 | ret = begin_cmd(sctx, cmd); | ||
2333 | if (ret < 0) | ||
2334 | goto out; | ||
2335 | |||
2336 | ret = gen_unique_name(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | ||
2337 | if (ret < 0) | ||
2338 | goto out; | ||
2339 | |||
2340 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | ||
2341 | TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, sctx->cur_ino); | ||
2342 | |||
2343 | if (S_ISLNK(mode)) { | ||
2344 | fs_path_reset(p); | ||
2345 | ret = read_symlink(sctx, sctx->send_root, sctx->cur_ino, p); | ||
2346 | if (ret < 0) | ||
2347 | goto out; | ||
2348 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); | ||
2349 | } else if (S_ISCHR(mode) || S_ISBLK(mode) || | ||
2350 | S_ISFIFO(mode) || S_ISSOCK(mode)) { | ||
2351 | TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, btrfs_inode_rdev(eb, ii)); | ||
2352 | } | ||
2353 | |||
2354 | ret = send_cmd(sctx); | ||
2355 | if (ret < 0) | ||
2356 | goto out; | ||
2357 | |||
2358 | |||
2359 | tlv_put_failure: | ||
2360 | out: | ||
2361 | fs_path_free(sctx, p); | ||
2362 | return ret; | ||
2363 | } | ||
2364 | |||
2365 | struct recorded_ref { | ||
2366 | struct list_head list; | ||
2367 | char *dir_path; | ||
2368 | char *name; | ||
2369 | struct fs_path *full_path; | ||
2370 | u64 dir; | ||
2371 | u64 dir_gen; | ||
2372 | int dir_path_len; | ||
2373 | int name_len; | ||
2374 | }; | ||
2375 | |||
2376 | /* | ||
2377 | * We need to process new refs before deleted refs, but compare_tree gives us | ||
2378 | * everything mixed. So we first record all refs and later process them. | ||
2379 | * This function is a helper to record one ref. | ||
2380 | */ | ||
2381 | static int record_ref(struct list_head *head, u64 dir, | ||
2382 | u64 dir_gen, struct fs_path *path) | ||
2383 | { | ||
2384 | struct recorded_ref *ref; | ||
2385 | char *tmp; | ||
2386 | |||
2387 | ref = kmalloc(sizeof(*ref), GFP_NOFS); | ||
2388 | if (!ref) | ||
2389 | return -ENOMEM; | ||
2390 | |||
2391 | ref->dir = dir; | ||
2392 | ref->dir_gen = dir_gen; | ||
2393 | ref->full_path = path; | ||
2394 | |||
2395 | tmp = strrchr(ref->full_path->start, '/'); | ||
2396 | if (!tmp) { | ||
2397 | ref->name_len = ref->full_path->end - ref->full_path->start; | ||
2398 | ref->name = ref->full_path->start; | ||
2399 | ref->dir_path_len = 0; | ||
2400 | ref->dir_path = ref->full_path->start; | ||
2401 | } else { | ||
2402 | tmp++; | ||
2403 | ref->name_len = ref->full_path->end - tmp; | ||
2404 | ref->name = tmp; | ||
2405 | ref->dir_path = ref->full_path->start; | ||
2406 | ref->dir_path_len = ref->full_path->end - | ||
2407 | ref->full_path->start - 1 - ref->name_len; | ||
2408 | } | ||
2409 | |||
2410 | list_add_tail(&ref->list, head); | ||
2411 | return 0; | ||
2412 | } | ||
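/*
 * For illustration, a full_path of "a/b/c" is split at the last '/' into
 * dir_path = "a/b" (dir_path_len = 3) and name = "c" (name_len = 1), while
 * a bare name such as "c" keeps dir_path_len = 0 and points both name and
 * dir_path at the start of the buffer.
 */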
2413 | |||
2414 | static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head) | ||
2415 | { | ||
2416 | struct recorded_ref *cur; | ||
2417 | struct recorded_ref *tmp; | ||
2418 | |||
2419 | list_for_each_entry_safe(cur, tmp, head, list) { | ||
2420 | fs_path_free(sctx, cur->full_path); | ||
2421 | kfree(cur); | ||
2422 | } | ||
2423 | INIT_LIST_HEAD(head); | ||
2424 | } | ||
2425 | |||
2426 | static void free_recorded_refs(struct send_ctx *sctx) | ||
2427 | { | ||
2428 | __free_recorded_refs(sctx, &sctx->new_refs); | ||
2429 | __free_recorded_refs(sctx, &sctx->deleted_refs); | ||
2430 | } | ||
2431 | |||
2432 | /* | ||
2433 | * Renames/moves a file/dir to its orphan name. Used when the first | ||
2434 | * ref of an unprocessed inode gets overwritten and for all non-empty | ||
2435 | * directories. | ||
2436 | */ | ||
2437 | static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, | ||
2438 | struct fs_path *path) | ||
2439 | { | ||
2440 | int ret; | ||
2441 | struct fs_path *orphan; | ||
2442 | |||
2443 | orphan = fs_path_alloc(sctx); | ||
2444 | if (!orphan) | ||
2445 | return -ENOMEM; | ||
2446 | |||
2447 | ret = gen_unique_name(sctx, ino, gen, orphan); | ||
2448 | if (ret < 0) | ||
2449 | goto out; | ||
2450 | |||
2451 | ret = send_rename(sctx, path, orphan); | ||
2452 | |||
2453 | out: | ||
2454 | fs_path_free(sctx, orphan); | ||
2455 | return ret; | ||
2456 | } | ||
2457 | |||
2458 | /* | ||
2459 | * Returns 1 if a directory can be removed at this point in time. | ||
2460 | * We check this by iterating all dir items and checking if the inode behind | ||
2461 | * the dir item was already processed. | ||
2462 | */ | ||
2463 | static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | ||
2464 | { | ||
2465 | int ret = 0; | ||
2466 | struct btrfs_root *root = sctx->parent_root; | ||
2467 | struct btrfs_path *path; | ||
2468 | struct btrfs_key key; | ||
2469 | struct btrfs_key found_key; | ||
2470 | struct btrfs_key loc; | ||
2471 | struct btrfs_dir_item *di; | ||
2472 | |||
2473 | path = alloc_path_for_send(); | ||
2474 | if (!path) | ||
2475 | return -ENOMEM; | ||
2476 | |||
2477 | key.objectid = dir; | ||
2478 | key.type = BTRFS_DIR_INDEX_KEY; | ||
2479 | key.offset = 0; | ||
2480 | |||
2481 | while (1) { | ||
2482 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | ||
2483 | if (ret < 0) | ||
2484 | goto out; | ||
2485 | if (!ret) { | ||
2486 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | ||
2487 | path->slots[0]); | ||
2488 | } | ||
2489 | if (ret || found_key.objectid != key.objectid || | ||
2490 | found_key.type != key.type) { | ||
2491 | break; | ||
2492 | } | ||
2493 | |||
2494 | di = btrfs_item_ptr(path->nodes[0], path->slots[0], | ||
2495 | struct btrfs_dir_item); | ||
2496 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); | ||
2497 | |||
2498 | if (loc.objectid > send_progress) { | ||
2499 | ret = 0; | ||
2500 | goto out; | ||
2501 | } | ||
2502 | |||
2503 | btrfs_release_path(path); | ||
2504 | key.offset = found_key.offset + 1; | ||
2505 | } | ||
2506 | |||
2507 | ret = 1; | ||
2508 | |||
2509 | out: | ||
2510 | btrfs_free_path(path); | ||
2511 | return ret; | ||
2512 | } | ||
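/*
 * Example of the check above: if the dir still contains an index item whose
 * target inode number is larger than send_progress (say inode 280 while
 * progress is at 275), that child has not been processed yet, so the rmdir
 * must be delayed and 0 is returned; once every child is at or below
 * progress, 1 is returned.
 */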
2513 | |||
2514 | struct finish_unordered_dir_ctx { | ||
2515 | struct send_ctx *sctx; | ||
2516 | struct fs_path *cur_path; | ||
2517 | struct fs_path *dir_path; | ||
2518 | u64 dir_ino; | ||
2519 | int need_delete; | ||
2520 | int delete_pass; | ||
2521 | }; | ||
2522 | |||
2523 | int __finish_unordered_dir(int num, struct btrfs_key *di_key, | ||
2524 | const char *name, int name_len, | ||
2525 | const char *data, int data_len, | ||
2526 | u8 type, void *ctx) | ||
2527 | { | ||
2528 | int ret = 0; | ||
2529 | struct finish_unordered_dir_ctx *fctx = ctx; | ||
2530 | struct send_ctx *sctx = fctx->sctx; | ||
2531 | u64 di_gen; | ||
2532 | u64 di_mode; | ||
2533 | int is_orphan = 0; | ||
2534 | |||
2535 | if (di_key->objectid >= fctx->dir_ino) | ||
2536 | goto out; | ||
2537 | |||
2538 | fs_path_reset(fctx->cur_path); | ||
2539 | |||
2540 | ret = get_inode_info(sctx->send_root, di_key->objectid, | ||
2541 | NULL, &di_gen, &di_mode, NULL, NULL); | ||
2542 | if (ret < 0) | ||
2543 | goto out; | ||
2544 | |||
2545 | ret = is_first_ref(sctx, sctx->send_root, di_key->objectid, | ||
2546 | fctx->dir_ino, name, name_len); | ||
2547 | if (ret < 0) | ||
2548 | goto out; | ||
2549 | if (ret) { | ||
2550 | is_orphan = 1; | ||
2551 | ret = gen_unique_name(sctx, di_key->objectid, di_gen, | ||
2552 | fctx->cur_path); | ||
2553 | } else { | ||
2554 | ret = get_cur_path(sctx, di_key->objectid, di_gen, | ||
2555 | fctx->cur_path); | ||
2556 | } | ||
2557 | if (ret < 0) | ||
2558 | goto out; | ||
2559 | |||
2560 | ret = fs_path_add(fctx->dir_path, name, name_len); | ||
2561 | if (ret < 0) | ||
2562 | goto out; | ||
2563 | |||
2564 | if (!fctx->delete_pass) { | ||
2565 | if (S_ISDIR(di_mode)) { | ||
2566 | ret = send_rename(sctx, fctx->cur_path, | ||
2567 | fctx->dir_path); | ||
2568 | } else { | ||
2569 | ret = send_link(sctx, fctx->dir_path, | ||
2570 | fctx->cur_path); | ||
2571 | if (is_orphan) | ||
2572 | fctx->need_delete = 1; | ||
2573 | } | ||
2574 | } else if (!S_ISDIR(di_mode)) { | ||
2575 | ret = send_unlink(sctx, fctx->cur_path); | ||
2576 | } else { | ||
2577 | ret = 0; | ||
2578 | } | ||
2579 | |||
2580 | fs_path_remove(fctx->dir_path); | ||
2581 | |||
2582 | out: | ||
2583 | return ret; | ||
2584 | } | ||
2585 | |||
2586 | /* | ||
2587 | * Go through all dir items and see if we find refs which could not be created | ||
2588 | * in the past because the dir did not exist at that time. | ||
2589 | */ | ||
2590 | static int finish_outoforder_dir(struct send_ctx *sctx, u64 dir, u64 dir_gen) | ||
2591 | { | ||
2592 | int ret = 0; | ||
2593 | struct btrfs_path *path = NULL; | ||
2594 | struct btrfs_key key; | ||
2595 | struct btrfs_key found_key; | ||
2596 | struct extent_buffer *eb; | ||
2597 | struct finish_unordered_dir_ctx fctx; | ||
2598 | int slot; | ||
2599 | |||
2600 | path = alloc_path_for_send(); | ||
2601 | if (!path) { | ||
2602 | ret = -ENOMEM; | ||
2603 | goto out; | ||
2604 | } | ||
2605 | |||
2606 | memset(&fctx, 0, sizeof(fctx)); | ||
2607 | fctx.sctx = sctx; | ||
2608 | fctx.cur_path = fs_path_alloc(sctx); | ||
2609 | fctx.dir_path = fs_path_alloc(sctx); | ||
2610 | if (!fctx.cur_path || !fctx.dir_path) { | ||
2611 | ret = -ENOMEM; | ||
2612 | goto out; | ||
2613 | } | ||
2614 | fctx.dir_ino = dir; | ||
2615 | |||
2616 | ret = get_cur_path(sctx, dir, dir_gen, fctx.dir_path); | ||
2617 | if (ret < 0) | ||
2618 | goto out; | ||
2619 | |||
2620 | /* | ||
2621 | * We do two passes. The first links in the new refs and the second | ||
2622 | * deletes orphans if required. Deletion of orphans is not required for | ||
2623 | * directory inodes, as we always have only one ref and use rename | ||
2624 | * instead of link for those. | ||
2625 | */ | ||
2626 | |||
2627 | again: | ||
2628 | key.objectid = dir; | ||
2629 | key.type = BTRFS_DIR_ITEM_KEY; | ||
2630 | key.offset = 0; | ||
2631 | while (1) { | ||
2632 | ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, | ||
2633 | 1, 0); | ||
2634 | if (ret < 0) | ||
2635 | goto out; | ||
2636 | eb = path->nodes[0]; | ||
2637 | slot = path->slots[0]; | ||
2638 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
2639 | |||
2640 | if (found_key.objectid != key.objectid || | ||
2641 | found_key.type != key.type) { | ||
2642 | btrfs_release_path(path); | ||
2643 | break; | ||
2644 | } | ||
2645 | |||
2646 | ret = iterate_dir_item(sctx, sctx->send_root, path, | ||
2647 | &found_key, __finish_unordered_dir, | ||
2648 | &fctx); | ||
2649 | if (ret < 0) | ||
2650 | goto out; | ||
2651 | |||
2652 | key.offset = found_key.offset + 1; | ||
2653 | btrfs_release_path(path); | ||
2654 | } | ||
2655 | |||
2656 | if (!fctx.delete_pass && fctx.need_delete) { | ||
2657 | fctx.delete_pass = 1; | ||
2658 | goto again; | ||
2659 | } | ||
2660 | |||
2661 | out: | ||
2662 | btrfs_free_path(path); | ||
2663 | fs_path_free(sctx, fctx.cur_path); | ||
2664 | fs_path_free(sctx, fctx.dir_path); | ||
2665 | return ret; | ||
2666 | } | ||
2667 | |||
2668 | /* | ||
2669 | * This does all the move/link/unlink/rmdir magic. | ||
2670 | */ | ||
2671 | static int process_recorded_refs(struct send_ctx *sctx) | ||
2672 | { | ||
2673 | int ret = 0; | ||
2674 | struct recorded_ref *cur; | ||
2675 | struct ulist *check_dirs = NULL; | ||
2676 | struct ulist_iterator uit; | ||
2677 | struct ulist_node *un; | ||
2678 | struct fs_path *valid_path = NULL; | ||
2679 | u64 ow_inode; | ||
2680 | u64 ow_gen; | ||
2681 | int did_overwrite = 0; | ||
2682 | int is_orphan = 0; | ||
2683 | |||
2684 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | ||
2685 | |||
2686 | valid_path = fs_path_alloc(sctx); | ||
2687 | if (!valid_path) { | ||
2688 | ret = -ENOMEM; | ||
2689 | goto out; | ||
2690 | } | ||
2691 | |||
2692 | check_dirs = ulist_alloc(GFP_NOFS); | ||
2693 | if (!check_dirs) { | ||
2694 | ret = -ENOMEM; | ||
2695 | goto out; | ||
2696 | } | ||
2697 | |||
2698 | /* | ||
2699 | * First, check if the first ref of the current inode was overwritten | ||
2700 | * before. If yes, we know that the current inode was already orphanized | ||
2701 | * and thus use the orphan name. If not, we can use get_cur_path to | ||
2702 | * get the path of the first ref as it would look like while receiving at | ||
2703 | * this point in time. | ||
2704 | * New inodes are always orphan at the beginning, so we force the use of the | ||
2705 | * orphan name in this case. | ||
2706 | * The first ref is stored in valid_path and will be updated if it | ||
2707 | * gets moved around. | ||
2708 | */ | ||
2709 | if (!sctx->cur_inode_new) { | ||
2710 | ret = did_overwrite_first_ref(sctx, sctx->cur_ino, | ||
2711 | sctx->cur_inode_gen); | ||
2712 | if (ret < 0) | ||
2713 | goto out; | ||
2714 | if (ret) | ||
2715 | did_overwrite = 1; | ||
2716 | } | ||
2717 | if (sctx->cur_inode_new || did_overwrite) { | ||
2718 | ret = gen_unique_name(sctx, sctx->cur_ino, | ||
2719 | sctx->cur_inode_gen, valid_path); | ||
2720 | if (ret < 0) | ||
2721 | goto out; | ||
2722 | is_orphan = 1; | ||
2723 | } else { | ||
2724 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, | ||
2725 | valid_path); | ||
2726 | if (ret < 0) | ||
2727 | goto out; | ||
2728 | } | ||
2729 | |||
2730 | list_for_each_entry(cur, &sctx->new_refs, list) { | ||
2731 | /* | ||
2732 | * Check if this new ref would overwrite the first ref of | ||
2733 | * another unprocessed inode. If yes, orphanize the | ||
2734 | * overwritten inode. If we find an overwritten ref that is | ||
2735 | * not the first ref, simply unlink it. | ||
2736 | */ | ||
2737 | ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, | ||
2738 | cur->name, cur->name_len, | ||
2739 | &ow_inode, &ow_gen); | ||
2740 | if (ret < 0) | ||
2741 | goto out; | ||
2742 | if (ret) { | ||
2743 | ret = is_first_ref(sctx, sctx->parent_root, | ||
2744 | ow_inode, cur->dir, cur->name, | ||
2745 | cur->name_len); | ||
2746 | if (ret < 0) | ||
2747 | goto out; | ||
2748 | if (ret) { | ||
2749 | ret = orphanize_inode(sctx, ow_inode, ow_gen, | ||
2750 | cur->full_path); | ||
2751 | if (ret < 0) | ||
2752 | goto out; | ||
2753 | } else { | ||
2754 | ret = send_unlink(sctx, cur->full_path); | ||
2755 | if (ret < 0) | ||
2756 | goto out; | ||
2757 | } | ||
2758 | } | ||
2759 | |||
2760 | /* | ||
2761 | * link/move the ref to the new place. If we have an orphan | ||
2762 | * inode, move it and update valid_path. If not, link or move | ||
2763 | * it depending on the inode mode. | ||
2764 | */ | ||
2765 | if (is_orphan && !sctx->cur_inode_first_ref_orphan) { | ||
2766 | ret = send_rename(sctx, valid_path, cur->full_path); | ||
2767 | if (ret < 0) | ||
2768 | goto out; | ||
2769 | is_orphan = 0; | ||
2770 | ret = fs_path_copy(valid_path, cur->full_path); | ||
2771 | if (ret < 0) | ||
2772 | goto out; | ||
2773 | } else { | ||
2774 | if (S_ISDIR(sctx->cur_inode_mode)) { | ||
2775 | /* | ||
2776 | * Dirs can't be linked, so move it. For moved | ||
2777 | * dirs, we always have one new and one deleted | ||
2778 | * ref. The deleted ref is ignored later. | ||
2779 | */ | ||
2780 | ret = send_rename(sctx, valid_path, | ||
2781 | cur->full_path); | ||
2782 | if (ret < 0) | ||
2783 | goto out; | ||
2784 | ret = fs_path_copy(valid_path, cur->full_path); | ||
2785 | if (ret < 0) | ||
2786 | goto out; | ||
2787 | } else { | ||
2788 | ret = send_link(sctx, cur->full_path, | ||
2789 | valid_path); | ||
2790 | if (ret < 0) | ||
2791 | goto out; | ||
2792 | } | ||
2793 | } | ||
2794 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | ||
2795 | GFP_NOFS); | ||
2796 | if (ret < 0) | ||
2797 | goto out; | ||
2798 | } | ||
2799 | |||
2800 | if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { | ||
2801 | /* | ||
2802 | * Check if we can already rmdir the directory. If not, | ||
2803 | * orphanize it. For every dir item inside that gets deleted | ||
2804 | * later, we do this check again and rmdir it then if possible. | ||
2805 | * See the use of check_dirs for more details. | ||
2806 | */ | ||
2807 | ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino); | ||
2808 | if (ret < 0) | ||
2809 | goto out; | ||
2810 | if (ret) { | ||
2811 | ret = send_rmdir(sctx, valid_path); | ||
2812 | if (ret < 0) | ||
2813 | goto out; | ||
2814 | } else if (!is_orphan) { | ||
2815 | ret = orphanize_inode(sctx, sctx->cur_ino, | ||
2816 | sctx->cur_inode_gen, valid_path); | ||
2817 | if (ret < 0) | ||
2818 | goto out; | ||
2819 | is_orphan = 1; | ||
2820 | } | ||
2821 | |||
2822 | list_for_each_entry(cur, &sctx->deleted_refs, list) { | ||
2823 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | ||
2824 | GFP_NOFS); | ||
2825 | if (ret < 0) | ||
2826 | goto out; | ||
2827 | } | ||
2828 | } else if (!S_ISDIR(sctx->cur_inode_mode)) { | ||
2829 | /* | ||
2830 | * We have a non-dir inode. Go through all deleted refs and | ||
2831 | * unlink them if they were not already overwritten by other | ||
2832 | * inodes. | ||
2833 | */ | ||
2834 | list_for_each_entry(cur, &sctx->deleted_refs, list) { | ||
2835 | ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, | ||
2836 | sctx->cur_ino, sctx->cur_inode_gen, | ||
2837 | cur->name, cur->name_len); | ||
2838 | if (ret < 0) | ||
2839 | goto out; | ||
2840 | if (!ret) { | ||
2841 | /* | ||
2842 | * In case the inode was moved to a directory | ||
2843 | * that was not created yet (see | ||
2844 | * __record_new_ref), we can not unlink the ref | ||
2845 | * as it will be needed later when the parent | ||
2846 | * directory is created, so that we can move | ||
2847 | * the inode into the new dir. | ||
2848 | */ | ||
2849 | if (!is_orphan && | ||
2850 | sctx->cur_inode_first_ref_orphan) { | ||
2851 | ret = orphanize_inode(sctx, | ||
2852 | sctx->cur_ino, | ||
2853 | sctx->cur_inode_gen, | ||
2854 | cur->full_path); | ||
2855 | if (ret < 0) | ||
2856 | goto out; | ||
2857 | ret = gen_unique_name(sctx, | ||
2858 | sctx->cur_ino, | ||
2859 | sctx->cur_inode_gen, | ||
2860 | valid_path); | ||
2861 | if (ret < 0) | ||
2862 | goto out; | ||
2863 | is_orphan = 1; | ||
2864 | |||
2865 | } else { | ||
2866 | ret = send_unlink(sctx, cur->full_path); | ||
2867 | if (ret < 0) | ||
2868 | goto out; | ||
2869 | } | ||
2870 | } | ||
2871 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | ||
2872 | GFP_NOFS); | ||
2873 | if (ret < 0) | ||
2874 | goto out; | ||
2875 | } | ||
2876 | |||
2877 | /* | ||
2878 | * If the inode is still orphan, unlink the orphan. This may | ||
2879 | * happen when a previous inode did overwrite the first ref | ||
2880 | * of this inode and no new refs were added for the current | ||
2881 | * inode. | ||
2882 | * We can however not delete the orphan in case the inode resides | ||
2883 | * in a directory that was not created yet (see | ||
2884 | * __record_new_ref). | ||
2885 | */ | ||
2886 | if (is_orphan && !sctx->cur_inode_first_ref_orphan) { | ||
2887 | ret = send_unlink(sctx, valid_path); | ||
2888 | if (ret < 0) | ||
2889 | goto out; | ||
2890 | } | ||
2891 | } | ||
2892 | |||
2893 | /* | ||
2894 | * We did collect all parent dirs where cur_inode was once located. We | ||
2895 | * now go through all these dirs and check if they are pending for | ||
2896 | * deletion and if it's finally possible to perform the rmdir now. | ||
2897 | * We also update the inode stats of the parent dirs here. | ||
2898 | */ | ||
2899 | ULIST_ITER_INIT(&uit); | ||
2900 | while ((un = ulist_next(check_dirs, &uit))) { | ||
2901 | if (un->val > sctx->cur_ino) | ||
2902 | continue; | ||
2903 | |||
2904 | ret = get_cur_inode_state(sctx, un->val, un->aux); | ||
2905 | if (ret < 0) | ||
2906 | goto out; | ||
2907 | |||
2908 | if (ret == inode_state_did_create || | ||
2909 | ret == inode_state_no_change) { | ||
2910 | /* TODO delayed utimes */ | ||
2911 | ret = send_utimes(sctx, un->val, un->aux); | ||
2912 | if (ret < 0) | ||
2913 | goto out; | ||
2914 | } else if (ret == inode_state_did_delete) { | ||
2915 | ret = can_rmdir(sctx, un->val, sctx->cur_ino); | ||
2916 | if (ret < 0) | ||
2917 | goto out; | ||
2918 | if (ret) { | ||
2919 | ret = get_cur_path(sctx, un->val, un->aux, | ||
2920 | valid_path); | ||
2921 | if (ret < 0) | ||
2922 | goto out; | ||
2923 | ret = send_rmdir(sctx, valid_path); | ||
2924 | if (ret < 0) | ||
2925 | goto out; | ||
2926 | } | ||
2927 | } | ||
2928 | } | ||
2929 | |||
2930 | /* | ||
2931 | * Current inode is now at its new position, so we must increase | ||
2932 | * send_progress | ||
2933 | */ | ||
2934 | sctx->send_progress = sctx->cur_ino + 1; | ||
2935 | |||
2936 | /* | ||
2937 | * We may have a directory here that has pending refs which could not | ||
2938 | * be created before (because the dir did not exist before, see | ||
2939 | * __record_new_ref). finish_outoforder_dir will link/move the pending | ||
2940 | * refs. | ||
2941 | */ | ||
2942 | if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_new) { | ||
2943 | ret = finish_outoforder_dir(sctx, sctx->cur_ino, | ||
2944 | sctx->cur_inode_gen); | ||
2945 | if (ret < 0) | ||
2946 | goto out; | ||
2947 | } | ||
2948 | |||
2949 | ret = 0; | ||
2950 | |||
2951 | out: | ||
2952 | free_recorded_refs(sctx); | ||
2953 | ulist_free(check_dirs); | ||
2954 | fs_path_free(sctx, valid_path); | ||
2955 | return ret; | ||
2956 | } | ||
2957 | |||
2958 | static int __record_new_ref(int num, u64 dir, int index, | ||
2959 | struct fs_path *name, | ||
2960 | void *ctx) | ||
2961 | { | ||
2962 | int ret = 0; | ||
2963 | struct send_ctx *sctx = ctx; | ||
2964 | struct fs_path *p; | ||
2965 | u64 gen; | ||
2966 | |||
2967 | p = fs_path_alloc(sctx); | ||
2968 | if (!p) | ||
2969 | return -ENOMEM; | ||
2970 | |||
2971 | ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, | ||
2972 | NULL); | ||
2973 | if (ret < 0) | ||
2974 | goto out; | ||
2975 | |||
2976 | /* | ||
2977 | * The parent may be non-existent at this point in time. This happens | ||
2978 | * if the ino of the parent dir is higher than the current ino. In this | ||
2979 | * case, we can not process this ref until the parent dir is finally | ||
2980 | * created. If we reach the parent dir later, process_recorded_refs | ||
2981 | * will go through all dir items and process the refs that could not be | ||
2982 | * processed before. In case this is the first ref, we set | ||
2983 | * cur_inode_first_ref_orphan to 1 to inform process_recorded_refs to | ||
2984 | * keep an orphan of the inode so that it later can be used for | ||
2985 | * link/move | ||
2986 | */ | ||
2987 | ret = is_inode_existent(sctx, dir, gen); | ||
2988 | if (ret < 0) | ||
2989 | goto out; | ||
2990 | if (!ret) { | ||
2991 | ret = is_first_ref(sctx, sctx->send_root, sctx->cur_ino, dir, | ||
2992 | name->start, fs_path_len(name)); | ||
2993 | if (ret < 0) | ||
2994 | goto out; | ||
2995 | if (ret) | ||
2996 | sctx->cur_inode_first_ref_orphan = 1; | ||
2997 | ret = 0; | ||
2998 | goto out; | ||
2999 | } | ||
3000 | |||
3001 | ret = get_cur_path(sctx, dir, gen, p); | ||
3002 | if (ret < 0) | ||
3003 | goto out; | ||
3004 | ret = fs_path_add_path(p, name); | ||
3005 | if (ret < 0) | ||
3006 | goto out; | ||
3007 | |||
3008 | ret = record_ref(&sctx->new_refs, dir, gen, p); | ||
3009 | |||
3010 | out: | ||
3011 | if (ret) | ||
3012 | fs_path_free(sctx, p); | ||
3013 | return ret; | ||
3014 | } | ||
3015 | |||
3016 | static int __record_deleted_ref(int num, u64 dir, int index, | ||
3017 | struct fs_path *name, | ||
3018 | void *ctx) | ||
3019 | { | ||
3020 | int ret = 0; | ||
3021 | struct send_ctx *sctx = ctx; | ||
3022 | struct fs_path *p; | ||
3023 | u64 gen; | ||
3024 | |||
3025 | p = fs_path_alloc(sctx); | ||
3026 | if (!p) | ||
3027 | return -ENOMEM; | ||
3028 | |||
3029 | ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, | ||
3030 | NULL); | ||
3031 | if (ret < 0) | ||
3032 | goto out; | ||
3033 | |||
3034 | ret = get_cur_path(sctx, dir, gen, p); | ||
3035 | if (ret < 0) | ||
3036 | goto out; | ||
3037 | ret = fs_path_add_path(p, name); | ||
3038 | if (ret < 0) | ||
3039 | goto out; | ||
3040 | |||
3041 | ret = record_ref(&sctx->deleted_refs, dir, gen, p); | ||
3042 | |||
3043 | out: | ||
3044 | if (ret) | ||
3045 | fs_path_free(sctx, p); | ||
3046 | return ret; | ||
3047 | } | ||
3048 | |||
3049 | static int record_new_ref(struct send_ctx *sctx) | ||
3050 | { | ||
3051 | int ret; | ||
3052 | |||
3053 | ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path, | ||
3054 | sctx->cmp_key, 0, __record_new_ref, sctx); | ||
3055 | if (ret < 0) | ||
3056 | goto out; | ||
3057 | ret = 0; | ||
3058 | |||
3059 | out: | ||
3060 | return ret; | ||
3061 | } | ||
3062 | |||
3063 | static int record_deleted_ref(struct send_ctx *sctx) | ||
3064 | { | ||
3065 | int ret; | ||
3066 | |||
3067 | ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path, | ||
3068 | sctx->cmp_key, 0, __record_deleted_ref, sctx); | ||
3069 | if (ret < 0) | ||
3070 | goto out; | ||
3071 | ret = 0; | ||
3072 | |||
3073 | out: | ||
3074 | return ret; | ||
3075 | } | ||
3076 | |||
3077 | struct find_ref_ctx { | ||
3078 | u64 dir; | ||
3079 | struct fs_path *name; | ||
3080 | int found_idx; | ||
3081 | }; | ||
3082 | |||
3083 | static int __find_iref(int num, u64 dir, int index, | ||
3084 | struct fs_path *name, | ||
3085 | void *ctx_) | ||
3086 | { | ||
3087 | struct find_ref_ctx *ctx = ctx_; | ||
3088 | |||
3089 | if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) && | ||
3090 | strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { | ||
3091 | ctx->found_idx = num; | ||
3092 | return 1; | ||
3093 | } | ||
3094 | return 0; | ||
3095 | } | ||
3096 | |||
3097 | static int find_iref(struct send_ctx *sctx, | ||
3098 | struct btrfs_root *root, | ||
3099 | struct btrfs_path *path, | ||
3100 | struct btrfs_key *key, | ||
3101 | u64 dir, struct fs_path *name) | ||
3102 | { | ||
3103 | int ret; | ||
3104 | struct find_ref_ctx ctx; | ||
3105 | |||
3106 | ctx.dir = dir; | ||
3107 | ctx.name = name; | ||
3108 | ctx.found_idx = -1; | ||
3109 | |||
3110 | ret = iterate_inode_ref(sctx, root, path, key, 0, __find_iref, &ctx); | ||
3111 | if (ret < 0) | ||
3112 | return ret; | ||
3113 | |||
3114 | if (ctx.found_idx == -1) | ||
3115 | return -ENOENT; | ||
3116 | |||
3117 | return ctx.found_idx; | ||
3118 | } | ||
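/*
 * Usage note: the changed-ref callbacks below treat -ENOENT from find_iref()
 * as "this ref exists on one side only" and record it as new or deleted,
 * while an index >= 0 means the identical ref exists in both trees and is
 * skipped.
 */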
3119 | |||
3120 | static int __record_changed_new_ref(int num, u64 dir, int index, | ||
3121 | struct fs_path *name, | ||
3122 | void *ctx) | ||
3123 | { | ||
3124 | int ret; | ||
3125 | struct send_ctx *sctx = ctx; | ||
3126 | |||
3127 | ret = find_iref(sctx, sctx->parent_root, sctx->right_path, | ||
3128 | sctx->cmp_key, dir, name); | ||
3129 | if (ret == -ENOENT) | ||
3130 | ret = __record_new_ref(num, dir, index, name, sctx); | ||
3131 | else if (ret > 0) | ||
3132 | ret = 0; | ||
3133 | |||
3134 | return ret; | ||
3135 | } | ||
3136 | |||
3137 | static int __record_changed_deleted_ref(int num, u64 dir, int index, | ||
3138 | struct fs_path *name, | ||
3139 | void *ctx) | ||
3140 | { | ||
3141 | int ret; | ||
3142 | struct send_ctx *sctx = ctx; | ||
3143 | |||
3144 | ret = find_iref(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key, | ||
3145 | dir, name); | ||
3146 | if (ret == -ENOENT) | ||
3147 | ret = __record_deleted_ref(num, dir, index, name, sctx); | ||
3148 | else if (ret > 0) | ||
3149 | ret = 0; | ||
3150 | |||
3151 | return ret; | ||
3152 | } | ||
3153 | |||
3154 | static int record_changed_ref(struct send_ctx *sctx) | ||
3155 | { | ||
3156 | int ret = 0; | ||
3157 | |||
3158 | ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path, | ||
3159 | sctx->cmp_key, 0, __record_changed_new_ref, sctx); | ||
3160 | if (ret < 0) | ||
3161 | goto out; | ||
3162 | ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path, | ||
3163 | sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); | ||
3164 | if (ret < 0) | ||
3165 | goto out; | ||
3166 | ret = 0; | ||
3167 | |||
3168 | out: | ||
3169 | return ret; | ||
3170 | } | ||
3171 | |||
3172 | /* | ||
3173 | * Record and process all refs at once. Needed when an inode changes the | ||
3174 | * generation number, which means that it was deleted and recreated. | ||
3175 | */ | ||
3176 | static int process_all_refs(struct send_ctx *sctx, | ||
3177 | enum btrfs_compare_tree_result cmd) | ||
3178 | { | ||
3179 | int ret; | ||
3180 | struct btrfs_root *root; | ||
3181 | struct btrfs_path *path; | ||
3182 | struct btrfs_key key; | ||
3183 | struct btrfs_key found_key; | ||
3184 | struct extent_buffer *eb; | ||
3185 | int slot; | ||
3186 | iterate_inode_ref_t cb; | ||
3187 | |||
3188 | path = alloc_path_for_send(); | ||
3189 | if (!path) | ||
3190 | return -ENOMEM; | ||
3191 | |||
3192 | if (cmd == BTRFS_COMPARE_TREE_NEW) { | ||
3193 | root = sctx->send_root; | ||
3194 | cb = __record_new_ref; | ||
3195 | } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { | ||
3196 | root = sctx->parent_root; | ||
3197 | cb = __record_deleted_ref; | ||
3198 | } else { | ||
3199 | BUG(); | ||
3200 | } | ||
3201 | |||
3202 | key.objectid = sctx->cmp_key->objectid; | ||
3203 | key.type = BTRFS_INODE_REF_KEY; | ||
3204 | key.offset = 0; | ||
3205 | while (1) { | ||
3206 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | ||
3207 | if (ret < 0) { | ||
3208 | btrfs_release_path(path); | ||
3209 | goto out; | ||
3210 | } | ||
3211 | if (ret) { | ||
3212 | btrfs_release_path(path); | ||
3213 | break; | ||
3214 | } | ||
3215 | |||
3216 | eb = path->nodes[0]; | ||
3217 | slot = path->slots[0]; | ||
3218 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
3219 | |||
3220 | if (found_key.objectid != key.objectid || | ||
3221 | found_key.type != key.type) { | ||
3222 | btrfs_release_path(path); | ||
3223 | break; | ||
3224 | } | ||
3225 | |||
3226 | ret = iterate_inode_ref(sctx, sctx->parent_root, path, | ||
3227 | &found_key, 0, cb, sctx); | ||
3228 | btrfs_release_path(path); | ||
3229 | if (ret < 0) | ||
3230 | goto out; | ||
3231 | |||
3232 | key.offset = found_key.offset + 1; | ||
3233 | } | ||
3234 | |||
3235 | ret = process_recorded_refs(sctx); | ||
3236 | |||
3237 | out: | ||
3238 | btrfs_free_path(path); | ||
3239 | return ret; | ||
3240 | } | ||
3241 | |||
3242 | static int send_set_xattr(struct send_ctx *sctx, | ||
3243 | struct fs_path *path, | ||
3244 | const char *name, int name_len, | ||
3245 | const char *data, int data_len) | ||
3246 | { | ||
3247 | int ret = 0; | ||
3248 | |||
3249 | ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); | ||
3250 | if (ret < 0) | ||
3251 | goto out; | ||
3252 | |||
3253 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | ||
3254 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); | ||
3255 | TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); | ||
3256 | |||
3257 | ret = send_cmd(sctx); | ||
3258 | |||
3259 | tlv_put_failure: | ||
3260 | out: | ||
3261 | return ret; | ||
3262 | } | ||
3263 | |||
3264 | static int send_remove_xattr(struct send_ctx *sctx, | ||
3265 | struct fs_path *path, | ||
3266 | const char *name, int name_len) | ||
3267 | { | ||
3268 | int ret = 0; | ||
3269 | |||
3270 | ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); | ||
3271 | if (ret < 0) | ||
3272 | goto out; | ||
3273 | |||
3274 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | ||
3275 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); | ||
3276 | |||
3277 | ret = send_cmd(sctx); | ||
3278 | |||
3279 | tlv_put_failure: | ||
3280 | out: | ||
3281 | return ret; | ||
3282 | } | ||
3283 | |||
3284 | static int __process_new_xattr(int num, struct btrfs_key *di_key, | ||
3285 | const char *name, int name_len, | ||
3286 | const char *data, int data_len, | ||
3287 | u8 type, void *ctx) | ||
3288 | { | ||
3289 | int ret; | ||
3290 | struct send_ctx *sctx = ctx; | ||
3291 | struct fs_path *p; | ||
3292 | posix_acl_xattr_header dummy_acl; | ||
3293 | |||
3294 | p = fs_path_alloc(sctx); | ||
3295 | if (!p) | ||
3296 | return -ENOMEM; | ||
3297 | |||
3298 | /* | ||
3299 | * This hack is needed because empty ACLs are stored as zero byte | ||
3300 | * data in xattrs. The problem with that is that receiving these zero byte | ||
3301 | * ACLs will fail later. To fix this, we send a dummy ACL list that | ||
3302 | * only contains the version number and no entries. | ||
3303 | */ | ||
3304 | if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || | ||
3305 | !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { | ||
3306 | if (data_len == 0) { | ||
3307 | dummy_acl.a_version = | ||
3308 | cpu_to_le32(POSIX_ACL_XATTR_VERSION); | ||
3309 | data = (char *)&dummy_acl; | ||
3310 | data_len = sizeof(dummy_acl); | ||
3311 | } | ||
3312 | } | ||
3313 | |||
3314 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | ||
3315 | if (ret < 0) | ||
3316 | goto out; | ||
3317 | |||
3318 | ret = send_set_xattr(sctx, p, name, name_len, data, data_len); | ||
3319 | |||
3320 | out: | ||
3321 | fs_path_free(sctx, p); | ||
3322 | return ret; | ||
3323 | } | ||
3324 | |||
3325 | static int __process_deleted_xattr(int num, struct btrfs_key *di_key, | ||
3326 | const char *name, int name_len, | ||
3327 | const char *data, int data_len, | ||
3328 | u8 type, void *ctx) | ||
3329 | { | ||
3330 | int ret; | ||
3331 | struct send_ctx *sctx = ctx; | ||
3332 | struct fs_path *p; | ||
3333 | |||
3334 | p = fs_path_alloc(sctx); | ||
3335 | if (!p) | ||
3336 | return -ENOMEM; | ||
3337 | |||
3338 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | ||
3339 | if (ret < 0) | ||
3340 | goto out; | ||
3341 | |||
3342 | ret = send_remove_xattr(sctx, p, name, name_len); | ||
3343 | |||
3344 | out: | ||
3345 | fs_path_free(sctx, p); | ||
3346 | return ret; | ||
3347 | } | ||
3348 | |||
3349 | static int process_new_xattr(struct send_ctx *sctx) | ||
3350 | { | ||
3351 | int ret = 0; | ||
3352 | |||
3353 | ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path, | ||
3354 | sctx->cmp_key, __process_new_xattr, sctx); | ||
3355 | |||
3356 | return ret; | ||
3357 | } | ||
3358 | |||
3359 | static int process_deleted_xattr(struct send_ctx *sctx) | ||
3360 | { | ||
3361 | int ret; | ||
3362 | |||
3363 | ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path, | ||
3364 | sctx->cmp_key, __process_deleted_xattr, sctx); | ||
3365 | |||
3366 | return ret; | ||
3367 | } | ||
3368 | |||
3369 | struct find_xattr_ctx { | ||
3370 | const char *name; | ||
3371 | int name_len; | ||
3372 | int found_idx; | ||
3373 | char *found_data; | ||
3374 | int found_data_len; | ||
3375 | }; | ||
3376 | |||
3377 | static int __find_xattr(int num, struct btrfs_key *di_key, | ||
3378 | const char *name, int name_len, | ||
3379 | const char *data, int data_len, | ||
3380 | u8 type, void *vctx) | ||
3381 | { | ||
3382 | struct find_xattr_ctx *ctx = vctx; | ||
3383 | |||
3384 | if (name_len == ctx->name_len && | ||
3385 | strncmp(name, ctx->name, name_len) == 0) { | ||
3386 | ctx->found_idx = num; | ||
3387 | ctx->found_data_len = data_len; | ||
3388 | ctx->found_data = kmalloc(data_len, GFP_NOFS); | ||
3389 | if (!ctx->found_data) | ||
3390 | return -ENOMEM; | ||
3391 | memcpy(ctx->found_data, data, data_len); | ||
3392 | return 1; | ||
3393 | } | ||
3394 | return 0; | ||
3395 | } | ||
3396 | |||
3397 | static int find_xattr(struct send_ctx *sctx, | ||
3398 | struct btrfs_root *root, | ||
3399 | struct btrfs_path *path, | ||
3400 | struct btrfs_key *key, | ||
3401 | const char *name, int name_len, | ||
3402 | char **data, int *data_len) | ||
3403 | { | ||
3404 | int ret; | ||
3405 | struct find_xattr_ctx ctx; | ||
3406 | |||
3407 | ctx.name = name; | ||
3408 | ctx.name_len = name_len; | ||
3409 | ctx.found_idx = -1; | ||
3410 | ctx.found_data = NULL; | ||
3411 | ctx.found_data_len = 0; | ||
3412 | |||
3413 | ret = iterate_dir_item(sctx, root, path, key, __find_xattr, &ctx); | ||
3414 | if (ret < 0) | ||
3415 | return ret; | ||
3416 | |||
3417 | if (ctx.found_idx == -1) | ||
3418 | return -ENOENT; | ||
3419 | if (data) { | ||
3420 | *data = ctx.found_data; | ||
3421 | *data_len = ctx.found_data_len; | ||
3422 | } else { | ||
3423 | kfree(ctx.found_data); | ||
3424 | } | ||
3425 | return ctx.found_idx; | ||
3426 | } | ||
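/*
 * Usage note: callers of find_xattr() treat -ENOENT as "the xattr exists on
 * one side only"; a non-negative index means it exists in both trees and
 * the returned data (owned by the caller and freed with kfree()) can be
 * compared to decide whether a SET_XATTR command is still needed.
 */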
3427 | |||
3428 | |||
3429 | static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, | ||
3430 | const char *name, int name_len, | ||
3431 | const char *data, int data_len, | ||
3432 | u8 type, void *ctx) | ||
3433 | { | ||
3434 | int ret; | ||
3435 | struct send_ctx *sctx = ctx; | ||
3436 | char *found_data = NULL; | ||
3437 | int found_data_len = 0; | ||
3438 | struct fs_path *p = NULL; | ||
3439 | |||
3440 | ret = find_xattr(sctx, sctx->parent_root, sctx->right_path, | ||
3441 | sctx->cmp_key, name, name_len, &found_data, | ||
3442 | &found_data_len); | ||
3443 | if (ret == -ENOENT) { | ||
3444 | ret = __process_new_xattr(num, di_key, name, name_len, data, | ||
3445 | data_len, type, ctx); | ||
3446 | } else if (ret >= 0) { | ||
3447 | if (data_len != found_data_len || | ||
3448 | memcmp(data, found_data, data_len)) { | ||
3449 | ret = __process_new_xattr(num, di_key, name, name_len, | ||
3450 | data, data_len, type, ctx); | ||
3451 | } else { | ||
3452 | ret = 0; | ||
3453 | } | ||
3454 | } | ||
3455 | |||
3456 | kfree(found_data); | ||
3457 | fs_path_free(sctx, p); | ||
3458 | return ret; | ||
3459 | } | ||
3460 | |||
3461 | static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, | ||
3462 | const char *name, int name_len, | ||
3463 | const char *data, int data_len, | ||
3464 | u8 type, void *ctx) | ||
3465 | { | ||
3466 | int ret; | ||
3467 | struct send_ctx *sctx = ctx; | ||
3468 | |||
3469 | ret = find_xattr(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key, | ||
3470 | name, name_len, NULL, NULL); | ||
3471 | if (ret == -ENOENT) | ||
3472 | ret = __process_deleted_xattr(num, di_key, name, name_len, data, | ||
3473 | data_len, type, ctx); | ||
3474 | else if (ret >= 0) | ||
3475 | ret = 0; | ||
3476 | |||
3477 | return ret; | ||
3478 | } | ||
3479 | |||
3480 | static int process_changed_xattr(struct send_ctx *sctx) | ||
3481 | { | ||
3482 | int ret = 0; | ||
3483 | |||
3484 | ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path, | ||
3485 | sctx->cmp_key, __process_changed_new_xattr, sctx); | ||
3486 | if (ret < 0) | ||
3487 | goto out; | ||
3488 | ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path, | ||
3489 | sctx->cmp_key, __process_changed_deleted_xattr, sctx); | ||
3490 | |||
3491 | out: | ||
3492 | return ret; | ||
3493 | } | ||
3494 | |||
3495 | static int process_all_new_xattrs(struct send_ctx *sctx) | ||
3496 | { | ||
3497 | int ret; | ||
3498 | struct btrfs_root *root; | ||
3499 | struct btrfs_path *path; | ||
3500 | struct btrfs_key key; | ||
3501 | struct btrfs_key found_key; | ||
3502 | struct extent_buffer *eb; | ||
3503 | int slot; | ||
3504 | |||
3505 | path = alloc_path_for_send(); | ||
3506 | if (!path) | ||
3507 | return -ENOMEM; | ||
3508 | |||
3509 | root = sctx->send_root; | ||
3510 | |||
3511 | key.objectid = sctx->cmp_key->objectid; | ||
3512 | key.type = BTRFS_XATTR_ITEM_KEY; | ||
3513 | key.offset = 0; | ||
3514 | while (1) { | ||
3515 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | ||
3516 | if (ret < 0) | ||
3517 | goto out; | ||
3518 | if (ret) { | ||
3519 | ret = 0; | ||
3520 | goto out; | ||
3521 | } | ||
3522 | |||
3523 | eb = path->nodes[0]; | ||
3524 | slot = path->slots[0]; | ||
3525 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
3526 | |||
3527 | if (found_key.objectid != key.objectid || | ||
3528 | found_key.type != key.type) { | ||
3529 | ret = 0; | ||
3530 | goto out; | ||
3531 | } | ||
3532 | |||
3533 | ret = iterate_dir_item(sctx, root, path, &found_key, | ||
3534 | __process_new_xattr, sctx); | ||
3535 | if (ret < 0) | ||
3536 | goto out; | ||
3537 | |||
3538 | btrfs_release_path(path); | ||
3539 | key.offset = found_key.offset + 1; | ||
3540 | } | ||
3541 | |||
3542 | out: | ||
3543 | btrfs_free_path(path); | ||
3544 | return ret; | ||
3545 | } | ||
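Each xattr found by this walk is turned into a set-xattr command in the stream. On the receiving side such a command maps onto a single setxattr(2) call; a minimal sketch, where the decoded path, name and value are assumed inputs and not part of this patch:

/* Sketch: apply a decoded set-xattr command with lsetxattr(2). The
 * parameters are assumed to come from the xattr name/data attributes
 * of the command. */
#include <sys/types.h>
#include <sys/xattr.h>

static int apply_set_xattr(const char *path, const char *name,
			   const void *value, size_t len)
{
	/* flags == 0: create the xattr or replace an existing value */
	return lsetxattr(path, name, value, len, 0);
}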
3546 | |||
3547 | /* | ||
3548 | * Read some bytes from the current inode/file and send a write command to | ||
3549 | * user space. | ||
3550 | */ | ||
3551 | static int send_write(struct send_ctx *sctx, u64 offset, u32 len) | ||
3552 | { | ||
3553 | int ret = 0; | ||
3554 | struct fs_path *p; | ||
3555 | loff_t pos = offset; | ||
3556 | int num_read; | ||
3557 | mm_segment_t old_fs; | ||
3558 | |||
3559 | p = fs_path_alloc(sctx); | ||
3560 | if (!p) | ||
3561 | return -ENOMEM; | ||
3562 | |||
3563 | /* | ||
3564 | * The vfs normally only accepts user space buffers for security | ||
3565 | * reasons. We only read from the file and only pass the read_buf | ||
3566 | * buffer to the vfs. As this buffer does not come from a user space | ||
3567 | * call, it's ok to temporarily allow kernel space buffers here. | ||
3568 | */ | ||
3569 | old_fs = get_fs(); | ||
3570 | set_fs(KERNEL_DS); | ||
3571 | |||
3572 | verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len); | ||
3573 | |||
3574 | ret = open_cur_inode_file(sctx); | ||
3575 | if (ret < 0) | ||
3576 | goto out; | ||
3577 | |||
3578 | ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos); | ||
3579 | if (ret < 0) | ||
3580 | goto out; | ||
3581 | num_read = ret; | ||
3582 | if (!num_read) | ||
3583 | goto out; | ||
3584 | |||
3585 | ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); | ||
3586 | if (ret < 0) | ||
3587 | goto out; | ||
3588 | |||
3589 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | ||
3590 | if (ret < 0) | ||
3591 | goto out; | ||
3592 | |||
3593 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | ||
3594 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | ||
3595 | TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); | ||
3596 | |||
3597 | ret = send_cmd(sctx); | ||
3598 | |||
3599 | tlv_put_failure: | ||
3600 | out: | ||
3601 | fs_path_free(sctx, p); | ||
3602 | set_fs(old_fs); | ||
3603 | if (ret < 0) | ||
3604 | return ret; | ||
3605 | return num_read; | ||
3606 | } | ||
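A WRITE command carries the file path, the file offset and the raw data as TLVs. A receiver can apply it with one positioned write; a minimal user-space sketch, where the decoded path, offset and data are assumed inputs:

/* Sketch: apply a decoded WRITE command (path, offset, data) with pwrite(2). */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int apply_write(const char *path, uint64_t offset,
		       const void *data, size_t len)
{
	int fd = open(path, O_WRONLY);
	ssize_t written;

	if (fd < 0)
		return -1;
	written = pwrite(fd, data, len, (off_t)offset);
	close(fd);
	return written == (ssize_t)len ? 0 : -1;
}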
3607 | |||
3608 | /* | ||
3609 | * Send a clone command to user space. | ||
3610 | */ | ||
3611 | static int send_clone(struct send_ctx *sctx, | ||
3612 | u64 offset, u32 len, | ||
3613 | struct clone_root *clone_root) | ||
3614 | { | ||
3615 | int ret = 0; | ||
3616 | struct btrfs_root *clone_root2 = clone_root->root; | ||
3617 | struct fs_path *p; | ||
3618 | u64 gen; | ||
3619 | |||
3620 | verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, " | ||
3621 | "clone_inode=%llu, clone_offset=%llu\n", offset, len, | ||
3622 | clone_root->root->objectid, clone_root->ino, | ||
3623 | clone_root->offset); | ||
3624 | |||
3625 | p = fs_path_alloc(sctx); | ||
3626 | if (!p) | ||
3627 | return -ENOMEM; | ||
3628 | |||
3629 | ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); | ||
3630 | if (ret < 0) | ||
3631 | goto out; | ||
3632 | |||
3633 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | ||
3634 | if (ret < 0) | ||
3635 | goto out; | ||
3636 | |||
3637 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | ||
3638 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); | ||
3639 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | ||
3640 | |||
3641 | if (clone_root2 == sctx->send_root) { | ||
3642 | ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, | ||
3643 | &gen, NULL, NULL, NULL); | ||
3644 | if (ret < 0) | ||
3645 | goto out; | ||
3646 | ret = get_cur_path(sctx, clone_root->ino, gen, p); | ||
3647 | } else { | ||
3648 | ret = get_inode_path(sctx, clone_root2, clone_root->ino, p); | ||
3649 | } | ||
3650 | if (ret < 0) | ||
3651 | goto out; | ||
3652 | |||
3653 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, | ||
3654 | clone_root2->root_item.uuid); | ||
3655 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, | ||
3656 | clone_root2->root_item.ctransid); | ||
3657 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); | ||
3658 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, | ||
3659 | clone_root->offset); | ||
3660 | |||
3661 | ret = send_cmd(sctx); | ||
3662 | |||
3663 | tlv_put_failure: | ||
3664 | out: | ||
3665 | fs_path_free(sctx, p); | ||
3666 | return ret; | ||
3667 | } | ||
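A CLONE command tells the receiver to reflink a range from an already-received file instead of carrying the data. A hedged sketch of applying it with btrfs's clone-range ioctl; the struct layout and ioctl number are copied to match fs/btrfs/ioctl.h of this era and should be treated as assumptions to verify against real headers:

/* Sketch: apply a decoded CLONE command via BTRFS_IOC_CLONE_RANGE.
 * Struct layout and ioctl number are assumptions mirroring btrfs's
 * ioctl.h; check your headers before relying on them. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct btrfs_ioctl_clone_range_args {
	int64_t  src_fd;
	uint64_t src_offset;
	uint64_t src_length;
	uint64_t dest_offset;
};
#define BTRFS_IOC_CLONE_RANGE \
	_IOW(0x94, 13, struct btrfs_ioctl_clone_range_args)

static int apply_clone(const char *clone_path, uint64_t clone_offset,
		       const char *path, uint64_t offset, uint64_t len)
{
	int src = open(clone_path, O_RDONLY);
	int dst = open(path, O_WRONLY);
	struct btrfs_ioctl_clone_range_args args = {
		.src_fd = src,
		.src_offset = clone_offset,
		.src_length = len,
		.dest_offset = offset,
	};
	int ret = (src < 0 || dst < 0) ? -1 :
		  ioctl(dst, BTRFS_IOC_CLONE_RANGE, &args);

	if (src >= 0)
		close(src);
	if (dst >= 0)
		close(dst);
	return ret;
}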
3668 | |||
3669 | static int send_write_or_clone(struct send_ctx *sctx, | ||
3670 | struct btrfs_path *path, | ||
3671 | struct btrfs_key *key, | ||
3672 | struct clone_root *clone_root) | ||
3673 | { | ||
3674 | int ret = 0; | ||
3675 | struct btrfs_file_extent_item *ei; | ||
3676 | u64 offset = key->offset; | ||
3677 | u64 pos = 0; | ||
3678 | u64 len; | ||
3679 | u32 l; | ||
3680 | u8 type; | ||
3681 | |||
3682 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | ||
3683 | struct btrfs_file_extent_item); | ||
3684 | type = btrfs_file_extent_type(path->nodes[0], ei); | ||
3685 | if (type == BTRFS_FILE_EXTENT_INLINE) | ||
3686 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | ||
3687 | else | ||
3688 | len = btrfs_file_extent_num_bytes(path->nodes[0], ei); | ||
3689 | |||
3690 | if (offset + len > sctx->cur_inode_size) | ||
3691 | len = sctx->cur_inode_size - offset; | ||
3692 | if (len == 0) { | ||
3693 | ret = 0; | ||
3694 | goto out; | ||
3695 | } | ||
3696 | |||
3697 | if (!clone_root) { | ||
3698 | while (pos < len) { | ||
3699 | l = len - pos; | ||
3700 | if (l > BTRFS_SEND_READ_SIZE) | ||
3701 | l = BTRFS_SEND_READ_SIZE; | ||
3702 | ret = send_write(sctx, pos + offset, l); | ||
3703 | if (ret < 0) | ||
3704 | goto out; | ||
3705 | if (!ret) | ||
3706 | break; | ||
3707 | pos += ret; | ||
3708 | } | ||
3709 | ret = 0; | ||
3710 | } else { | ||
3711 | ret = send_clone(sctx, offset, len, clone_root); | ||
3712 | } | ||
3713 | |||
3714 | out: | ||
3715 | return ret; | ||
3716 | } | ||
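When no clone source is available, the extent is streamed as plain writes of at most BTRFS_SEND_READ_SIZE bytes each, advancing by however much send_write() actually read. The chunking arithmetic in isolation, as a small sketch (READ_SIZE is only a stand-in example value, not the real constant):

/* Sketch: split [offset, offset + len) into chunks of at most READ_SIZE,
 * mirroring the loop in send_write_or_clone(). */
#include <stdint.h>
#include <stdio.h>

#define READ_SIZE (48 * 1024)	/* stand-in for BTRFS_SEND_READ_SIZE */

int main(void)
{
	uint64_t offset = 1000000, len = 100000, pos = 0;

	while (pos < len) {
		uint32_t l = len - pos > READ_SIZE ? READ_SIZE : len - pos;

		printf("write offset=%llu len=%u\n",
		       (unsigned long long)(offset + pos), l);
		pos += l;	/* send_write() may read less; pos then advances by that */
	}
	return 0;
}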
3717 | |||
3718 | static int is_extent_unchanged(struct send_ctx *sctx, | ||
3719 | struct btrfs_path *left_path, | ||
3720 | struct btrfs_key *ekey) | ||
3721 | { | ||
3722 | int ret = 0; | ||
3723 | struct btrfs_key key; | ||
3724 | struct btrfs_path *path = NULL; | ||
3725 | struct extent_buffer *eb; | ||
3726 | int slot; | ||
3727 | struct btrfs_key found_key; | ||
3728 | struct btrfs_file_extent_item *ei; | ||
3729 | u64 left_disknr; | ||
3730 | u64 right_disknr; | ||
3731 | u64 left_offset; | ||
3732 | u64 right_offset; | ||
3733 | u64 left_offset_fixed; | ||
3734 | u64 left_len; | ||
3735 | u64 right_len; | ||
3736 | u8 left_type; | ||
3737 | u8 right_type; | ||
3738 | |||
3739 | path = alloc_path_for_send(); | ||
3740 | if (!path) | ||
3741 | return -ENOMEM; | ||
3742 | |||
3743 | eb = left_path->nodes[0]; | ||
3744 | slot = left_path->slots[0]; | ||
3745 | |||
3746 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | ||
3747 | left_type = btrfs_file_extent_type(eb, ei); | ||
3748 | left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | ||
3749 | left_len = btrfs_file_extent_num_bytes(eb, ei); | ||
3750 | left_offset = btrfs_file_extent_offset(eb, ei); | ||
3751 | |||
3752 | if (left_type != BTRFS_FILE_EXTENT_REG) { | ||
3753 | ret = 0; | ||
3754 | goto out; | ||
3755 | } | ||
3756 | |||
3757 | /* | ||
3758 | * The following comments refer to these graphics. L is the left | ||
3759 | * extent that we are currently checking. 1-8 are the right | ||
3760 | * extents that we iterate over. | ||
3761 | * | ||
3762 | * |-----L-----| | ||
3763 | * |-1-|-2a-|-3-|-4-|-5-|-6-| | ||
3764 | * | ||
3765 | * |-----L-----| | ||
3766 | * |--1--|-2b-|...(same as above) | ||
3767 | * | ||
3768 | * Alternative situation. Happens on files where extents got split. | ||
3769 | * |-----L-----| | ||
3770 | * |-----------7-----------|-6-| | ||
3771 | * | ||
3772 | * Alternative situation. Happens on files which got larger. | ||
3773 | * |-----L-----| | ||
3774 | * |-8-| | ||
3775 | * Nothing follows after 8. | ||
3776 | */ | ||
3777 | |||
3778 | key.objectid = ekey->objectid; | ||
3779 | key.type = BTRFS_EXTENT_DATA_KEY; | ||
3780 | key.offset = ekey->offset; | ||
3781 | ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); | ||
3782 | if (ret < 0) | ||
3783 | goto out; | ||
3784 | if (ret) { | ||
3785 | ret = 0; | ||
3786 | goto out; | ||
3787 | } | ||
3788 | |||
3789 | /* | ||
3790 | * Handle special case where the right side has no extents at all. | ||
3791 | */ | ||
3792 | eb = path->nodes[0]; | ||
3793 | slot = path->slots[0]; | ||
3794 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
3795 | if (found_key.objectid != key.objectid || | ||
3796 | found_key.type != key.type) { | ||
3797 | ret = 0; | ||
3798 | goto out; | ||
3799 | } | ||
3800 | |||
3801 | /* | ||
3802 | * We're now on 2a, 2b or 7. | ||
3803 | */ | ||
3804 | key = found_key; | ||
3805 | while (key.offset < ekey->offset + left_len) { | ||
3806 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | ||
3807 | right_type = btrfs_file_extent_type(eb, ei); | ||
3808 | right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | ||
3809 | right_len = btrfs_file_extent_num_bytes(eb, ei); | ||
3810 | right_offset = btrfs_file_extent_offset(eb, ei); | ||
3811 | |||
3812 | if (right_type != BTRFS_FILE_EXTENT_REG) { | ||
3813 | ret = 0; | ||
3814 | goto out; | ||
3815 | } | ||
3816 | |||
3817 | /* | ||
3818 | * Are we at extent 8? If yes, we know the extent is changed. | ||
3819 | * This may only happen on the first iteration. | ||
3820 | */ | ||
3821 | if (found_key.offset + right_len < ekey->offset) { | ||
3822 | ret = 0; | ||
3823 | goto out; | ||
3824 | } | ||
3825 | |||
3826 | left_offset_fixed = left_offset; | ||
3827 | if (key.offset < ekey->offset) { | ||
3828 | /* Fix the right offset for 2a and 7. */ | ||
3829 | right_offset += ekey->offset - key.offset; | ||
3830 | } else { | ||
3831 | /* Fix the left offset for everything behind 2a and 2b. */ | ||
3832 | left_offset_fixed += key.offset - ekey->offset; | ||
3833 | } | ||
3834 | |||
3835 | /* | ||
3836 | * Check if we have the same extent. | ||
3837 | */ | ||
3838 | if (left_disknr + left_offset_fixed != | ||
3839 | right_disknr + right_offset) { | ||
3840 | ret = 0; | ||
3841 | goto out; | ||
3842 | } | ||
3843 | |||
3844 | /* | ||
3845 | * Go to the next extent. | ||
3846 | */ | ||
3847 | ret = btrfs_next_item(sctx->parent_root, path); | ||
3848 | if (ret < 0) | ||
3849 | goto out; | ||
3850 | if (!ret) { | ||
3851 | eb = path->nodes[0]; | ||
3852 | slot = path->slots[0]; | ||
3853 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
3854 | } | ||
3855 | if (ret || found_key.objectid != key.objectid || | ||
3856 | found_key.type != key.type) { | ||
3857 | key.offset += right_len; | ||
3858 | break; | ||
3859 | } else { | ||
3860 | if (found_key.offset != key.offset + right_len) { | ||
3861 | /* Should really not happen */ | ||
3862 | ret = -EIO; | ||
3863 | goto out; | ||
3864 | } | ||
3865 | } | ||
3866 | key = found_key; | ||
3867 | } | ||
3868 | |||
3869 | /* | ||
3870 | * We're now either past the end of the left extent (treat as unchanged) | ||
3871 | * or at the end of the right side (treat as changed). | ||
3872 | */ | ||
3873 | if (key.offset >= ekey->offset + left_len) | ||
3874 | ret = 1; | ||
3875 | else | ||
3876 | ret = 0; | ||
3877 | |||
3878 | |||
3879 | out: | ||
3880 | btrfs_free_path(path); | ||
3881 | return ret; | ||
3882 | } | ||
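The heart of the check above is whether the overlapping file range points at the same physical bytes on both sides: disk_bytenr plus the overlap-adjusted extent offset must match. A small worked example of that arithmetic with made-up numbers:

/* Sketch: the "same physical bytes" check from is_extent_unchanged(),
 * applied to one left/right extent pair. All numbers are illustrative. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Left extent item at file offset 4096: disk extent 1048576, offset
	 * 4096 into that extent (e.g. the item got split on the left side). */
	uint64_t ekey_offset = 4096, left_disknr = 1048576, left_offset = 4096;
	/* Right extent item at file offset 0: same disk extent, offset 0. */
	uint64_t key_offset = 0, right_disknr = 1048576, right_offset = 0;
	uint64_t left_offset_fixed = left_offset;

	if (key_offset < ekey_offset)
		right_offset += ekey_offset - key_offset;	/* right starts before L (2a/7) */
	else
		left_offset_fixed += key_offset - ekey_offset;	/* right starts inside L */

	/* Same physical bytes back the overlap -> the extent is unchanged. */
	printf("unchanged: %d\n",
	       left_disknr + left_offset_fixed == right_disknr + right_offset);
	return 0;
}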
3883 | |||
3884 | static int process_extent(struct send_ctx *sctx, | ||
3885 | struct btrfs_path *path, | ||
3886 | struct btrfs_key *key) | ||
3887 | { | ||
3888 | int ret = 0; | ||
3889 | struct clone_root *found_clone = NULL; | ||
3890 | |||
3891 | if (S_ISLNK(sctx->cur_inode_mode)) | ||
3892 | return 0; | ||
3893 | |||
3894 | if (sctx->parent_root && !sctx->cur_inode_new) { | ||
3895 | ret = is_extent_unchanged(sctx, path, key); | ||
3896 | if (ret < 0) | ||
3897 | goto out; | ||
3898 | if (ret) { | ||
3899 | ret = 0; | ||
3900 | goto out; | ||
3901 | } | ||
3902 | } | ||
3903 | |||
3904 | ret = find_extent_clone(sctx, path, key->objectid, key->offset, | ||
3905 | sctx->cur_inode_size, &found_clone); | ||
3906 | if (ret != -ENOENT && ret < 0) | ||
3907 | goto out; | ||
3908 | |||
3909 | ret = send_write_or_clone(sctx, path, key, found_clone); | ||
3910 | |||
3911 | out: | ||
3912 | return ret; | ||
3913 | } | ||
3914 | |||
3915 | static int process_all_extents(struct send_ctx *sctx) | ||
3916 | { | ||
3917 | int ret; | ||
3918 | struct btrfs_root *root; | ||
3919 | struct btrfs_path *path; | ||
3920 | struct btrfs_key key; | ||
3921 | struct btrfs_key found_key; | ||
3922 | struct extent_buffer *eb; | ||
3923 | int slot; | ||
3924 | |||
3925 | root = sctx->send_root; | ||
3926 | path = alloc_path_for_send(); | ||
3927 | if (!path) | ||
3928 | return -ENOMEM; | ||
3929 | |||
3930 | key.objectid = sctx->cmp_key->objectid; | ||
3931 | key.type = BTRFS_EXTENT_DATA_KEY; | ||
3932 | key.offset = 0; | ||
3933 | while (1) { | ||
3934 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | ||
3935 | if (ret < 0) | ||
3936 | goto out; | ||
3937 | if (ret) { | ||
3938 | ret = 0; | ||
3939 | goto out; | ||
3940 | } | ||
3941 | |||
3942 | eb = path->nodes[0]; | ||
3943 | slot = path->slots[0]; | ||
3944 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
3945 | |||
3946 | if (found_key.objectid != key.objectid || | ||
3947 | found_key.type != key.type) { | ||
3948 | ret = 0; | ||
3949 | goto out; | ||
3950 | } | ||
3951 | |||
3952 | ret = process_extent(sctx, path, &found_key); | ||
3953 | if (ret < 0) | ||
3954 | goto out; | ||
3955 | |||
3956 | btrfs_release_path(path); | ||
3957 | key.offset = found_key.offset + 1; | ||
3958 | } | ||
3959 | |||
3960 | out: | ||
3961 | btrfs_free_path(path); | ||
3962 | return ret; | ||
3963 | } | ||
3964 | |||
3965 | static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end) | ||
3966 | { | ||
3967 | int ret = 0; | ||
3968 | |||
3969 | if (sctx->cur_ino == 0) | ||
3970 | goto out; | ||
3971 | if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && | ||
3972 | sctx->cmp_key->type <= BTRFS_INODE_REF_KEY) | ||
3973 | goto out; | ||
3974 | if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) | ||
3975 | goto out; | ||
3976 | |||
3977 | ret = process_recorded_refs(sctx); | ||
3978 | |||
3979 | out: | ||
3980 | return ret; | ||
3981 | } | ||
3982 | |||
3983 | static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | ||
3984 | { | ||
3985 | int ret = 0; | ||
3986 | u64 left_mode; | ||
3987 | u64 left_uid; | ||
3988 | u64 left_gid; | ||
3989 | u64 right_mode; | ||
3990 | u64 right_uid; | ||
3991 | u64 right_gid; | ||
3992 | int need_chmod = 0; | ||
3993 | int need_chown = 0; | ||
3994 | |||
3995 | ret = process_recorded_refs_if_needed(sctx, at_end); | ||
3996 | if (ret < 0) | ||
3997 | goto out; | ||
3998 | |||
3999 | if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) | ||
4000 | goto out; | ||
4001 | if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) | ||
4002 | goto out; | ||
4003 | |||
4004 | ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, | ||
4005 | &left_mode, &left_uid, &left_gid); | ||
4006 | if (ret < 0) | ||
4007 | goto out; | ||
4008 | |||
4009 | if (!S_ISLNK(sctx->cur_inode_mode)) { | ||
4010 | if (!sctx->parent_root || sctx->cur_inode_new) { | ||
4011 | need_chmod = 1; | ||
4012 | need_chown = 1; | ||
4013 | } else { | ||
4014 | ret = get_inode_info(sctx->parent_root, sctx->cur_ino, | ||
4015 | NULL, NULL, &right_mode, &right_uid, | ||
4016 | &right_gid); | ||
4017 | if (ret < 0) | ||
4018 | goto out; | ||
4019 | |||
4020 | if (left_uid != right_uid || left_gid != right_gid) | ||
4021 | need_chown = 1; | ||
4022 | if (left_mode != right_mode) | ||
4023 | need_chmod = 1; | ||
4024 | } | ||
4025 | } | ||
4026 | |||
4027 | if (S_ISREG(sctx->cur_inode_mode)) { | ||
4028 | ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen, | ||
4029 | sctx->cur_inode_size); | ||
4030 | if (ret < 0) | ||
4031 | goto out; | ||
4032 | } | ||
4033 | |||
4034 | if (need_chown) { | ||
4035 | ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, | ||
4036 | left_uid, left_gid); | ||
4037 | if (ret < 0) | ||
4038 | goto out; | ||
4039 | } | ||
4040 | if (need_chmod) { | ||
4041 | ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, | ||
4042 | left_mode); | ||
4043 | if (ret < 0) | ||
4044 | goto out; | ||
4045 | } | ||
4046 | |||
4047 | /* | ||
4048 | * The utimes need to be sent every time, regardless of whether they | ||
4049 | * changed between the two trees, as earlier commands modified the inode. | ||
4050 | */ | ||
4051 | ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); | ||
4052 | if (ret < 0) | ||
4053 | goto out; | ||
4054 | |||
4055 | out: | ||
4056 | return ret; | ||
4057 | } | ||
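The truncate/chown/chmod/utimes commands emitted here correspond one-to-one to ordinary syscalls on the receiving side. A minimal sketch for a regular file, with all parameters assumed to be decoded from the stream:

/* Sketch: apply the finishing commands for one regular file on the receive
 * side. 'path', 'size', 'uid', 'gid', 'mode' and the timestamps are assumed
 * to come from decoded TRUNCATE/CHOWN/CHMOD/UTIMES commands. */
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static int finish_inode(const char *path, off_t size, uid_t uid, gid_t gid,
			mode_t mode, const struct timespec times[2])
{
	if (truncate(path, size) < 0)
		return -1;
	if (lchown(path, uid, gid) < 0)
		return -1;
	if (chmod(path, mode) < 0)
		return -1;
	/* AT_SYMLINK_NOFOLLOW so a symlink's own timestamps would be set */
	if (utimensat(AT_FDCWD, path, times, AT_SYMLINK_NOFOLLOW) < 0)
		return -1;
	return 0;
}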
4058 | |||
4059 | static int changed_inode(struct send_ctx *sctx, | ||
4060 | enum btrfs_compare_tree_result result) | ||
4061 | { | ||
4062 | int ret = 0; | ||
4063 | struct btrfs_key *key = sctx->cmp_key; | ||
4064 | struct btrfs_inode_item *left_ii = NULL; | ||
4065 | struct btrfs_inode_item *right_ii = NULL; | ||
4066 | u64 left_gen = 0; | ||
4067 | u64 right_gen = 0; | ||
4068 | |||
4069 | ret = close_cur_inode_file(sctx); | ||
4070 | if (ret < 0) | ||
4071 | goto out; | ||
4072 | |||
4073 | sctx->cur_ino = key->objectid; | ||
4074 | sctx->cur_inode_new_gen = 0; | ||
4075 | sctx->cur_inode_first_ref_orphan = 0; | ||
4076 | sctx->send_progress = sctx->cur_ino; | ||
4077 | |||
4078 | if (result == BTRFS_COMPARE_TREE_NEW || | ||
4079 | result == BTRFS_COMPARE_TREE_CHANGED) { | ||
4080 | left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], | ||
4081 | sctx->left_path->slots[0], | ||
4082 | struct btrfs_inode_item); | ||
4083 | left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], | ||
4084 | left_ii); | ||
4085 | } else { | ||
4086 | right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], | ||
4087 | sctx->right_path->slots[0], | ||
4088 | struct btrfs_inode_item); | ||
4089 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], | ||
4090 | right_ii); | ||
4091 | } | ||
4092 | if (result == BTRFS_COMPARE_TREE_CHANGED) { | ||
4093 | right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], | ||
4094 | sctx->right_path->slots[0], | ||
4095 | struct btrfs_inode_item); | ||
4096 | |||
4097 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], | ||
4098 | right_ii); | ||
4099 | if (left_gen != right_gen) | ||
4100 | sctx->cur_inode_new_gen = 1; | ||
4101 | } | ||
4102 | |||
4103 | if (result == BTRFS_COMPARE_TREE_NEW) { | ||
4104 | sctx->cur_inode_gen = left_gen; | ||
4105 | sctx->cur_inode_new = 1; | ||
4106 | sctx->cur_inode_deleted = 0; | ||
4107 | sctx->cur_inode_size = btrfs_inode_size( | ||
4108 | sctx->left_path->nodes[0], left_ii); | ||
4109 | sctx->cur_inode_mode = btrfs_inode_mode( | ||
4110 | sctx->left_path->nodes[0], left_ii); | ||
4111 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | ||
4112 | ret = send_create_inode(sctx, sctx->left_path, | ||
4113 | sctx->cmp_key); | ||
4114 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { | ||
4115 | sctx->cur_inode_gen = right_gen; | ||
4116 | sctx->cur_inode_new = 0; | ||
4117 | sctx->cur_inode_deleted = 1; | ||
4118 | sctx->cur_inode_size = btrfs_inode_size( | ||
4119 | sctx->right_path->nodes[0], right_ii); | ||
4120 | sctx->cur_inode_mode = btrfs_inode_mode( | ||
4121 | sctx->right_path->nodes[0], right_ii); | ||
4122 | } else if (result == BTRFS_COMPARE_TREE_CHANGED) { | ||
4123 | if (sctx->cur_inode_new_gen) { | ||
4124 | sctx->cur_inode_gen = right_gen; | ||
4125 | sctx->cur_inode_new = 0; | ||
4126 | sctx->cur_inode_deleted = 1; | ||
4127 | sctx->cur_inode_size = btrfs_inode_size( | ||
4128 | sctx->right_path->nodes[0], right_ii); | ||
4129 | sctx->cur_inode_mode = btrfs_inode_mode( | ||
4130 | sctx->right_path->nodes[0], right_ii); | ||
4131 | ret = process_all_refs(sctx, | ||
4132 | BTRFS_COMPARE_TREE_DELETED); | ||
4133 | if (ret < 0) | ||
4134 | goto out; | ||
4135 | |||
4136 | sctx->cur_inode_gen = left_gen; | ||
4137 | sctx->cur_inode_new = 1; | ||
4138 | sctx->cur_inode_deleted = 0; | ||
4139 | sctx->cur_inode_size = btrfs_inode_size( | ||
4140 | sctx->left_path->nodes[0], left_ii); | ||
4141 | sctx->cur_inode_mode = btrfs_inode_mode( | ||
4142 | sctx->left_path->nodes[0], left_ii); | ||
4143 | ret = send_create_inode(sctx, sctx->left_path, | ||
4144 | sctx->cmp_key); | ||
4145 | if (ret < 0) | ||
4146 | goto out; | ||
4147 | |||
4148 | ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); | ||
4149 | if (ret < 0) | ||
4150 | goto out; | ||
4151 | ret = process_all_extents(sctx); | ||
4152 | if (ret < 0) | ||
4153 | goto out; | ||
4154 | ret = process_all_new_xattrs(sctx); | ||
4155 | if (ret < 0) | ||
4156 | goto out; | ||
4157 | } else { | ||
4158 | sctx->cur_inode_gen = left_gen; | ||
4159 | sctx->cur_inode_new = 0; | ||
4160 | sctx->cur_inode_new_gen = 0; | ||
4161 | sctx->cur_inode_deleted = 0; | ||
4162 | sctx->cur_inode_size = btrfs_inode_size( | ||
4163 | sctx->left_path->nodes[0], left_ii); | ||
4164 | sctx->cur_inode_mode = btrfs_inode_mode( | ||
4165 | sctx->left_path->nodes[0], left_ii); | ||
4166 | } | ||
4167 | } | ||
4168 | |||
4169 | out: | ||
4170 | return ret; | ||
4171 | } | ||
4172 | |||
4173 | static int changed_ref(struct send_ctx *sctx, | ||
4174 | enum btrfs_compare_tree_result result) | ||
4175 | { | ||
4176 | int ret = 0; | ||
4177 | |||
4178 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | ||
4179 | |||
4180 | if (!sctx->cur_inode_new_gen && | ||
4181 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { | ||
4182 | if (result == BTRFS_COMPARE_TREE_NEW) | ||
4183 | ret = record_new_ref(sctx); | ||
4184 | else if (result == BTRFS_COMPARE_TREE_DELETED) | ||
4185 | ret = record_deleted_ref(sctx); | ||
4186 | else if (result == BTRFS_COMPARE_TREE_CHANGED) | ||
4187 | ret = record_changed_ref(sctx); | ||
4188 | } | ||
4189 | |||
4190 | return ret; | ||
4191 | } | ||
4192 | |||
4193 | static int changed_xattr(struct send_ctx *sctx, | ||
4194 | enum btrfs_compare_tree_result result) | ||
4195 | { | ||
4196 | int ret = 0; | ||
4197 | |||
4198 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | ||
4199 | |||
4200 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | ||
4201 | if (result == BTRFS_COMPARE_TREE_NEW) | ||
4202 | ret = process_new_xattr(sctx); | ||
4203 | else if (result == BTRFS_COMPARE_TREE_DELETED) | ||
4204 | ret = process_deleted_xattr(sctx); | ||
4205 | else if (result == BTRFS_COMPARE_TREE_CHANGED) | ||
4206 | ret = process_changed_xattr(sctx); | ||
4207 | } | ||
4208 | |||
4209 | return ret; | ||
4210 | } | ||
4211 | |||
4212 | static int changed_extent(struct send_ctx *sctx, | ||
4213 | enum btrfs_compare_tree_result result) | ||
4214 | { | ||
4215 | int ret = 0; | ||
4216 | |||
4217 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | ||
4218 | |||
4219 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | ||
4220 | if (result != BTRFS_COMPARE_TREE_DELETED) | ||
4221 | ret = process_extent(sctx, sctx->left_path, | ||
4222 | sctx->cmp_key); | ||
4223 | } | ||
4224 | |||
4225 | return ret; | ||
4226 | } | ||
4227 | |||
4228 | |||
4229 | static int changed_cb(struct btrfs_root *left_root, | ||
4230 | struct btrfs_root *right_root, | ||
4231 | struct btrfs_path *left_path, | ||
4232 | struct btrfs_path *right_path, | ||
4233 | struct btrfs_key *key, | ||
4234 | enum btrfs_compare_tree_result result, | ||
4235 | void *ctx) | ||
4236 | { | ||
4237 | int ret = 0; | ||
4238 | struct send_ctx *sctx = ctx; | ||
4239 | |||
4240 | sctx->left_path = left_path; | ||
4241 | sctx->right_path = right_path; | ||
4242 | sctx->cmp_key = key; | ||
4243 | |||
4244 | ret = finish_inode_if_needed(sctx, 0); | ||
4245 | if (ret < 0) | ||
4246 | goto out; | ||
4247 | |||
4248 | if (key->type == BTRFS_INODE_ITEM_KEY) | ||
4249 | ret = changed_inode(sctx, result); | ||
4250 | else if (key->type == BTRFS_INODE_REF_KEY) | ||
4251 | ret = changed_ref(sctx, result); | ||
4252 | else if (key->type == BTRFS_XATTR_ITEM_KEY) | ||
4253 | ret = changed_xattr(sctx, result); | ||
4254 | else if (key->type == BTRFS_EXTENT_DATA_KEY) | ||
4255 | ret = changed_extent(sctx, result); | ||
4256 | |||
4257 | out: | ||
4258 | return ret; | ||
4259 | } | ||
4260 | |||
4261 | static int full_send_tree(struct send_ctx *sctx) | ||
4262 | { | ||
4263 | int ret; | ||
4264 | struct btrfs_trans_handle *trans = NULL; | ||
4265 | struct btrfs_root *send_root = sctx->send_root; | ||
4266 | struct btrfs_key key; | ||
4267 | struct btrfs_key found_key; | ||
4268 | struct btrfs_path *path; | ||
4269 | struct extent_buffer *eb; | ||
4270 | int slot; | ||
4271 | u64 start_ctransid; | ||
4272 | u64 ctransid; | ||
4273 | |||
4274 | path = alloc_path_for_send(); | ||
4275 | if (!path) | ||
4276 | return -ENOMEM; | ||
4277 | |||
4278 | spin_lock(&send_root->root_times_lock); | ||
4279 | start_ctransid = btrfs_root_ctransid(&send_root->root_item); | ||
4280 | spin_unlock(&send_root->root_times_lock); | ||
4281 | |||
4282 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; | ||
4283 | key.type = BTRFS_INODE_ITEM_KEY; | ||
4284 | key.offset = 0; | ||
4285 | |||
4286 | join_trans: | ||
4287 | /* | ||
4288 | * We need to make sure the transaction does not get committed | ||
4289 | * while we do anything on commit roots. Join a transaction to prevent | ||
4290 | * this. | ||
4291 | */ | ||
4292 | trans = btrfs_join_transaction(send_root); | ||
4293 | if (IS_ERR(trans)) { | ||
4294 | ret = PTR_ERR(trans); | ||
4295 | trans = NULL; | ||
4296 | goto out; | ||
4297 | } | ||
4298 | |||
4299 | /* | ||
4300 | * Make sure the tree has not changed | ||
4301 | */ | ||
4302 | spin_lock(&send_root->root_times_lock); | ||
4303 | ctransid = btrfs_root_ctransid(&send_root->root_item); | ||
4304 | spin_unlock(&send_root->root_times_lock); | ||
4305 | |||
4306 | if (ctransid != start_ctransid) { | ||
4307 | WARN(1, KERN_WARNING "btrfs: the root that you're trying to " | ||
4308 | "send was modified in between. This is " | ||
4309 | "probably a bug.\n"); | ||
4310 | ret = -EIO; | ||
4311 | goto out; | ||
4312 | } | ||
4313 | |||
4314 | ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); | ||
4315 | if (ret < 0) | ||
4316 | goto out; | ||
4317 | if (ret) | ||
4318 | goto out_finish; | ||
4319 | |||
4320 | while (1) { | ||
4321 | /* | ||
4322 | * When someone wants to commit while we iterate, end the | ||
4323 | * joined transaction and rejoin. | ||
4324 | */ | ||
4325 | if (btrfs_should_end_transaction(trans, send_root)) { | ||
4326 | ret = btrfs_end_transaction(trans, send_root); | ||
4327 | trans = NULL; | ||
4328 | if (ret < 0) | ||
4329 | goto out; | ||
4330 | btrfs_release_path(path); | ||
4331 | goto join_trans; | ||
4332 | } | ||
4333 | |||
4334 | eb = path->nodes[0]; | ||
4335 | slot = path->slots[0]; | ||
4336 | btrfs_item_key_to_cpu(eb, &found_key, slot); | ||
4337 | |||
4338 | ret = changed_cb(send_root, NULL, path, NULL, | ||
4339 | &found_key, BTRFS_COMPARE_TREE_NEW, sctx); | ||
4340 | if (ret < 0) | ||
4341 | goto out; | ||
4342 | |||
4343 | key.objectid = found_key.objectid; | ||
4344 | key.type = found_key.type; | ||
4345 | key.offset = found_key.offset + 1; | ||
4346 | |||
4347 | ret = btrfs_next_item(send_root, path); | ||
4348 | if (ret < 0) | ||
4349 | goto out; | ||
4350 | if (ret) { | ||
4351 | ret = 0; | ||
4352 | break; | ||
4353 | } | ||
4354 | } | ||
4355 | |||
4356 | out_finish: | ||
4357 | ret = finish_inode_if_needed(sctx, 1); | ||
4358 | |||
4359 | out: | ||
4360 | btrfs_free_path(path); | ||
4361 | if (trans) { | ||
4362 | if (!ret) | ||
4363 | ret = btrfs_end_transaction(trans, send_root); | ||
4364 | else | ||
4365 | btrfs_end_transaction(trans, send_root); | ||
4366 | } | ||
4367 | return ret; | ||
4368 | } | ||
4369 | |||
4370 | static int send_subvol(struct send_ctx *sctx) | ||
4371 | { | ||
4372 | int ret; | ||
4373 | |||
4374 | ret = send_header(sctx); | ||
4375 | if (ret < 0) | ||
4376 | goto out; | ||
4377 | |||
4378 | ret = send_subvol_begin(sctx); | ||
4379 | if (ret < 0) | ||
4380 | goto out; | ||
4381 | |||
4382 | if (sctx->parent_root) { | ||
4383 | ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, | ||
4384 | changed_cb, sctx); | ||
4385 | if (ret < 0) | ||
4386 | goto out; | ||
4387 | ret = finish_inode_if_needed(sctx, 1); | ||
4388 | if (ret < 0) | ||
4389 | goto out; | ||
4390 | } else { | ||
4391 | ret = full_send_tree(sctx); | ||
4392 | if (ret < 0) | ||
4393 | goto out; | ||
4394 | } | ||
4395 | |||
4396 | out: | ||
4397 | if (!ret) | ||
4398 | ret = close_cur_inode_file(sctx); | ||
4399 | else | ||
4400 | close_cur_inode_file(sctx); | ||
4401 | |||
4402 | free_recorded_refs(sctx); | ||
4403 | return ret; | ||
4404 | } | ||
4405 | |||
4406 | long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) | ||
4407 | { | ||
4408 | int ret = 0; | ||
4409 | struct btrfs_root *send_root; | ||
4410 | struct btrfs_root *clone_root; | ||
4411 | struct btrfs_fs_info *fs_info; | ||
4412 | struct btrfs_ioctl_send_args *arg = NULL; | ||
4413 | struct btrfs_key key; | ||
4414 | struct file *filp = NULL; | ||
4415 | struct send_ctx *sctx = NULL; | ||
4416 | u32 i; | ||
4417 | u64 *clone_sources_tmp = NULL; | ||
4418 | |||
4419 | if (!capable(CAP_SYS_ADMIN)) | ||
4420 | return -EPERM; | ||
4421 | |||
4422 | send_root = BTRFS_I(fdentry(mnt_file)->d_inode)->root; | ||
4423 | fs_info = send_root->fs_info; | ||
4424 | |||
4425 | arg = memdup_user(arg_, sizeof(*arg)); | ||
4426 | if (IS_ERR(arg)) { | ||
4427 | ret = PTR_ERR(arg); | ||
4428 | arg = NULL; | ||
4429 | goto out; | ||
4430 | } | ||
4431 | |||
4432 | if (!access_ok(VERIFY_READ, arg->clone_sources, | ||
4433 | sizeof(*arg->clone_sources) * | ||
4434 | arg->clone_sources_count)) { | ||
4435 | ret = -EFAULT; | ||
4436 | goto out; | ||
4437 | } | ||
4438 | |||
4439 | sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS); | ||
4440 | if (!sctx) { | ||
4441 | ret = -ENOMEM; | ||
4442 | goto out; | ||
4443 | } | ||
4444 | |||
4445 | INIT_LIST_HEAD(&sctx->new_refs); | ||
4446 | INIT_LIST_HEAD(&sctx->deleted_refs); | ||
4447 | INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS); | ||
4448 | INIT_LIST_HEAD(&sctx->name_cache_list); | ||
4449 | |||
4450 | sctx->send_filp = fget(arg->send_fd); | ||
4451 | if (!sctx->send_filp) { | ||
4452 | ret = -EBADF; | ||
4453 | goto out; | ||
4454 | } | ||
4455 | |||
4456 | sctx->mnt = mnt_file->f_path.mnt; | ||
4457 | |||
4458 | sctx->send_root = send_root; | ||
4459 | sctx->clone_roots_cnt = arg->clone_sources_count; | ||
4460 | |||
4461 | sctx->send_max_size = BTRFS_SEND_BUF_SIZE; | ||
4462 | sctx->send_buf = vmalloc(sctx->send_max_size); | ||
4463 | if (!sctx->send_buf) { | ||
4464 | ret = -ENOMEM; | ||
4465 | goto out; | ||
4466 | } | ||
4467 | |||
4468 | sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); | ||
4469 | if (!sctx->read_buf) { | ||
4470 | ret = -ENOMEM; | ||
4471 | goto out; | ||
4472 | } | ||
4473 | |||
4474 | sctx->clone_roots = vzalloc(sizeof(struct clone_root) * | ||
4475 | (arg->clone_sources_count + 1)); | ||
4476 | if (!sctx->clone_roots) { | ||
4477 | ret = -ENOMEM; | ||
4478 | goto out; | ||
4479 | } | ||
4480 | |||
4481 | if (arg->clone_sources_count) { | ||
4482 | clone_sources_tmp = vmalloc(arg->clone_sources_count * | ||
4483 | sizeof(*arg->clone_sources)); | ||
4484 | if (!clone_sources_tmp) { | ||
4485 | ret = -ENOMEM; | ||
4486 | goto out; | ||
4487 | } | ||
4488 | |||
4489 | ret = copy_from_user(clone_sources_tmp, arg->clone_sources, | ||
4490 | arg->clone_sources_count * | ||
4491 | sizeof(*arg->clone_sources)); | ||
4492 | if (ret) { | ||
4493 | ret = -EFAULT; | ||
4494 | goto out; | ||
4495 | } | ||
4496 | |||
4497 | for (i = 0; i < arg->clone_sources_count; i++) { | ||
4498 | key.objectid = clone_sources_tmp[i]; | ||
4499 | key.type = BTRFS_ROOT_ITEM_KEY; | ||
4500 | key.offset = (u64)-1; | ||
4501 | clone_root = btrfs_read_fs_root_no_name(fs_info, &key); | ||
4502 | if (!clone_root) { | ||
4503 | ret = -EINVAL; | ||
4504 | goto out; | ||
4505 | } | ||
4506 | if (IS_ERR(clone_root)) { | ||
4507 | ret = PTR_ERR(clone_root); | ||
4508 | goto out; | ||
4509 | } | ||
4510 | sctx->clone_roots[i].root = clone_root; | ||
4511 | } | ||
4512 | vfree(clone_sources_tmp); | ||
4513 | clone_sources_tmp = NULL; | ||
4514 | } | ||
4515 | |||
4516 | if (arg->parent_root) { | ||
4517 | key.objectid = arg->parent_root; | ||
4518 | key.type = BTRFS_ROOT_ITEM_KEY; | ||
4519 | key.offset = (u64)-1; | ||
4520 | sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key); | ||
4521 | if (!sctx->parent_root) { | ||
4522 | ret = -EINVAL; | ||
4523 | goto out; | ||
4524 | } | ||
4525 | } | ||
4526 | |||
4527 | /* | ||
4528 | * Clones from send_root are allowed, but only if the clone source | ||
4529 | * is behind the current send position. This is checked while searching | ||
4530 | * for possible clone sources. | ||
4531 | */ | ||
4532 | sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root; | ||
4533 | |||
4534 | /* We do a bsearch later */ | ||
4535 | sort(sctx->clone_roots, sctx->clone_roots_cnt, | ||
4536 | sizeof(*sctx->clone_roots), __clone_root_cmp_sort, | ||
4537 | NULL); | ||
4538 | |||
4539 | ret = send_subvol(sctx); | ||
4540 | if (ret < 0) | ||
4541 | goto out; | ||
4542 | |||
4543 | ret = begin_cmd(sctx, BTRFS_SEND_C_END); | ||
4544 | if (ret < 0) | ||
4545 | goto out; | ||
4546 | ret = send_cmd(sctx); | ||
4547 | if (ret < 0) | ||
4548 | goto out; | ||
4549 | |||
4550 | out: | ||
4551 | if (filp) | ||
4552 | fput(filp); | ||
4553 | kfree(arg); | ||
4554 | vfree(clone_sources_tmp); | ||
4555 | |||
4556 | if (sctx) { | ||
4557 | if (sctx->send_filp) | ||
4558 | fput(sctx->send_filp); | ||
4559 | |||
4560 | vfree(sctx->clone_roots); | ||
4561 | vfree(sctx->send_buf); | ||
4562 | vfree(sctx->read_buf); | ||
4563 | |||
4564 | name_cache_free(sctx); | ||
4565 | |||
4566 | kfree(sctx); | ||
4567 | } | ||
4568 | |||
4569 | return ret; | ||
4570 | } | ||
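Everything above is driven from user space through this ioctl: btrfs-progs hands in a file descriptor for the stream, an optional parent root for incremental sends, and an array of clone-source root ids. A hedged sketch of a minimal full-send caller; the args struct and ioctl number mirror the definitions added to fs/btrfs/ioctl.h alongside this patch and should be checked against real headers:

/* Sketch: minimal user-space caller of BTRFS_IOC_SEND doing a full send.
 * The struct layout and ioctl number are assumptions copied to match
 * fs/btrfs/ioctl.h as of this patch; prefer the real header if available. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct btrfs_ioctl_send_args {
	int64_t  send_fd;		/* fd the stream is written to */
	uint64_t clone_sources_count;
	uint64_t *clone_sources;	/* array of root objectids */
	uint64_t parent_root;		/* 0 for a full send */
	uint64_t flags;
	uint64_t reserved[4];
};
#define BTRFS_IOC_SEND _IOW(0x94, 38, struct btrfs_ioctl_send_args)

int main(int argc, char **argv)
{
	int subvol_fd, out_fd, ret;
	struct btrfs_ioctl_send_args args = { 0 };

	if (argc != 3) {
		fprintf(stderr, "usage: %s <subvolume> <output file>\n", argv[0]);
		return 1;
	}
	subvol_fd = open(argv[1], O_RDONLY);
	out_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (subvol_fd < 0 || out_fd < 0)
		return 1;

	args.send_fd = out_fd;	/* full send: no parent root, no clone sources */
	ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
	if (ret < 0)
		perror("BTRFS_IOC_SEND");
	close(out_fd);
	close(subvol_fd);
	return ret < 0;
}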