 fs/btrfs/backref.c | 354 +++++++++++------------------------------
 fs/btrfs/ioctl.c   |   8 +-
 fs/btrfs/scrub.c   |   7 +-
 3 files changed, 107 insertions(+), 262 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 03c30a1836f4..b9a843226de8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -23,18 +23,6 @@
 #include "transaction.h"
 #include "delayed-ref.h"
 
-struct __data_ref {
-	struct list_head list;
-	u64 inum;
-	u64 root;
-	u64 extent_data_item_offset;
-};
-
-struct __shared_ref {
-	struct list_head list;
-	u64 disk_byte;
-};
-
 /*
  * this structure records all encountered refs on the way up to the root
  */
@@ -964,8 +952,11 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
 	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
 	    found_key->objectid > logical ||
-	    found_key->objectid + found_key->offset <= logical)
+	    found_key->objectid + found_key->offset <= logical) {
+		pr_debug("logical %llu is not within any extent\n",
+			 (unsigned long long)logical);
 		return -ENOENT;
+	}
 
 	eb = path->nodes[0];
 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
@@ -974,6 +965,13 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 	flags = btrfs_extent_flags(eb, ei);
 
+	pr_debug("logical %llu is at position %llu within the extent (%llu "
+		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
+		 (unsigned long long)logical,
+		 (unsigned long long)(logical - found_key->objectid),
+		 (unsigned long long)found_key->objectid,
+		 (unsigned long long)found_key->offset,
+		 (unsigned long long)flags, item_size);
 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
 	if (flags & BTRFS_EXTENT_FLAG_DATA)
@@ -1070,128 +1068,11 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 	return 0;
 }
 
-static int __data_list_add(struct list_head *head, u64 inum,
-				u64 extent_data_item_offset, u64 root)
-{
-	struct __data_ref *ref;
-
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
-	if (!ref)
-		return -ENOMEM;
-
-	ref->inum = inum;
-	ref->extent_data_item_offset = extent_data_item_offset;
-	ref->root = root;
-	list_add_tail(&ref->list, head);
-
-	return 0;
-}
-
-static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
-				struct btrfs_extent_data_ref *dref)
-{
-	return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
-				btrfs_extent_data_ref_offset(eb, dref),
-				btrfs_extent_data_ref_root(eb, dref));
-}
-
-static int __shared_list_add(struct list_head *head, u64 disk_byte)
-{
-	struct __shared_ref *ref;
-
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
-	if (!ref)
-		return -ENOMEM;
-
-	ref->disk_byte = disk_byte;
-	list_add_tail(&ref->list, head);
-
-	return 0;
-}
-
-static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
-						u64 logical, u64 inum,
-						u64 extent_data_item_offset,
-						u64 extent_offset,
-						struct btrfs_path *path,
-						struct list_head *data_refs,
-						iterate_extent_inodes_t *iterate,
-						void *ctx)
-{
-	u64 ref_root;
-	u32 item_size;
-	struct btrfs_key key;
-	struct extent_buffer *eb;
-	struct btrfs_extent_item *ei;
-	struct btrfs_extent_inline_ref *eiref;
-	struct __data_ref *ref;
-	int ret;
-	int type;
-	int last;
-	unsigned long ptr = 0;
-
-	WARN_ON(!list_empty(data_refs));
-	ret = extent_from_logical(fs_info, logical, path, &key);
-	if (ret & BTRFS_EXTENT_FLAG_DATA)
-		ret = -EIO;
-	if (ret < 0)
-		goto out;
-
-	eb = path->nodes[0];
-	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
-	item_size = btrfs_item_size_nr(eb, path->slots[0]);
-
-	ret = 0;
-	ref_root = 0;
-	/*
-	 * as done in iterate_extent_inodes, we first build a list of refs to
-	 * iterate, then free the path and then iterate them to avoid deadlocks.
-	 */
-	do {
-		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
-						&eiref, &type);
-		if (last < 0) {
-			ret = last;
-			goto out;
-		}
-		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
-		    type == BTRFS_SHARED_BLOCK_REF_KEY) {
-			ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
-			ret = __data_list_add(data_refs, inum,
-						extent_data_item_offset,
-						ref_root);
-		}
-	} while (!ret && !last);
-
-	btrfs_release_path(path);
-
-	if (ref_root == 0) {
-		printk(KERN_ERR "btrfs: failed to find tree block ref "
-			"for shared data backref %llu\n", logical);
-		WARN_ON(1);
-		ret = -EIO;
-	}
-
-out:
-	while (!list_empty(data_refs)) {
-		ref = list_first_entry(data_refs, struct __data_ref, list);
-		list_del(&ref->list);
-		if (!ret)
-			ret = iterate(ref->inum, extent_offset +
-				      ref->extent_data_item_offset,
-				      ref->root, ctx);
-		kfree(ref);
-	}
-
-	return ret;
-}
-
-static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
-					u64 logical, u64 orig_extent_item_objectid,
-					u64 extent_offset, struct btrfs_path *path,
-					struct list_head *data_refs,
-					iterate_extent_inodes_t *iterate,
-					void *ctx)
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
+				struct btrfs_path *path, u64 logical,
+				u64 orig_extent_item_objectid,
+				u64 extent_item_pos, u64 root,
+				iterate_extent_inodes_t *iterate, void *ctx)
 {
 	u64 disk_byte;
 	struct btrfs_key key;
@@ -1199,8 +1080,10 @@ static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
 	struct extent_buffer *eb;
 	int slot;
 	int nritems;
-	int ret;
-	int found = 0;
+	int ret = 0;
+	int extent_type;
+	u64 data_offset;
+	u64 data_len;
 
 	eb = read_tree_block(fs_info->tree_root, logical,
 				fs_info->tree_root->leafsize, 0);
@@ -1218,149 +1101,99 @@ static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
 		if (key.type != BTRFS_EXTENT_DATA_KEY)
 			continue;
 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-		if (!fi) {
-			free_extent_buffer(eb);
-			return -EIO;
-		}
+		extent_type = btrfs_file_extent_type(eb, fi);
+		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+			continue;
+		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-		if (disk_byte != orig_extent_item_objectid) {
-			if (found)
-				break;
-			else
-				continue;
-		}
-		++found;
-		ret = __iter_shared_inline_ref_inodes(fs_info, logical,
-							key.objectid,
-							key.offset,
-							extent_offset, path,
-							data_refs,
-							iterate, ctx);
-		if (ret)
-			break;
-	}
+		if (disk_byte != orig_extent_item_objectid)
+			continue;
 
-	if (!found) {
-		printk(KERN_ERR "btrfs: failed to follow shared data backref "
-			"to parent %llu\n", logical);
-		WARN_ON(1);
-		ret = -EIO;
+		data_offset = btrfs_file_extent_offset(eb, fi);
+		data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+		if (extent_item_pos < data_offset ||
+		    extent_item_pos >= data_offset + data_len)
+			continue;
+
+		pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
+			 "root %llu\n", orig_extent_item_objectid,
+			 key.objectid, key.offset, root);
+		ret = iterate(key.objectid,
+				key.offset + (extent_item_pos - data_offset),
+				root, ctx);
+		if (ret) {
+			pr_debug("stopping iteration because ret=%d\n", ret);
+			break;
+		}
 	}
 
 	free_extent_buffer(eb);
+
 	return ret;
 }
 
 /*
  * calls iterate() for every inode that references the extent identified by
- * the given parameters. will use the path given as a parameter and return it
- * released.
+ * the given parameters.
  * when the iterator function returns a non-zero value, iteration stops.
+ * path is guaranteed to be in released state when iterate() is called.
  */
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 				struct btrfs_path *path,
-				u64 extent_item_objectid,
-				u64 extent_offset,
+				u64 extent_item_objectid, u64 extent_item_pos,
 				iterate_extent_inodes_t *iterate, void *ctx)
 {
-	unsigned long ptr = 0;
-	int last;
 	int ret;
-	int type;
-	u64 logical;
-	u32 item_size;
-	struct btrfs_extent_inline_ref *eiref;
-	struct btrfs_extent_data_ref *dref;
-	struct extent_buffer *eb;
-	struct btrfs_extent_item *ei;
-	struct btrfs_key key;
 	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
 	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
-	struct __data_ref *ref_d;
-	struct __shared_ref *ref_s;
+	struct btrfs_trans_handle *trans;
+	struct ulist *refs;
+	struct ulist *roots;
+	struct ulist_node *ref_node = NULL;
+	struct ulist_node *root_node = NULL;
+	struct seq_list seq_elem;
+	struct btrfs_delayed_ref_root *delayed_refs;
 
-	eb = path->nodes[0];
-	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
-	item_size = btrfs_item_size_nr(eb, path->slots[0]);
-
-	/* first we iterate the inline refs, ... */
-	do {
-		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
-						&eiref, &type);
-		if (last == -ENOENT) {
-			ret = 0;
-			break;
-		}
-		if (last < 0) {
-			ret = last;
-			break;
-		}
+	trans = btrfs_join_transaction(fs_info->extent_root);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 
-		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
-			dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
-			ret = __data_list_add_eb(&data_refs, eb, dref);
-		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
-			logical = btrfs_extent_inline_ref_offset(eb, eiref);
-			ret = __shared_list_add(&shared_refs, logical);
-		}
-	} while (!ret && !last);
+	pr_debug("resolving all inodes for extent %llu\n",
+			extent_item_objectid);
 
-	/* ... then we proceed to in-tree references and ... */
-	while (!ret) {
-		++path->slots[0];
-		if (path->slots[0] > btrfs_header_nritems(eb)) {
-			ret = btrfs_next_leaf(fs_info->extent_root, path);
-			if (ret) {
-				if (ret == 1)
-					ret = 0; /* we're done */
-				break;
-			}
-			eb = path->nodes[0];
-		}
-		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
-		if (key.objectid != extent_item_objectid)
-			break;
-		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
-			dref = btrfs_item_ptr(eb, path->slots[0],
-						struct btrfs_extent_data_ref);
-			ret = __data_list_add_eb(&data_refs, eb, dref);
-		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
-			ret = __shared_list_add(&shared_refs, key.offset);
-		}
-	}
+	delayed_refs = &trans->transaction->delayed_refs;
+	spin_lock(&delayed_refs->lock);
+	btrfs_get_delayed_seq(delayed_refs, &seq_elem);
+	spin_unlock(&delayed_refs->lock);
 
-	btrfs_release_path(path);
+	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
+					extent_item_pos, seq_elem.seq,
+					&refs);
 
-	/*
-	 * ... only at the very end we can process the refs we found. this is
-	 * because the iterator function we call is allowed to make tree lookups
-	 * and we have to avoid deadlocks. additionally, we need more tree
-	 * lookups ourselves for shared data refs.
-	 */
-	while (!list_empty(&data_refs)) {
-		ref_d = list_first_entry(&data_refs, struct __data_ref, list);
-		list_del(&ref_d->list);
-		if (!ret)
-			ret = iterate(ref_d->inum, extent_offset +
-					ref_d->extent_data_item_offset,
-					ref_d->root, ctx);
-		kfree(ref_d);
-	}
+	if (ret)
+		goto out;
 
-	while (!list_empty(&shared_refs)) {
-		ref_s = list_first_entry(&shared_refs, struct __shared_ref,
-					 list);
-		list_del(&ref_s->list);
-		if (!ret)
-			ret = __iter_shared_inline_ref(fs_info,
-							ref_s->disk_byte,
-							extent_item_objectid,
-							extent_offset, path,
-							&data_refs,
-							iterate, ctx);
-		kfree(ref_s);
+	while (!ret && (ref_node = ulist_next(refs, ref_node))) {
+		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
+						seq_elem.seq, &roots);
+		if (ret)
+			break;
+		while (!ret && (root_node = ulist_next(roots, root_node))) {
+			pr_debug("root %llu references leaf %llu\n",
+					root_node->val, ref_node->val);
+			ret = iterate_leaf_refs(fs_info, path, ref_node->val,
+						extent_item_objectid,
+						extent_item_pos, root_node->val,
+						iterate, ctx);
+		}
 	}
 
+	ulist_free(refs);
+	ulist_free(roots);
+out:
+	btrfs_put_delayed_seq(delayed_refs, &seq_elem);
+	btrfs_end_transaction(trans, fs_info->extent_root);
 	return ret;
 }
 
@@ -1369,19 +1202,20 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 				iterate_extent_inodes_t *iterate, void *ctx)
 {
 	int ret;
-	u64 offset;
+	u64 extent_item_pos;
 	struct btrfs_key found_key;
 
 	ret = extent_from_logical(fs_info, logical, path,
 					&found_key);
+	btrfs_release_path(path);
 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 		ret = -EINVAL;
 	if (ret < 0)
 		return ret;
 
-	offset = logical - found_key.objectid;
+	extent_item_pos = logical - found_key.objectid;
 	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
-					offset, iterate, ctx);
+					extent_item_pos, iterate, ctx);
 
 	return ret;
 }
@@ -1426,6 +1260,10 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
 	for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
 		name_len = btrfs_inode_ref_name_len(eb, iref);
 		/* path must be released before calling iterate()! */
+		pr_debug("following ref at offset %u for inode %llu in "
+			 "tree %llu\n", cur,
+			 (unsigned long long)found_key.objectid,
+			 (unsigned long long)fs_root->objectid);
 		ret = iterate(parent, iref, eb, ctx);
 		if (ret) {
 			free_extent_buffer(eb);
@@ -1466,10 +1304,14 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
 		return PTR_ERR(fspath);
 
 	if (fspath > fspath_min) {
+		pr_debug("path resolved: %s\n", fspath);
 		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
 		++ipath->fspath->elem_cnt;
 		ipath->fspath->bytes_left = fspath - fspath_min;
 	} else {
+		pr_debug("missed path, not enough space. missing bytes: %lu, "
+			 "constructed so far: %s\n",
+			 (unsigned long)(fspath_min - fspath), fspath_min);
 		++ipath->fspath->elem_missed;
 		ipath->fspath->bytes_missing += fspath_min - fspath;
 		ipath->fspath->bytes_left = 0;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c48f2e931ea0..9b0526872b7b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2976,7 +2976,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 {
 	int ret = 0;
 	int size;
-	u64 extent_offset;
+	u64 extent_item_pos;
 	struct btrfs_ioctl_logical_ino_args *loi;
 	struct btrfs_data_container *inodes = NULL;
 	struct btrfs_path *path = NULL;
@@ -3007,15 +3007,17 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 	}
 
 	ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
+	btrfs_release_path(path);
 
 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 		ret = -ENOENT;
 	if (ret < 0)
 		goto out;
 
-	extent_offset = loi->logical - key.objectid;
+	extent_item_pos = loi->logical - key.objectid;
 	ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
-					extent_offset, build_ino_list, inodes);
+					extent_item_pos, build_ino_list,
+					inodes);
 
 	if (ret < 0)
 		goto out;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c27bcb67f330..b5edff25a53f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -309,7 +309,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
 	u8 ref_level;
 	unsigned long ptr = 0;
 	const int bufsize = 4096;
-	u64 extent_offset;
+	u64 extent_item_pos;
 
 	path = btrfs_alloc_path();
 
@@ -329,12 +329,13 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
 	if (ret < 0)
 		goto out;
 
-	extent_offset = swarn.logical - found_key.objectid;
+	extent_item_pos = swarn.logical - found_key.objectid;
 	swarn.extent_item_size = found_key.offset;
 
 	eb = path->nodes[0];
 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+	btrfs_release_path(path);
 
 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		do {
@@ -351,7 +352,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
 	} else {
 		swarn.path = path;
 		iterate_extent_inodes(fs_info, path, found_key.objectid,
-					extent_offset,
+					extent_item_pos,
 					scrub_print_warning_inode, &swarn);
 	}
 