Diffstat (limited to 'fs/btrfs/delayed-inode.c')
-rw-r--r--	fs/btrfs/delayed-inode.c	208
1 file changed, 133 insertions(+), 75 deletions(-)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 8d292fbae659..451b00c86f6c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -55,8 +55,7 @@ static inline void btrfs_init_delayed_node(
 	delayed_node->inode_id = inode_id;
 	atomic_set(&delayed_node->refs, 0);
 	delayed_node->count = 0;
-	delayed_node->in_list = 0;
-	delayed_node->inode_dirty = 0;
+	delayed_node->flags = 0;
 	delayed_node->ins_root = RB_ROOT;
 	delayed_node->del_root = RB_ROOT;
 	mutex_init(&delayed_node->mutex);
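The BTRFS_DELAYED_NODE_* bit numbers used by the test_bit()/set_bit()/clear_bit() calls throughout this diff come from a companion change to fs/btrfs/delayed-inode.h that is not shown here; presumably it looks roughly like this (the exact bit values are an assumption):

	/*
	 * Assumed shape of the delayed-inode.h counterpart (not part of
	 * this diff): the in_list/inode_dirty bools become bit positions
	 * in a single unsigned long, so atomic bitops can operate on them.
	 */
	#define BTRFS_DELAYED_NODE_IN_LIST	0
	#define BTRFS_DELAYED_NODE_INODE_DIRTY	1
	#define BTRFS_DELAYED_NODE_DEL_IREF	2

	/* in struct btrfs_delayed_node, replacing the bool members: */
	unsigned long flags;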
@@ -172,7 +171,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 				     int mod)
 {
 	spin_lock(&root->lock);
-	if (node->in_list) {
+	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 		if (!list_empty(&node->p_list))
 			list_move_tail(&node->p_list, &root->prepare_list);
 		else if (mod)
@@ -182,7 +181,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 		list_add_tail(&node->p_list, &root->prepare_list);
 		atomic_inc(&node->refs);	/* inserted into list */
 		root->nodes++;
-		node->in_list = 1;
+		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 	}
 	spin_unlock(&root->lock);
 }
@@ -192,13 +191,13 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 				       struct btrfs_delayed_node *node)
 {
 	spin_lock(&root->lock);
-	if (node->in_list) {
+	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 		root->nodes--;
 		atomic_dec(&node->refs);	/* not in the list */
 		list_del_init(&node->n_list);
 		if (!list_empty(&node->p_list))
 			list_del_init(&node->p_list);
-		node->in_list = 0;
+		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 	}
 	spin_unlock(&root->lock);
 }
@@ -231,7 +230,8 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
 
 	delayed_root = node->root->fs_info->delayed_root;
 	spin_lock(&delayed_root->lock);
-	if (!node->in_list) {	/* not in the list */
+	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
+		/* not in the list */
 		if (list_empty(&delayed_root->node_list))
 			goto out;
 		p = delayed_root->node_list.next;
@@ -1004,9 +1004,10 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 {
 	struct btrfs_delayed_root *delayed_root;
 
-	if (delayed_node && delayed_node->inode_dirty) {
+	if (delayed_node &&
+	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		BUG_ON(!delayed_node->root);
-		delayed_node->inode_dirty = 0;
+		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 		delayed_node->count--;
 
 		delayed_root = delayed_node->root->fs_info->delayed_root;
@@ -1014,6 +1015,18 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 	}
 }
 
+static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
+{
+	struct btrfs_delayed_root *delayed_root;
+
+	ASSERT(delayed_node->root);
+	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
+	delayed_node->count--;
+
+	delayed_root = delayed_node->root->fs_info->delayed_root;
+	finish_one_item(delayed_root);
+}
+
 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
 					struct btrfs_path *path,
@@ -1022,13 +1035,19 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 	struct btrfs_key key;
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
+	int mod;
 	int ret;
 
 	key.objectid = node->inode_id;
 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
 	key.offset = 0;
 
-	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
+	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+		mod = -1;
+	else
+		mod = 1;
+
+	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
 	if (ret > 0) {
 		btrfs_release_path(path);
 		return -ENOENT;
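The mod value chosen above decides how btrfs_lookup_inode() prepares the path. A paraphrased sketch of that helper (the exact body lives in fs/btrfs/inode-item.c, outside this diff) shows why -1 is used when an iref may be deleted from the same leaf:

	/*
	 * Paraphrase of btrfs_lookup_inode(), shown only to explain the
	 * mod argument; not part of this diff.
	 */
	int ins_len = mod < 0 ? -1 : 0;	/* -1: the leaf may shrink (deletion ahead) */
	int cow = mod != 0;		/* non-zero: COW the path for modification */
	ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);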
@@ -1036,19 +1055,58 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	btrfs_unlock_up_safe(path, 1);
 	leaf = path->nodes[0];
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
 			    sizeof(struct btrfs_inode_item));
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(path);
 
+	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+		goto no_iref;
+
+	path->slots[0]++;
+	if (path->slots[0] >= btrfs_header_nritems(leaf))
+		goto search;
+again:
+	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+	if (key.objectid != node->inode_id)
+		goto out;
+
+	if (key.type != BTRFS_INODE_REF_KEY &&
+	    key.type != BTRFS_INODE_EXTREF_KEY)
+		goto out;
+
+	/*
+	 * Delayed iref deletion is only done for an inode with a single
+	 * link, so there is exactly one iref; the case of several irefs
+	 * in the same item cannot occur.
+	 */
+	btrfs_del_item(trans, root, path);
+out:
+	btrfs_release_delayed_iref(node);
+no_iref:
+	btrfs_release_path(path);
+err_out:
 	btrfs_delayed_inode_release_metadata(root, node);
 	btrfs_release_delayed_inode(node);
 
-	return 0;
+	return ret;
+
+search:
+	btrfs_release_path(path);
+
+	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
+	key.offset = -1;
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret < 0)
+		goto err_out;
+	ASSERT(ret);
+
+	ret = 0;
+	leaf = path->nodes[0];
+	path->slots[0]--;
+	goto again;
 }
 
 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
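A note on the search: fallback above, which runs when the iref did not land in the same leaf as the inode item (based on btrfs key ordering, which this diff does not restate):

	/*
	 * Why seeking (inode_id, BTRFS_INODE_EXTREF_KEY, (u64)-1) works:
	 * tree items sort by (objectid, type, offset), and
	 * BTRFS_INODE_EXTREF_KEY sorts after BTRFS_INODE_REF_KEY. That
	 * exact key is not expected to exist, so btrfs_search_slot()
	 * returns 1 -- hence the ASSERT(ret) -- and leaves the path just
	 * past the last possible iref of this inode; path->slots[0]--
	 * steps back onto the candidate item, which the checks at the
	 * `again' label validate before btrfs_del_item() removes it.
	 */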
@@ -1059,7 +1117,7 @@ static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 	int ret;
 
 	mutex_lock(&node->mutex);
-	if (!node->inode_dirty) {
+	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
 		mutex_unlock(&node->mutex);
 		return 0;
 	}
@@ -1203,7 +1261,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
 		return 0;
 
 	mutex_lock(&delayed_node->mutex);
-	if (!delayed_node->inode_dirty) {
+	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		mutex_unlock(&delayed_node->mutex);
 		btrfs_release_delayed_node(delayed_node);
 		return 0;
@@ -1227,7 +1285,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
 
 	mutex_lock(&delayed_node->mutex);
-	if (delayed_node->inode_dirty)
+	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
 						   path, delayed_node);
 	else
@@ -1300,36 +1358,9 @@ again:
 	trans->block_rsv = &root->fs_info->delayed_block_rsv;
 
 	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
-	/*
-	 * Maybe new delayed items have been inserted, so we need requeue
-	 * the work. Besides that, we must dequeue the empty delayed nodes
-	 * to avoid the race between delayed items balance and the worker.
-	 * The race like this:
-	 *	Task1				Worker thread
-	 *					count == 0, needn't requeue
-	 *					  also needn't insert the
-	 *					  delayed node into prepare
-	 *					  list again.
-	 *	add lots of delayed items
-	 *	queue the delayed node
-	 *	  already in the list,
-	 *	  and not in the prepare
-	 *	  list, it means the delayed
-	 *	  node is being dealt with
-	 *	  by the worker.
-	 *	do delayed items balance
-	 *	  the delayed node is being
-	 *	  dealt with by the worker
-	 *	  now, just wait.
-	 *	the worker goto idle.
-	 * Task1 will sleep until the transaction is commited.
-	 */
-	mutex_lock(&delayed_node->mutex);
-	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
-	mutex_unlock(&delayed_node->mutex);
 
 	trans->block_rsv = block_rsv;
-	btrfs_end_transaction_dmeta(trans, root);
+	btrfs_end_transaction(trans, root);
 	btrfs_btree_balance_dirty_nodelay(root);
 
 release_path:
@@ -1376,52 +1407,41 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
 	WARN_ON(btrfs_first_delayed_node(delayed_root));
 }
 
-static int refs_newer(struct btrfs_delayed_root *delayed_root,
-		      int seq, int count)
+static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
 {
 	int val = atomic_read(&delayed_root->items_seq);
 
-	if (val < seq || val >= seq + count)
+	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
+		return 1;
+
+	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return 1;
+
 	return 0;
 }
 
 void btrfs_balance_delayed_items(struct btrfs_root *root)
 {
 	struct btrfs_delayed_root *delayed_root;
-	int seq;
 
 	delayed_root = btrfs_get_delayed_root(root);
 
 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return;
 
-	seq = atomic_read(&delayed_root->items_seq);
-
 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
+		int seq;
 		int ret;
-		DEFINE_WAIT(__wait);
+
+		seq = atomic_read(&delayed_root->items_seq);
 
 		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
 		if (ret)
 			return;
 
-		while (1) {
-			prepare_to_wait(&delayed_root->wait, &__wait,
-					TASK_INTERRUPTIBLE);
-
-			if (refs_newer(delayed_root, seq,
-				       BTRFS_DELAYED_BATCH) ||
-			    atomic_read(&delayed_root->items) <
-			    BTRFS_DELAYED_BACKGROUND) {
-				break;
-			}
-			if (!signal_pending(current))
-				schedule();
-			else
-				break;
-		}
-		finish_wait(&delayed_root->wait, &__wait);
+		wait_event_interruptible(delayed_root->wait,
+					 could_end_wait(delayed_root, seq));
+		return;
 	}
 
 	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
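For comparison, the open-coded wait loop deleted above is essentially what wait_event_interruptible() expands to; a rough paraphrase of the include/linux/wait.h macro (details elided, not a verbatim expansion):

	/*
	 * Rough expansion of wait_event_interruptible(wq, condition); the
	 * real macro also skips the wait entirely when the condition is
	 * already true and returns -ERESTATSYS as noted below.
	 */
	DEFINE_WAIT(__wait);
	for (;;) {
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);
		if (condition)
			break;
		if (signal_pending(current))
			break;		/* real macro records -ERESTARTSYS here */
		schedule();
	}
	finish_wait(&wq, &__wait);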
@@ -1472,9 +1492,9 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 	mutex_lock(&delayed_node->mutex);
 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
 	if (unlikely(ret)) {
-		printk(KERN_ERR "err add delayed dir index item(name: %.*s) "
+		btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) "
 				"into the insertion tree of the delayed node"
-				"(root id: %llu, inode id: %llu, errno: %d)\n",
+				"(root id: %llu, inode id: %llu, errno: %d)",
 				name_len, name, delayed_node->root->objectid,
 				delayed_node->inode_id, ret);
 		BUG();
@@ -1544,9 +1564,9 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
 	mutex_lock(&node->mutex);
 	ret = __btrfs_add_delayed_deletion_item(node, item);
 	if (unlikely(ret)) {
-		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
+		btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) "
 				"into the deletion tree of the delayed node"
-				"(root id: %llu, inode id: %llu, errno: %d)\n",
+				"(root id: %llu, inode id: %llu, errno: %d)",
 				index, node->root->objectid, node->inode_id,
 				ret);
 		BUG();
@@ -1759,7 +1779,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 		return -ENOENT;
 
 	mutex_lock(&delayed_node->mutex);
-	if (!delayed_node->inode_dirty) {
+	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		mutex_unlock(&delayed_node->mutex);
 		btrfs_release_delayed_node(delayed_node);
 		return -ENOENT;
@@ -1810,7 +1830,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 		return PTR_ERR(delayed_node);
 
 	mutex_lock(&delayed_node->mutex);
-	if (delayed_node->inode_dirty) {
+	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
 		goto release_node;
 	}
@@ -1821,7 +1841,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 		goto release_node;
 
 	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
-	delayed_node->inode_dirty = 1;
+	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 	delayed_node->count++;
 	atomic_inc(&root->fs_info->delayed_root->items);
 release_node:
@@ -1830,6 +1850,41 @@ release_node:
 	return ret;
 }
 
+int btrfs_delayed_delete_inode_ref(struct inode *inode)
+{
+	struct btrfs_delayed_node *delayed_node;
+
+	delayed_node = btrfs_get_or_create_delayed_node(inode);
+	if (IS_ERR(delayed_node))
+		return PTR_ERR(delayed_node);
+
+	/*
+	 * We don't reserve space for inode ref deletion because:
+	 * - We ONLY do async inode ref deletion for an inode that has only
+	 *   one link (i_nlink == 1), which means there is only one inode ref.
+	 *   In most cases the inode ref and the inode item are in the same
+	 *   leaf, and we deal with them at the same time. Since we are sure
+	 *   we will reserve space for the inode item, it is unnecessary to
+	 *   reserve space for the inode ref deletion as well.
+	 * - If the inode ref and the inode item are not in the same leaf,
+	 *   we needn't worry about ENOSPC either, because we reserve much
+	 *   more space for the inode update than it needs.
+	 * - At worst, we can steal some space from the global reservation,
+	 *   but that is very rare.
+	 */
+	mutex_lock(&delayed_node->mutex);
+	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
+		goto release_node;
+
+	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
+	delayed_node->count++;
+	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
+release_node:
+	mutex_unlock(&delayed_node->mutex);
+	btrfs_release_delayed_node(delayed_node);
+	return 0;
+}
+
 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 {
 	struct btrfs_root *root = delayed_node->root;
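The new btrfs_delayed_delete_inode_ref() has no caller within this file; judging by its comment, the expected call site is the unlink path in fs/btrfs/inode.c. A hypothetical sketch of that caller (names and control flow are assumptions, not shown in this diff):

	/*
	 * Hypothetical call site in the unlink path; the real caller is
	 * outside this diff. Per the comment above, the async deletion is
	 * only attempted for a single-link inode, and a failure falls back
	 * to removing the iref synchronously.
	 */
	if (inode->i_nlink == 1 && !btrfs_delayed_delete_inode_ref(inode))
		goto skip_backref;	/* iref removed via the delayed node */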
@@ -1852,7 +1907,10 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 		btrfs_release_delayed_item(prev_item);
 	}
 
-	if (delayed_node->inode_dirty) {
+	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
+		btrfs_release_delayed_iref(delayed_node);
+
+	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		btrfs_delayed_inode_release_metadata(root, delayed_node);
 		btrfs_release_delayed_inode(delayed_node);
 	}