path: root/fs
author    Chris Mason <chris.mason@oracle.com>  2008-09-08 11:18:08 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:07 -0400
commit    4bef084857ab8fe71cf49eae349c25e440a49150 (patch)
tree      7a9a850515538421c2976f8ee4c1bea5ceced59c /fs
parent    325cd4bafeb6cfb44addd6e807a9b74170d1be31 (diff)
Btrfs: Tree logging fixes
* Pin down data blocks to prevent them from being reallocated like so:

      trans 1: allocate file extent
      trans 2: free file extent
      trans 3: free file extent during old snapshot deletion
      trans 3: allocate file extent to new file
      trans 3: fsync new file

  Before the tree logging code, this was legal because the fsync would
  commit the transaction that did the final data extent free and the
  transaction that allocated the extent to the new file at the same time.

  With the tree logging code, the tree log subtransaction can commit
  before the transaction that freed the extent.  If we crash, we're left
  with two different files using the extent.  (A sketch of the resulting
  pinning rule follows this message.)

* Don't wait in start_transaction if log replay is going on.  This avoids
  deadlocks from iput while we're cleaning up link counts in the replay
  code.

* Don't deadlock in replay_one_name by trying to read an inode off the
  disk while holding paths for the directory.

* Hold the buffer lock while we mark a buffer as written.  This closes a
  race where someone is changing a buffer while we write it.  They are
  supposed to mark it dirty again after they change it, but this violates
  the COW rules.  (A sketch of the new lock ordering also follows.)

Signed-off-by: Chris Mason <chris.mason@oracle.com>
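The pinning rule the first fix adds to __btrfs_free_extent fits in a few lines. This is a minimal sketch restating the patch's own logic with expanded comments, not a separate implementation; pin, owner_objectid, ref_generation, and trans are the variables used in the extent-tree.c hunks below:

	/* metadata blocks are always pinned when freed: they may not be
	 * reused until the transaction that freed them commits */
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		pin = 1;

	/* data extents are pinned whenever the reference being dropped was
	 * created by a different, possibly already-committed transaction;
	 * otherwise a tree log commit could make a reallocation of the
	 * extent durable before the free itself is */
	if (ref_generation != trans->transid)
		pin = 1;

Roughly, only a data extent whose reference was created inside the still-running transaction is safe to hand out again without pinning.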
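The last fix moves the WRITTEN flag update out of csum_dirty_buffer and into a page-lock hook, so the flag is only ever set on a tree-locked buffer. A condensed sketch of the ordering btree_lock_page_hook establishes (the full function, including the EXTENT_PAGE_PRIVATE shortcut and the extent-buffer lookup, is in the disk-io.c hunk below):

	btrfs_tree_lock(eb);		/* no writer can be mid-modification now */
	spin_lock(&root->fs_info->hash_lock);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
	spin_unlock(&root->fs_info->hash_lock);
	btrfs_tree_unlock(eb);
	lock_page(page);		/* then take the page lock for writeback */

Once WRITTEN is set under the tree lock, any later change to the buffer must go through COW instead of modifying the block that is about to hit disk.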
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/disk-io.c      33
-rw-r--r--  fs/btrfs/disk-io.h       1
-rw-r--r--  fs/btrfs/extent-tree.c  20
-rw-r--r--  fs/btrfs/extent_io.c    47
-rw-r--r--  fs/btrfs/extent_io.h     1
-rw-r--r--  fs/btrfs/transaction.c  16
-rw-r--r--  fs/btrfs/tree-log.c     13
7 files changed, 107 insertions, 24 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a4373db5967a..42bf99168056 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -307,9 +307,7 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 		goto err;
 	}
 	found_level = btrfs_header_level(eb);
-	spin_lock(&root->fs_info->hash_lock);
-	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-	spin_unlock(&root->fs_info->hash_lock);
+
 	csum_tree_block(root, eb, 0);
 err:
 	free_extent_buffer(eb);
@@ -1998,7 +1996,36 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 	return ret;
 }
 
+int btree_lock_page_hook(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct extent_buffer *eb;
+	unsigned long len;
+	u64 bytenr = page_offset(page);
+
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+
+	len = page->private >> 2;
+	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
+	if (!eb)
+		goto out;
+
+	btrfs_tree_lock(eb);
+	spin_lock(&root->fs_info->hash_lock);
+	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+	spin_unlock(&root->fs_info->hash_lock);
+	btrfs_tree_unlock(eb);
+	free_extent_buffer(eb);
+out:
+	lock_page(page);
+	return 0;
+}
+
 static struct extent_io_ops btree_extent_io_ops = {
+	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.writepage_io_hook = btree_writepage_io_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
 	.submit_bio_hook = btree_submit_bio_hook,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 6b6fdc697f31..f84f5058dbbb 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -80,4 +80,5 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 			       struct btrfs_fs_info *fs_info);
 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info);
+int btree_lock_page_hook(struct page *page);
 #endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 646b9148ca21..3181759da1cf 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1590,13 +1590,17 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
 }
 
 static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
-			  int pending)
+			  int is_data, int pending)
 {
 	int err = 0;
 
 	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
 	if (!pending) {
 		struct extent_buffer *buf;
+
+		if (is_data)
+			goto pinit;
+
 		buf = btrfs_find_tree_block(root, bytenr, num_bytes);
 		if (buf) {
 			/* we can reuse a block if it hasn't been written
@@ -1624,6 +1628,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
 			}
 			free_extent_buffer(buf);
 		}
+pinit:
 		btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
 	} else {
 		set_extent_bits(&root->fs_info->pending_del,
@@ -1744,7 +1749,8 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
 #endif
 
 	if (pin) {
-		ret = pin_down_bytes(root, bytenr, num_bytes, 0);
+		ret = pin_down_bytes(root, bytenr, num_bytes,
+				     owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0);
 		if (ret > 0)
 			mark_free = 1;
 		BUG_ON(ret < 0);
@@ -1862,9 +1868,17 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 	ref_generation = 0;
 
 	if (root == extent_root) {
-		pin_down_bytes(root, bytenr, num_bytes, 1);
+		pin_down_bytes(root, bytenr, num_bytes, 0, 1);
 		return 0;
 	}
+	/* if metadata always pin */
+	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
+		pin = 1;
+
+	/* if data pin when any transaction has committed this */
+	if (ref_generation != trans->transid)
+		pin = 1;
+
 	ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
 			    ref_generation, owner_objectid, owner_offset,
 			    pin, pin == 0);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 239e7c908abf..319a0c7a4a58 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -29,7 +29,10 @@ static struct kmem_cache *extent_buffer_cache;
 
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
+
+#ifdef LEAK_DEBUG
 static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
+#endif
 
 #define BUFFER_LRU_MAX 64
 
@@ -106,7 +109,9 @@ EXPORT_SYMBOL(extent_io_tree_init);
 struct extent_state *alloc_extent_state(gfp_t mask)
 {
 	struct extent_state *state;
+#ifdef LEAK_DEBUG
 	unsigned long flags;
+#endif
 
 	state = kmem_cache_alloc(extent_state_cache, mask);
 	if (!state)
@@ -114,10 +119,11 @@ struct extent_state *alloc_extent_state(gfp_t mask)
 	state->state = 0;
 	state->private = 0;
 	state->tree = NULL;
+#ifdef LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&state->leak_list, &states);
 	spin_unlock_irqrestore(&leak_lock, flags);
-
+#endif
 	atomic_set(&state->refs, 1);
 	init_waitqueue_head(&state->wq);
 	return state;
@@ -129,11 +135,15 @@ void free_extent_state(struct extent_state *state)
 	if (!state)
 		return;
 	if (atomic_dec_and_test(&state->refs)) {
+#ifdef LEAK_DEBUG
 		unsigned long flags;
+#endif
 		WARN_ON(state->tree);
+#ifdef LEAK_DEBUG
 		spin_lock_irqsave(&leak_lock, flags);
 		list_del(&state->leak_list);
 		spin_unlock_irqrestore(&leak_lock, flags);
+#endif
 		kmem_cache_free(extent_state_cache, state);
 	}
 }
@@ -2070,13 +2080,13 @@ done:
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-/* Taken directly from 2.6.23 for 2.6.18 back port */
+/* Taken directly from 2.6.23 with a mod for a lockpage hook */
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
 			   void *data);
+#endif
 
 /**
- * write_cache_pages - walk the list of dirty pages of the given address space
- * and write all of them.
+ * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  * @writepage: function called for each page
@@ -2090,9 +2100,10 @@ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
  * WB_SYNC_ALL then we were called for data integrity and we must wait for
  * existing IO to complete.
  */
-static int write_cache_pages(struct address_space *mapping,
-			     struct writeback_control *wbc, writepage_t writepage,
-			     void *data)
+int extent_write_cache_pages(struct extent_io_tree *tree,
+			     struct address_space *mapping,
+			     struct writeback_control *wbc,
+			     writepage_t writepage, void *data)
 {
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	int ret = 0;
@@ -2138,7 +2149,10 @@ retry:
 			 * swizzled back from swapper_space to tmpfs file
 			 * mapping
 			 */
-			lock_page(page);
+			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
+				tree->ops->write_cache_pages_lock_hook(page);
+			else
+				lock_page(page);
 
 			if (unlikely(page->mapping != mapping)) {
 				unlock_page(page);
@@ -2187,9 +2201,12 @@ retry:
 	}
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
+
+	if (wbc->range_cont)
+		wbc->range_start = index << PAGE_CACHE_SHIFT;
 	return ret;
 }
-#endif
+EXPORT_SYMBOL(extent_write_cache_pages);
 
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			   get_extent_t *get_extent,
@@ -2214,7 +2231,8 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 
 	ret = __extent_writepage(page, wbc, &epd);
 
-	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
+	extent_write_cache_pages(tree, mapping, &wbc_writepages,
+				 __extent_writepage, &epd);
 	if (epd.bio) {
 		submit_one_bio(WRITE, epd.bio, 0);
 	}
@@ -2235,7 +2253,8 @@ int extent_writepages(struct extent_io_tree *tree,
 		.get_extent = get_extent,
 	};
 
-	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
+	ret = extent_write_cache_pages(tree, mapping, wbc,
+				       __extent_writepage, &epd);
 	if (epd.bio) {
 		submit_one_bio(WRITE, epd.bio, 0);
 	}
@@ -2567,15 +2586,19 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 					  gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
+#ifdef LEAK_DEBUG
 	unsigned long flags;
+#endif
 
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	eb->start = start;
 	eb->len = len;
 	mutex_init(&eb->mutex);
+#ifdef LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&eb->leak_list, &buffers);
 	spin_unlock_irqrestore(&leak_lock, flags);
+#endif
 	atomic_set(&eb->refs, 1);
 
 	return eb;
@@ -2583,10 +2606,12 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 
 static void __free_extent_buffer(struct extent_buffer *eb)
 {
+#ifdef LEAK_DEBUG
 	unsigned long flags;
 	spin_lock_irqsave(&leak_lock, flags);
 	list_del(&eb->leak_list);
 	spin_unlock_irqrestore(&leak_lock, flags);
+#endif
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 315cfceae312..3cb411a5f4d3 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -50,6 +50,7 @@ struct extent_io_ops {
 			      unsigned long old, unsigned long bits);
 	int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end,
 			      unsigned long old, unsigned long bits);
+	int (*write_cache_pages_lock_hook)(struct page *page);
 };
 
 struct extent_io_tree {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 49c4f5b40ed6..61a377bcb2fb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -161,7 +161,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 	int ret;
 
 	mutex_lock(&root->fs_info->trans_mutex);
-	if ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2)
+	if (!root->fs_info->log_root_recovering &&
+	    ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
 		wait_current_trans(root);
 	ret = join_transaction(root);
 	BUG_ON(ret);
@@ -328,9 +329,17 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 
 		index = start >> PAGE_CACHE_SHIFT;
 		start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-		page = find_lock_page(btree_inode->i_mapping, index);
+		page = find_get_page(btree_inode->i_mapping, index);
 		if (!page)
 			continue;
+
+		btree_lock_page_hook(page);
+		if (!page->mapping) {
+			unlock_page(page);
+			page_cache_release(page);
+			continue;
+		}
+
 		if (PageWriteback(page)) {
 			if (PageDirty(page))
 				wait_on_page_writeback(page);
@@ -360,7 +369,8 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 		if (!page)
 			continue;
 		if (PageDirty(page)) {
-			lock_page(page);
+			btree_lock_page_hook(page);
+			wait_on_page_writeback(page);
 			err = write_one_page(page, 0);
 			if (err)
 				werr = err;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d1ce8314b948..13d7ee8e0c52 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1176,8 +1176,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 	struct btrfs_key found_key;
 	struct btrfs_key log_key;
 	struct inode *dir;
-	struct inode *inode;
 	u8 log_type;
+	int exists;
 	int ret;
 
 	dir = read_one_inode(root, key->objectid);
@@ -1190,6 +1190,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 				   name_len);
 
 	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
+	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
+	if (exists == 0)
+		exists = 1;
+	else
+		exists = 0;
+	btrfs_release_path(root, path);
+
 	if (key->type == BTRFS_DIR_ITEM_KEY) {
 		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
 					       name, name_len, 1);
@@ -1224,11 +1231,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 	 * don't drop the conflicting directory entry if the inode
 	 * for the new entry doesn't exist
 	 */
-	inode = read_one_inode(root, log_key.objectid);
-	if (!inode)
+	if (!exists)
 		goto out;
 
-	iput(inode);
 	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
 	BUG_ON(ret);
 