Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--	fs/btrfs/inode.c	386
1 file changed, 266 insertions, 120 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a6ed6944e50c..85a1e5053fe6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -230,7 +230,6 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 	u64 inline_len = actual_end - start;
 	u64 aligned_end = (end + root->sectorsize - 1) &
 			~((u64)root->sectorsize - 1);
-	u64 hint_byte;
 	u64 data_len = inline_len;
 	int ret;
 
@@ -247,8 +246,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 		return 1;
 	}
 
-	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
-				 &hint_byte, 1);
+	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
 	if (ret)
 		return ret;
 
@@ -664,7 +662,7 @@ retry:
 					   async_extent->compressed_size,
 					   async_extent->compressed_size,
 					   0, alloc_hint, &ins, 1);
-		if (ret)
+		if (ret && ret != -ENOSPC)
 			btrfs_abort_transaction(trans, root, ret);
 		btrfs_end_transaction(trans, root);
 	}
@@ -1308,6 +1306,7 @@ out_check:
 			em->block_start = disk_bytenr;
 			em->bdev = root->fs_info->fs_devices->latest_bdev;
 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
+			set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
 			while (1) {
 				write_lock(&em_tree->lock);
 				ret = add_extent_mapping(em_tree, em);
@@ -1364,11 +1363,7 @@ out_check:
 	}
 
 error:
-	if (nolock) {
-		err = btrfs_end_transaction_nolock(trans, root);
-	} else {
-		err = btrfs_end_transaction(trans, root);
-	}
+	err = btrfs_end_transaction(trans, root);
 	if (!ret)
 		ret = err;
 
@@ -1785,7 +1780,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key ins;
-	u64 hint;
 	int ret;
 
 	path = btrfs_alloc_path();
@@ -1803,8 +1797,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	 * the caller is expected to unpin it and allow it to be merged
 	 * with the others.
 	 */
-	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
-				 &hint, 0);
+	ret = btrfs_drop_extents(trans, root, inode, file_pos,
+				 file_pos + num_bytes, 0);
 	if (ret)
 		goto out;
 
@@ -1828,10 +1822,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
 
-	btrfs_unlock_up_safe(path, 1);
-	btrfs_set_lock_blocking(leaf);
-
 	btrfs_mark_buffer_dirty(leaf);
+	btrfs_release_path(path);
 
 	inode_add_bytes(inode, num_bytes);
 
@@ -1929,11 +1921,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 						ordered_extent->len,
 						compress_type, 0, 0,
 						BTRFS_FILE_EXTENT_REG);
-		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
-				   ordered_extent->file_offset,
-				   ordered_extent->len);
 	}
-
+	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+			   ordered_extent->file_offset, ordered_extent->len,
+			   trans->transid);
 	if (ret < 0) {
 		btrfs_abort_transaction(trans, root, ret);
 		goto out_unlock;
@@ -1949,6 +1940,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 			btrfs_abort_transaction(trans, root, ret);
 			goto out_unlock;
 		}
+	} else {
+		btrfs_set_inode_last_trans(trans, inode);
 	}
 	ret = 0;
 out_unlock:
@@ -1958,12 +1951,8 @@ out_unlock:
 out:
 	if (root != root->fs_info->tree_root)
 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
-	if (trans) {
-		if (nolock)
-			btrfs_end_transaction_nolock(trans, root);
-		else
-			btrfs_end_transaction(trans, root);
-	}
+	if (trans)
+		btrfs_end_transaction(trans, root);
 
 	if (ret)
 		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
@@ -2119,7 +2108,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
 	if (empty)
 		return;
 
-	down_read(&root->fs_info->cleanup_work_sem);
 	spin_lock(&fs_info->delayed_iput_lock);
 	list_splice_init(&fs_info->delayed_iputs, &list);
 	spin_unlock(&fs_info->delayed_iput_lock);
@@ -2130,7 +2118,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
 		iput(delayed->inode);
 		kfree(delayed);
 	}
-	up_read(&root->fs_info->cleanup_work_sem);
 }
 
 enum btrfs_orphan_cleanup_state {
@@ -2198,7 +2185,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 	int ret;
 
 	if (!root->orphan_block_rsv) {
-		block_rsv = btrfs_alloc_block_rsv(root);
+		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 		if (!block_rsv)
 			return -ENOMEM;
 	}
@@ -2225,7 +2212,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 		insert = 1;
 #endif
 		insert = 1;
-		atomic_dec(&root->orphan_inodes);
+		atomic_inc(&root->orphan_inodes);
 	}
 
 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
@@ -2590,6 +2577,18 @@ static void btrfs_read_locked_inode(struct inode *inode)
 
 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
+	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
+
+	/*
+	 * If we were modified in the current generation and evicted from memory
+	 * and then re-read we need to do a full sync since we don't have any
+	 * idea about which extents were modified before we were evicted from
+	 * cache.
+	 */
+	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
+		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+			&BTRFS_I(inode)->runtime_flags);
+
 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
 	inode->i_generation = BTRFS_I(inode)->generation;
 	inode->i_rdev = 0;
@@ -2894,7 +2893,6 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
-	struct btrfs_inode_ref *ref;
 	struct btrfs_dir_item *di;
 	struct inode *inode = dentry->d_inode;
 	u64 index;
@@ -3008,17 +3006,17 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 	}
 	btrfs_release_path(path);
 
-	ref = btrfs_lookup_inode_ref(trans, root, path,
-				dentry->d_name.name, dentry->d_name.len,
-				ino, dir_ino, 0);
-	if (IS_ERR(ref)) {
-		err = PTR_ERR(ref);
+	ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
+					dentry->d_name.len, ino, dir_ino, 0,
+					&index);
+	if (ret) {
+		err = ret;
 		goto out;
 	}
-	BUG_ON(!ref); /* Logic error */
+
 	if (check_path_shared(root, path))
 		goto out;
-	index = btrfs_inode_ref_index(path->nodes[0], ref);
+
 	btrfs_release_path(path);
 
 	/*
@@ -3061,7 +3059,7 @@ out:
 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root)
 {
-	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
+	if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
 		btrfs_block_rsv_release(root, trans->block_rsv,
 					trans->bytes_reserved);
 		trans->block_rsv = &root->fs_info->trans_block_rsv;
@@ -3191,9 +3189,10 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 	struct btrfs_trans_handle *trans;
 	unsigned long nr = 0;
 
-	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
-	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
+	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
 		return -ENOTEMPTY;
+	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
+		return -EPERM;
 
 	trans = __unlink_start_trans(dir, dentry);
 	if (IS_ERR(trans))
@@ -3267,8 +3266,13 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 	path->reada = -1;
 
+	/*
+	 * We want to drop from the next block forward in case this new size is
+	 * not block aligned since we will be keeping the last block of the
+	 * extent just the way it is.
+	 */
 	if (root->ref_cows || root == root->fs_info->tree_root)
-		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
+		btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0);
 
 	/*
 	 * This function is also used to drop the items in the log tree before
@@ -3429,12 +3433,6 @@ delete:
 
 		if (path->slots[0] == 0 ||
 		    path->slots[0] != pending_del_slot) {
-			if (root->ref_cows &&
-			    BTRFS_I(inode)->location.objectid !=
-						BTRFS_FREE_INO_OBJECTID) {
-				err = -EAGAIN;
-				goto out;
-			}
 			if (pending_del_nr) {
 				ret = btrfs_del_items(trans, root, path,
 						      pending_del_slot,
@@ -3465,12 +3463,20 @@ error:
 }
 
 /*
- * taken from block_truncate_page, but does cow as it zeros out
- * any bytes left in the last page in the file.
+ * btrfs_truncate_page - read, zero a chunk and write a page
+ * @inode - inode that we're zeroing
+ * @from - the offset to start zeroing
+ * @len - the length to zero, 0 to zero the entire range respective to the
+ *	offset
+ * @front - zero up to the offset instead of from the offset on
+ *
+ * This will find the page for the "from" offset and cow the page and zero the
+ * part we want to zero.  This is used with truncate and hole punching.
  */
-static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
+int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+			int front)
 {
-	struct inode *inode = mapping->host;
+	struct address_space *mapping = inode->i_mapping;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_ordered_extent *ordered;
@@ -3485,7 +3491,8 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 	u64 page_start;
 	u64 page_end;
 
-	if ((offset & (blocksize - 1)) == 0)
+	if ((offset & (blocksize - 1)) == 0 &&
+	    (!len || ((len & (blocksize - 1)) == 0)))
 		goto out;
 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
 	if (ret)
@@ -3532,7 +3539,8 @@ again:
 	}
 
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
-			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
+			  EXTENT_DIRTY | EXTENT_DELALLOC |
+			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 			  0, 0, &cached_state, GFP_NOFS);
 
 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
@@ -3545,8 +3553,13 @@ again:
 
 	ret = 0;
 	if (offset != PAGE_CACHE_SIZE) {
+		if (!len)
+			len = PAGE_CACHE_SIZE - offset;
 		kaddr = kmap(page);
-		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+		if (front)
+			memset(kaddr, 0, offset);
+		else
+			memset(kaddr + offset, 0, len);
 		flush_dcache_page(page);
 		kunmap(page);
 	}
@@ -3577,6 +3590,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	u64 mask = root->sectorsize - 1;
 	u64 hole_start = (oldsize + mask) & ~mask;
 	u64 block_end = (size + mask) & ~mask;
@@ -3613,7 +3627,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 		last_byte = min(extent_map_end(em), block_end);
 		last_byte = (last_byte + mask) & ~mask;
 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
-			u64 hint_byte = 0;
+			struct extent_map *hole_em;
 			hole_size = last_byte - cur_offset;
 
 			trans = btrfs_start_transaction(root, 3);
@@ -3622,9 +3636,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 				break;
 			}
 
-			err = btrfs_drop_extents(trans, inode, cur_offset,
-						 cur_offset + hole_size,
-						 &hint_byte, 1);
+			err = btrfs_drop_extents(trans, root, inode,
+						 cur_offset,
+						 cur_offset + hole_size, 1);
 			if (err) {
 				btrfs_abort_transaction(trans, root, err);
 				btrfs_end_transaction(trans, root);
@@ -3641,9 +3655,39 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 				break;
 			}
 
-			btrfs_drop_extent_cache(inode, hole_start,
-						last_byte - 1, 0);
+			btrfs_drop_extent_cache(inode, cur_offset,
+						cur_offset + hole_size - 1, 0);
+			hole_em = alloc_extent_map();
+			if (!hole_em) {
+				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+					&BTRFS_I(inode)->runtime_flags);
+				goto next;
+			}
+			hole_em->start = cur_offset;
+			hole_em->len = hole_size;
+			hole_em->orig_start = cur_offset;
 
+			hole_em->block_start = EXTENT_MAP_HOLE;
+			hole_em->block_len = 0;
+			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+			hole_em->compress_type = BTRFS_COMPRESS_NONE;
+			hole_em->generation = trans->transid;
+
+			while (1) {
+				write_lock(&em_tree->lock);
+				err = add_extent_mapping(em_tree, hole_em);
+				if (!err)
+					list_move(&hole_em->list,
+						  &em_tree->modified_extents);
+				write_unlock(&em_tree->lock);
+				if (err != -EEXIST)
+					break;
+				btrfs_drop_extent_cache(inode, cur_offset,
+							cur_offset +
+							hole_size - 1, 0);
+			}
+			free_extent_map(hole_em);
+next:
 			btrfs_update_inode(trans, root, inode);
 			btrfs_end_transaction(trans, root);
 		}
@@ -3768,26 +3812,22 @@ void btrfs_evict_inode(struct inode *inode)
 		goto no_delete;
 	}
 
-	rsv = btrfs_alloc_block_rsv(root);
+	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 	if (!rsv) {
 		btrfs_orphan_del(NULL, inode);
 		goto no_delete;
 	}
 	rsv->size = min_size;
+	rsv->failfast = 1;
 	global_rsv = &root->fs_info->global_block_rsv;
 
 	btrfs_i_size_write(inode, 0);
 
 	/*
-	 * This is a bit simpler than btrfs_truncate since
-	 *
-	 * 1) We've already reserved our space for our orphan item in the
-	 * unlink.
-	 * 2) We're going to delete the inode item, so we don't need to update
-	 * it at all.
-	 *
-	 * So we just need to reserve some slack space in case we add bytes when
-	 * doing the truncate.
+	 * This is a bit simpler than btrfs_truncate since we've already
+	 * reserved our space for our orphan item in the unlink, so we just
+	 * need to reserve some slack space in case we add bytes and update
+	 * inode item when doing the truncate.
 	 */
 	while (1) {
 		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
@@ -3808,7 +3848,7 @@ void btrfs_evict_inode(struct inode *inode)
 			goto no_delete;
 		}
 
-		trans = btrfs_start_transaction(root, 0);
+		trans = btrfs_start_transaction_noflush(root, 1);
 		if (IS_ERR(trans)) {
 			btrfs_orphan_del(NULL, inode);
 			btrfs_free_block_rsv(root, rsv);
@@ -3818,9 +3858,13 @@ void btrfs_evict_inode(struct inode *inode)
 		trans->block_rsv = rsv;
 
 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
-		if (ret != -EAGAIN)
+		if (ret != -ENOSPC)
 			break;
 
+		trans->block_rsv = &root->fs_info->trans_block_rsv;
+		ret = btrfs_update_inode(trans, root, inode);
+		BUG_ON(ret);
+
 		nr = trans->blocks_used;
 		btrfs_end_transaction(trans, root);
 		trans = NULL;
@@ -4470,10 +4514,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 		trans = btrfs_join_transaction(root);
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
-		if (nolock)
-			ret = btrfs_end_transaction_nolock(trans, root);
-		else
-			ret = btrfs_commit_transaction(trans, root);
+		ret = btrfs_commit_transaction(trans, root);
 	}
 	return ret;
 }
@@ -4671,6 +4712,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	BTRFS_I(inode)->generation = trans->transid;
 	inode->i_generation = BTRFS_I(inode)->generation;
 
+	/*
+	 * We could have gotten an inode number from somebody who was fsynced
+	 * and then removed in this same transaction, so let's just set full
+	 * sync since it will be a full sync anyway and this will blow away the
+	 * old info in the log.
+	 */
+	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
+
 	if (S_ISDIR(mode))
 		owner = 0;
 	else
@@ -4680,6 +4729,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
 	key[0].offset = 0;
 
+	/*
+	 * Start new inodes with an inode_ref. This is slightly more
+	 * efficient for small numbers of hard links since they will
+	 * be packed into one item. Extended refs will kick in if we
+	 * add more hard links than can fit in the ref item.
+	 */
 	key[1].objectid = objectid;
 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
 	key[1].offset = ref_objectid;
@@ -4986,7 +5041,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	if (root->objectid != BTRFS_I(inode)->root->objectid)
 		return -EXDEV;
 
-	if (inode->i_nlink == ~0U)
+	if (inode->i_nlink >= BTRFS_LINK_MAX)
 		return -EMLINK;
 
 	err = btrfs_set_inode_index(dir, &index);
@@ -5450,7 +5505,8 @@ insert:
 	write_unlock(&em_tree->lock);
 out:
 
-	trace_btrfs_get_extent(root, em);
+	if (em)
+		trace_btrfs_get_extent(root, em);
 
 	if (path)
 		btrfs_free_path(path);
@@ -5836,6 +5892,48 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 	return ret;
 }
 
+static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
+					   u64 len, u64 orig_start,
+					   u64 block_start, u64 block_len,
+					   int type)
+{
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	em_tree = &BTRFS_I(inode)->extent_tree;
+	em = alloc_extent_map();
+	if (!em)
+		return ERR_PTR(-ENOMEM);
+
+	em->start = start;
+	em->orig_start = orig_start;
+	em->len = len;
+	em->block_len = block_len;
+	em->block_start = block_start;
+	em->bdev = root->fs_info->fs_devices->latest_bdev;
+	set_bit(EXTENT_FLAG_PINNED, &em->flags);
+	if (type == BTRFS_ORDERED_PREALLOC)
+		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+
+	do {
+		btrfs_drop_extent_cache(inode, em->start,
+				em->start + em->len - 1, 0);
+		write_lock(&em_tree->lock);
+		ret = add_extent_mapping(em_tree, em);
+		write_unlock(&em_tree->lock);
+	} while (ret == -EEXIST);
+
+	if (ret) {
+		free_extent_map(em);
+		return ERR_PTR(ret);
+	}
+
+	return em;
+}
+
+
 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 				   struct buffer_head *bh_result, int create)
 {
@@ -5950,6 +6048,19 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 			goto must_cow;
 
 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
+			u64 orig_start = em->start;
+
+			if (type == BTRFS_ORDERED_PREALLOC) {
+				free_extent_map(em);
+				em = create_pinned_em(inode, start, len,
+						      orig_start,
+						      block_start, len, type);
+				if (IS_ERR(em)) {
+					btrfs_end_transaction(trans, root);
+					goto unlock_err;
+				}
+			}
+
 			ret = btrfs_add_ordered_extent_dio(inode, start,
 					   block_start, len, len, type);
 			btrfs_end_transaction(trans, root);
@@ -5999,7 +6110,8 @@ unlock:
 	if (lockstart < lockend) {
 		if (create && len < lockend - lockstart) {
 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-					 lockstart + len - 1, unlock_bits, 1, 0,
+					 lockstart + len - 1,
+					 unlock_bits | EXTENT_DEFRAG, 1, 0,
 					 &cached_state, GFP_NOFS);
 			/*
 			 * Beside unlock, we also need to cleanup reserved space
@@ -6007,8 +6119,8 @@ unlock:
 			 */
 			clear_extent_bit(&BTRFS_I(inode)->io_tree,
 					 lockstart + len, lockend,
-					 unlock_bits | EXTENT_DO_ACCOUNTING,
-					 1, 0, NULL, GFP_NOFS);
+					 unlock_bits | EXTENT_DO_ACCOUNTING |
+					 EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS);
 		} else {
 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 					 lockend, unlock_bits, 1, 0,
@@ -6573,8 +6685,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		 */
 		clear_extent_bit(tree, page_start, page_end,
 				 EXTENT_DIRTY | EXTENT_DELALLOC |
-				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
-				 &cached_state, GFP_NOFS);
+				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
+				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
 		/*
 		 * whoever cleared the private bit is responsible
 		 * for the finish_ordered_io
@@ -6590,7 +6702,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 	}
 	clear_extent_bit(tree, page_start, page_end,
 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
+		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
+		 &cached_state, GFP_NOFS);
 	__btrfs_releasepage(page, GFP_NOFS);
 
 	ClearPageChecked(page);
@@ -6687,7 +6800,8 @@ again:
 	 * prepare_pages in the normal write path.
 	 */
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
-			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
+			  EXTENT_DIRTY | EXTENT_DELALLOC |
+			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 			  0, 0, &cached_state, GFP_NOFS);
 
 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
@@ -6718,6 +6832,7 @@ again:
 
 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
+	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
 
 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
 
@@ -6745,7 +6860,7 @@ static int btrfs_truncate(struct inode *inode)
 	u64 mask = root->sectorsize - 1;
 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
 
-	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
+	ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
 	if (ret)
 		return ret;
 
@@ -6788,10 +6903,11 @@ static int btrfs_truncate(struct inode *inode)
 	 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
 	 *    updating the inode.
 	 */
-	rsv = btrfs_alloc_block_rsv(root);
+	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 	if (!rsv)
 		return -ENOMEM;
 	rsv->size = min_size;
+	rsv->failfast = 1;
 
 	/*
 	 * 1 for the truncate slack space
@@ -6837,36 +6953,21 @@ static int btrfs_truncate(struct inode *inode)
 		       &BTRFS_I(inode)->runtime_flags))
 		btrfs_add_ordered_operation(trans, root, inode);
 
-	while (1) {
-		ret = btrfs_block_rsv_refill(root, rsv, min_size);
-		if (ret) {
-			/*
-			 * This can only happen with the original transaction we
-			 * started above, every other time we shouldn't have a
-			 * transaction started yet.
-			 */
-			if (ret == -EAGAIN)
-				goto end_trans;
-			err = ret;
-			break;
-		}
-
-		if (!trans) {
-			/* Just need the 1 for updating the inode */
-			trans = btrfs_start_transaction(root, 1);
-			if (IS_ERR(trans)) {
-				ret = err = PTR_ERR(trans);
-				trans = NULL;
-				break;
-			}
-		}
-
-		trans->block_rsv = rsv;
+	/*
+	 * So if we truncate and then write and fsync we normally would just
+	 * write the extents that changed, which is a problem if we need to
+	 * first truncate that entire inode.  So set this flag so we write out
+	 * all of the extents in the inode to the sync log so we're completely
+	 * safe.
+	 */
+	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
+	trans->block_rsv = rsv;
 
+	while (1) {
 		ret = btrfs_truncate_inode_items(trans, root, inode,
 						 inode->i_size,
 						 BTRFS_EXTENT_DATA_KEY);
-		if (ret != -EAGAIN) {
+		if (ret != -ENOSPC) {
 			err = ret;
 			break;
 		}
@@ -6877,11 +6978,22 @@ static int btrfs_truncate(struct inode *inode)
 			err = ret;
 			break;
 		}
-end_trans:
+
 		nr = trans->blocks_used;
 		btrfs_end_transaction(trans, root);
-		trans = NULL;
 		btrfs_btree_balance_dirty(root, nr);
+
+		trans = btrfs_start_transaction(root, 2);
+		if (IS_ERR(trans)) {
+			ret = err = PTR_ERR(trans);
+			trans = NULL;
+			break;
+		}
+
+		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+					      rsv, min_size);
+		BUG_ON(ret);	/* shouldn't happen */
+		trans->block_rsv = rsv;
 	}
 
 	if (ret == 0 && inode->i_nlink > 0) {
@@ -6965,6 +7077,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->csum_bytes = 0;
 	ei->index_cnt = (u64)-1;
 	ei->last_unlink_trans = 0;
+	ei->last_log_commit = 0;
 
 	spin_lock_init(&ei->lock);
 	ei->outstanding_extents = 0;
@@ -7095,31 +7208,31 @@ void btrfs_destroy_cachep(void)
 
 int btrfs_init_cachep(void)
 {
-	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
+	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
 			sizeof(struct btrfs_inode), 0,
 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
 	if (!btrfs_inode_cachep)
 		goto fail;
 
-	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
+	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
 			sizeof(struct btrfs_trans_handle), 0,
 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 	if (!btrfs_trans_handle_cachep)
 		goto fail;
 
-	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
+	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
 			sizeof(struct btrfs_transaction), 0,
 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 	if (!btrfs_transaction_cachep)
 		goto fail;
 
-	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
+	btrfs_path_cachep = kmem_cache_create("btrfs_path",
 			sizeof(struct btrfs_path), 0,
 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 	if (!btrfs_path_cachep)
 		goto fail;
 
-	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
+	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
 			sizeof(struct btrfs_free_space), 0,
 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 	if (!btrfs_free_space_cachep)
@@ -7513,6 +7626,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 				       loff_t actual_len, u64 *alloc_hint,
 				       struct btrfs_trans_handle *trans)
 {
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map *em;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_key ins;
 	u64 cur_offset = start;
@@ -7553,6 +7668,37 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 		btrfs_drop_extent_cache(inode, cur_offset,
 					cur_offset + ins.offset -1, 0);
 
+		em = alloc_extent_map();
+		if (!em) {
+			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+				&BTRFS_I(inode)->runtime_flags);
+			goto next;
+		}
+
+		em->start = cur_offset;
+		em->orig_start = cur_offset;
+		em->len = ins.offset;
+		em->block_start = ins.objectid;
+		em->block_len = ins.offset;
+		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+		em->generation = trans->transid;
+
+		while (1) {
+			write_lock(&em_tree->lock);
+			ret = add_extent_mapping(em_tree, em);
+			if (!ret)
+				list_move(&em->list,
+					  &em_tree->modified_extents);
+			write_unlock(&em_tree->lock);
+			if (ret != -EEXIST)
+				break;
+			btrfs_drop_extent_cache(inode, cur_offset,
+						cur_offset + ins.offset - 1,
+						0);
+		}
+		free_extent_map(em);
+next:
 		num_bytes -= ins.offset;
 		cur_offset += ins.offset;
 		*alloc_hint = ins.objectid + ins.offset;