Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c  326
1 file changed, 226 insertions(+), 100 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d96f5cf38a2d..41a5688ffdfe 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -263,7 +263,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
         data_len = compressed_size;
 
         if (start > 0 ||
-            actual_end > PAGE_CACHE_SIZE ||
+            actual_end > root->sectorsize ||
             data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
             (!compressed_size &&
             (actual_end & (root->sectorsize - 1)) == 0) ||
@@ -2002,7 +2002,8 @@ again:
         if (PagePrivate2(page))
                 goto out;
 
-        ordered = btrfs_lookup_ordered_extent(inode, page_start);
+        ordered = btrfs_lookup_ordered_range(inode, page_start,
+                                             PAGE_CACHE_SIZE);
         if (ordered) {
                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
                                      page_end, &cached_state, GFP_NOFS);
@@ -4013,7 +4014,8 @@ err:
         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
         inode_inc_iversion(inode);
         inode_inc_iversion(dir);
-        inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+        inode->i_ctime = dir->i_mtime =
+                dir->i_ctime = current_fs_time(inode->i_sb);
         ret = btrfs_update_inode(trans, root, dir);
 out:
         return ret;
@@ -4156,7 +4158,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 
         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
         inode_inc_iversion(dir);
-        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+        dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
         ret = btrfs_update_inode_fallback(trans, root, dir);
         if (ret)
                 btrfs_abort_transaction(trans, root, ret);
@@ -4211,11 +4213,20 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
 {
         int ret;
 
+        /*
+         * This is only used to apply pressure to the enospc system, we don't
+         * intend to use this reservation at all.
+         */
         bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
+        bytes_deleted *= root->nodesize;
         ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
-        if (!ret)
+        if (!ret) {
+                trace_btrfs_space_reservation(root->fs_info, "transaction",
+                                              trans->transid,
+                                              bytes_deleted, 1);
                 trans->bytes_reserved += bytes_deleted;
+        }
         return ret;
 
 }
@@ -4248,7 +4259,8 @@ static int truncate_inline_extent(struct inode *inode,
                  * read the extent item from disk (data not in the page cache).
                  */
                 btrfs_release_path(path);
-                return btrfs_truncate_page(inode, offset, page_end - offset, 0);
+                return btrfs_truncate_block(inode, offset, page_end - offset,
+                                            0);
         }
 
         btrfs_set_file_extent_ram_bytes(leaf, fi, size);
@@ -4601,17 +4613,17 @@ error:
 }
 
 /*
- * btrfs_truncate_page - read, zero a chunk and write a page
+ * btrfs_truncate_block - read, zero a chunk and write a block
  * @inode - inode that we're zeroing
  * @from - the offset to start zeroing
  * @len - the length to zero, 0 to zero the entire range respective to the
  *        offset
  * @front - zero up to the offset instead of from the offset on
  *
- * This will find the page for the "from" offset and cow the page and zero the
+ * This will find the block for the "from" offset and cow the block and zero the
  * part we want to zero. This is used with truncate and hole punching.
  */
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
                         int front)
 {
         struct address_space *mapping = inode->i_mapping;
@@ -4622,18 +4634,19 @@ int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
         char *kaddr;
         u32 blocksize = root->sectorsize;
         pgoff_t index = from >> PAGE_CACHE_SHIFT;
-        unsigned offset = from & (PAGE_CACHE_SIZE-1);
+        unsigned offset = from & (blocksize - 1);
         struct page *page;
         gfp_t mask = btrfs_alloc_write_mask(mapping);
         int ret = 0;
-        u64 page_start;
-        u64 page_end;
+        u64 block_start;
+        u64 block_end;
 
         if ((offset & (blocksize - 1)) == 0 &&
             (!len || ((len & (blocksize - 1)) == 0)))
                 goto out;
+
         ret = btrfs_delalloc_reserve_space(inode,
-                        round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
+                        round_down(from, blocksize), blocksize);
         if (ret)
                 goto out;
 
@@ -4641,14 +4654,14 @@ again:
         page = find_or_create_page(mapping, index, mask);
         if (!page) {
                 btrfs_delalloc_release_space(inode,
-                                round_down(from, PAGE_CACHE_SIZE),
-                                PAGE_CACHE_SIZE);
+                                round_down(from, blocksize),
+                                blocksize);
                 ret = -ENOMEM;
                 goto out;
         }
 
-        page_start = page_offset(page);
-        page_end = page_start + PAGE_CACHE_SIZE - 1;
+        block_start = round_down(from, blocksize);
+        block_end = block_start + blocksize - 1;
 
         if (!PageUptodate(page)) {
                 ret = btrfs_readpage(NULL, page);
@@ -4665,12 +4678,12 @@ again:
         }
         wait_on_page_writeback(page);
 
-        lock_extent_bits(io_tree, page_start, page_end, &cached_state);
+        lock_extent_bits(io_tree, block_start, block_end, &cached_state);
         set_page_extent_mapped(page);
 
-        ordered = btrfs_lookup_ordered_extent(inode, page_start);
+        ordered = btrfs_lookup_ordered_extent(inode, block_start);
         if (ordered) {
-                unlock_extent_cached(io_tree, page_start, page_end,
+                unlock_extent_cached(io_tree, block_start, block_end,
                                      &cached_state, GFP_NOFS);
                 unlock_page(page);
                 page_cache_release(page);
@@ -4679,39 +4692,41 @@ again:
                 goto again;
         }
 
-        clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+        clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
                          EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-        ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+        ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
                                         &cached_state);
         if (ret) {
-                unlock_extent_cached(io_tree, page_start, page_end,
+                unlock_extent_cached(io_tree, block_start, block_end,
                                      &cached_state, GFP_NOFS);
                 goto out_unlock;
         }
 
-        if (offset != PAGE_CACHE_SIZE) {
+        if (offset != blocksize) {
                 if (!len)
-                        len = PAGE_CACHE_SIZE - offset;
+                        len = blocksize - offset;
                 kaddr = kmap(page);
                 if (front)
-                        memset(kaddr, 0, offset);
+                        memset(kaddr + (block_start - page_offset(page)),
+                               0, offset);
                 else
-                        memset(kaddr + offset, 0, len);
+                        memset(kaddr + (block_start - page_offset(page)) + offset,
+                               0, len);
                 flush_dcache_page(page);
                 kunmap(page);
         }
         ClearPageChecked(page);
         set_page_dirty(page);
-        unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
+        unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
                              GFP_NOFS);
 
 out_unlock:
         if (ret)
-                btrfs_delalloc_release_space(inode, page_start,
-                                             PAGE_CACHE_SIZE);
+                btrfs_delalloc_release_space(inode, block_start,
+                                             blocksize);
         unlock_page(page);
         page_cache_release(page);
 out:
@@ -4782,11 +4797,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
         int err = 0;
 
         /*
-         * If our size started in the middle of a page we need to zero out the
-         * rest of the page before we expand the i_size, otherwise we could
+         * If our size started in the middle of a block we need to zero out the
+         * rest of the block before we expand the i_size, otherwise we could
          * expose stale data.
          */
-        err = btrfs_truncate_page(inode, oldsize, 0, 0);
+        err = btrfs_truncate_block(inode, oldsize, 0, 0);
         if (err)
                 return err;
 
@@ -4895,7 +4910,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
         }
 
         if (newsize > oldsize) {
-                truncate_pagecache(inode, newsize);
                 /*
                  * Don't do an expanding truncate while snapshoting is ongoing.
                  * This is to ensure the snapshot captures a fully consistent
@@ -4918,6 +4932,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 
                 i_size_write(inode, newsize);
                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+                pagecache_isize_extended(inode, oldsize, newsize);
                 ret = btrfs_update_inode(trans, root, inode);
                 btrfs_end_write_no_snapshoting(root);
                 btrfs_end_transaction(trans, root);
@@ -5588,7 +5603,7 @@ static struct inode *new_simple_dir(struct super_block *s,
         inode->i_op = &btrfs_dir_ro_inode_operations;
         inode->i_fop = &simple_dir_operations;
         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
-        inode->i_mtime = CURRENT_TIME;
+        inode->i_mtime = current_fs_time(inode->i_sb);
         inode->i_atime = inode->i_mtime;
         inode->i_ctime = inode->i_mtime;
         BTRFS_I(inode)->i_otime = inode->i_mtime;
@@ -5790,7 +5805,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
                 if (name_len <= sizeof(tmp_name)) {
                         name_ptr = tmp_name;
                 } else {
-                        name_ptr = kmalloc(name_len, GFP_NOFS);
+                        name_ptr = kmalloc(name_len, GFP_KERNEL);
                         if (!name_ptr) {
                                 ret = -ENOMEM;
                                 goto err;
@@ -6172,7 +6187,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
         inode_init_owner(inode, dir, mode);
         inode_set_bytes(inode, 0);
 
-        inode->i_mtime = CURRENT_TIME;
+        inode->i_mtime = current_fs_time(inode->i_sb);
         inode->i_atime = inode->i_mtime;
         inode->i_ctime = inode->i_mtime;
         BTRFS_I(inode)->i_otime = inode->i_mtime;
@@ -6285,7 +6300,8 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
         btrfs_i_size_write(parent_inode, parent_inode->i_size +
                            name_len * 2);
         inode_inc_iversion(parent_inode);
-        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
+        parent_inode->i_mtime = parent_inode->i_ctime =
+                current_fs_time(parent_inode->i_sb);
         ret = btrfs_update_inode(trans, root, parent_inode);
         if (ret)
                 btrfs_abort_transaction(trans, root, ret);
@@ -6503,7 +6519,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
         BTRFS_I(inode)->dir_index = 0ULL;
         inc_nlink(inode);
         inode_inc_iversion(inode);
-        inode->i_ctime = CURRENT_TIME;
+        inode->i_ctime = current_fs_time(inode->i_sb);
         ihold(inode);
         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
 
@@ -7414,7 +7430,26 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
                                      cached_state, GFP_NOFS);
 
                 if (ordered) {
-                        btrfs_start_ordered_extent(inode, ordered, 1);
+                        /*
+                         * If we are doing a DIO read and the ordered extent we
+                         * found is for a buffered write, we can not wait for it
+                         * to complete and retry, because if we do so we can
+                         * deadlock with concurrent buffered writes on page
+                         * locks. This happens only if our DIO read covers more
+                         * than one extent map, if at this point has already
+                         * created an ordered extent for a previous extent map
+                         * and locked its range in the inode's io tree, and a
+                         * concurrent write against that previous extent map's
+                         * range and this range started (we unlock the ranges
+                         * in the io tree only when the bios complete and
+                         * buffered writes always lock pages before attempting
+                         * to lock range in the io tree).
+                         */
+                        if (writing ||
+                            test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
+                                btrfs_start_ordered_extent(inode, ordered, 1);
+                        else
+                                ret = -ENOTBLK;
                         btrfs_put_ordered_extent(ordered);
                 } else {
                         /*
@@ -7431,9 +7466,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
                          * that page.
                          */
                         ret = -ENOTBLK;
-                        break;
                 }
 
+                if (ret)
+                        break;
+
                 cond_resched();
         }
 
@@ -7764,9 +7801,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
 }
 
 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
-                          struct page *page, u64 start, u64 end,
-                          int failed_mirror, bio_end_io_t *repair_endio,
-                          void *repair_arg)
+                          struct page *page, unsigned int pgoff,
+                          u64 start, u64 end, int failed_mirror,
+                          bio_end_io_t *repair_endio, void *repair_arg)
 {
         struct io_failure_record *failrec;
         struct bio *bio;
@@ -7787,7 +7824,9 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
                 return -EIO;
         }
 
-        if (failed_bio->bi_vcnt > 1)
+        if ((failed_bio->bi_vcnt > 1)
+                || (failed_bio->bi_io_vec->bv_len
+                        > BTRFS_I(inode)->root->sectorsize))
                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
         else
                 read_mode = READ_SYNC;
@@ -7795,7 +7834,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
         isector = start - btrfs_io_bio(failed_bio)->logical;
         isector >>= inode->i_sb->s_blocksize_bits;
         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
-                                      0, isector, repair_endio, repair_arg);
+                                      pgoff, isector, repair_endio, repair_arg);
         if (!bio) {
                 free_io_failure(inode, failrec);
                 return -EIO;
@@ -7825,12 +7864,17 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
         struct btrfs_retry_complete *done = bio->bi_private;
+        struct inode *inode;
         struct bio_vec *bvec;
         int i;
 
         if (bio->bi_error)
                 goto end;
 
+        ASSERT(bio->bi_vcnt == 1);
+        inode = bio->bi_io_vec->bv_page->mapping->host;
+        ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
         done->uptodate = 1;
         bio_for_each_segment_all(bvec, bio, i)
                 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
@@ -7842,25 +7886,35 @@ end:
 static int __btrfs_correct_data_nocsum(struct inode *inode,
                                        struct btrfs_io_bio *io_bio)
 {
+        struct btrfs_fs_info *fs_info;
         struct bio_vec *bvec;
         struct btrfs_retry_complete done;
         u64 start;
+        unsigned int pgoff;
+        u32 sectorsize;
+        int nr_sectors;
         int i;
         int ret;
 
+        fs_info = BTRFS_I(inode)->root->fs_info;
+        sectorsize = BTRFS_I(inode)->root->sectorsize;
+
         start = io_bio->logical;
         done.inode = inode;
 
         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-try_again:
+                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+                pgoff = bvec->bv_offset;
+
+next_block_or_try_again:
                 done.uptodate = 0;
                 done.start = start;
                 init_completion(&done.done);
 
-                ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-                                     start + bvec->bv_len - 1,
+                ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+                                     pgoff, start, start + sectorsize - 1,
                                      io_bio->mirror_num,
                                      btrfs_retry_endio_nocsum, &done);
                 if (ret)
                         return ret;
 
@@ -7868,10 +7922,15 @@ try_again:
 
                 if (!done.uptodate) {
                         /* We might have another mirror, so try again */
-                        goto try_again;
+                        goto next_block_or_try_again;
                 }
 
-                start += bvec->bv_len;
+                start += sectorsize;
+
+                if (nr_sectors--) {
+                        pgoff += sectorsize;
+                        goto next_block_or_try_again;
+                }
         }
 
         return 0;
@@ -7881,7 +7940,9 @@ static void btrfs_retry_endio(struct bio *bio)
 {
         struct btrfs_retry_complete *done = bio->bi_private;
         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+        struct inode *inode;
         struct bio_vec *bvec;
+        u64 start;
         int uptodate;
         int ret;
         int i;
@@ -7890,13 +7951,20 @@ static void btrfs_retry_endio(struct bio *bio)
                 goto end;
 
         uptodate = 1;
+
+        start = done->start;
+
+        ASSERT(bio->bi_vcnt == 1);
+        inode = bio->bi_io_vec->bv_page->mapping->host;
+        ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
         bio_for_each_segment_all(bvec, bio, i) {
                 ret = __readpage_endio_check(done->inode, io_bio, i,
-                                             bvec->bv_page, 0,
+                                             bvec->bv_page, bvec->bv_offset,
                                              done->start, bvec->bv_len);
                 if (!ret)
                         clean_io_failure(done->inode, done->start,
-                                         bvec->bv_page, 0);
+                                         bvec->bv_page, bvec->bv_offset);
                 else
                         uptodate = 0;
         }
@@ -7910,20 +7978,34 @@ end:
 static int __btrfs_subio_endio_read(struct inode *inode,
                                     struct btrfs_io_bio *io_bio, int err)
 {
+        struct btrfs_fs_info *fs_info;
         struct bio_vec *bvec;
         struct btrfs_retry_complete done;
         u64 start;
         u64 offset = 0;
+        u32 sectorsize;
+        int nr_sectors;
+        unsigned int pgoff;
+        int csum_pos;
         int i;
         int ret;
 
+        fs_info = BTRFS_I(inode)->root->fs_info;
+        sectorsize = BTRFS_I(inode)->root->sectorsize;
+
         err = 0;
         start = io_bio->logical;
         done.inode = inode;
 
         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-                ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
-                                             0, start, bvec->bv_len);
+                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+
+                pgoff = bvec->bv_offset;
+next_block:
+                csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
+                ret = __readpage_endio_check(inode, io_bio, csum_pos,
+                                             bvec->bv_page, pgoff, start,
+                                             sectorsize);
                 if (likely(!ret))
                         goto next;
 try_again:
@@ -7931,10 +8013,10 @@ try_again:
                 done.start = start;
                 init_completion(&done.done);
 
-                ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-                                     start + bvec->bv_len - 1,
+                ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+                                     pgoff, start, start + sectorsize - 1,
                                      io_bio->mirror_num,
                                      btrfs_retry_endio, &done);
                 if (ret) {
                         err = ret;
                         goto next;
@@ -7947,8 +8029,15 @@ try_again:
                         goto try_again;
                 }
 next:
-                offset += bvec->bv_len;
-                start += bvec->bv_len;
+                offset += sectorsize;
+                start += sectorsize;
+
+                ASSERT(nr_sectors);
+
+                if (--nr_sectors) {
+                        pgoff += sectorsize;
+                        goto next_block;
+                }
         }
 
         return err;
@@ -8202,9 +8291,11 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
         u64 file_offset = dip->logical_offset;
         u64 submit_len = 0;
         u64 map_length;
-        int nr_pages = 0;
-        int ret;
+        u32 blocksize = root->sectorsize;
         int async_submit = 0;
+        int nr_sectors;
+        int ret;
+        int i;
 
         map_length = orig_bio->bi_iter.bi_size;
         ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
@@ -8234,9 +8325,12 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
         atomic_inc(&dip->pending_bios);
 
         while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
-                if (map_length < submit_len + bvec->bv_len ||
-                    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-                                 bvec->bv_offset) < bvec->bv_len) {
+                nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
+                i = 0;
+next_block:
+                if (unlikely(map_length < submit_len + blocksize ||
+                    bio_add_page(bio, bvec->bv_page, blocksize,
+                            bvec->bv_offset + (i * blocksize)) < blocksize)) {
                         /*
                          * inc the count before we submit the bio so
                          * we know the end IO handler won't happen before
@@ -8257,7 +8351,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                         file_offset += submit_len;
 
                         submit_len = 0;
-                        nr_pages = 0;
 
                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
                                                   start_sector, GFP_NOFS);
@@ -8275,9 +8368,14 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                                 bio_put(bio);
                                 goto out_err;
                         }
+
+                        goto next_block;
                 } else {
-                        submit_len += bvec->bv_len;
-                        nr_pages++;
+                        submit_len += blocksize;
+                        if (--nr_sectors) {
+                                i++;
+                                goto next_block;
+                        }
                         bvec++;
                 }
         }
@@ -8642,6 +8740,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
         struct extent_state *cached_state = NULL;
         u64 page_start = page_offset(page);
         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+        u64 start;
+        u64 end;
         int inode_evicting = inode->i_state & I_FREEING;
 
         /*
@@ -8661,14 +8761,18 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 
         if (!inode_evicting)
                 lock_extent_bits(tree, page_start, page_end, &cached_state);
-        ordered = btrfs_lookup_ordered_extent(inode, page_start);
+again:
+        start = page_start;
+        ordered = btrfs_lookup_ordered_range(inode, start,
+                                             page_end - start + 1);
         if (ordered) {
+                end = min(page_end, ordered->file_offset + ordered->len - 1);
                 /*
                  * IO on this page will never be started, so we need
                  * to account for any ordered extents now
                  */
                 if (!inode_evicting)
-                        clear_extent_bit(tree, page_start, page_end,
+                        clear_extent_bit(tree, start, end,
                                          EXTENT_DIRTY | EXTENT_DELALLOC |
                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                          EXTENT_DEFRAG, 1, 0, &cached_state,
@@ -8685,22 +8789,26 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 
                         spin_lock_irq(&tree->lock);
                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
-                        new_len = page_start - ordered->file_offset;
+                        new_len = start - ordered->file_offset;
                         if (new_len < ordered->truncated_len)
                                 ordered->truncated_len = new_len;
                         spin_unlock_irq(&tree->lock);
 
                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
-                                                           page_start,
-                                                           PAGE_CACHE_SIZE, 1))
+                                                           start,
+                                                           end - start + 1, 1))
                                 btrfs_finish_ordered_io(ordered);
                 }
                 btrfs_put_ordered_extent(ordered);
                 if (!inode_evicting) {
                         cached_state = NULL;
-                        lock_extent_bits(tree, page_start, page_end,
+                        lock_extent_bits(tree, start, end,
                                          &cached_state);
                 }
+
+                start = end + 1;
+                if (start < page_end)
+                        goto again;
         }
 
         /*
@@ -8761,15 +8869,28 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         loff_t size;
         int ret;
         int reserved = 0;
+        u64 reserved_space;
         u64 page_start;
         u64 page_end;
+        u64 end;
+
+        reserved_space = PAGE_CACHE_SIZE;
 
         sb_start_pagefault(inode->i_sb);
         page_start = page_offset(page);
         page_end = page_start + PAGE_CACHE_SIZE - 1;
+        end = page_end;
 
+        /*
+         * Reserving delalloc space after obtaining the page lock can lead to
+         * deadlock. For example, if a dirty page is locked by this function
+         * and the call to btrfs_delalloc_reserve_space() ends up triggering
+         * dirty page write out, then the btrfs_writepage() function could
+         * end up waiting indefinitely to get a lock on the page currently
+         * being processed by btrfs_page_mkwrite() function.
+         */
         ret = btrfs_delalloc_reserve_space(inode, page_start,
-                                           PAGE_CACHE_SIZE);
+                                           reserved_space);
         if (!ret) {
                 ret = file_update_time(vma->vm_file);
                 reserved = 1;
@@ -8803,7 +8924,7 @@ again:
          * we can't set the delalloc bits if there are pending ordered
          * extents. Drop our locks and wait for them to finish
          */
-        ordered = btrfs_lookup_ordered_extent(inode, page_start);
+        ordered = btrfs_lookup_ordered_range(inode, page_start, page_end);
         if (ordered) {
                 unlock_extent_cached(io_tree, page_start, page_end,
                                      &cached_state, GFP_NOFS);
@@ -8813,6 +8934,18 @@ again:
                 goto again;
         }
 
+        if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+                reserved_space = round_up(size - page_start, root->sectorsize);
+                if (reserved_space < PAGE_CACHE_SIZE) {
+                        end = page_start + reserved_space - 1;
+                        spin_lock(&BTRFS_I(inode)->lock);
+                        BTRFS_I(inode)->outstanding_extents++;
+                        spin_unlock(&BTRFS_I(inode)->lock);
+                        btrfs_delalloc_release_space(inode, page_start,
+                                        PAGE_CACHE_SIZE - reserved_space);
+                }
+        }
+
         /*
          * XXX - page_mkwrite gets called every time the page is dirtied, even
          * if it was already dirty, so for space accounting reasons we need to
@@ -8820,12 +8953,12 @@ again:
          * is probably a better way to do this, but for now keep consistent with
          * prepare_pages in the normal write path.
          */
-        clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+        clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
                          EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-        ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+        ret = btrfs_set_extent_delalloc(inode, page_start, end,
                                         &cached_state);
         if (ret) {
                 unlock_extent_cached(io_tree, page_start, page_end,
@@ -8864,7 +8997,7 @@ out_unlock:
         }
         unlock_page(page);
 out:
-        btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
+        btrfs_delalloc_release_space(inode, page_start, reserved_space);
 out_noreserve:
         sb_end_pagefault(inode->i_sb);
         return ret;
@@ -9190,16 +9323,11 @@ void btrfs_destroy_cachep(void)
          * destroy cache.
          */
         rcu_barrier();
-        if (btrfs_inode_cachep)
-                kmem_cache_destroy(btrfs_inode_cachep);
-        if (btrfs_trans_handle_cachep)
-                kmem_cache_destroy(btrfs_trans_handle_cachep);
-        if (btrfs_transaction_cachep)
-                kmem_cache_destroy(btrfs_transaction_cachep);
-        if (btrfs_path_cachep)
-                kmem_cache_destroy(btrfs_path_cachep);
-        if (btrfs_free_space_cachep)
-                kmem_cache_destroy(btrfs_free_space_cachep);
+        kmem_cache_destroy(btrfs_inode_cachep);
+        kmem_cache_destroy(btrfs_trans_handle_cachep);
+        kmem_cache_destroy(btrfs_transaction_cachep);
+        kmem_cache_destroy(btrfs_path_cachep);
+        kmem_cache_destroy(btrfs_free_space_cachep);
 }
 
 int btrfs_init_cachep(void)
@@ -9250,7 +9378,6 @@ static int btrfs_getattr(struct vfsmount *mnt,
 
         generic_fillattr(inode, stat);
         stat->dev = BTRFS_I(inode)->root->anon_dev;
-        stat->blksize = PAGE_CACHE_SIZE;
 
         spin_lock(&BTRFS_I(inode)->lock);
         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
@@ -9268,7 +9395,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
         struct inode *new_inode = d_inode(new_dentry);
         struct inode *old_inode = d_inode(old_dentry);
-        struct timespec ctime = CURRENT_TIME;
         u64 index = 0;
         u64 root_objectid;
         int ret;
@@ -9365,9 +9491,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         inode_inc_iversion(old_dir);
         inode_inc_iversion(new_dir);
         inode_inc_iversion(old_inode);
-        old_dir->i_ctime = old_dir->i_mtime = ctime;
-        new_dir->i_ctime = new_dir->i_mtime = ctime;
-        old_inode->i_ctime = ctime;
+        old_dir->i_ctime = old_dir->i_mtime =
+        new_dir->i_ctime = new_dir->i_mtime =
+        old_inode->i_ctime = current_fs_time(old_dir->i_sb);
 
         if (old_dentry->d_parent != new_dentry->d_parent)
                 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
@@ -9392,7 +9518,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
         if (new_inode) {
                 inode_inc_iversion(new_inode);
-                new_inode->i_ctime = CURRENT_TIME;
+                new_inode->i_ctime = current_fs_time(new_inode->i_sb);
                 if (unlikely(btrfs_ino(new_inode) ==
                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
                         root_objectid = BTRFS_I(new_inode)->location.objectid;
@@ -9870,7 +9996,7 @@ next:
         *alloc_hint = ins.objectid + ins.offset;
 
         inode_inc_iversion(inode);
-        inode->i_ctime = CURRENT_TIME;
+        inode->i_ctime = current_fs_time(inode->i_sb);
         BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
             (actual_len > inode->i_size) &&