author	Lukas Czerner <lczerner@redhat.com>	2015-04-03 00:09:13 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2015-04-03 00:09:13 -0400
commit	0f2af21aae11972fa924374ddcf52e88347cf5a8 (patch)
tree	753b0c67fecebe1cc2bf15ca775b9ea13312b85c /fs
parent	5a4f3145aa685ddc2caf424763d42d3eaaebbc6f (diff)
ext4: allocate entire range in zero range
Currently there is a bug in the zero range code which causes zero range calls to allocate only the block-aligned portion of the range, while ignoring the rest in some cases.

In some cases, namely if the end of the range is past i_size, we do attempt to preallocate the last nonaligned block. However, this might cause the kernel to BUG() in some carefully designed zero range requests on setups where page size > block size.

Fix this problem by first preallocating the entire range, including the nonaligned edges, and converting the written extents to unwritten in the next step. This approach will also give us the advantage of having the range be as linearly contiguous as possible.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
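As a worked illustration of the alignment arithmetic the fix relies on, here is a minimal userspace sketch. The 1k block size and the sample offset/length are assumptions chosen to produce unaligned edges, and round_down/round_up are re-defined here to mirror the kernel helpers:

/*
 * Minimal userspace sketch: the preallocated region is widened to
 * cover both partial edge blocks. Block size and sample values are
 * assumptions, not taken from the patch.
 */
#include <stdio.h>

/* Mirrors of the kernel helpers; y must be a power of two. */
#define round_down(x, y) ((x) & ~((long long)(y) - 1))
#define round_up(x, y)   round_down((x) + (y) - 1, y)

int main(void)
{
	unsigned int blkbits = 10;		/* assumed: 1024-byte blocks */
	long long offset = 1536, len = 3000;	/* deliberately unaligned */

	long long start_lblk = round_down(offset, 1 << blkbits) >> blkbits;
	long long nblocks = (round_up(offset + len, 1 << blkbits) -
			     round_down(offset, 1 << blkbits)) >> blkbits;

	/* Prints: preallocate 4 blocks starting at block 1, i.e. bytes
	 * 1024..5119, covering both unaligned edges of range 1536..4535. */
	printf("preallocate %lld blocks starting at block %lld\n",
	       nblocks, start_lblk);
	return 0;
}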
Diffstat (limited to 'fs')
-rw-r--r--	fs/ext4/extents.c	31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2e6af88d112f..3cc17aacc4c7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4797,12 +4797,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 	else
 		max_blocks -= lblk;
 
-	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
-		EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
-		EXT4_EX_NOCACHE;
-	if (mode & FALLOC_FL_KEEP_SIZE)
-		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
-
 	mutex_lock(&inode->i_mutex);
 
 	/*
@@ -4819,15 +4813,28 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 		ret = inode_newsize_ok(inode, new_size);
 		if (ret)
 			goto out_mutex;
-		/*
-		 * If we have a partial block after EOF we have to allocate
-		 * the entire block.
-		 */
-		if (partial_end)
-			max_blocks += 1;
 	}
 
+	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+	if (mode & FALLOC_FL_KEEP_SIZE)
+		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+
+	/* Preallocate the range including the unaligned edges */
+	if (partial_begin || partial_end) {
+		ret = ext4_alloc_file_blocks(file,
+				round_down(offset, 1 << blkbits) >> blkbits,
+				(round_up((offset + len), 1 << blkbits) -
+				 round_down(offset, 1 << blkbits)) >> blkbits,
+				new_size, flags, mode);
+		if (ret)
+			goto out_mutex;
+
+	}
+
+	/* Zero range excluding the unaligned edges */
 	if (max_blocks > 0) {
+		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+			  EXT4_EX_NOCACHE);
 
 		/* Now release the pages and zero block aligned part of pages*/
 		truncate_pagecache_range(inode, start, end - 1);
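For context, a sketch of the kind of userspace request that exercises this path: an unaligned FALLOC_FL_ZERO_RANGE call. The mount point, file name, and sizes are illustrative assumptions; on a filesystem with 1k blocks (so page size > block size) this is the pattern the patch fixes:

/*
 * Sketch of an unaligned zero range request; path and sizes are
 * illustrative, not taken from the patch.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Offset 1536, length 3000: both edges land inside blocks on a
	 * 1k-block filesystem, so the range has partial begin and end. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 1536, 3000) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}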