Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r-- | fs/ext4/inode.c | 114
1 file changed, 89 insertions, 25 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f2fa5e8a582..50d0e9c6458 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -639,8 +639,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 	while (target > 0) {
 		count = target;
 		/* allocating blocks for indirect blocks and direct blocks */
-		current_block = ext4_new_meta_blocks(handle, inode,
-						     goal, &count, err);
+		current_block = ext4_new_meta_blocks(handle, inode, goal,
+						     0, &count, err);
 		if (*err)
			goto failed_out;
 
@@ -1930,7 +1930,7 @@ repeat:
 	 * We do still charge estimated metadata to the sb though;
 	 * we cannot afford to run out of free blocks.
 	 */
-	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+	if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) {
 		dquot_release_reservation_block(inode, 1);
 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
 			yield();
@@ -2796,9 +2796,7 @@ static int write_cache_pages_da(struct address_space *mapping,
 				continue;
 			}
 
-			if (PageWriteback(page))
-				wait_on_page_writeback(page);
-
+			wait_on_page_writeback(page);
 			BUG_ON(PageWriteback(page));
 
 			if (mpd->next_page != page->index)
@@ -3513,7 +3511,7 @@ retry:
 			loff_t end = offset + iov_length(iov, nr_segs);
 
 			if (end > isize)
-				vmtruncate(inode, isize);
+				ext4_truncate_failed_write(inode);
 		}
 	}
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3916,9 +3914,30 @@ void ext4_set_aops(struct inode *inode)
 int ext4_block_truncate_page(handle_t *handle,
 		struct address_space *mapping, loff_t from)
 {
+	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned length;
+	unsigned blocksize;
+	struct inode *inode = mapping->host;
+
+	blocksize = inode->i_sb->s_blocksize;
+	length = blocksize - (offset & (blocksize - 1));
+
+	return ext4_block_zero_page_range(handle, mapping, from, length);
+}
+
+/*
+ * ext4_block_zero_page_range() zeros out a mapping of length 'length'
+ * starting from file offset 'from'.  The range to be zeroed must
+ * be contained within one block.  If the specified range exceeds
+ * the end of the block, it will be shortened to the end of the
+ * block that corresponds to 'from'.
+ */
+int ext4_block_zero_page_range(handle_t *handle,
+		struct address_space *mapping, loff_t from, loff_t length)
+{
 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
-	unsigned blocksize, length, pos;
+	unsigned blocksize, max, pos;
 	ext4_lblk_t iblock;
 	struct inode *inode = mapping->host;
 	struct buffer_head *bh;
@@ -3931,7 +3950,15 @@ int ext4_block_truncate_page(handle_t *handle,
 		return -EINVAL;
 
 	blocksize = inode->i_sb->s_blocksize;
-	length = blocksize - (offset & (blocksize - 1));
+	max = blocksize - (offset & (blocksize - 1));
+
+	/*
+	 * correct length if it does not fall between
+	 * 'from' and the end of the block
+	 */
+	if (length > max || length < 0)
+		length = max;
+
 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
 	if (!page_has_buffers(page))
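Side note on the two hunks above: ext4_block_zero_page_range() only ever shortens the caller's request so that it stays inside the block containing 'from'. A small, self-contained userspace sketch of that clamping (the function name and numbers here are made up for illustration and are not part of the patch):

#include <assert.h>

/* hypothetical stand-in for the clamp performed in ext4_block_zero_page_range() */
static long long clamp_to_block(unsigned blocksize, unsigned offset_in_page,
				long long length)
{
	/* bytes left in the block that contains the starting offset */
	long long max = blocksize - (offset_in_page & (blocksize - 1));

	if (length > max || length < 0)
		length = max;
	return length;
}

int main(void)
{
	/* with 4K blocks, a 100-byte request at offset 4000 is cut to 96 bytes */
	assert(clamp_to_block(4096, 4000, 100) == 96);
	/* a request that already fits in one block is left unchanged */
	assert(clamp_to_block(4096, 16, 32) == 32);
	return 0;
}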
@@ -4380,8 +4407,6 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 
 int ext4_can_truncate(struct inode *inode)
 {
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-		return 0;
 	if (S_ISREG(inode->i_mode))
 		return 1;
 	if (S_ISDIR(inode->i_mode))
@@ -4392,6 +4417,31 @@ int ext4_can_truncate(struct inode *inode)
 }
 
 /*
+ * ext4_punch_hole: punches a hole in a file by releasing the blocks
+ * associated with the given offset and length
+ *
+ * @inode: File inode
+ * @offset: The offset where the hole will begin
+ * @len: The length of the hole
+ *
+ * Returns: 0 on success or negative on failure
+ */
+
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	if (!S_ISREG(inode->i_mode))
+		return -ENOTSUPP;
+
+	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+		/* TODO: Add support for non extent hole punching */
+		return -ENOTSUPP;
+	}
+
+	return ext4_ext_punch_hole(file, offset, length);
+}
+
+/*
  * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
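The new ext4_punch_hole() above is only a dispatcher; the extent-based implementation, ext4_ext_punch_hole(), lives outside this file, and the plumbing that exposes it to userspace through fallocate(2)'s FALLOC_FL_PUNCH_HOLE mode is not part of this diff either. A minimal caller might look like this sketch, where the file name and sizes are arbitrary examples:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 1 << 20))		/* 1 MiB file */
		perror("ftruncate");

	/* ask the filesystem to release the blocks backing [64K, 64K + 128K) */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      64 * 1024, 128 * 1024))
		perror("fallocate");	/* fails on non-extent files, see -ENOTSUPP above */

	close(fd);
	return 0;
}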
@@ -4617,7 +4667,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	/*
 	 * Figure out the offset within the block group inode table
 	 */
-	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
+	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
 	inode_offset = ((inode->i_ino - 1) %
 			EXT4_INODES_PER_GROUP(sb));
 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
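For context on the hunk above: the change only swaps a per-call division for the precomputed s_inodes_per_block value; the location arithmetic itself is unchanged. A worked example with made-up filesystem parameters (4 KiB blocks, 256-byte inodes, 8192 inodes per group, inode table starting at block 1030):

#include <stdio.h>

int main(void)
{
	unsigned long ino = 12345;		/* example inode number */
	unsigned inodes_per_group = 8192;
	unsigned inodes_per_block = 4096 / 256;	/* cached as EXT4_SB(sb)->s_inodes_per_block */
	unsigned long long inode_table = 1030;	/* stands in for ext4_inode_table(sb, gdp) */

	unsigned inode_offset = (ino - 1) % inodes_per_group;
	unsigned long long block = inode_table + inode_offset / inodes_per_block;
	unsigned byte_offset = (inode_offset % inodes_per_block) * 256;

	/* prints: inode 12345 -> block 1289, offset 2048 */
	printf("inode %lu -> block %llu, offset %u\n", ino, block, byte_offset);
	return 0;
}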
@@ -5311,8 +5361,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 
 	if (S_ISREG(inode->i_mode) &&
 	    attr->ia_valid & ATTR_SIZE &&
-	    (attr->ia_size < inode->i_size ||
-	     (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
+	    (attr->ia_size < inode->i_size)) {
 		handle_t *handle;
 
 		handle = ext4_journal_start(inode, 3);
@@ -5346,14 +5395,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 				goto err_out;
 			}
 		}
-		/* ext4_truncate will clear the flag */
-		if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
-			ext4_truncate(inode);
 	}
 
-	if ((attr->ia_valid & ATTR_SIZE) &&
-	    attr->ia_size != i_size_read(inode))
-		rc = vmtruncate(inode, attr->ia_size);
+	if (attr->ia_valid & ATTR_SIZE) {
+		if (attr->ia_size != i_size_read(inode)) {
+			truncate_setsize(inode, attr->ia_size);
+			ext4_truncate(inode);
+		} else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
+			ext4_truncate(inode);
+	}
 
 	if (!rc) {
 		setattr_copy(inode, attr);
@@ -5811,15 +5861,19 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto out_unlock;
 	}
 	ret = 0;
-	if (PageMappedToDisk(page))
-		goto out_unlock;
+
+	lock_page(page);
+	wait_on_page_writeback(page);
+	if (PageMappedToDisk(page)) {
+		up_read(&inode->i_alloc_sem);
+		return VM_FAULT_LOCKED;
+	}
 
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
 	else
 		len = PAGE_CACHE_SIZE;
 
-	lock_page(page);
 	/*
 	 * return if we have all the buffers mapped. This avoid
 	 * the need to call write_begin/write_end which does a
@@ -5829,8 +5883,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (page_has_buffers(page)) {
 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					ext4_bh_unmapped)) {
-			unlock_page(page);
-			goto out_unlock;
+			up_read(&inode->i_alloc_sem);
+			return VM_FAULT_LOCKED;
 		}
 	}
 	unlock_page(page);
@@ -5850,6 +5904,16 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret < 0)
 		goto out_unlock;
 	ret = 0;
+
+	/*
+	 * write_begin/end might have created a dirty page and someone
+	 * could wander in and start the IO. Make sure that hasn't
+	 * happened.
+	 */
+	lock_page(page);
+	wait_on_page_writeback(page);
+	up_read(&inode->i_alloc_sem);
+	return VM_FAULT_LOCKED;
 out_unlock:
 	if (ret)
 		ret = VM_FAULT_SIGBUS;
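The ext4_page_mkwrite() hunks above all converge on the same pattern: return VM_FAULT_LOCKED with the page still locked and no longer under writeback, so a page that is about to become writable through a mapping cannot still be in flight to disk. From userspace this path is exercised by the first store to a shared file mapping; a small reproducer sketch (file name and sizes are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("mapped.dat", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 4096)) {
		perror("setup");
		return 1;
	}

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* first write fault on the shared mapping ends up in ->page_mkwrite() */
	memcpy(p, "hello", 5);

	munmap(p, 4096);
	close(fd);
	return 0;
}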