Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--	fs/ext4/inode.c	320
1 file changed, 168 insertions, 152 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c2ca04e67a4f..0d424d7ac02b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -553,7 +553,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 	}
 	if (retval > 0) {
 		int ret;
-		unsigned long long status;
+		unsigned int status;
 
 		if (unlikely(retval != map->m_len)) {
 			ext4_warning(inode->i_sb,
@@ -653,7 +653,7 @@ found:
 
 	if (retval > 0) {
 		int ret;
-		unsigned long long status;
+		unsigned int status;
 
 		if (unlikely(retval != map->m_len)) {
 			ext4_warning(inode->i_sb,
@@ -727,8 +727,12 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
 
 	ret = ext4_map_blocks(handle, inode, &map, flags);
 	if (ret > 0) {
+		ext4_io_end_t *io_end = ext4_inode_aio(inode);
+
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
+			set_buffer_defer_completion(bh);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 		ret = 0;
 	}
@@ -969,7 +973,8 @@ retry_journal:
 		ext4_journal_stop(handle);
 		goto retry_grab;
 	}
-	wait_on_page_writeback(page);
+	/* In case writeback began while the page was unlocked */
+	wait_for_stable_page(page);
 
 	if (ext4_should_dioread_nolock(inode))
 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
@@ -1633,7 +1638,7 @@ add_delayed:
 		set_buffer_delay(bh);
 	} else if (retval > 0) {
 		int ret;
-		unsigned long long status;
+		unsigned int status;
 
 		if (unlikely(retval != map->m_len)) {
 			ext4_warning(inode->i_sb,
@@ -1890,12 +1895,32 @@ static int ext4_writepage(struct page *page,
 	return ret;
 }
 
+static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+{
+	int len;
+	loff_t size = i_size_read(mpd->inode);
+	int err;
+
+	BUG_ON(page->index != mpd->first_page);
+	if (page->index == size >> PAGE_CACHE_SHIFT)
+		len = size & ~PAGE_CACHE_MASK;
+	else
+		len = PAGE_CACHE_SIZE;
+	clear_page_dirty_for_io(page);
+	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
+	if (!err)
+		mpd->wbc->nr_to_write--;
+	mpd->first_page++;
+
+	return err;
+}
+
 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
 
 /*
  * mballoc gives us at most this number of blocks...
  * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
- * The rest of mballoc seems to handle chunks upto full group size.
+ * The rest of mballoc seems to handle chunks up to full group size.
  */
 #define MAX_WRITEPAGES_EXTENT_LEN 2048
 
@@ -1904,82 +1929,94 @@ static int ext4_writepage(struct page *page,
  *
  * @mpd - extent of blocks
  * @lblk - logical number of the block in the file
- * @b_state - b_state of the buffer head added
+ * @bh - buffer head we want to add to the extent
  *
- * the function is used to collect contig. blocks in same state
+ * The function is used to collect contig. blocks in the same state. If the
+ * buffer doesn't require mapping for writeback and we haven't started the
+ * extent of buffers to map yet, the function returns 'true' immediately - the
+ * caller can write the buffer right away. Otherwise the function returns true
+ * if the block has been added to the extent, false if the block couldn't be
+ * added.
  */
-static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
-				  unsigned long b_state)
+static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
+				   struct buffer_head *bh)
 {
 	struct ext4_map_blocks *map = &mpd->map;
 
-	/* Don't go larger than mballoc is willing to allocate */
-	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
-		return 0;
+	/* Buffer that doesn't need mapping for writeback? */
+	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
+	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
+		/* So far no extent to map => we write the buffer right away */
+		if (map->m_len == 0)
+			return true;
+		return false;
+	}
 
 	/* First block in the extent? */
 	if (map->m_len == 0) {
 		map->m_lblk = lblk;
 		map->m_len = 1;
-		map->m_flags = b_state & BH_FLAGS;
-		return 1;
+		map->m_flags = bh->b_state & BH_FLAGS;
+		return true;
 	}
 
+	/* Don't go larger than mballoc is willing to allocate */
+	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
+		return false;
+
 	/* Can we merge the block to our big extent? */
 	if (lblk == map->m_lblk + map->m_len &&
-	    (b_state & BH_FLAGS) == map->m_flags) {
+	    (bh->b_state & BH_FLAGS) == map->m_flags) {
 		map->m_len++;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
-static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
-				    struct buffer_head *head,
-				    struct buffer_head *bh,
-				    ext4_lblk_t lblk)
+/*
+ * mpage_process_page_bufs - submit page buffers for IO or add them to extent
+ *
+ * @mpd - extent of blocks for mapping
+ * @head - the first buffer in the page
+ * @bh - buffer we should start processing from
+ * @lblk - logical number of the block in the file corresponding to @bh
+ *
+ * Walk through page buffers from @bh upto @head (exclusive) and either submit
+ * the page for IO if all buffers in this page were mapped and there's no
+ * accumulated extent of buffers to map or add buffers in the page to the
+ * extent of buffers to map. The function returns 1 if the caller can continue
+ * by processing the next page, 0 if it should stop adding buffers to the
+ * extent to map because we cannot extend it anymore. It can also return value
+ * < 0 in case of error during IO submission.
+ */
+static int mpage_process_page_bufs(struct mpage_da_data *mpd,
+				   struct buffer_head *head,
+				   struct buffer_head *bh,
+				   ext4_lblk_t lblk)
 {
 	struct inode *inode = mpd->inode;
+	int err;
 	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
 							>> inode->i_blkbits;
 
 	do {
 		BUG_ON(buffer_locked(bh));
 
-		if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
-		    (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
-		    lblk >= blocks) {
+		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
 			/* Found extent to map? */
 			if (mpd->map.m_len)
-				return false;
-			if (lblk >= blocks)
-				return true;
-			continue;
+				return 0;
+			/* Everything mapped so far and we hit EOF */
+			break;
 		}
-		if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
-			return false;
 	} while (lblk++, (bh = bh->b_this_page) != head);
-	return true;
-}
-
-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
-{
-	int len;
-	loff_t size = i_size_read(mpd->inode);
-	int err;
-
-	BUG_ON(page->index != mpd->first_page);
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
-	else
-		len = PAGE_CACHE_SIZE;
-	clear_page_dirty_for_io(page);
-	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
-	if (!err)
-		mpd->wbc->nr_to_write--;
-	mpd->first_page++;
-
-	return err;
+	/* So far everything mapped? Submit the page for IO. */
+	if (mpd->map.m_len == 0) {
+		err = mpage_submit_page(mpd, head->b_page);
+		if (err < 0)
+			return err;
+	}
+	return lblk < blocks;
 }
 
 /*
@@ -2003,8 +2040,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 	struct inode *inode = mpd->inode;
 	struct buffer_head *head, *bh;
 	int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
-	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
-							>> inode->i_blkbits;
 	pgoff_t start, end;
 	ext4_lblk_t lblk;
 	sector_t pblock;
@@ -2026,7 +2061,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 
 		if (page->index > end)
 			break;
-		/* Upto 'end' pages must be contiguous */
+		/* Up to 'end' pages must be contiguous */
 		BUG_ON(page->index != start);
 		bh = head = page_buffers(page);
 		do {
@@ -2039,18 +2074,26 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 				 */
 				mpd->map.m_len = 0;
 				mpd->map.m_flags = 0;
-				add_page_bufs_to_extent(mpd, head, bh,
-							lblk);
+				/*
+				 * FIXME: If dioread_nolock supports
+				 * blocksize < pagesize, we need to make
+				 * sure we add size mapped so far to
+				 * io_end->size as the following call
+				 * can submit the page for IO.
+				 */
+				err = mpage_process_page_bufs(mpd, head,
+							      bh, lblk);
 				pagevec_release(&pvec);
-				return 0;
+				if (err > 0)
+					err = 0;
+				return err;
 			}
 			if (buffer_delay(bh)) {
 				clear_buffer_delay(bh);
 				bh->b_blocknr = pblock++;
 			}
 			clear_buffer_unwritten(bh);
-		} while (++lblk < blocks &&
-			 (bh = bh->b_this_page) != head);
+		} while (lblk++, (bh = bh->b_this_page) != head);
 
 		/*
 		 * FIXME: This is going to break if dioread_nolock
@@ -2199,12 +2242,10 @@ static int mpage_map_and_submit_extent(handle_t *handle,
 
 	/* Update on-disk size after IO is submitted */
 	disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
-	if (disksize > i_size_read(inode))
-		disksize = i_size_read(inode);
 	if (disksize > EXT4_I(inode)->i_disksize) {
 		int err2;
 
-		ext4_update_i_disksize(inode, disksize);
+		ext4_wb_update_i_disksize(inode, disksize);
 		err2 = ext4_mark_inode_dirty(handle, inode);
 		if (err2)
 			ext4_error(inode->i_sb,
@@ -2219,7 +2260,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
 /*
  * Calculate the total number of credits to reserve for one writepages
  * iteration. This is called from ext4_writepages(). We map an extent of
- * upto MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
+ * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
  * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
  * bpp - 1 blocks in bpp different extents.
  */
@@ -2319,14 +2360,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			lblk = ((ext4_lblk_t)page->index) <<
 				(PAGE_CACHE_SHIFT - blkbits);
 			head = page_buffers(page);
-			if (!add_page_bufs_to_extent(mpd, head, head, lblk))
+			err = mpage_process_page_bufs(mpd, head, head, lblk);
+			if (err <= 0)
 				goto out;
-			/* So far everything mapped? Submit the page for IO. */
-			if (mpd->map.m_len == 0) {
-				err = mpage_submit_page(mpd, page);
-				if (err < 0)
-					goto out;
-			}
+			err = 0;
 
 			/*
 			 * Accumulated enough dirty pages? This doesn't apply
@@ -2410,7 +2447,7 @@ static int ext4_writepages(struct address_space *mapping,
 
 	if (ext4_should_dioread_nolock(inode)) {
 		/*
-		 * We may need to convert upto one extent per block in
+		 * We may need to convert up to one extent per block in
 		 * the page and we may dirty the inode.
 		 */
 		rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
@@ -2646,7 +2683,7 @@ retry_journal:
 		goto retry_grab;
 	}
 	/* In case writeback began while the page was unlocked */
-	wait_on_page_writeback(page);
+	wait_for_stable_page(page);
 
 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
 	if (ret < 0) {
@@ -2991,19 +3028,13 @@ static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
 }
 
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
-			    ssize_t size, void *private, int ret,
-			    bool is_async)
+			    ssize_t size, void *private)
 {
-	struct inode *inode = file_inode(iocb->ki_filp);
 	ext4_io_end_t *io_end = iocb->private;
 
 	/* if not async direct IO just return */
-	if (!io_end) {
-		inode_dio_done(inode);
-		if (is_async)
-			aio_complete(iocb, ret, 0);
+	if (!io_end)
 		return;
-	}
 
 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3013,11 +3044,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 	iocb->private = NULL;
 	io_end->offset = offset;
 	io_end->size = size;
-	if (is_async) {
-		io_end->iocb = iocb;
-		io_end->result = ret;
-	}
-	ext4_put_io_end_defer(io_end);
+	ext4_put_io_end(io_end);
 }
 
 /*
@@ -3102,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 			ret = -ENOMEM;
 			goto retake_lock;
 		}
-		io_end->flag |= EXT4_IO_END_DIRECT;
 		/*
 		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
 		 */
@@ -3147,13 +3173,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 	if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
 		WARN_ON(iocb->private != io_end);
 		WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
-		WARN_ON(io_end->iocb);
-		/*
-		 * Generic code already did inode_dio_done() so we
-		 * have to clear EXT4_IO_END_DIRECT to not do it for
-		 * the second time.
-		 */
-		io_end->flag = 0;
 		ext4_put_io_end(io_end);
 		iocb->private = NULL;
 	}
@@ -4566,7 +4585,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 		ext4_journal_stop(handle);
 	}
 
-	if (attr->ia_valid & ATTR_SIZE) {
+	if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
+		handle_t *handle;
 
 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -4574,73 +4594,69 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
 				return -EFBIG;
 		}
-	}
-
-	if (S_ISREG(inode->i_mode) &&
-	    attr->ia_valid & ATTR_SIZE &&
-	    (attr->ia_size < inode->i_size)) {
-		handle_t *handle;
-
-		handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
-		if (IS_ERR(handle)) {
-			error = PTR_ERR(handle);
-			goto err_out;
-		}
-		if (ext4_handle_valid(handle)) {
-			error = ext4_orphan_add(handle, inode);
-			orphan = 1;
-		}
-		EXT4_I(inode)->i_disksize = attr->ia_size;
-		rc = ext4_mark_inode_dirty(handle, inode);
-		if (!error)
-			error = rc;
-		ext4_journal_stop(handle);
-
-		if (ext4_should_order_data(inode)) {
-			error = ext4_begin_ordered_truncate(inode,
-							    attr->ia_size);
-			if (error) {
-				/* Do as much error cleanup as possible */
-				handle = ext4_journal_start(inode,
-							    EXT4_HT_INODE, 3);
-				if (IS_ERR(handle)) {
-					ext4_orphan_del(NULL, inode);
-					goto err_out;
-				}
-				ext4_orphan_del(handle, inode);
-				orphan = 0;
-				ext4_journal_stop(handle);
-				goto err_out;
-			}
-		}
-	}
-
-	if (attr->ia_valid & ATTR_SIZE) {
-		if (attr->ia_size != inode->i_size) {
-			loff_t oldsize = inode->i_size;
-
-			i_size_write(inode, attr->ia_size);
-			/*
-			 * Blocks are going to be removed from the inode. Wait
-			 * for dio in flight. Temporarily disable
-			 * dioread_nolock to prevent livelock.
-			 */
-			if (orphan) {
-				if (!ext4_should_journal_data(inode)) {
-					ext4_inode_block_unlocked_dio(inode);
-					inode_dio_wait(inode);
-					ext4_inode_resume_unlocked_dio(inode);
-				} else
-					ext4_wait_for_tail_page_commit(inode);
-			}
-			/*
-			 * Truncate pagecache after we've waited for commit
-			 * in data=journal mode to make pages freeable.
-			 */
-			truncate_pagecache(inode, oldsize, inode->i_size);
-		}
-		ext4_truncate(inode);
-	}
+		if (S_ISREG(inode->i_mode) &&
+		    (attr->ia_size < inode->i_size)) {
+			if (ext4_should_order_data(inode)) {
+				error = ext4_begin_ordered_truncate(inode,
+							attr->ia_size);
+				if (error)
+					goto err_out;
+			}
+			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
+			if (IS_ERR(handle)) {
+				error = PTR_ERR(handle);
+				goto err_out;
+			}
+			if (ext4_handle_valid(handle)) {
+				error = ext4_orphan_add(handle, inode);
+				orphan = 1;
+			}
+			down_write(&EXT4_I(inode)->i_data_sem);
+			EXT4_I(inode)->i_disksize = attr->ia_size;
+			rc = ext4_mark_inode_dirty(handle, inode);
+			if (!error)
+				error = rc;
+			/*
+			 * We have to update i_size under i_data_sem together
+			 * with i_disksize to avoid races with writeback code
+			 * running ext4_wb_update_i_disksize().
+			 */
+			if (!error)
+				i_size_write(inode, attr->ia_size);
+			up_write(&EXT4_I(inode)->i_data_sem);
+			ext4_journal_stop(handle);
+			if (error) {
+				ext4_orphan_del(NULL, inode);
+				goto err_out;
+			}
+		} else
+			i_size_write(inode, attr->ia_size);
+
+		/*
+		 * Blocks are going to be removed from the inode. Wait
+		 * for dio in flight. Temporarily disable
+		 * dioread_nolock to prevent livelock.
+		 */
+		if (orphan) {
+			if (!ext4_should_journal_data(inode)) {
+				ext4_inode_block_unlocked_dio(inode);
+				inode_dio_wait(inode);
+				ext4_inode_resume_unlocked_dio(inode);
+			} else
+				ext4_wait_for_tail_page_commit(inode);
+		}
+		/*
+		 * Truncate pagecache after we've waited for commit
+		 * in data=journal mode to make pages freeable.
+		 */
+		truncate_pagecache(inode, inode->i_size);
+	}
+	/*
+	 * We want to call ext4_truncate() even if attr->ia_size ==
+	 * inode->i_size for cases like truncation of fallocated space
+	 */
+	if (attr->ia_valid & ATTR_SIZE)
+		ext4_truncate(inode);
 
 	if (!rc) {
 		setattr_copy(inode, attr);