Diffstat (limited to 'fs/ext4/inode.c')
 fs/ext4/inode.c | 54 +++++++++++++++++++++++++++++++++---------------------
 1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e274e9c1171f..075763474118 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2178,6 +2178,9 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
  *
  * @handle - handle for journal operations
  * @mpd - extent to map
+ * @give_up_on_write - we set this to true iff there is a fatal error and there
+ *                     is no hope of writing the data. The caller should discard
+ *                     dirty pages to avoid infinite loops.
  *
  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
  * delayed, blocks are allocated, if it is unwritten, we may need to convert
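
[Note: the comment added above documents an out-parameter contract: the mapping step reports an unrecoverable failure through a bool, and its caller stops retrying and discards the remaining dirty pages. A minimal standalone sketch of that pattern follows; all names here are hypothetical stand-ins, not the kernel code itself.]

	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch of the give_up_on_write contract: a fatal error sets the
	 * out-parameter, and the caller drops the remaining work instead
	 * of retrying it forever. Hypothetical names for illustration. */
	static int map_one(int extent, bool *give_up_on_write)
	{
		if (extent < 0) {	/* stand-in for a fatal mapping error */
			*give_up_on_write = true;
			return -1;
		}
		return 0;		/* mapped fine */
	}

	int main(void)
	{
		int extents[] = { 7, 9, -1, 4 };
		bool give_up = false;

		for (int i = 0; i < 4; i++) {
			if (map_one(extents[i], &give_up) < 0 && give_up) {
				/* no hope of writing the data: drop the rest */
				printf("giving up at extent %d\n", i);
				break;
			}
		}
		return 0;
	}
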
@@ -2295,6 +2298,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 	struct address_space *mapping = mpd->inode->i_mapping;
 	struct pagevec pvec;
 	unsigned int nr_pages;
+	long left = mpd->wbc->nr_to_write;
 	pgoff_t index = mpd->first_page;
 	pgoff_t end = mpd->last_page;
 	int tag;
@@ -2330,6 +2334,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			if (page->index > end)
 				goto out;
 
+			/*
+			 * Accumulated enough dirty pages? This doesn't apply
+			 * to WB_SYNC_ALL mode. For integrity sync we have to
+			 * keep going because someone may be concurrently
+			 * dirtying pages, and we might have synced a lot of
+			 * newly appeared dirty pages, but have not synced all
+			 * of the old dirty pages.
+			 */
+			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
+				goto out;
+
 			/* If we can't merge this page, we are done. */
 			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
 				goto out;
@@ -2364,19 +2379,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			if (err <= 0)
 				goto out;
 			err = 0;
-
-			/*
-			 * Accumulated enough dirty pages? This doesn't apply
-			 * to WB_SYNC_ALL mode. For integrity sync we have to
-			 * keep going because someone may be concurrently
-			 * dirtying pages, and we might have synced a lot of
-			 * newly appeared dirty pages, but have not synced all
-			 * of the old dirty pages.
-			 */
-			if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
-			    mpd->next_page - mpd->first_page >=
-			    mpd->wbc->nr_to_write)
-				goto out;
+			left--;
 		}
 		pagevec_release(&pvec);
 		cond_resched();
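
[Note: taken together, the two hunks above replace the old per-extent arithmetic (`mpd->next_page - mpd->first_page >= mpd->wbc->nr_to_write`) with a running budget: `left` starts at `nr_to_write`, is decremented once per page accepted, and is checked before taking another page, so the limit holds across pagevec batches; WB_SYNC_ALL still ignores it for integrity syncs. A small standalone sketch of the budget pattern, with hypothetical names, not kernel code:]

	#include <stdio.h>

	enum sync_mode { SYNC_NONE, SYNC_ALL };	/* stand-ins for WB_SYNC_* */

	/* Write at most nr_to_write pages unless doing an integrity sync,
	 * mirroring the left/nr_to_write budget in the hunks above. */
	static int write_pages(int dirty, long nr_to_write, enum sync_mode mode)
	{
		long left = nr_to_write;	/* running budget */
		int written = 0;

		for (int i = 0; i < dirty; i++) {
			/* Check before taking another page; integrity sync
			 * (SYNC_ALL) must keep going regardless of budget. */
			if (mode == SYNC_NONE && left <= 0)
				break;
			written++;	/* "write" the page */
			left--;		/* charge it against the budget */
		}
		return written;
	}

	int main(void)
	{
		printf("%d\n", write_pages(10, 4, SYNC_NONE));	/* prints 4 */
		printf("%d\n", write_pages(10, 4, SYNC_ALL));	/* prints 10 */
		return 0;
	}
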
@@ -2420,16 +2423,15 @@ static int ext4_writepages(struct address_space *mapping,
 	 * because that could violate lock ordering on umount
 	 */
 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-		return 0;
+		goto out_writepages;
 
 	if (ext4_should_journal_data(inode)) {
 		struct blk_plug plug;
-		int ret;
 
 		blk_start_plug(&plug);
 		ret = write_cache_pages(mapping, wbc, __writepage, mapping);
 		blk_finish_plug(&plug);
-		return ret;
+		goto out_writepages;
 	}
 
 	/*
@@ -2442,8 +2444,10 @@ static int ext4_writepages(struct address_space *mapping,
 	 * *never* be called, so if that ever happens, we would want
 	 * the stack trace.
 	 */
-	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
-		return -EROFS;
+	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
+		ret = -EROFS;
+		goto out_writepages;
+	}
 
 	if (ext4_should_dioread_nolock(inode)) {
 		/*
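
[Note: these two hunks convert early `return` statements into jumps to the function's `out_writepages` exit label, which is also why the branch-local `int ret` declaration disappears: a function-scope `ret` carries the value to the common exit instead. The single-exit idiom keeps cleanup and exit-side bookkeeping in one place; a minimal standalone illustration with hypothetical resources:]

	#include <stdio.h>
	#include <stdlib.h>

	/* Single-exit idiom: every path, success or failure, leaves through
	 * one label, so cleanup and exit bookkeeping live in one place. */
	static int process(const char *path)
	{
		int ret = 0;
		char *buf = NULL;
		FILE *f = fopen(path, "r");

		if (!f) {
			ret = -1;
			goto out;
		}
		buf = malloc(4096);
		if (!buf) {
			ret = -1;
			goto out;
		}
		/* ... real work would go here ... */
	out:
		free(buf);		/* free(NULL) is a no-op */
		if (f)
			fclose(f);
		/* one spot for a tracepoint, log line, or stats update */
		return ret;
	}

	int main(void)
	{
		return process("/etc/hostname") ? 1 : 0;
	}
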
@@ -4690,6 +4694,15 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	generic_fillattr(inode, stat);
 
 	/*
+	 * If there is inline data in the inode, the inode will normally not
+	 * have data blocks allocated (it may have an external xattr block).
+	 * Report at least one sector for such files, so tools like tar, rsync,
+	 * others don't incorrectly think the file is completely sparse.
+	 */
+	if (unlikely(ext4_has_inline_data(inode)))
+		stat->blocks += (stat->size + 511) >> 9;
+
+	/*
 	 * We can't update i_blocks if the block allocation is delayed
 	 * otherwise in the case of system crash before the real block
 	 * allocation is done, we will have i_blocks inconsistent with
@@ -4700,9 +4713,8 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	 * blocks for this file.
 	 */
 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
-				EXT4_I(inode)->i_reserved_data_blocks);
-
-	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
+				   EXT4_I(inode)->i_reserved_data_blocks);
+	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
 	return 0;
 }
 
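
[Note: the ext4_getattr() changes adjust st_blocks in two steps: inline-data files report their byte size rounded up to 512-byte sectors so a non-empty file never shows zero blocks, and reserved delayed-allocation blocks are converted from filesystem blocks to sectors with a shift of `s_blocksize_bits - 9`. The arithmetic can be checked standalone; a 4 KiB block size is assumed for the example:]

	#include <stdio.h>

	int main(void)
	{
		unsigned long long size = 60;		/* inline-data file, 60 bytes */
		unsigned int blocksize_bits = 12;	/* assumed 4 KiB blocks */
		unsigned long long delalloc_blocks = 3;	/* reserved, not yet allocated */
		unsigned long long st_blocks = 0;

		/* Inline data: round bytes up to 512-byte sectors so the file
		 * doesn't look completely sparse. 60 bytes -> 1 sector. */
		st_blocks += (size + 511) >> 9;

		/* Delalloc: filesystem blocks -> sectors. 3 << (12 - 9) = 24. */
		st_blocks += delalloc_blocks << (blocksize_bits - 9);

		printf("st_blocks = %llu\n", st_blocks);	/* 25 */
		return 0;
	}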