author    Theodore Ts'o <tytso@mit.edu>  2009-06-14 17:45:34 -0400
committer Theodore Ts'o <tytso@mit.edu>  2009-06-14 17:45:34 -0400
commit    de9a55b841132f7ae097f6e31ccebad2d5030cf5 (patch)
tree      9b66c7c68bff923a62d4c2b04114e22a8fa524e1 /fs/ext4/inode.c
parent    0610b6e99939828b77eec020ead0e1f44cba38ca (diff)
ext4: Fix up whitespace issues in fs/ext4/inode.c
This is a pure cleanup patch.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--  fs/ext4/inode.c  200
1 file changed, 103 insertions, 97 deletions
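The diff below is whitespace-only cleanup: continuation lines are re-aligned with the kernel coding style, spacing around operators and function calls is normalized, and a few long comments and declarations are re-wrapped. Two representative fixes, excerpted from the hunks that follow (the loop spacing fix in ext4_alloc_branch and the brelse() call in ext4_truncate):

-        for (i=1; i < num; i++)
+        for (i = 1; i < num; i++)

-        brelse (partial->bh);
+        brelse(partial->bh);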
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5f927f6a1289..8d0908afbd5b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -81,7 +81,7 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
  * If the handle isn't valid we're not journaling so there's nothing to do.
  */
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
                 struct buffer_head *bh, ext4_fsblk_t blocknr)
 {
         int err;

@@ -332,8 +332,8 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
  */

 static int ext4_block_to_path(struct inode *inode,
                               ext4_lblk_t i_block,
                               ext4_lblk_t offsets[4], int *boundary)
 {
         int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
         int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
@@ -365,9 +365,9 @@ static int ext4_block_to_path(struct inode *inode,
                 final = ptrs;
         } else {
                 ext4_warning(inode->i_sb, "ext4_block_to_path",
                              "block %lu > max in inode %lu",
                              i_block + direct_blocks +
                              indirect_blocks + double_blocks, inode->i_ino);
         }
         if (boundary)
                 *boundary = final - 1 - (i_block & (ptrs - 1));
@@ -382,25 +382,25 @@ static int __ext4_check_blockref(const char *function, struct inode *inode,

         while (bref < p+max) {
                 blk = le32_to_cpu(*bref++);
                 if (blk &&
                     unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
                                                     blk, 1))) {
                         ext4_error(inode->i_sb, function,
                                    "invalid block reference %u "
                                    "in inode #%lu", blk, inode->i_ino);
                         return -EIO;
                 }
         }
         return 0;
 }


 #define ext4_check_indirect_blockref(inode, bh)                        \
         __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data, \
                               EXT4_ADDR_PER_BLOCK((inode)->i_sb))

 #define ext4_check_inode_blockref(inode)                               \
         __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,  \
                               EXT4_NDIR_BLOCKS)

 /**
406/** 406/**
@@ -450,7 +450,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
                 bh = sb_getblk(sb, le32_to_cpu(p->key));
                 if (unlikely(!bh))
                         goto failure;

                 if (!bh_uptodate_or_lock(bh)) {
                         if (bh_submit_read(bh) < 0) {
                                 put_bh(bh);
@@ -462,7 +462,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                 goto failure;
                         }
                 }

                 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                 /* Reader: end */
                 if (!p->key)
@@ -555,7 +555,7 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
  * returns it.
  */
 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
                                    Indirect *partial)
 {
         /*
          * XXX need to get goal block from mballoc's data structures
@@ -577,7 +577,7 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
  * direct and indirect blocks.
  */
 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
                                  int blocks_to_boundary)
 {
         unsigned int count = 0;

@@ -613,9 +613,9 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
  * direct blocks
  */
 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                              ext4_lblk_t iblock, ext4_fsblk_t goal,
                              int indirect_blks, int blks,
                              ext4_fsblk_t new_blocks[4], int *err)
 {
         struct ext4_allocation_request ar;
         int target, i;
@@ -686,10 +686,10 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
         }
         if (!*err) {
                 if (target == blks) {
                         /*
                          * save the new block number
                          * for the first direct block
                          */
                         new_blocks[index] = current_block;
                 }
                 blk_allocated += ar.len;
@@ -731,9 +731,9 @@ failed_out:
  * as described above and return 0.
  */
 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                              ext4_lblk_t iblock, int indirect_blks,
                              int *blks, ext4_fsblk_t goal,
                              ext4_lblk_t *offsets, Indirect *branch)
 {
         int blocksize = inode->i_sb->s_blocksize;
         int i, n = 0;
@@ -780,7 +780,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                          * the chain to point to the new allocated
                          * data blocks numbers
                          */
-                        for (i=1; i < num; i++)
+                        for (i = 1; i < num; i++)
                                 *(branch[n].p + i) = cpu_to_le32(++current_block);
                 }
                 BUFFER_TRACE(bh, "marking uptodate");
@@ -823,7 +823,8 @@ failed:
  * chain to new block and return 0.
  */
 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-                              ext4_lblk_t block, Indirect *where, int num, int blks)
+                              ext4_lblk_t block, Indirect *where, int num,
+                              int blks)
 {
         int i;
         int err = 0;
@@ -924,9 +925,9 @@ err_out:
  * blocks.
  */
 static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
                                ext4_lblk_t iblock, unsigned int maxblocks,
                                struct buffer_head *bh_result,
                                int flags)
 {
         int err = -EIO;
         ext4_lblk_t offsets[4];
@@ -942,7 +943,7 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
         J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
         J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
         depth = ext4_block_to_path(inode, iblock, offsets,
                                    &blocks_to_boundary);

         if (depth == 0)
                 goto out;
@@ -990,8 +991,8 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
          * Block out ext4_truncate while we alter the tree
          */
         err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
                                 &count, goal,
                                 offsets + (partial - chain), partial);

         /*
          * The ext4_splice_branch call will free and forget any buffers
@@ -1002,8 +1003,8 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
          */
         if (!err)
                 err = ext4_splice_branch(handle, inode, iblock,
                                          partial, indirect_blks, count);
         else
                 goto cleanup;

         set_buffer_new(bh_result);
@@ -1175,7 +1176,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
         up_read((&EXT4_I(inode)->i_data_sem));

         if (retval > 0 && buffer_mapped(bh)) {
                 int ret = check_block_validity(inode, block,
                                                bh->b_blocknr, retval);
                 if (ret != 0)
                         return ret;
@@ -1257,7 +1258,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,

         up_write((&EXT4_I(inode)->i_data_sem));
         if (retval > 0 && buffer_mapped(bh)) {
                 int ret = check_block_validity(inode, block,
                                                bh->b_blocknr, retval);
                 if (ret != 0)
                         return ret;
@@ -1408,8 +1409,7 @@ static int walk_page_buffers(handle_t *handle,

         for (bh = head, block_start = 0;
              ret == 0 && (bh != head || !block_start);
-             block_start = block_end, bh = next)
-        {
+             block_start = block_end, bh = next) {
                 next = bh->b_this_page;
                 block_end = block_start + blocksize;
                 if (block_end <= from || block_start >= to) {
@@ -1450,7 +1450,7 @@ static int walk_page_buffers(handle_t *handle,
  * write.
  */
 static int do_journal_get_write_access(handle_t *handle,
                                        struct buffer_head *bh)
 {
         if (!buffer_mapped(bh) || buffer_freed(bh))
                 return 0;
@@ -1458,15 +1458,15 @@ static int do_journal_get_write_access(handle_t *handle,
 }

 static int ext4_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
 {
         struct inode *inode = mapping->host;
         int ret, needed_blocks;
         handle_t *handle;
         int retries = 0;
         struct page *page;
         pgoff_t index;
         unsigned from, to;

         trace_ext4_write_begin(inode, pos, len, flags);
@@ -1475,7 +1475,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
          * we allocate blocks but write fails for some reason
          */
         needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
         index = pos >> PAGE_CACHE_SHIFT;
         from = pos & (PAGE_CACHE_SIZE - 1);
         to = from + len;

@@ -1523,7 +1523,7 @@ retry:
                 ext4_journal_stop(handle);
                 if (pos + len > inode->i_size) {
                         vmtruncate(inode, inode->i_size);
                         /*
                          * If vmtruncate failed early the inode might
                          * still be on the orphan list; we need to
                          * make sure the inode is removed from the
@@ -1550,9 +1550,9 @@ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
 }

 static int ext4_generic_write_end(struct file *file,
                                   struct address_space *mapping,
                                   loff_t pos, unsigned len, unsigned copied,
                                   struct page *page, void *fsdata)
 {
         int i_size_changed = 0;
         struct inode *inode = mapping->host;
@@ -1603,9 +1603,9 @@ static int ext4_generic_write_end(struct file *file,
  * buffers are managed internally.
  */
 static int ext4_ordered_write_end(struct file *file,
                                   struct address_space *mapping,
                                   loff_t pos, unsigned len, unsigned copied,
                                   struct page *page, void *fsdata)
 {
         handle_t *handle = ext4_journal_current_handle();
         struct inode *inode = mapping->host;
@@ -1633,7 +1633,7 @@ static int ext4_ordered_write_end(struct file *file,

         if (pos + len > inode->i_size) {
                 vmtruncate(inode, inode->i_size);
                 /*
                  * If vmtruncate failed early the inode might still be
                  * on the orphan list; we need to make sure the inode
                  * is removed from the orphan list in that case.
@@ -1647,9 +1647,9 @@ static int ext4_ordered_write_end(struct file *file,
 }

 static int ext4_writeback_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
 {
         handle_t *handle = ext4_journal_current_handle();
         struct inode *inode = mapping->host;
@@ -1675,7 +1675,7 @@ static int ext4_writeback_write_end(struct file *file,

         if (pos + len > inode->i_size) {
                 vmtruncate(inode, inode->i_size);
                 /*
                  * If vmtruncate failed early the inode might still be
                  * on the orphan list; we need to make sure the inode
                  * is removed from the orphan list in that case.
@@ -1688,9 +1688,9 @@ static int ext4_writeback_write_end(struct file *file,
 }

 static int ext4_journalled_write_end(struct file *file,
                                      struct address_space *mapping,
                                      loff_t pos, unsigned len, unsigned copied,
                                      struct page *page, void *fsdata)
 {
         handle_t *handle = ext4_journal_current_handle();
         struct inode *inode = mapping->host;
@@ -1738,7 +1738,7 @@ static int ext4_journalled_write_end(struct file *file,
                 ret = ret2;
         if (pos + len > inode->i_size) {
                 vmtruncate(inode, inode->i_size);
                 /*
                  * If vmtruncate failed early the inode might still be
                  * on the orphan list; we need to make sure the inode
                  * is removed from the orphan list in that case.
@@ -1845,7 +1845,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 }

 static void ext4_da_page_release_reservation(struct page *page,
                                              unsigned long offset)
 {
         int to_release = 0;
         struct buffer_head *head, *bh;
@@ -2854,8 +2854,8 @@ static int ext4_nonda_switch(struct super_block *sb)
 }

 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
 {
         int ret, retries = 0;
         struct page *page;
@@ -2925,7 +2925,7 @@ out:
  * when write to the end of file but not require block allocation
  */
 static int ext4_da_should_update_i_disksize(struct page *page,
                                             unsigned long offset)
 {
         struct buffer_head *bh;
         struct inode *inode = page->mapping->host;
@@ -2944,9 +2944,9 @@ static int ext4_da_should_update_i_disksize(struct page *page,
 }

 static int ext4_da_write_end(struct file *file,
                              struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned copied,
                              struct page *page, void *fsdata)
 {
         struct inode *inode = mapping->host;
         int ret = 0, ret2;
@@ -3044,7 +3044,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
          * not strictly speaking necessary (and for users of
          * laptop_mode, not even desirable). However, to do otherwise
          * would require replicating code paths in:
          *
          * ext4_da_writepages() ->
          *    write_cache_pages() ---> (via passed in callback function)
          *       __mpage_da_writepage() -->
@@ -3064,7 +3064,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
          * write out the pages, but rather only collect contiguous
          * logical block extents, call the multi-block allocator, and
          * then update the buffer heads with the block allocations.
          *
          * For now, though, we'll cheat by calling filemap_flush(),
          * which will map the blocks, and start the I/O, but not
          * actually wait for the I/O to complete.
@@ -3200,7 +3200,7 @@ static int bput_one(handle_t *handle, struct buffer_head *bh)
  *
  */
 static int __ext4_normal_writepage(struct page *page,
                                    struct writeback_control *wbc)
 {
         struct inode *inode = page->mapping->host;

@@ -3212,7 +3212,7 @@ static int __ext4_normal_writepage(struct page *page,
 }

 static int ext4_normal_writepage(struct page *page,
                                  struct writeback_control *wbc)
 {
         struct inode *inode = page->mapping->host;
         loff_t size = i_size_read(inode);
@@ -3248,7 +3248,7 @@ static int ext4_normal_writepage(struct page *page,
 }

 static int __ext4_journalled_writepage(struct page *page,
                                        struct writeback_control *wbc)
 {
         struct address_space *mapping = page->mapping;
         struct inode *inode = mapping->host;
@@ -3298,7 +3298,7 @@ out:
 }

 static int ext4_journalled_writepage(struct page *page,
                                      struct writeback_control *wbc)
 {
         struct inode *inode = page->mapping->host;
         loff_t size = i_size_read(inode);
@@ -3401,8 +3401,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * VFS code falls back into buffered path in that case so we are safe.
  */
 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
                               const struct iovec *iov, loff_t offset,
                               unsigned long nr_segs)
 {
         struct file *file = iocb->ki_filp;
         struct inode *inode = file->f_mapping->host;
@@ -3722,7 +3722,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
  * (no partially truncated stuff there). */

 static Indirect *ext4_find_shared(struct inode *inode, int depth,
-                                  ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
+                                  ext4_lblk_t offsets[4], Indirect chain[4],
+                                  __le32 *top)
 {
         Indirect *partial, *p;
         int k, err;
@@ -3778,8 +3779,10 @@ no_top:
  * than `count' because there can be holes in there.
  */
 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
-                              struct buffer_head *bh, ext4_fsblk_t block_to_free,
-                              unsigned long count, __le32 *first, __le32 *last)
+                              struct buffer_head *bh,
+                              ext4_fsblk_t block_to_free,
+                              unsigned long count, __le32 *first,
+                              __le32 *last)
 {
         __le32 *p;
         if (try_to_extend_transaction(handle, inode)) {
@@ -3796,10 +3799,11 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
         }

         /*
-         * Any buffers which are on the journal will be in memory. We find
-         * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
-         * on them. We've already detached each block from the file, so
-         * bforget() in jbd2_journal_forget() should be safe.
+         * Any buffers which are on the journal will be in memory. We
+         * find them on the hash table so jbd2_journal_revoke() will
+         * run jbd2_journal_forget() on them. We've already detached
+         * each block from the file, so bforget() in
+         * jbd2_journal_forget() should be safe.
          *
          * AKPM: turn on bforget in jbd2_journal_forget()!!!
          */
@@ -4171,7 +4175,7 @@ void ext4_truncate(struct inode *inode)
                                 (__le32*)partial->bh->b_data+addr_per_block,
                                 (chain+n-1) - partial);
                 BUFFER_TRACE(partial->bh, "call brelse");
-                brelse (partial->bh);
+                brelse(partial->bh);
                 partial--;
         }
 do_indirects:
@@ -4412,8 +4416,9 @@ void ext4_get_inode_flags(struct ext4_inode_info *ei)
         if (flags & S_DIRSYNC)
                 ei->i_flags |= EXT4_DIRSYNC_FL;
 }
+
 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
                                   struct ext4_inode_info *ei)
 {
         blkcnt_t i_blocks ;
         struct inode *inode = &(ei->vfs_inode);
@@ -4528,7 +4533,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                                         EXT4_GOOD_OLD_INODE_SIZE +
                                         ei->i_extra_isize;
                         if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
                                 ei->i_state |= EXT4_STATE_XATTR;
                 }
         } else
                 ei->i_extra_isize = 0;
@@ -4547,7 +4552,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)

         ret = 0;
         if (ei->i_file_acl &&
             ((ei->i_file_acl <
               (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
                EXT4_SB(sb)->s_gdb_count)) ||
              (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
@@ -4562,15 +4567,15 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                    !ext4_inode_is_fast_symlink(inode)))
                         /* Validate extent which is part of inode */
                         ret = ext4_ext_check_inode(inode);
         } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                    (S_ISLNK(inode->i_mode) &&
                     !ext4_inode_is_fast_symlink(inode))) {
                 /* Validate block references which are part of inode */
                 ret = ext4_check_inode_blockref(inode);
         }
         if (ret) {
                 brelse(bh);
                 goto bad_inode;
         }

         if (S_ISREG(inode->i_mode)) {
@@ -4601,7 +4606,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
         } else {
                 brelse(bh);
                 ret = -EIO;
                 ext4_error(inode->i_sb, __func__,
                            "bogus i_mode (%o) for inode=%lu",
                            inode->i_mode, inode->i_ino);
                 goto bad_inode;
@@ -4754,8 +4759,9 @@ static int ext4_do_update_inode(handle_t *handle,
                         cpu_to_le32(new_encode_dev(inode->i_rdev));
                         raw_inode->i_block[2] = 0;
                 }
-        } else for (block = 0; block < EXT4_N_BLOCKS; block++)
-                raw_inode->i_block[block] = ei->i_data[block];
+        } else
+                for (block = 0; block < EXT4_N_BLOCKS; block++)
+                        raw_inode->i_block[block] = ei->i_data[block];

         raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
         if (ei->i_extra_isize) {
@@ -5109,7 +5115,7 @@ int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
  * Give this, we know that the caller already has write access to iloc->bh.
  */
 int ext4_mark_iloc_dirty(handle_t *handle,
                          struct inode *inode, struct ext4_iloc *iloc)
 {
         int err = 0;
