author     Avantika Mathur <mathur@us.ibm.com>       2006-12-06 23:41:33 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 11:39:48 -0500
commit     7e0289766a0750a56260565bd3b74eb544483d45 (patch)
tree       1982232640bf359c572bd0b905005a79fd9c6030 /fs/ext4
parent     7d1c520bb57e4b5e94ec937c13553dccf473341b (diff)
[PATCH] ext4: if expression format
changes instances of

	if ((lhs = expression)) {

to the preferred coding style

	lhs = expression;
	if (lhs) {

Signed-off-by: Avantika Mathur <mathur@us.ibm.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
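The change is purely stylistic; both forms behave identically at runtime. As a minimal, self-contained sketch of the pattern (parse_header(), old_style() and new_style() are hypothetical stand-ins, not functions from extents.c):

	#include <stdio.h>

	/* hypothetical helper: returns 0 on success, negative on error */
	static int parse_header(int value)
	{
		return (value < 0) ? -1 : 0;
	}

	/* before: the assignment is buried inside the if condition */
	static int old_style(int value)
	{
		int err;

		if ((err = parse_header(value)))
			return err;
		return 0;
	}

	/* after: assign first, then test the result on its own line */
	static int new_style(int value)
	{
		int err;

		err = parse_header(value);
		if (err)
			return err;
		return 0;
	}

	int main(void)
	{
		printf("old: %d, new: %d\n", old_style(-5), new_style(-5));
		return 0;
	}

The second form is preferred in kernel code because the side effect is not hidden inside the condition, which is what every hunk in the diff below does.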
Diffstat (limited to 'fs/ext4')
-rw-r--r--	fs/ext4/extents.c	63
1 file changed, 42 insertions(+), 21 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 994a6e450e06..06ce8e87731e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -186,7 +186,8 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 	depth = path->p_depth;
 
 	/* try to predict block placement */
-	if ((ex = path[depth].p_ext))
+	ex = path[depth].p_ext;
+	if (ex)
 		return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
 
 	/* it looks like index is empty;
@@ -543,7 +544,8 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 	struct ext4_extent_idx *ix;
 	int len, err;
 
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		return err;
 
 	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
@@ -665,7 +667,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh)))
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err)
 		goto cleanup;
 
 	neh = ext_block_hdr(bh);
@@ -702,18 +705,21 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto cleanup;
 	brelse(bh);
 	bh = NULL;
 
 	/* correct old leaf */
 	if (m) {
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 		path[depth].p_hdr->eh_entries =
 		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
-		if ((err = ext4_ext_dirty(handle, inode, path + depth)))
+		err = ext4_ext_dirty(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 
 	}
@@ -736,7 +742,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		}
 		lock_buffer(bh);
 
-		if ((err = ext4_journal_get_create_access(handle, bh)))
+		err = ext4_journal_get_create_access(handle, bh);
+		if (err)
 			goto cleanup;
 
 		neh = ext_block_hdr(bh);
@@ -780,7 +787,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 
-		if ((err = ext4_journal_dirty_metadata(handle, bh)))
+		err = ext4_journal_dirty_metadata(handle, bh);
+		if (err)
 			goto cleanup;
 		brelse(bh);
 		bh = NULL;
@@ -854,7 +862,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh))) {
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err) {
 		unlock_buffer(bh);
 		goto out;
 	}
@@ -874,11 +883,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto out;
 
 	/* create index in new top-level index: num,max,pointer */
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		goto out;
 
 	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
@@ -1070,20 +1081,24 @@ int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
 	 */
 	k = depth - 1;
 	border = path[depth].p_ext->ee_block;
-	if ((err = ext4_ext_get_access(handle, inode, path + k)))
+	err = ext4_ext_get_access(handle, inode, path + k);
+	if (err)
 		return err;
 	path[k].p_idx->ei_block = border;
-	if ((err = ext4_ext_dirty(handle, inode, path + k)))
+	err = ext4_ext_dirty(handle, inode, path + k);
+	if (err)
 		return err;
 
 	while (k--) {
 		/* change all left-side indexes */
 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
 			break;
-		if ((err = ext4_ext_get_access(handle, inode, path + k)))
+		err = ext4_ext_get_access(handle, inode, path + k);
+		if (err)
 			break;
 		path[k].p_idx->ei_block = border;
-		if ((err = ext4_ext_dirty(handle, inode, path + k)))
+		err = ext4_ext_dirty(handle, inode, path + k);
+		if (err)
 			break;
 	}
 
@@ -1142,7 +1157,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 				le16_to_cpu(newext->ee_len),
 				le32_to_cpu(ex->ee_block),
 				le16_to_cpu(ex->ee_len), ext_pblock(ex));
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			return err;
 		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
 					+ le16_to_cpu(newext->ee_len));
@@ -1192,7 +1208,8 @@ repeat:
 has_space:
 	nearex = path[depth].p_ext;
 
-	if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+	err = ext4_ext_get_access(handle, inode, path + depth);
+	if (err)
 		goto cleanup;
 
 	if (!nearex) {
@@ -1486,10 +1503,12 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 	path--;
 	leaf = idx_pblock(path->p_idx);
 	BUG_ON(path->p_hdr->eh_entries == 0);
-	if ((err = ext4_ext_get_access(handle, inode, path)))
+	err = ext4_ext_get_access(handle, inode, path);
+	if (err)
 		return err;
 	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
-	if ((err = ext4_ext_dirty(handle, inode, path)))
+	err = ext4_ext_dirty(handle, inode, path);
+	if (err)
 		return err;
 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
 	bh = sb_find_get_block(inode->i_sb, leaf);
@@ -1930,7 +1949,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
 
 	/* check in cache */
-	if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
+	goal = ext4_ext_in_cache(inode, iblock, &newex);
+	if (goal) {
 		if (goal == EXT4_EXT_CACHE_GAP) {
 			if (!create) {
 				/* block isn't allocated yet and
@@ -1969,7 +1989,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	 */
 	BUG_ON(path[depth].p_ext == NULL && depth != 0);
 
-	if ((ex = path[depth].p_ext)) {
+	ex = path[depth].p_ext;
+	if (ex) {
 		unsigned long ee_block = le32_to_cpu(ex->ee_block);
 		ext4_fsblk_t ee_start = ext_pblock(ex);
 		unsigned short ee_len = le16_to_cpu(ex->ee_len);