aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ext4/extents.c
diff options
context:
space:
mode:
authorFrank Mayhar <fmayhar@google.com>2010-03-02 11:46:09 -0500
committerTheodore Ts'o <tytso@mit.edu>2010-03-02 11:46:09 -0500
commit273df556b6ee2065bfe96edab5888d3dc9b108d8 (patch)
tree19c73685fce581e4ed85ff845e0b2fc485cedf9c /fs/ext4/extents.c
parentb7adc1f363e72e9131a582cc2cb00eaf83f51a39 (diff)
ext4: Convert BUG_ON checks to use ext4_error() instead
Convert a bunch of BUG_ONs to emit an ext4_error() message and return EIO. This is a first pass and most notably does _not_ cover mballoc.c, which is a morass of void functions. Signed-off-by: Frank Mayhar <fmayhar@google.com> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--fs/ext4/extents.c189
1 file changed, 147 insertions, 42 deletions
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c7f166ab50eb..4bb69206f175 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -703,7 +703,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
703 } 703 }
704 eh = ext_block_hdr(bh); 704 eh = ext_block_hdr(bh);
705 ppos++; 705 ppos++;
706 BUG_ON(ppos > depth); 706 if (unlikely(ppos > depth)) {
707 put_bh(bh);
708 EXT4_ERROR_INODE(inode,
709 "ppos %d > depth %d", ppos, depth);
710 goto err;
711 }
707 path[ppos].p_bh = bh; 712 path[ppos].p_bh = bh;
708 path[ppos].p_hdr = eh; 713 path[ppos].p_hdr = eh;
709 i--; 714 i--;
@@ -749,7 +754,12 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
749 if (err) 754 if (err)
750 return err; 755 return err;
751 756
752 BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); 757 if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
758 EXT4_ERROR_INODE(inode,
759 "logical %d == ei_block %d!",
760 logical, le32_to_cpu(curp->p_idx->ei_block));
761 return -EIO;
762 }
753 len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; 763 len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
754 if (logical > le32_to_cpu(curp->p_idx->ei_block)) { 764 if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
755 /* insert after */ 765 /* insert after */
@@ -779,9 +789,17 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
779 ext4_idx_store_pblock(ix, ptr); 789 ext4_idx_store_pblock(ix, ptr);
780 le16_add_cpu(&curp->p_hdr->eh_entries, 1); 790 le16_add_cpu(&curp->p_hdr->eh_entries, 1);
781 791
782 BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries) 792 if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
783 > le16_to_cpu(curp->p_hdr->eh_max)); 793 > le16_to_cpu(curp->p_hdr->eh_max))) {
784 BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr)); 794 EXT4_ERROR_INODE(inode,
795 "logical %d == ei_block %d!",
796 logical, le32_to_cpu(curp->p_idx->ei_block));
797 return -EIO;
798 }
799 if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
800 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
801 return -EIO;
802 }
785 803
786 err = ext4_ext_dirty(handle, inode, curp); 804 err = ext4_ext_dirty(handle, inode, curp);
787 ext4_std_error(inode->i_sb, err); 805 ext4_std_error(inode->i_sb, err);
@@ -819,7 +837,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
819 837
820 /* if current leaf will be split, then we should use 838 /* if current leaf will be split, then we should use
821 * border from split point */ 839 * border from split point */
822 BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr)); 840 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
841 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
842 return -EIO;
843 }
823 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 844 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
824 border = path[depth].p_ext[1].ee_block; 845 border = path[depth].p_ext[1].ee_block;
825 ext_debug("leaf will be split." 846 ext_debug("leaf will be split."
@@ -860,7 +881,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
860 881
861 /* initialize new leaf */ 882 /* initialize new leaf */
862 newblock = ablocks[--a]; 883 newblock = ablocks[--a];
863 BUG_ON(newblock == 0); 884 if (unlikely(newblock == 0)) {
885 EXT4_ERROR_INODE(inode, "newblock == 0!");
886 err = -EIO;
887 goto cleanup;
888 }
864 bh = sb_getblk(inode->i_sb, newblock); 889 bh = sb_getblk(inode->i_sb, newblock);
865 if (!bh) { 890 if (!bh) {
866 err = -EIO; 891 err = -EIO;
@@ -880,7 +905,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
880 ex = EXT_FIRST_EXTENT(neh); 905 ex = EXT_FIRST_EXTENT(neh);
881 906
882 /* move remainder of path[depth] to the new leaf */ 907 /* move remainder of path[depth] to the new leaf */
883 BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max); 908 if (unlikely(path[depth].p_hdr->eh_entries !=
909 path[depth].p_hdr->eh_max)) {
910 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
911 path[depth].p_hdr->eh_entries,
912 path[depth].p_hdr->eh_max);
913 err = -EIO;
914 goto cleanup;
915 }
884 /* start copy from next extent */ 916 /* start copy from next extent */
885 /* TODO: we could do it by single memmove */ 917 /* TODO: we could do it by single memmove */
886 m = 0; 918 m = 0;
@@ -927,7 +959,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
927 959
928 /* create intermediate indexes */ 960 /* create intermediate indexes */
929 k = depth - at - 1; 961 k = depth - at - 1;
930 BUG_ON(k < 0); 962 if (unlikely(k < 0)) {
963 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
964 err = -EIO;
965 goto cleanup;
966 }
931 if (k) 967 if (k)
932 ext_debug("create %d intermediate indices\n", k); 968 ext_debug("create %d intermediate indices\n", k);
933 /* insert new index into current index block */ 969 /* insert new index into current index block */
@@ -964,8 +1000,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
964 1000
965 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 1001 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
966 EXT_MAX_INDEX(path[i].p_hdr)); 1002 EXT_MAX_INDEX(path[i].p_hdr));
967 BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) != 1003 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
968 EXT_LAST_INDEX(path[i].p_hdr)); 1004 EXT_LAST_INDEX(path[i].p_hdr))) {
1005 EXT4_ERROR_INODE(inode,
1006 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1007 le32_to_cpu(path[i].p_ext->ee_block));
1008 err = -EIO;
1009 goto cleanup;
1010 }
969 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { 1011 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
970 ext_debug("%d: move %d:%llu in new index %llu\n", i, 1012 ext_debug("%d: move %d:%llu in new index %llu\n", i,
971 le32_to_cpu(path[i].p_idx->ei_block), 1013 le32_to_cpu(path[i].p_idx->ei_block),
@@ -1203,7 +1245,10 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
1203 struct ext4_extent *ex; 1245 struct ext4_extent *ex;
1204 int depth, ee_len; 1246 int depth, ee_len;
1205 1247
1206 BUG_ON(path == NULL); 1248 if (unlikely(path == NULL)) {
1249 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1250 return -EIO;
1251 }
1207 depth = path->p_depth; 1252 depth = path->p_depth;
1208 *phys = 0; 1253 *phys = 0;
1209 1254
@@ -1217,15 +1262,33 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
1217 ex = path[depth].p_ext; 1262 ex = path[depth].p_ext;
1218 ee_len = ext4_ext_get_actual_len(ex); 1263 ee_len = ext4_ext_get_actual_len(ex);
1219 if (*logical < le32_to_cpu(ex->ee_block)) { 1264 if (*logical < le32_to_cpu(ex->ee_block)) {
1220 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); 1265 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1266 EXT4_ERROR_INODE(inode,
1267 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1268 *logical, le32_to_cpu(ex->ee_block));
1269 return -EIO;
1270 }
1221 while (--depth >= 0) { 1271 while (--depth >= 0) {
1222 ix = path[depth].p_idx; 1272 ix = path[depth].p_idx;
1223 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); 1273 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1274 EXT4_ERROR_INODE(inode,
1275 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1276 ix != NULL ? ix->ei_block : 0,
1277 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1278 EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
1279 depth);
1280 return -EIO;
1281 }
1224 } 1282 }
1225 return 0; 1283 return 0;
1226 } 1284 }
1227 1285
1228 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); 1286 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1287 EXT4_ERROR_INODE(inode,
1288 "logical %d < ee_block %d + ee_len %d!",
1289 *logical, le32_to_cpu(ex->ee_block), ee_len);
1290 return -EIO;
1291 }
1229 1292
1230 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1293 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1231 *phys = ext_pblock(ex) + ee_len - 1; 1294 *phys = ext_pblock(ex) + ee_len - 1;
@@ -1251,7 +1314,10 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1251 int depth; /* Note, NOT eh_depth; depth from top of tree */ 1314 int depth; /* Note, NOT eh_depth; depth from top of tree */
1252 int ee_len; 1315 int ee_len;
1253 1316
1254 BUG_ON(path == NULL); 1317 if (unlikely(path == NULL)) {
1318 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1319 return -EIO;
1320 }
1255 depth = path->p_depth; 1321 depth = path->p_depth;
1256 *phys = 0; 1322 *phys = 0;
1257 1323
@@ -1265,17 +1331,32 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1265 ex = path[depth].p_ext; 1331 ex = path[depth].p_ext;
1266 ee_len = ext4_ext_get_actual_len(ex); 1332 ee_len = ext4_ext_get_actual_len(ex);
1267 if (*logical < le32_to_cpu(ex->ee_block)) { 1333 if (*logical < le32_to_cpu(ex->ee_block)) {
1268 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); 1334 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1335 EXT4_ERROR_INODE(inode,
1336 "first_extent(path[%d].p_hdr) != ex",
1337 depth);
1338 return -EIO;
1339 }
1269 while (--depth >= 0) { 1340 while (--depth >= 0) {
1270 ix = path[depth].p_idx; 1341 ix = path[depth].p_idx;
1271 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); 1342 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1343 EXT4_ERROR_INODE(inode,
1344 "ix != EXT_FIRST_INDEX *logical %d!",
1345 *logical);
1346 return -EIO;
1347 }
1272 } 1348 }
1273 *logical = le32_to_cpu(ex->ee_block); 1349 *logical = le32_to_cpu(ex->ee_block);
1274 *phys = ext_pblock(ex); 1350 *phys = ext_pblock(ex);
1275 return 0; 1351 return 0;
1276 } 1352 }
1277 1353
1278 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); 1354 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1355 EXT4_ERROR_INODE(inode,
1356 "logical %d < ee_block %d + ee_len %d!",
1357 *logical, le32_to_cpu(ex->ee_block), ee_len);
1358 return -EIO;
1359 }
1279 1360
1280 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 1361 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1281 /* next allocated block in this leaf */ 1362 /* next allocated block in this leaf */
@@ -1414,8 +1495,12 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1414 1495
1415 eh = path[depth].p_hdr; 1496 eh = path[depth].p_hdr;
1416 ex = path[depth].p_ext; 1497 ex = path[depth].p_ext;
1417 BUG_ON(ex == NULL); 1498
1418 BUG_ON(eh == NULL); 1499 if (unlikely(ex == NULL || eh == NULL)) {
1500 EXT4_ERROR_INODE(inode,
1501 "ex %p == NULL or eh %p == NULL", ex, eh);
1502 return -EIO;
1503 }
1419 1504
1420 if (depth == 0) { 1505 if (depth == 0) {
1421 /* there is no tree at all */ 1506 /* there is no tree at all */
@@ -1613,10 +1698,16 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1613 ext4_lblk_t next; 1698 ext4_lblk_t next;
1614 unsigned uninitialized = 0; 1699 unsigned uninitialized = 0;
1615 1700
1616 BUG_ON(ext4_ext_get_actual_len(newext) == 0); 1701 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1702 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1703 return -EIO;
1704 }
1617 depth = ext_depth(inode); 1705 depth = ext_depth(inode);
1618 ex = path[depth].p_ext; 1706 ex = path[depth].p_ext;
1619 BUG_ON(path[depth].p_hdr == NULL); 1707 if (unlikely(path[depth].p_hdr == NULL)) {
1708 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1709 return -EIO;
1710 }
1620 1711
1621 /* try to insert block into found extent and return */ 1712 /* try to insert block into found extent and return */
1622 if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) 1713 if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
@@ -1788,7 +1879,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1788 } 1879 }
1789 1880
1790 depth = ext_depth(inode); 1881 depth = ext_depth(inode);
1791 BUG_ON(path[depth].p_hdr == NULL); 1882 if (unlikely(path[depth].p_hdr == NULL)) {
1883 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1884 err = -EIO;
1885 break;
1886 }
1792 ex = path[depth].p_ext; 1887 ex = path[depth].p_ext;
1793 next = ext4_ext_next_allocated_block(path); 1888 next = ext4_ext_next_allocated_block(path);
1794 1889
@@ -1839,7 +1934,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1839 cbex.ec_type = EXT4_EXT_CACHE_EXTENT; 1934 cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1840 } 1935 }
1841 1936
1842 BUG_ON(cbex.ec_len == 0); 1937 if (unlikely(cbex.ec_len == 0)) {
1938 EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1939 err = -EIO;
1940 break;
1941 }
1843 err = func(inode, path, &cbex, ex, cbdata); 1942 err = func(inode, path, &cbex, ex, cbdata);
1844 ext4_ext_drop_refs(path); 1943 ext4_ext_drop_refs(path);
1845 1944
@@ -1982,7 +2081,10 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1982 /* free index block */ 2081 /* free index block */
1983 path--; 2082 path--;
1984 leaf = idx_pblock(path->p_idx); 2083 leaf = idx_pblock(path->p_idx);
1985 BUG_ON(path->p_hdr->eh_entries == 0); 2084 if (unlikely(path->p_hdr->eh_entries == 0)) {
2085 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2086 return -EIO;
2087 }
1986 err = ext4_ext_get_access(handle, inode, path); 2088 err = ext4_ext_get_access(handle, inode, path);
1987 if (err) 2089 if (err)
1988 return err; 2090 return err;
@@ -2120,8 +2222,10 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2120 if (!path[depth].p_hdr) 2222 if (!path[depth].p_hdr)
2121 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2223 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2122 eh = path[depth].p_hdr; 2224 eh = path[depth].p_hdr;
2123 BUG_ON(eh == NULL); 2225 if (unlikely(path[depth].p_hdr == NULL)) {
2124 2226 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2227 return -EIO;
2228 }
2125 /* find where to start removing */ 2229 /* find where to start removing */
2126 ex = EXT_LAST_EXTENT(eh); 2230 ex = EXT_LAST_EXTENT(eh);
2127 2231
@@ -3240,10 +3344,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3240 * this situation is possible, though, _during_ tree modification; 3344 * this situation is possible, though, _during_ tree modification;
3241 * this is why assert can't be put in ext4_ext_find_extent() 3345 * this is why assert can't be put in ext4_ext_find_extent()
3242 */ 3346 */
3243 if (path[depth].p_ext == NULL && depth != 0) { 3347 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3244 ext4_error(inode->i_sb, "bad extent address " 3348 EXT4_ERROR_INODE(inode, "bad extent address "
3245 "inode: %lu, iblock: %d, depth: %d", 3349 "iblock: %d, depth: %d pblock %lld",
3246 inode->i_ino, iblock, depth); 3350 iblock, depth, path[depth].p_block);
3247 err = -EIO; 3351 err = -EIO;
3248 goto out2; 3352 goto out2;
3249 } 3353 }
@@ -3371,16 +3475,17 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3371 } 3475 }
3372 3476
3373 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { 3477 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
3374 if (eh->eh_entries) { 3478 if (unlikely(!eh->eh_entries)) {
3375 last_ex = EXT_LAST_EXTENT(eh); 3479 EXT4_ERROR_INODE(inode,
3376 if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) 3480 "eh->eh_entries == 0 ee_block %d",
3377 + ext4_ext_get_actual_len(last_ex)) 3481 ex->ee_block);
3378 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; 3482 err = -EIO;
3379 } else { 3483 goto out2;
3380 WARN_ON(eh->eh_entries == 0); 3484 }
3381 ext4_error(inode->i_sb, __func__, 3485 last_ex = EXT_LAST_EXTENT(eh);
3382 "inode#%lu, eh->eh_entries = 0!", inode->i_ino); 3486 if (iblock + ar.len > le32_to_cpu(last_ex->ee_block)
3383 } 3487 + ext4_ext_get_actual_len(last_ex))
3488 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
3384 } 3489 }
3385 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3490 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3386 if (err) { 3491 if (err) {