author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-06-01 10:49:18 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-06-01 10:49:18 -0400
commit     e984fd486fdbd65d1b4a637f0ef80086eee8fbe6
tree       b2d5027fc0ca5eabca5a47e1bbdcc90133f5cc1e
parent     184b812f7da6726d7ea4ca409c7a8762ff6c6df6
parent     436bd75e47cf804dfe89f805106bb53ff577e99a
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
Define/reserve new ext4 superblock fields
When ext4_ext_insert_extent() fails to insert new blocks
ext4: Extent overlap bugfix
Remove unnecessary exported symbols.
EXT4: Fix whitespace
-rw-r--r--  fs/ext4/balloc.c                 |   6
-rw-r--r--  fs/ext4/extents.c                | 148
-rw-r--r--  fs/ext4/inode.c                  |   4
-rw-r--r--  fs/ext4/namei.c                  |   4
-rw-r--r--  fs/ext4/super.c                  |   2
-rw-r--r--  include/linux/ext4_fs.h          |  33
-rw-r--r--  include/linux/ext4_fs_extents.h  |   5
-rw-r--r--  include/linux/ext4_fs_i.h        |   6
8 files changed, 134 insertions(+), 74 deletions(-)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 8a23483ca8d0..3b64bb16c727 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -30,15 +30,15 @@
 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
 		unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
 {
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 	ext4_grpblk_t offset;

 	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
 	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
 	if (offsetp)
 		*offsetp = offset;
 	if (blockgrpp)
 		*blockgrpp = blocknr;

 }

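The balloc.c hunk above is part of the whitespace cleanup; the helper it touches simply splits an absolute filesystem block number into a block-group number and an offset within that group. As a rough user-space sketch of the same arithmetic (first_data_block and blocks_per_group are made-up stand-ins for the superblock fields, and the two division lines mirror what a single do_div() call does in the kernel):

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical example values, not read from a real superblock. */
        unsigned long long first_data_block = 1;    /* 1 for 1 KiB blocks, 0 otherwise */
        unsigned long blocks_per_group = 8192;
        unsigned long long blocknr = 50000;

        blocknr -= first_data_block;                            /* make it group-relative */
        unsigned long offset = blocknr % blocks_per_group;      /* do_div() return value */
        unsigned long long group = blocknr / blocks_per_group;  /* quotient do_div() leaves behind */

        printf("group %llu, offset %lu\n", group, offset);
        return 0;
    }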
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a0f0c04e79b2..b9ce24129070 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -374,7 +374,7 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int bloc
 			  le32_to_cpu(ix[-1].ei_block));
 		}
 		BUG_ON(k && le32_to_cpu(ix->ei_block)
 				<= le32_to_cpu(ix[-1].ei_block));
 		if (block < le32_to_cpu(ix->ei_block))
 			break;
 		chix = ix;
@@ -423,8 +423,8 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)

 	path->p_ext = l - 1;
 	ext_debug("  -> %d:%llu:%d ",
 			le32_to_cpu(path->p_ext->ee_block),
 			ext_pblock(path->p_ext),
 			le16_to_cpu(path->p_ext->ee_len));

 #ifdef CHECK_BINSEARCH
@@ -435,7 +435,7 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
 	chex = ex = EXT_FIRST_EXTENT(eh);
 	for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
 		BUG_ON(k && le32_to_cpu(ex->ee_block)
 				<= le32_to_cpu(ex[-1].ee_block));
 		if (block < le32_to_cpu(ex->ee_block))
 			break;
 		chex = ex;
@@ -577,7 +577,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

 	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
 			> le16_to_cpu(curp->p_hdr->eh_max));
 	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

 	err = ext4_ext_dirty(handle, inode, curp);
@@ -621,12 +621,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		border = path[depth].p_ext[1].ee_block;
 		ext_debug("leaf will be split."
 				" next leaf starts at %d\n",
 				le32_to_cpu(border));
 	} else {
 		border = newext->ee_block;
 		ext_debug("leaf will be added."
 				" next leaf starts at %d\n",
 				le32_to_cpu(border));
 	}

 	/*
@@ -684,9 +684,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	while (path[depth].p_ext <=
 			EXT_MAX_EXTENT(path[depth].p_hdr)) {
 		ext_debug("move %d:%llu:%d in new leaf %llu\n",
 				le32_to_cpu(path[depth].p_ext->ee_block),
 				ext_pblock(path[depth].p_ext),
 				le16_to_cpu(path[depth].p_ext->ee_len),
 				newblock);
 		/*memmove(ex++, path[depth].p_ext++,
 				sizeof(struct ext4_extent));
@@ -765,9 +765,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 				EXT_LAST_INDEX(path[i].p_hdr));
 		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
 			ext_debug("%d: move %d:%d in new index %llu\n", i,
 					le32_to_cpu(path[i].p_idx->ei_block),
 					idx_pblock(path[i].p_idx),
 					newblock);
 			/*memmove(++fidx, path[i].p_idx++,
 				sizeof(struct ext4_extent_idx));
 			neh->eh_entries++;
@@ -1128,6 +1128,55 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 }

 /*
+ * check if a portion of the "newext" extent overlaps with an
+ * existing extent.
+ *
+ * If there is an overlap discovered, it updates the length of the newext
+ * such that there will be no overlap, and then returns 1.
+ * If there is no overlap found, it returns 0.
+ */
+unsigned int ext4_ext_check_overlap(struct inode *inode,
+				    struct ext4_extent *newext,
+				    struct ext4_ext_path *path)
+{
+	unsigned long b1, b2;
+	unsigned int depth, len1;
+	unsigned int ret = 0;
+
+	b1 = le32_to_cpu(newext->ee_block);
+	len1 = le16_to_cpu(newext->ee_len);
+	depth = ext_depth(inode);
+	if (!path[depth].p_ext)
+		goto out;
+	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
+
+	/*
+	 * get the next allocated block if the extent in the path
+	 * is before the requested block(s)
+	 */
+	if (b2 < b1) {
+		b2 = ext4_ext_next_allocated_block(path);
+		if (b2 == EXT_MAX_BLOCK)
+			goto out;
+	}
+
+	/* check for wrap through zero */
+	if (b1 + len1 < b1) {
+		len1 = EXT_MAX_BLOCK - b1;
+		newext->ee_len = cpu_to_le16(len1);
+		ret = 1;
+	}
+
+	/* check for overlap */
+	if (b1 + len1 > b2) {
+		newext->ee_len = cpu_to_le16(b2 - b1);
+		ret = 1;
+	}
+out:
+	return ret;
+}
+
+/*
  * ext4_ext_insert_extent:
  * tries to merge requsted extent into the existing extent or
  * inserts requested extent as new one into the tree,
@@ -1212,12 +1261,12 @@ has_space:
 	if (!nearex) {
 		/* there is no extent in this leaf, create first one */
 		ext_debug("first extent in the leaf: %d:%llu:%d\n",
 				le32_to_cpu(newext->ee_block),
 				ext_pblock(newext),
 				le16_to_cpu(newext->ee_len));
 		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
 	} else if (le32_to_cpu(newext->ee_block)
 			> le32_to_cpu(nearex->ee_block)) {
 		/* BUG_ON(newext->ee_block == nearex->ee_block); */
 		if (nearex != EXT_LAST_EXTENT(eh)) {
 			len = EXT_MAX_EXTENT(eh) - nearex;
@@ -1225,9 +1274,9 @@ has_space:
 			len = len < 0 ? 0 : len;
 			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
 					"move %d from 0x%p to 0x%p\n",
 					le32_to_cpu(newext->ee_block),
 					ext_pblock(newext),
 					le16_to_cpu(newext->ee_len),
 					nearex, len, nearex + 1, nearex + 2);
 			memmove(nearex + 2, nearex + 1, len);
 		}
@@ -1358,9 +1407,9 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
 			cbex.ec_start = 0;
 			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
 			cbex.ec_len = le16_to_cpu(ex->ee_len);
 			cbex.ec_start = ext_pblock(ex);
 			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}

@@ -1431,16 +1480,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 		len = le32_to_cpu(ex->ee_block) - block;
 		ext_debug("cache gap(before): %lu [%lu:%lu]",
 				(unsigned long) block,
 				(unsigned long) le32_to_cpu(ex->ee_block),
 				(unsigned long) le16_to_cpu(ex->ee_len));
 	} else if (block >= le32_to_cpu(ex->ee_block)
 			+ le16_to_cpu(ex->ee_len)) {
 		lblock = le32_to_cpu(ex->ee_block)
 			+ le16_to_cpu(ex->ee_len);
 		len = ext4_ext_next_allocated_block(path);
 		ext_debug("cache gap(after): [%lu:%lu] %lu",
 				(unsigned long) le32_to_cpu(ex->ee_block),
 				(unsigned long) le16_to_cpu(ex->ee_len),
 				(unsigned long) block);
 		BUG_ON(len == lblock);
 		len = len - lblock;
@@ -1468,9 +1517,9 @@ ext4_ext_in_cache(struct inode *inode, unsigned long block,
 	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
 			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
 		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
 		ex->ee_len = cpu_to_le16(cex->ec_len);
 		ext_debug("%lu cached by %lu:%lu:%llu\n",
 				(unsigned long) block,
 				(unsigned long) cex->ec_block,
@@ -1956,9 +2005,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 			/* we should allocate requested block */
 		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
 			/* block is already allocated */
 			newblock = iblock
 				- le32_to_cpu(newex.ee_block)
 				+ ext_pblock(&newex);
 			/* number of remaining blocks in the extent */
 			allocated = le16_to_cpu(newex.ee_len) -
 					(iblock - le32_to_cpu(newex.ee_block));
@@ -1987,7 +2036,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,

 	ex = path[depth].p_ext;
 	if (ex) {
 		unsigned long ee_block = le32_to_cpu(ex->ee_block);
 		ext4_fsblk_t ee_start = ext_pblock(ex);
 		unsigned short ee_len = le16_to_cpu(ex->ee_len);

@@ -2000,7 +2049,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 		if (ee_len > EXT_MAX_LEN)
 			goto out2;
 		/* if found extent covers block, simply return it */
 		if (iblock >= ee_block && iblock < ee_block + ee_len) {
 			newblock = iblock - ee_block + ee_start;
 			/* number of remaining blocks in the extent */
 			allocated = ee_len - (iblock - ee_block);
@@ -2031,7 +2080,15 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,

 	/* allocate new block */
 	goal = ext4_ext_find_goal(inode, path, iblock);
-	allocated = max_blocks;
+
+	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
+	newex.ee_block = cpu_to_le32(iblock);
+	newex.ee_len = cpu_to_le16(max_blocks);
+	err = ext4_ext_check_overlap(inode, &newex, path);
+	if (err)
+		allocated = le16_to_cpu(newex.ee_len);
+	else
+		allocated = max_blocks;
 	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
 	if (!newblock)
 		goto out2;
@@ -2039,12 +2096,15 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 		  goal, newblock, allocated);

 	/* try to insert new extent into found leaf and return */
-	newex.ee_block = cpu_to_le32(iblock);
 	ext4_ext_store_pblock(&newex, newblock);
 	newex.ee_len = cpu_to_le16(allocated);
 	err = ext4_ext_insert_extent(handle, inode, path, &newex);
-	if (err)
+	if (err) {
+		/* free data blocks we just allocated */
+		ext4_free_blocks(handle, inode, ext_pblock(&newex),
+					le16_to_cpu(newex.ee_len));
 		goto out2;
+	}

 	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
 		EXT4_I(inode)->i_disksize = inode->i_size;
@@ -2157,11 +2217,3 @@ int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)

 	return needed;
 }
-
-EXPORT_SYMBOL(ext4_mark_inode_dirty);
-EXPORT_SYMBOL(ext4_ext_invalidate_cache);
-EXPORT_SYMBOL(ext4_ext_insert_extent);
-EXPORT_SYMBOL(ext4_ext_walk_space);
-EXPORT_SYMBOL(ext4_ext_find_goal);
-EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
-
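The comment added at the top of ext4_ext_check_overlap() above describes the clamping behaviour in words. As a rough user-space sketch of the same idea (not the kernel function itself: check_overlap and MAX_BLOCK are made-up names, plain unsigned longs replace the little-endian on-disk fields, and b2 is assumed to already be the start of the next allocated extent), a request that would run into an existing extent gets its length trimmed rather than rejected:

    #include <stdio.h>

    #define MAX_BLOCK 0xffffffffUL   /* plays the role of EXT_MAX_BLOCK */

    /* Clamp the request [b1, b1 + *len1) so it stops short of the extent at b2. */
    static int check_overlap(unsigned long b1, unsigned long *len1, unsigned long b2)
    {
        int ret = 0;

        if (b1 + *len1 < b1) {          /* wrap through zero */
            *len1 = MAX_BLOCK - b1;
            ret = 1;
        }
        if (b1 + *len1 > b2) {          /* would overlap the existing extent */
            *len1 = b2 - b1;
            ret = 1;
        }
        return ret;
    }

    int main(void)
    {
        /* Ask for logical blocks 96..103 while an extent already starts at 100. */
        unsigned long len = 8;

        if (check_overlap(96, &len, 100))
            printf("request clamped to %lu blocks\n", len);   /* prints 4 */
        return 0;
    }

In the ext4_ext_get_blocks() hunk above, that clamped length is exactly what ends up in allocated before ext4_new_blocks() is called, so the allocator is never asked for more blocks than the extent tree can accept at that position.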
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b34182b6ee4d..0bcf62a750ff 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -255,8 +255,8 @@ static int verify_chain(Indirect *from, Indirect *to)
  *	@inode: inode in question (we are only interested in its superblock)
  *	@i_block: block number to be parsed
  *	@offsets: array to store the offsets in
  *	@boundary: set this non-zero if the referred-to block is likely to be
  *		followed (on disk) by an indirect block.
  *
  *	To store the locations of file's data ext4 uses a data structure common
  *	for UNIX filesystems - tree of pointers anchored in the inode, with
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 4ec57be5baf5..2811e5720ad0 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -46,7 +46,7 @@
  */
 #define NAMEI_RA_CHUNKS  2
 #define NAMEI_RA_BLOCKS  4
 #define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
 #define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))

 static struct buffer_head *ext4_append(handle_t *handle,
@@ -241,7 +241,7 @@ static inline unsigned dx_node_limit (struct inode *dir)
 static void dx_show_index (char * label, struct dx_entry *entries)
 {
 	int i, n = dx_get_count (entries);
 	printk("%s index ", label);
 	for (i = 0; i < n; i++) {
 		printk("%x->%u ", i? dx_get_hash(entries + i) :
 				0, dx_get_block(entries + i));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cb9afdd0e26e..175b68c60968 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1985,7 +1985,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,

 	if (bd_claim(bdev, sb)) {
 		printk(KERN_ERR
 			"EXT4: failed to claim external journal device.\n");
 		blkdev_put(bdev);
 		return NULL;
 	}
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
index 54c576d414c3..de1f9f78625a 100644
--- a/include/linux/ext4_fs.h
+++ b/include/linux/ext4_fs.h
@@ -32,9 +32,9 @@
 /*
  * Define EXT4_RESERVATION to reserve data blocks for expanding files
  */
 #define EXT4_DEFAULT_RESERVE_BLOCKS	8
 /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
 #define EXT4_MAX_RESERVE_BLOCKS		1027
 #define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
 /*
  * Always enable hashed directories
@@ -204,12 +204,12 @@ struct ext4_group_desc

 /* Used to pass group descriptor data when online resize is done */
 struct ext4_new_group_input {
 	__u32 group;		/* Group number for this data */
 	__u64 block_bitmap;	/* Absolute block number of block bitmap */
 	__u64 inode_bitmap;	/* Absolute block number of inode bitmap */
 	__u64 inode_table;	/* Absolute block number of inode table start */
 	__u32 blocks_count;	/* Total number of blocks in this group */
 	__u16 reserved_blocks;	/* Number of reserved blocks in this group */
 	__u16 unused;
 };

@@ -310,7 +310,7 @@ struct ext4_inode {
 			__u8	l_i_frag;	/* Fragment number */
 			__u8	l_i_fsize;	/* Fragment size */
 			__le16	l_i_file_acl_high;
 			__le16	l_i_uid_high;	/* these 2 fields */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
 			__u32	l_i_reserved2;
 		} linux2;
@@ -513,7 +513,14 @@ struct ext4_super_block {
 /*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
 	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
 	__le32	s_free_blocks_count_hi;	/* Free blocks count */
-	__u32	s_reserved[169];	/* Padding to the end of the block */
+	__u16	s_min_extra_isize;	/* All inodes have at least # bytes */
+	__u16	s_want_extra_isize;	/* New inodes should reserve # bytes */
+	__u32	s_flags;		/* Miscellaneous flags */
+	__u16	s_raid_stride;		/* RAID stride */
+	__u16	s_mmp_interval;		/* # seconds to wait in MMP checking */
+	__u64	s_mmp_block;		/* Block for multi-mount protection */
+	__u32	s_raid_stripe_width;	/* blocks on all data disks (N*stride)*/
+	__u32	s_reserved[163];	/* Padding to the end of the block */
 };

 #ifdef __KERNEL__
@@ -780,9 +787,9 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
  * Ok, these declarations are also in <linux/kernel.h> but none of the
  * ext4 source programs needs to include it so they are duplicated here.
  */
 # define NORET_TYPE	/**/
 # define ATTRIB_NORET	__attribute__((noreturn))
 # define NORET_AND	noreturn,

 /* balloc.c */
 extern unsigned int ext4_block_group(struct super_block *sb,
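The superblock hunk above carves the new fields out of the old padding, so the on-disk structure does not change size. A small stand-alone check of that arithmetic (plain C, nothing ext4-specific; the field widths are read straight off the declarations above):

    #include <assert.h>

    int main(void)
    {
        /* s_min_extra_isize + s_want_extra_isize + s_flags + s_raid_stride +
         * s_mmp_interval + s_mmp_block + s_raid_stripe_width, in bytes. */
        int new_fields = 2 + 2 + 4 + 2 + 2 + 8 + 4;

        assert(new_fields == 24);
        /* old padding (169 x 32-bit words) == new fields + new padding (163 words) */
        assert(169 * 4 == new_fields + 163 * 4);
        return 0;
    }

That is why s_reserved shrinks from 169 to 163 entries: the seven new fields take 24 bytes, exactly the six 32-bit words removed from the reserved area.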
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h
index 7eb1d73fc5d1..acfe59740b03 100644
--- a/include/linux/ext4_fs_extents.h
+++ b/include/linux/ext4_fs_extents.h
@@ -151,8 +151,8 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
 	((struct ext4_extent_idx *) (((char *) (__hdr__)) +	\
 				     sizeof(struct ext4_extent_header)))
 #define EXT_HAS_FREE_INDEX(__path__) \
 	(le16_to_cpu((__path__)->p_hdr->eh_entries) \
 				     < le16_to_cpu((__path__)->p_hdr->eh_max))
 #define EXT_LAST_EXTENT(__hdr__) \
 	(EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
 #define EXT_LAST_INDEX(__hdr__) \
@@ -190,6 +190,7 @@ ext4_ext_invalidate_cache(struct inode *inode)

 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *);
+extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
 extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
 extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
 extern struct ext4_ext_path * ext4_ext_find_extent(struct inode *, int, struct ext4_ext_path *);
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
index d5b177e5b395..9de494406995 100644
--- a/include/linux/ext4_fs_i.h
+++ b/include/linux/ext4_fs_i.h
@@ -41,14 +41,14 @@ struct ext4_reserve_window_node {

 struct ext4_block_alloc_info {
 	/* information about reservation window */
 	struct ext4_reserve_window_node rsv_window_node;
 	/*
 	 * was i_next_alloc_block in ext4_inode_info
 	 * is the logical (file-relative) number of the
 	 * most-recently-allocated block in this file.
 	 * We use this for detecting linearly ascending allocation requests.
 	 */
 	__u32 last_alloc_logical_block;
 	/*
 	 * Was i_next_alloc_goal in ext4_inode_info
 	 * is the *physical* companion to i_next_alloc_block.
@@ -56,7 +56,7 @@ struct ext4_block_alloc_info {
 	 * allocated to this file. This give us the goal (target) for the next
 	 * allocation when we detect linearly ascending requests.
 	 */
 	ext4_fsblk_t last_alloc_physical_block;
 };

 #define rsv_start rsv_window._rsv_start