Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--	fs/ext4/extents.c	74
1 file changed, 37 insertions, 37 deletions

diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a0f0c04e79b2..ffe7cb6c4c00 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -374,7 +374,7 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int bloc
374	le32_to_cpu(ix[-1].ei_block));
375	}
376	BUG_ON(k && le32_to_cpu(ix->ei_block)
377	<= le32_to_cpu(ix[-1].ei_block));
378	if (block < le32_to_cpu(ix->ei_block))
379	break;
380	chix = ix;
@@ -423,8 +423,8 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
423
424	path->p_ext = l - 1;
425	ext_debug(" -> %d:%llu:%d ",
426	le32_to_cpu(path->p_ext->ee_block),
427	ext_pblock(path->p_ext),
428	le16_to_cpu(path->p_ext->ee_len));
429
430	#ifdef CHECK_BINSEARCH
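
The ext_debug() call in this hunk prints ext_pblock(path->p_ext), the starting physical block of the found extent. As a rough user-space sketch of what that helper computes (not the kernel code: the real on-disk fields are little-endian and go through le32_to_cpu()/le16_to_cpu(), and the struct below is illustrative only), ext4 keeps a 48-bit physical block number split into a 32-bit low part and a 16-bit high part:

/*
 * Illustrative user-space sketch, not the kernel helper: combining the
 * 32-bit low part and the 16-bit high part of the physical block is
 * what ext_pblock() returns.  Field names below are for illustration,
 * and endianness conversion is omitted.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_extent {
	uint32_t ee_block;	/* first logical block covered */
	uint16_t ee_len;	/* number of blocks covered */
	uint16_t ee_start_hi;	/* high 16 bits of the physical block */
	uint32_t ee_start_lo;	/* low 32 bits of the physical block */
};

static uint64_t demo_pblock(const struct demo_extent *ex)
{
	/* combine hi:lo into one 48-bit physical block number */
	return ((uint64_t)ex->ee_start_hi << 32) | ex->ee_start_lo;
}

int main(void)
{
	struct demo_extent ex = { 100, 8, 0x0001, 0x00000200 };

	/* prints 4294967808, i.e. 0x100000200 */
	printf("%llu\n", (unsigned long long)demo_pblock(&ex));
	return 0;
}
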
@@ -435,7 +435,7 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
435	chex = ex = EXT_FIRST_EXTENT(eh);
436	for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
437	BUG_ON(k && le32_to_cpu(ex->ee_block)
438	<= le32_to_cpu(ex[-1].ee_block));
439	if (block < le32_to_cpu(ex->ee_block))
440	break;
441	chex = ex;
@@ -577,7 +577,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
577	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
578
579	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
580	> le16_to_cpu(curp->p_hdr->eh_max));
581	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
582
583	err = ext4_ext_dirty(handle, inode, curp);
@@ -621,12 +621,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
621	border = path[depth].p_ext[1].ee_block;
622	ext_debug("leaf will be split."
623	" next leaf starts at %d\n",
624	le32_to_cpu(border));
625	} else {
626	border = newext->ee_block;
627	ext_debug("leaf will be added."
628	" next leaf starts at %d\n",
629	le32_to_cpu(border));
630	}
631
632	/*
@@ -684,9 +684,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
684	while (path[depth].p_ext <=
685	EXT_MAX_EXTENT(path[depth].p_hdr)) {
686	ext_debug("move %d:%llu:%d in new leaf %llu\n",
687	le32_to_cpu(path[depth].p_ext->ee_block),
688	ext_pblock(path[depth].p_ext),
689	le16_to_cpu(path[depth].p_ext->ee_len),
690	newblock);
691	/*memmove(ex++, path[depth].p_ext++,
692	sizeof(struct ext4_extent));
@@ -765,9 +765,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
765	EXT_LAST_INDEX(path[i].p_hdr));
766	while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
767	ext_debug("%d: move %d:%d in new index %llu\n", i,
768	le32_to_cpu(path[i].p_idx->ei_block),
769	idx_pblock(path[i].p_idx),
770	newblock);
771	/*memmove(++fidx, path[i].p_idx++,
772	sizeof(struct ext4_extent_idx));
773	neh->eh_entries++;
@@ -1212,12 +1212,12 @@ has_space:
1212	if (!nearex) {
1213	/* there is no extent in this leaf, create first one */
1214	ext_debug("first extent in the leaf: %d:%llu:%d\n",
1215	le32_to_cpu(newext->ee_block),
1216	ext_pblock(newext),
1217	le16_to_cpu(newext->ee_len));
1218	path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1219	} else if (le32_to_cpu(newext->ee_block)
1220	> le32_to_cpu(nearex->ee_block)) {
1221	/* BUG_ON(newext->ee_block == nearex->ee_block); */
1222	if (nearex != EXT_LAST_EXTENT(eh)) {
1223	len = EXT_MAX_EXTENT(eh) - nearex;
@@ -1225,9 +1225,9 @@ has_space:
1225	len = len < 0 ? 0 : len;
1226	ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
1227	"move %d from 0x%p to 0x%p\n",
1228	le32_to_cpu(newext->ee_block),
1229	ext_pblock(newext),
1230	le16_to_cpu(newext->ee_len),
1231	nearex, len, nearex + 1, nearex + 2);
1232	memmove(nearex + 2, nearex + 1, len);
1233	}
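
For context, the memmove() in this hunk shifts every extent to the right of nearex one slot further right so the new extent can be stored at nearex + 1; in the kernel code the byte count len is derived from the remaining slots in the leaf, a few lines above this hunk. A simplified, hedged user-space sketch of the same move (illustrative struct, and the count taken from the last used slot rather than the leaf's capacity):

/*
 * Simplified user-space sketch of the move, not the kernel code: the
 * struct is illustrative and "last" marks the last used slot, whereas
 * the kernel derives the byte count from the room left in the leaf.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_extent {
	uint32_t ee_block;	/* first logical block covered */
	uint16_t ee_len;	/* number of blocks covered */
	uint64_t pblock;	/* physical start block (already combined) */
};

int main(void)
{
	struct demo_extent leaf[4] = {
		{ 0, 4, 1000 }, { 10, 4, 2000 }, { 20, 4, 3000 },
	};
	struct demo_extent newext = { 14, 2, 2500 };
	struct demo_extent *nearex = &leaf[1];	/* closest extent before newext */
	struct demo_extent *last = &leaf[2];	/* last extent currently in use */
	size_t len = (last - nearex) * sizeof(*nearex);

	/* shift everything after nearex one slot right, then store newext */
	memmove(nearex + 2, nearex + 1, len);
	nearex[1] = newext;

	/* prints 0:1000:4, 10:2000:4, 14:2500:2, 20:3000:4 */
	for (int i = 0; i < 4; i++)
		printf("%u:%llu:%u\n", leaf[i].ee_block,
		       (unsigned long long)leaf[i].pblock,
		       (unsigned)leaf[i].ee_len);
	return 0;
}
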
@@ -1358,9 +1358,9 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
1358	cbex.ec_start = 0;
1359	cbex.ec_type = EXT4_EXT_CACHE_GAP;
1360	} else {
1361	cbex.ec_block = le32_to_cpu(ex->ee_block);
1362	cbex.ec_len = le16_to_cpu(ex->ee_len);
1363	cbex.ec_start = ext_pblock(ex);
1364	cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1365	}
1366
@@ -1431,16 +1431,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1431	len = le32_to_cpu(ex->ee_block) - block;
1432	ext_debug("cache gap(before): %lu [%lu:%lu]",
1433	(unsigned long) block,
1434	(unsigned long) le32_to_cpu(ex->ee_block),
1435	(unsigned long) le16_to_cpu(ex->ee_len));
1436	} else if (block >= le32_to_cpu(ex->ee_block)
1437	+ le16_to_cpu(ex->ee_len)) {
1438	lblock = le32_to_cpu(ex->ee_block)
1439	+ le16_to_cpu(ex->ee_len);
1440	len = ext4_ext_next_allocated_block(path);
1441	ext_debug("cache gap(after): [%lu:%lu] %lu",
1442	(unsigned long) le32_to_cpu(ex->ee_block),
1443	(unsigned long) le16_to_cpu(ex->ee_len),
1444	(unsigned long) block);
1445	BUG_ON(len == lblock);
1446	len = len - lblock;
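
To make the gap bookkeeping in this hunk concrete, here is a hedged user-space sketch of the arithmetic only (not the kernel code; next_allocated stands in for ext4_ext_next_allocated_block()): a miss before an extent covering [ee_block, ee_block + ee_len) yields a gap of ee_block - block blocks starting at block, while a miss after it yields a gap starting at ee_block + ee_len that runs up to the next allocated block.

/*
 * Hedged sketch of the gap arithmetic above (user-space C, not the
 * kernel code).  The extent covers logical blocks
 * [ee_block, ee_block + ee_len); next_allocated is assumed for the
 * example and stands in for ext4_ext_next_allocated_block().
 */
#include <stdio.h>

int main(void)
{
	unsigned long ee_block = 100, ee_len = 10;
	unsigned long next_allocated = 150;

	/* lookup missed before the extent: block 90 */
	unsigned long block = 90;
	printf("gap before: start %lu, len %lu\n", block, ee_block - block);
	/* prints: gap before: start 90, len 10 */

	/* lookup missed after the extent: the gap starts right behind it */
	unsigned long lblock = ee_block + ee_len;
	printf("gap after: start %lu, len %lu\n", lblock,
	       next_allocated - lblock);
	/* prints: gap after: start 110, len 40 */
	return 0;
}
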
@@ -1468,9 +1468,9 @@ ext4_ext_in_cache(struct inode *inode, unsigned long block,
1468	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1469	cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1470	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1471	ex->ee_block = cpu_to_le32(cex->ec_block);
1472	ext4_ext_store_pblock(ex, cex->ec_start);
1473	ex->ee_len = cpu_to_le16(cex->ec_len);
1474	ext_debug("%lu cached by %lu:%lu:%llu\n",
1475	(unsigned long) block,
1476	(unsigned long) cex->ec_block,
@@ -1956,9 +1956,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1956	/* we should allocate requested block */
1957	} else if (goal == EXT4_EXT_CACHE_EXTENT) {
1958	/* block is already allocated */
1959	newblock = iblock
1960	- le32_to_cpu(newex.ee_block)
1961	+ ext_pblock(&newex);
1962	/* number of remaining blocks in the extent */
1963	allocated = le16_to_cpu(newex.ee_len) -
1964	(iblock - le32_to_cpu(newex.ee_block));
@@ -1987,7 +1987,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1987
1988	ex = path[depth].p_ext;
1989	if (ex) {
1990	unsigned long ee_block = le32_to_cpu(ex->ee_block);
1991	ext4_fsblk_t ee_start = ext_pblock(ex);
1992	unsigned short ee_len = le16_to_cpu(ex->ee_len);
1993
@@ -2000,7 +2000,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2000	if (ee_len > EXT_MAX_LEN)
2001	goto out2;
2002	/* if found extent covers block, simply return it */
2003	if (iblock >= ee_block && iblock < ee_block + ee_len) {
2004	newblock = iblock - ee_block + ee_start;
2005	/* number of remaining blocks in the extent */
2006	allocated = ee_len - (iblock - ee_block);
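
Both this hunk and the EXT4_EXT_CACHE_EXTENT hunk above use the same mapping arithmetic: when an extent starting at logical block ee_block, backed by physical block ee_start and covering ee_len blocks, contains the requested iblock, the physical block is ee_start + (iblock - ee_block) and allocated counts the blocks left in the extent from that offset. A minimal user-space sketch with made-up numbers:

/*
 * Minimal user-space sketch of the mapping arithmetic, not the kernel
 * code; all numbers are made up for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ee_block = 200;		/* logical start of the extent */
	unsigned long long ee_start = 51200;	/* physical start of the extent */
	unsigned long ee_len = 16;		/* blocks covered by the extent */
	unsigned long iblock = 205;		/* requested logical block */

	if (iblock >= ee_block && iblock < ee_block + ee_len) {
		unsigned long long newblock = iblock - ee_block + ee_start;
		unsigned long allocated = ee_len - (iblock - ee_block);

		/* prints: newblock 51205, allocated 11 */
		printf("newblock %llu, allocated %lu\n", newblock, allocated);
	}
	return 0;
}
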