author		Yongqiang Yang <xiaoqiangnk@gmail.com>	2011-05-25 17:41:48 -0400
committer	Theodore Ts'o <tytso@mit.edu>		2011-05-25 17:41:48 -0400
commit		1b16da77f90328661fc7e556ad591f9ee6b7ef6a (patch)
tree		b86caf7fd1c4e0f205a69d8bb7ca1ebb883cab63 /fs/ext4
parent		ae24f28d39610a4810c78185cf599a771cf6ee1f (diff)
ext4: teach ext4_ext_split to calculate extents efficiently
Make ext4_ext_split() compute the number of extents to be moved with a
single statement instead of counting them in a loop.
Signed-off-by: Yongqiang Yang <xiaoqiangnk@gmail.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4')
-rw-r--r--	fs/ext4/extents.c | 84
1 file changed, 46 insertions, 38 deletions
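The pattern behind the cleanup: EXT_MAX_EXTENT() and EXT_MAX_INDEX() return a pointer to the last slot the block can hold, so the number of entries sitting after the current position is just a pointer difference. The standalone sketch below is not kernel code; struct toy_extent, count_by_loop() and count_by_arith() are illustrative names. It shows why `last - p++` yields the same count as the removed loop, and why the follow-up memmove() no longer needs a `- m` correction.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for struct ext4_extent; only the counting logic matters. */
struct toy_extent { int ee_block; };

/* Old approach: step past the current entry, then count the rest one by one. */
static int count_by_loop(struct toy_extent **pp, struct toy_extent *last)
{
	int m = 0;

	(*pp)++;			/* start from the next entry */
	while (*pp <= last) {
		(*pp)++;
		m++;
	}
	return m;			/* *pp now points one past 'last' */
}

/* New approach: one statement; *pp ends up at the first entry to move. */
static int count_by_arith(struct toy_extent **pp, struct toy_extent *last)
{
	return (int)(last - (*pp)++);
}

int main(void)
{
	struct toy_extent leaf[8], newleaf[8];
	struct toy_extent *last = &leaf[7];	/* plays the role of EXT_MAX_EXTENT() */
	struct toy_extent *p1 = &leaf[3];	/* plays the role of path[depth].p_ext */
	struct toy_extent *p2 = &leaf[3];

	for (int i = 0; i < 8; i++)
		leaf[i].ee_block = i;

	int m_loop  = count_by_loop(&p1, last);
	int m_arith = count_by_arith(&p2, last);
	assert(m_loop == m_arith);		/* both report 4 trailing entries */

	/* Old code: p1 overran the block, so the copy had to back up by m. */
	memmove(newleaf, p1 - m_loop, sizeof(*newleaf) * m_loop);
	/* New code: p2 already points at the first entry to move. */
	memmove(newleaf, p2, sizeof(*newleaf) * m_arith);

	printf("entries to move: %d, starting at block %d\n",
	       m_arith, p2->ee_block);
	return 0;
}

Applied to the kernel structures this is exactly `m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++` for the leaf case and the analogous EXT_MAX_INDEX() expression for the index case, which is why the `- m` adjustment in both memmove() calls disappears in the patch below.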
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index ae65f247ceda..5199bac7fc62 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -482,9 +482,43 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
 	}
 	ext_debug("\n");
 }
+
+static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
+			ext4_fsblk_t newblock, int level)
+{
+	int depth = ext_depth(inode);
+	struct ext4_extent *ex;
+
+	if (depth != level) {
+		struct ext4_extent_idx *idx;
+		idx = path[level].p_idx;
+		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
+			ext_debug("%d: move %d:%llu in new index %llu\n", level,
+					le32_to_cpu(idx->ei_block),
+					ext4_idx_pblock(idx),
+					newblock);
+			idx++;
+		}
+
+		return;
+	}
+
+	ex = path[depth].p_ext;
+	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
+		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
+				le32_to_cpu(ex->ee_block),
+				ext4_ext_pblock(ex),
+				ext4_ext_is_uninitialized(ex),
+				ext4_ext_get_actual_len(ex),
+				newblock);
+		ex++;
+	}
+}
+
 #else
 #define ext4_ext_show_path(inode, path)
 #define ext4_ext_show_leaf(inode, path)
+#define ext4_ext_show_move(inode, path, newblock, level)
 #endif
 
 void ext4_ext_drop_refs(struct ext4_ext_path *path)
@@ -808,7 +842,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	int depth = ext_depth(inode);
 	struct ext4_extent_header *neh;
 	struct ext4_extent_idx *fidx;
-	struct ext4_extent *ex;
 	int i = at, k, m, a;
 	ext4_fsblk_t newblock, oldblock;
 	__le32 border;
@@ -885,7 +918,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
 	neh->eh_magic = EXT4_EXT_MAGIC;
 	neh->eh_depth = 0;
-	ex = EXT_FIRST_EXTENT(neh);
 
 	/* move remainder of path[depth] to the new leaf */
 	if (unlikely(path[depth].p_hdr->eh_entries !=
@@ -897,25 +929,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		goto cleanup;
 	}
 	/* start copy from next extent */
-	/* TODO: we could do it by single memmove */
-	m = 0;
-	path[depth].p_ext++;
-	while (path[depth].p_ext <=
-			EXT_MAX_EXTENT(path[depth].p_hdr)) {
-		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
-				le32_to_cpu(path[depth].p_ext->ee_block),
-				ext4_ext_pblock(path[depth].p_ext),
-				ext4_ext_is_uninitialized(path[depth].p_ext),
-				ext4_ext_get_actual_len(path[depth].p_ext),
-				newblock);
-		/*memmove(ex++, path[depth].p_ext++,
-			sizeof(struct ext4_extent));
-		neh->eh_entries++;*/
-		path[depth].p_ext++;
-		m++;
-	}
+	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
+	ext4_ext_show_move(inode, path, newblock, depth);
 	if (m) {
-		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
+		struct ext4_extent *ex;
+		ex = EXT_FIRST_EXTENT(neh);
+		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
 		le16_add_cpu(&neh->eh_entries, m);
 	}
 
@@ -977,12 +996,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 
 		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
 				i, newblock, le32_to_cpu(border), oldblock);
-		/* copy indexes */
-		m = 0;
-		path[i].p_idx++;
 
-		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
-				EXT_MAX_INDEX(path[i].p_hdr));
+		/* move remainder of path[i] to the new index block */
 		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
 				EXT_LAST_INDEX(path[i].p_hdr))) {
 			EXT4_ERROR_INODE(inode,
@@ -991,20 +1006,13 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 			err = -EIO;
 			goto cleanup;
 		}
-		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-			ext_debug("%d: move %d:%llu in new index %llu\n", i,
-					le32_to_cpu(path[i].p_idx->ei_block),
-					ext4_idx_pblock(path[i].p_idx),
-					newblock);
-			/*memmove(++fidx, path[i].p_idx++,
-				sizeof(struct ext4_extent_idx));
-			neh->eh_entries++;
-			BUG_ON(neh->eh_entries > neh->eh_max);*/
-			path[i].p_idx++;
-			m++;
-		}
+		/* start copy indexes */
+		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
+		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
+				EXT_MAX_INDEX(path[i].p_hdr));
+		ext4_ext_show_move(inode, path, newblock, i);
 		if (m) {
-			memmove(++fidx, path[i].p_idx - m,
+			memmove(++fidx, path[i].p_idx,
 				sizeof(struct ext4_extent_idx) * m);
 			le16_add_cpu(&neh->eh_entries, m);
 		}