diff options
| author | Theodore Ts'o <tytso@mit.edu> | 2010-05-16 19:00:00 -0400 |
|---|---|---|
| committer | Theodore Ts'o <tytso@mit.edu> | 2010-05-16 19:00:00 -0400 |
| commit | e35fd6609b2fee54484d520deccb8f18bf7d38f3 (patch) | |
| tree | 9b786445602819074f599c282b31bead166e8c03 | |
| parent | 8e48dcfbd7c0892b4cfd064d682cc4c95a29df32 (diff) | |
ext4: Add new abstraction ext4_map_blocks() underneath ext4_get_blocks()
Jack up ext4_get_blocks() and add a new function, ext4_map_blocks()
which uses a much smaller structure, struct ext4_map_blocks which is
20 bytes, as opposed to a struct buffer_head, which is nearly 5 times
bigger on an x86_64 machine. By switching things to use
ext4_map_blocks(), we can save stack space, since we avoid
allocating a struct buffer_head on the stack.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
| -rw-r--r-- | fs/ext4/ext4.h | 30 | ||||
| -rw-r--r-- | fs/ext4/extents.c | 237 | ||||
| -rw-r--r-- | fs/ext4/inode.c | 102 |
3 files changed, 199 insertions, 170 deletions
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index d266003cac3e..57fc0e5c0918 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -126,6 +126,29 @@ struct ext4_allocation_request { | |||
| 126 | }; | 126 | }; |
| 127 | 127 | ||
| 128 | /* | 128 | /* |
| 129 | * Logical to physical block mapping, used by ext4_map_blocks() | ||
| 130 | * | ||
| 131 | * This structure is used to pass requests into ext4_map_blocks() as | ||
| 132 | * well as to store the information returned by ext4_map_blocks(). It | ||
| 133 | * takes less room on the stack than a struct buffer_head. | ||
| 134 | */ | ||
| 135 | #define EXT4_MAP_NEW (1 << BH_New) | ||
| 136 | #define EXT4_MAP_MAPPED (1 << BH_Mapped) | ||
| 137 | #define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten) | ||
| 138 | #define EXT4_MAP_BOUNDARY (1 << BH_Boundary) | ||
| 139 | #define EXT4_MAP_UNINIT (1 << BH_Uninit) | ||
| 140 | #define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\ | ||
| 141 | EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\ | ||
| 142 | EXT4_MAP_UNINIT) | ||
| 143 | |||
| 144 | struct ext4_map_blocks { | ||
| 145 | ext4_fsblk_t m_pblk; | ||
| 146 | ext4_lblk_t m_lblk; | ||
| 147 | unsigned int m_len; | ||
| 148 | unsigned int m_flags; | ||
| 149 | }; | ||
| 150 | |||
| 151 | /* | ||
| 129 | * For delayed allocation tracking | 152 | * For delayed allocation tracking |
| 130 | */ | 153 | */ |
| 131 | struct mpage_da_data { | 154 | struct mpage_da_data { |
| @@ -1773,9 +1796,8 @@ extern int ext4_ext_tree_init(handle_t *handle, struct inode *); | |||
| 1773 | extern int ext4_ext_writepage_trans_blocks(struct inode *, int); | 1796 | extern int ext4_ext_writepage_trans_blocks(struct inode *, int); |
| 1774 | extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, | 1797 | extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, |
| 1775 | int chunk); | 1798 | int chunk); |
| 1776 | extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | 1799 | extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
| 1777 | ext4_lblk_t iblock, unsigned int max_blocks, | 1800 | struct ext4_map_blocks *map, int flags); |
| 1778 | struct buffer_head *bh_result, int flags); | ||
| 1779 | extern void ext4_ext_truncate(struct inode *); | 1801 | extern void ext4_ext_truncate(struct inode *); |
| 1780 | extern void ext4_ext_init(struct super_block *); | 1802 | extern void ext4_ext_init(struct super_block *); |
| 1781 | extern void ext4_ext_release(struct super_block *); | 1803 | extern void ext4_ext_release(struct super_block *); |
| @@ -1783,6 +1805,8 @@ extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, | |||
| 1783 | loff_t len); | 1805 | loff_t len); |
| 1784 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | 1806 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, |
| 1785 | ssize_t len); | 1807 | ssize_t len); |
| 1808 | extern int ext4_map_blocks(handle_t *handle, struct inode *inode, | ||
| 1809 | struct ext4_map_blocks *map, int flags); | ||
| 1786 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, | 1810 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, |
| 1787 | sector_t block, unsigned int max_blocks, | 1811 | sector_t block, unsigned int max_blocks, |
| 1788 | struct buffer_head *bh, int flags); | 1812 | struct buffer_head *bh, int flags); |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 8a8f9f0be911..37f938789344 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -2611,7 +2611,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |||
| 2611 | 2611 | ||
| 2612 | #define EXT4_EXT_ZERO_LEN 7 | 2612 | #define EXT4_EXT_ZERO_LEN 7 |
| 2613 | /* | 2613 | /* |
| 2614 | * This function is called by ext4_ext_get_blocks() if someone tries to write | 2614 | * This function is called by ext4_ext_map_blocks() if someone tries to write |
| 2615 | * to an uninitialized extent. It may result in splitting the uninitialized | 2615 | * to an uninitialized extent. It may result in splitting the uninitialized |
| 2616 | * extent into multiple extents (upto three - one initialized and two | 2616 | * extent into multiple extents (upto three - one initialized and two |
| 2617 | * uninitialized). | 2617 | * uninitialized). |
| @@ -2621,10 +2621,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |||
| 2621 | * c> Splits in three extents: Somone is writing in middle of the extent | 2621 | * c> Splits in three extents: Somone is writing in middle of the extent |
| 2622 | */ | 2622 | */ |
| 2623 | static int ext4_ext_convert_to_initialized(handle_t *handle, | 2623 | static int ext4_ext_convert_to_initialized(handle_t *handle, |
| 2624 | struct inode *inode, | 2624 | struct inode *inode, |
| 2625 | struct ext4_ext_path *path, | 2625 | struct ext4_map_blocks *map, |
| 2626 | ext4_lblk_t iblock, | 2626 | struct ext4_ext_path *path) |
| 2627 | unsigned int max_blocks) | ||
| 2628 | { | 2627 | { |
| 2629 | struct ext4_extent *ex, newex, orig_ex; | 2628 | struct ext4_extent *ex, newex, orig_ex; |
| 2630 | struct ext4_extent *ex1 = NULL; | 2629 | struct ext4_extent *ex1 = NULL; |
| @@ -2640,20 +2639,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2640 | 2639 | ||
| 2641 | ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" | 2640 | ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" |
| 2642 | "block %llu, max_blocks %u\n", inode->i_ino, | 2641 | "block %llu, max_blocks %u\n", inode->i_ino, |
| 2643 | (unsigned long long)iblock, max_blocks); | 2642 | (unsigned long long)map->m_lblk, map->m_len); |
| 2644 | 2643 | ||
| 2645 | eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> | 2644 | eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> |
| 2646 | inode->i_sb->s_blocksize_bits; | 2645 | inode->i_sb->s_blocksize_bits; |
| 2647 | if (eof_block < iblock + max_blocks) | 2646 | if (eof_block < map->m_lblk + map->m_len) |
| 2648 | eof_block = iblock + max_blocks; | 2647 | eof_block = map->m_lblk + map->m_len; |
| 2649 | 2648 | ||
| 2650 | depth = ext_depth(inode); | 2649 | depth = ext_depth(inode); |
| 2651 | eh = path[depth].p_hdr; | 2650 | eh = path[depth].p_hdr; |
| 2652 | ex = path[depth].p_ext; | 2651 | ex = path[depth].p_ext; |
| 2653 | ee_block = le32_to_cpu(ex->ee_block); | 2652 | ee_block = le32_to_cpu(ex->ee_block); |
| 2654 | ee_len = ext4_ext_get_actual_len(ex); | 2653 | ee_len = ext4_ext_get_actual_len(ex); |
| 2655 | allocated = ee_len - (iblock - ee_block); | 2654 | allocated = ee_len - (map->m_lblk - ee_block); |
| 2656 | newblock = iblock - ee_block + ext_pblock(ex); | 2655 | newblock = map->m_lblk - ee_block + ext_pblock(ex); |
| 2657 | 2656 | ||
| 2658 | ex2 = ex; | 2657 | ex2 = ex; |
| 2659 | orig_ex.ee_block = ex->ee_block; | 2658 | orig_ex.ee_block = ex->ee_block; |
| @@ -2683,10 +2682,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2683 | return allocated; | 2682 | return allocated; |
| 2684 | } | 2683 | } |
| 2685 | 2684 | ||
| 2686 | /* ex1: ee_block to iblock - 1 : uninitialized */ | 2685 | /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ |
| 2687 | if (iblock > ee_block) { | 2686 | if (map->m_lblk > ee_block) { |
| 2688 | ex1 = ex; | 2687 | ex1 = ex; |
| 2689 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 2688 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
| 2690 | ext4_ext_mark_uninitialized(ex1); | 2689 | ext4_ext_mark_uninitialized(ex1); |
| 2691 | ex2 = &newex; | 2690 | ex2 = &newex; |
| 2692 | } | 2691 | } |
| @@ -2695,15 +2694,15 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2695 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | 2694 | * we insert ex3, if ex1 is NULL. This is to avoid temporary |
| 2696 | * overlap of blocks. | 2695 | * overlap of blocks. |
| 2697 | */ | 2696 | */ |
| 2698 | if (!ex1 && allocated > max_blocks) | 2697 | if (!ex1 && allocated > map->m_len) |
| 2699 | ex2->ee_len = cpu_to_le16(max_blocks); | 2698 | ex2->ee_len = cpu_to_le16(map->m_len); |
| 2700 | /* ex3: to ee_block + ee_len : uninitialised */ | 2699 | /* ex3: to ee_block + ee_len : uninitialised */ |
| 2701 | if (allocated > max_blocks) { | 2700 | if (allocated > map->m_len) { |
| 2702 | unsigned int newdepth; | 2701 | unsigned int newdepth; |
| 2703 | /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ | 2702 | /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ |
| 2704 | if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) { | 2703 | if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) { |
| 2705 | /* | 2704 | /* |
| 2706 | * iblock == ee_block is handled by the zerouout | 2705 | * map->m_lblk == ee_block is handled by the zerouout |
| 2707 | * at the beginning. | 2706 | * at the beginning. |
| 2708 | * Mark first half uninitialized. | 2707 | * Mark first half uninitialized. |
| 2709 | * Mark second half initialized and zero out the | 2708 | * Mark second half initialized and zero out the |
| @@ -2716,7 +2715,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2716 | ext4_ext_dirty(handle, inode, path + depth); | 2715 | ext4_ext_dirty(handle, inode, path + depth); |
| 2717 | 2716 | ||
| 2718 | ex3 = &newex; | 2717 | ex3 = &newex; |
| 2719 | ex3->ee_block = cpu_to_le32(iblock); | 2718 | ex3->ee_block = cpu_to_le32(map->m_lblk); |
| 2720 | ext4_ext_store_pblock(ex3, newblock); | 2719 | ext4_ext_store_pblock(ex3, newblock); |
| 2721 | ex3->ee_len = cpu_to_le16(allocated); | 2720 | ex3->ee_len = cpu_to_le16(allocated); |
| 2722 | err = ext4_ext_insert_extent(handle, inode, path, | 2721 | err = ext4_ext_insert_extent(handle, inode, path, |
| @@ -2729,7 +2728,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2729 | ex->ee_len = orig_ex.ee_len; | 2728 | ex->ee_len = orig_ex.ee_len; |
| 2730 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 2729 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
| 2731 | ext4_ext_dirty(handle, inode, path + depth); | 2730 | ext4_ext_dirty(handle, inode, path + depth); |
| 2732 | /* blocks available from iblock */ | 2731 | /* blocks available from map->m_lblk */ |
| 2733 | return allocated; | 2732 | return allocated; |
| 2734 | 2733 | ||
| 2735 | } else if (err) | 2734 | } else if (err) |
| @@ -2751,8 +2750,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2751 | */ | 2750 | */ |
| 2752 | depth = ext_depth(inode); | 2751 | depth = ext_depth(inode); |
| 2753 | ext4_ext_drop_refs(path); | 2752 | ext4_ext_drop_refs(path); |
| 2754 | path = ext4_ext_find_extent(inode, | 2753 | path = ext4_ext_find_extent(inode, map->m_lblk, |
| 2755 | iblock, path); | 2754 | path); |
| 2756 | if (IS_ERR(path)) { | 2755 | if (IS_ERR(path)) { |
| 2757 | err = PTR_ERR(path); | 2756 | err = PTR_ERR(path); |
| 2758 | return err; | 2757 | return err; |
| @@ -2772,9 +2771,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2772 | return allocated; | 2771 | return allocated; |
| 2773 | } | 2772 | } |
| 2774 | ex3 = &newex; | 2773 | ex3 = &newex; |
| 2775 | ex3->ee_block = cpu_to_le32(iblock + max_blocks); | 2774 | ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); |
| 2776 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | 2775 | ext4_ext_store_pblock(ex3, newblock + map->m_len); |
| 2777 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | 2776 | ex3->ee_len = cpu_to_le16(allocated - map->m_len); |
| 2778 | ext4_ext_mark_uninitialized(ex3); | 2777 | ext4_ext_mark_uninitialized(ex3); |
| 2779 | err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); | 2778 | err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); |
| 2780 | if (err == -ENOSPC && may_zeroout) { | 2779 | if (err == -ENOSPC && may_zeroout) { |
| @@ -2787,7 +2786,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2787 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 2786 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
| 2788 | ext4_ext_dirty(handle, inode, path + depth); | 2787 | ext4_ext_dirty(handle, inode, path + depth); |
| 2789 | /* zeroed the full extent */ | 2788 | /* zeroed the full extent */ |
| 2790 | /* blocks available from iblock */ | 2789 | /* blocks available from map->m_lblk */ |
| 2791 | return allocated; | 2790 | return allocated; |
| 2792 | 2791 | ||
| 2793 | } else if (err) | 2792 | } else if (err) |
| @@ -2807,7 +2806,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2807 | 2806 | ||
| 2808 | depth = newdepth; | 2807 | depth = newdepth; |
| 2809 | ext4_ext_drop_refs(path); | 2808 | ext4_ext_drop_refs(path); |
| 2810 | path = ext4_ext_find_extent(inode, iblock, path); | 2809 | path = ext4_ext_find_extent(inode, map->m_lblk, path); |
| 2811 | if (IS_ERR(path)) { | 2810 | if (IS_ERR(path)) { |
| 2812 | err = PTR_ERR(path); | 2811 | err = PTR_ERR(path); |
| 2813 | goto out; | 2812 | goto out; |
| @@ -2821,14 +2820,14 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2821 | if (err) | 2820 | if (err) |
| 2822 | goto out; | 2821 | goto out; |
| 2823 | 2822 | ||
| 2824 | allocated = max_blocks; | 2823 | allocated = map->m_len; |
| 2825 | 2824 | ||
| 2826 | /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying | 2825 | /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying |
| 2827 | * to insert a extent in the middle zerout directly | 2826 | * to insert a extent in the middle zerout directly |
| 2828 | * otherwise give the extent a chance to merge to left | 2827 | * otherwise give the extent a chance to merge to left |
| 2829 | */ | 2828 | */ |
| 2830 | if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && | 2829 | if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && |
| 2831 | iblock != ee_block && may_zeroout) { | 2830 | map->m_lblk != ee_block && may_zeroout) { |
| 2832 | err = ext4_ext_zeroout(inode, &orig_ex); | 2831 | err = ext4_ext_zeroout(inode, &orig_ex); |
| 2833 | if (err) | 2832 | if (err) |
| 2834 | goto fix_extent_len; | 2833 | goto fix_extent_len; |
| @@ -2838,7 +2837,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2838 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 2837 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
| 2839 | ext4_ext_dirty(handle, inode, path + depth); | 2838 | ext4_ext_dirty(handle, inode, path + depth); |
| 2840 | /* zero out the first half */ | 2839 | /* zero out the first half */ |
| 2841 | /* blocks available from iblock */ | 2840 | /* blocks available from map->m_lblk */ |
| 2842 | return allocated; | 2841 | return allocated; |
| 2843 | } | 2842 | } |
| 2844 | } | 2843 | } |
| @@ -2849,12 +2848,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
| 2849 | */ | 2848 | */ |
| 2850 | if (ex1 && ex1 != ex) { | 2849 | if (ex1 && ex1 != ex) { |
| 2851 | ex1 = ex; | 2850 | ex1 = ex; |
| 2852 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 2851 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
| 2853 | ext4_ext_mark_uninitialized(ex1); | 2852 | ext4_ext_mark_uninitialized(ex1); |
| 2854 | ex2 = &newex; | 2853 | ex2 = &newex; |
| 2855 | } | 2854 | } |
| 2856 | /* ex2: iblock to iblock + maxblocks-1 : initialised */ | 2855 | /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */ |
| 2857 | ex2->ee_block = cpu_to_le32(iblock); | 2856 | ex2->ee_block = cpu_to_le32(map->m_lblk); |
| 2858 | ext4_ext_store_pblock(ex2, newblock); | 2857 | ext4_ext_store_pblock(ex2, newblock); |
| 2859 | ex2->ee_len = cpu_to_le16(allocated); | 2858 | ex2->ee_len = cpu_to_le16(allocated); |
| 2860 | if (ex2 != ex) | 2859 | if (ex2 != ex) |
| @@ -2924,7 +2923,7 @@ fix_extent_len: | |||
| 2924 | } | 2923 | } |
| 2925 | 2924 | ||
| 2926 | /* | 2925 | /* |
| 2927 | * This function is called by ext4_ext_get_blocks() from | 2926 | * This function is called by ext4_ext_map_blocks() from |
| 2928 | * ext4_get_blocks_dio_write() when DIO to write | 2927 | * ext4_get_blocks_dio_write() when DIO to write |
| 2929 | * to an uninitialized extent. | 2928 | * to an uninitialized extent. |
| 2930 | * | 2929 | * |
| @@ -2947,9 +2946,8 @@ fix_extent_len: | |||
| 2947 | */ | 2946 | */ |
| 2948 | static int ext4_split_unwritten_extents(handle_t *handle, | 2947 | static int ext4_split_unwritten_extents(handle_t *handle, |
| 2949 | struct inode *inode, | 2948 | struct inode *inode, |
| 2949 | struct ext4_map_blocks *map, | ||
| 2950 | struct ext4_ext_path *path, | 2950 | struct ext4_ext_path *path, |
| 2951 | ext4_lblk_t iblock, | ||
| 2952 | unsigned int max_blocks, | ||
| 2953 | int flags) | 2951 | int flags) |
| 2954 | { | 2952 | { |
| 2955 | struct ext4_extent *ex, newex, orig_ex; | 2953 | struct ext4_extent *ex, newex, orig_ex; |
| @@ -2965,20 +2963,20 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
| 2965 | 2963 | ||
| 2966 | ext_debug("ext4_split_unwritten_extents: inode %lu, logical" | 2964 | ext_debug("ext4_split_unwritten_extents: inode %lu, logical" |
| 2967 | "block %llu, max_blocks %u\n", inode->i_ino, | 2965 | "block %llu, max_blocks %u\n", inode->i_ino, |
| 2968 | (unsigned long long)iblock, max_blocks); | 2966 | (unsigned long long)map->m_lblk, map->m_len); |
| 2969 | 2967 | ||
| 2970 | eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> | 2968 | eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> |
| 2971 | inode->i_sb->s_blocksize_bits; | 2969 | inode->i_sb->s_blocksize_bits; |
| 2972 | if (eof_block < iblock + max_blocks) | 2970 | if (eof_block < map->m_lblk + map->m_len) |
| 2973 | eof_block = iblock + max_blocks; | 2971 | eof_block = map->m_lblk + map->m_len; |
| 2974 | 2972 | ||
| 2975 | depth = ext_depth(inode); | 2973 | depth = ext_depth(inode); |
| 2976 | eh = path[depth].p_hdr; | 2974 | eh = path[depth].p_hdr; |
| 2977 | ex = path[depth].p_ext; | 2975 | ex = path[depth].p_ext; |
| 2978 | ee_block = le32_to_cpu(ex->ee_block); | 2976 | ee_block = le32_to_cpu(ex->ee_block); |
| 2979 | ee_len = ext4_ext_get_actual_len(ex); | 2977 | ee_len = ext4_ext_get_actual_len(ex); |
| 2980 | allocated = ee_len - (iblock - ee_block); | 2978 | allocated = ee_len - (map->m_lblk - ee_block); |
| 2981 | newblock = iblock - ee_block + ext_pblock(ex); | 2979 | newblock = map->m_lblk - ee_block + ext_pblock(ex); |
| 2982 | 2980 | ||
| 2983 | ex2 = ex; | 2981 | ex2 = ex; |
| 2984 | orig_ex.ee_block = ex->ee_block; | 2982 | orig_ex.ee_block = ex->ee_block; |
| @@ -2996,16 +2994,16 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
| 2996 | * block where the write begins, and the write completely | 2994 | * block where the write begins, and the write completely |
| 2997 | * covers the extent, then we don't need to split it. | 2995 | * covers the extent, then we don't need to split it. |
| 2998 | */ | 2996 | */ |
| 2999 | if ((iblock == ee_block) && (allocated <= max_blocks)) | 2997 | if ((map->m_lblk == ee_block) && (allocated <= map->m_len)) |
| 3000 | return allocated; | 2998 | return allocated; |
| 3001 | 2999 | ||
| 3002 | err = ext4_ext_get_access(handle, inode, path + depth); | 3000 | err = ext4_ext_get_access(handle, inode, path + depth); |
| 3003 | if (err) | 3001 | if (err) |
| 3004 | goto out; | 3002 | goto out; |
| 3005 | /* ex1: ee_block to iblock - 1 : uninitialized */ | 3003 | /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ |
| 3006 | if (iblock > ee_block) { | 3004 | if (map->m_lblk > ee_block) { |
| 3007 | ex1 = ex; | 3005 | ex1 = ex; |
| 3008 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 3006 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
| 3009 | ext4_ext_mark_uninitialized(ex1); | 3007 | ext4_ext_mark_uninitialized(ex1); |
| 3010 | ex2 = &newex; | 3008 | ex2 = &newex; |
| 3011 | } | 3009 | } |
| @@ -3014,15 +3012,15 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
| 3014 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | 3012 | * we insert ex3, if ex1 is NULL. This is to avoid temporary |
| 3015 | * overlap of blocks. | 3013 | * overlap of blocks. |
| 3016 | */ | 3014 | */ |
| 3017 | if (!ex1 && allocated > max_blocks) | 3015 | if (!ex1 && allocated > map->m_len) |
| 3018 | ex2->ee_len = cpu_to_le16(max_blocks); | 3016 | ex2->ee_len = cpu_to_le16(map->m_len); |
| 3019 | /* ex3: to ee_block + ee_len : uninitialised */ | 3017 | /* ex3: to ee_block + ee_len : uninitialised */ |
| 3020 | if (allocated > max_blocks) { | 3018 | if (allocated > map->m_len) { |
| 3021 | unsigned int newdepth; | 3019 | unsigned int newdepth; |
| 3022 | ex3 = &newex; | 3020 | ex3 = &newex; |
| 3023 | ex3->ee_block = cpu_to_le32(iblock + max_blocks); | 3021 | ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); |
| 3024 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | 3022 | ext4_ext_store_pblock(ex3, newblock + map->m_len); |
| 3025 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | 3023 | ex3->ee_len = cpu_to_le16(allocated - map->m_len); |
| 3026 | ext4_ext_mark_uninitialized(ex3); | 3024 | ext4_ext_mark_uninitialized(ex3); |
| 3027 | err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); | 3025 | err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); |
| 3028 | if (err == -ENOSPC && may_zeroout) { | 3026 | if (err == -ENOSPC && may_zeroout) { |
| @@ -3035,7 +3033,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
| 3035 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 3033 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
| 3036 | ext4_ext_dirty(handle, inode, path + depth); | 3034 | ext4_ext_dirty(handle, inode, path + depth); |
| 3037 | /* zeroed the full extent */ | 3035 | /* zeroed the full extent */ |
| 3038 | /* blocks available from iblock */ | 3036 | /* blocks available from map->m_lblk */ |
| 3039 | return allocated; | 3037 | return allocated; |
| 3040 | 3038 | ||
| 3041 | } else if (err) | 3039 | } else if (err) |
| @@ -3055,7 +3053,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
| 3055 | 3053 | ||
| 3056 | depth = newdepth; | 3054 | depth = newdepth; |
| 3057 | ext4_ext_drop_refs(path); | 3055 | ext4_ext_drop_refs(path); |
| 3058 | path = ext4_ext_find_extent(inode, iblock, path); | 3056 | path = ext4_ext_find_extent(inode, map->m_lblk, path); |
| 3059 | if (IS_ERR(path)) { | 3057 | if (IS_ERR(path)) { |
| 3060 | err = PTR_ERR(path); | 3058 | err = PTR_ERR(path); |
| 3061 | goto out; | 3059 | goto out; |
| @@ -3069,7 +3067,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
| 3069 | if (err) | 3067 | if (err) |
| 3070 | goto out; | 3068 | goto out; |
| 3071 | 3069 | ||
| 3072 | allocated = max_blocks; | 3070 | allocated = map->m_len; |
| 3073 | } | 3071 | } |
| 3074 | /* | 3072 | /* |
| 3075 | * If there was a change of depth as part of the | 3073 | * If there was a change of depth as part of the |
| @@ -3078,15 +3076,15 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
| 3078 | */ | 3076 | */ |
| 3079 | if (ex1 && ex1 != ex) { | 3077 | if (ex1 && ex1 != ex) { |
| 3080 | ex1 = ex; | 3078 | ex1 = ex; |
| 3081 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 3079 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
| 3082 | ext4_ext_mark_uninitialized(ex1); | 3080 | ext4_ext_mark_uninitialized(ex1); |
| 3083 | ex2 = &newex; | 3081 | ex2 = &newex; |
| 3084 | } | 3082 | } |
| 3085 | /* | 3083 | /* |
| 3086 | * ex2: iblock to iblock + maxblocks-1 : to be direct IO written, | 3084 | * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written |
| 3087 | * uninitialised still. | 3085 | * using direct I/O, uninitialised still. |
| 3088 | */ | 3086 | */ |
| 3089 | ex2->ee_block = cpu_to_le32(iblock); | 3087 | ex2->ee_block = cpu_to_le32(map->m_lblk); |
| 3090 | ext4_ext_store_pblock(ex2, newblock); | 3088 | ext4_ext_store_pblock(ex2, newblock); |
| 3091 | ex2->ee_len = cpu_to_le16(allocated); | 3089 | ex2->ee_len = cpu_to_le16(allocated); |
| 3092 | ext4_ext_mark_uninitialized(ex2); | 3090 | ext4_ext_mark_uninitialized(ex2); |
| @@ -3188,10 +3186,9 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev, | |||
| 3188 | 3186 | ||
| 3189 | static int | 3187 | static int |
| 3190 | ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | 3188 | ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, |
| 3191 | ext4_lblk_t iblock, unsigned int max_blocks, | 3189 | struct ext4_map_blocks *map, |
| 3192 | struct ext4_ext_path *path, int flags, | 3190 | struct ext4_ext_path *path, int flags, |
| 3193 | unsigned int allocated, struct buffer_head *bh_result, | 3191 | unsigned int allocated, ext4_fsblk_t newblock) |
| 3194 | ext4_fsblk_t newblock) | ||
| 3195 | { | 3192 | { |
| 3196 | int ret = 0; | 3193 | int ret = 0; |
| 3197 | int err = 0; | 3194 | int err = 0; |
| @@ -3199,15 +3196,14 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
| 3199 | 3196 | ||
| 3200 | ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" | 3197 | ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" |
| 3201 | "block %llu, max_blocks %u, flags %d, allocated %u", | 3198 | "block %llu, max_blocks %u, flags %d, allocated %u", |
| 3202 | inode->i_ino, (unsigned long long)iblock, max_blocks, | 3199 | inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, |
| 3203 | flags, allocated); | 3200 | flags, allocated); |
| 3204 | ext4_ext_show_leaf(inode, path); | 3201 | ext4_ext_show_leaf(inode, path); |
| 3205 | 3202 | ||
| 3206 | /* get_block() before submit the IO, split the extent */ | 3203 | /* get_block() before submit the IO, split the extent */ |
| 3207 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { | 3204 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
| 3208 | ret = ext4_split_unwritten_extents(handle, | 3205 | ret = ext4_split_unwritten_extents(handle, inode, map, |
| 3209 | inode, path, iblock, | 3206 | path, flags); |
| 3210 | max_blocks, flags); | ||
| 3211 | /* | 3207 | /* |
| 3212 | * Flag the inode(non aio case) or end_io struct (aio case) | 3208 | * Flag the inode(non aio case) or end_io struct (aio case) |
| 3213 | * that this IO needs to convertion to written when IO is | 3209 | * that this IO needs to convertion to written when IO is |
| @@ -3218,7 +3214,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
| 3218 | else | 3214 | else |
| 3219 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); | 3215 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
| 3220 | if (ext4_should_dioread_nolock(inode)) | 3216 | if (ext4_should_dioread_nolock(inode)) |
| 3221 | set_buffer_uninit(bh_result); | 3217 | map->m_flags |= EXT4_MAP_UNINIT; |
| 3222 | goto out; | 3218 | goto out; |
| 3223 | } | 3219 | } |
| 3224 | /* IO end_io complete, convert the filled extent to written */ | 3220 | /* IO end_io complete, convert the filled extent to written */ |
| @@ -3246,14 +3242,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
| 3246 | * the buffer head will be unmapped so that | 3242 | * the buffer head will be unmapped so that |
| 3247 | * a read from the block returns 0s. | 3243 | * a read from the block returns 0s. |
| 3248 | */ | 3244 | */ |
| 3249 | set_buffer_unwritten(bh_result); | 3245 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
| 3250 | goto out1; | 3246 | goto out1; |
| 3251 | } | 3247 | } |
| 3252 | 3248 | ||
| 3253 | /* buffered write, writepage time, convert*/ | 3249 | /* buffered write, writepage time, convert*/ |
| 3254 | ret = ext4_ext_convert_to_initialized(handle, inode, | 3250 | ret = ext4_ext_convert_to_initialized(handle, inode, map, path); |
| 3255 | path, iblock, | ||
| 3256 | max_blocks); | ||
| 3257 | if (ret >= 0) | 3251 | if (ret >= 0) |
| 3258 | ext4_update_inode_fsync_trans(handle, inode, 1); | 3252 | ext4_update_inode_fsync_trans(handle, inode, 1); |
| 3259 | out: | 3253 | out: |
| @@ -3262,7 +3256,7 @@ out: | |||
| 3262 | goto out2; | 3256 | goto out2; |
| 3263 | } else | 3257 | } else |
| 3264 | allocated = ret; | 3258 | allocated = ret; |
| 3265 | set_buffer_new(bh_result); | 3259 | map->m_flags |= EXT4_MAP_NEW; |
| 3266 | /* | 3260 | /* |
| 3267 | * if we allocated more blocks than requested | 3261 | * if we allocated more blocks than requested |
| 3268 | * we need to make sure we unmap the extra block | 3262 | * we need to make sure we unmap the extra block |
| @@ -3270,11 +3264,11 @@ out: | |||
| 3270 | * unmapped later when we find the buffer_head marked | 3264 | * unmapped later when we find the buffer_head marked |
| 3271 | * new. | 3265 | * new. |
| 3272 | */ | 3266 | */ |
| 3273 | if (allocated > max_blocks) { | 3267 | if (allocated > map->m_len) { |
| 3274 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, | 3268 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, |
| 3275 | newblock + max_blocks, | 3269 | newblock + map->m_len, |
| 3276 | allocated - max_blocks); | 3270 | allocated - map->m_len); |
| 3277 | allocated = max_blocks; | 3271 | allocated = map->m_len; |
| 3278 | } | 3272 | } |
| 3279 | 3273 | ||
| 3280 | /* | 3274 | /* |
| @@ -3288,13 +3282,13 @@ out: | |||
| 3288 | ext4_da_update_reserve_space(inode, allocated, 0); | 3282 | ext4_da_update_reserve_space(inode, allocated, 0); |
| 3289 | 3283 | ||
| 3290 | map_out: | 3284 | map_out: |
| 3291 | set_buffer_mapped(bh_result); | 3285 | map->m_flags |= EXT4_MAP_MAPPED; |
| 3292 | out1: | 3286 | out1: |
| 3293 | if (allocated > max_blocks) | 3287 | if (allocated > map->m_len) |
| 3294 | allocated = max_blocks; | 3288 | allocated = map->m_len; |
| 3295 | ext4_ext_show_leaf(inode, path); | 3289 | ext4_ext_show_leaf(inode, path); |
| 3296 | bh_result->b_bdev = inode->i_sb->s_bdev; | 3290 | map->m_pblk = newblock; |
| 3297 | bh_result->b_blocknr = newblock; | 3291 | map->m_len = allocated; |
| 3298 | out2: | 3292 | out2: |
| 3299 | if (path) { | 3293 | if (path) { |
| 3300 | ext4_ext_drop_refs(path); | 3294 | ext4_ext_drop_refs(path); |
| @@ -3320,10 +3314,8 @@ out2: | |||
| 3320 | * | 3314 | * |
| 3321 | * return < 0, error case. | 3315 | * return < 0, error case. |
| 3322 | */ | 3316 | */ |
| 3323 | int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | 3317 | int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
| 3324 | ext4_lblk_t iblock, | 3318 | struct ext4_map_blocks *map, int flags) |
| 3325 | unsigned int max_blocks, struct buffer_head *bh_result, | ||
| 3326 | int flags) | ||
| 3327 | { | 3319 | { |
| 3328 | struct ext4_ext_path *path = NULL; | 3320 | struct ext4_ext_path *path = NULL; |
| 3329 | struct ext4_extent_header *eh; | 3321 | struct ext4_extent_header *eh; |
| @@ -3334,12 +3326,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3334 | struct ext4_allocation_request ar; | 3326 | struct ext4_allocation_request ar; |
| 3335 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; | 3327 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
| 3336 | 3328 | ||
| 3337 | __clear_bit(BH_New, &bh_result->b_state); | ||
| 3338 | ext_debug("blocks %u/%u requested for inode %lu\n", | 3329 | ext_debug("blocks %u/%u requested for inode %lu\n", |
| 3339 | iblock, max_blocks, inode->i_ino); | 3330 | map->m_lblk, map->m_len, inode->i_ino); |
| 3340 | 3331 | ||
| 3341 | /* check in cache */ | 3332 | /* check in cache */ |
| 3342 | cache_type = ext4_ext_in_cache(inode, iblock, &newex); | 3333 | cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex); |
| 3343 | if (cache_type) { | 3334 | if (cache_type) { |
| 3344 | if (cache_type == EXT4_EXT_CACHE_GAP) { | 3335 | if (cache_type == EXT4_EXT_CACHE_GAP) { |
| 3345 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | 3336 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
| @@ -3352,12 +3343,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3352 | /* we should allocate requested block */ | 3343 | /* we should allocate requested block */ |
| 3353 | } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { | 3344 | } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { |
| 3354 | /* block is already allocated */ | 3345 | /* block is already allocated */ |
| 3355 | newblock = iblock | 3346 | newblock = map->m_lblk |
| 3356 | - le32_to_cpu(newex.ee_block) | 3347 | - le32_to_cpu(newex.ee_block) |
| 3357 | + ext_pblock(&newex); | 3348 | + ext_pblock(&newex); |
| 3358 | /* number of remaining blocks in the extent */ | 3349 | /* number of remaining blocks in the extent */ |
| 3359 | allocated = ext4_ext_get_actual_len(&newex) - | 3350 | allocated = ext4_ext_get_actual_len(&newex) - |
| 3360 | (iblock - le32_to_cpu(newex.ee_block)); | 3351 | (map->m_lblk - le32_to_cpu(newex.ee_block)); |
| 3361 | goto out; | 3352 | goto out; |
| 3362 | } else { | 3353 | } else { |
| 3363 | BUG(); | 3354 | BUG(); |
| @@ -3365,7 +3356,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3365 | } | 3356 | } |
| 3366 | 3357 | ||
| 3367 | /* find extent for this block */ | 3358 | /* find extent for this block */ |
| 3368 | path = ext4_ext_find_extent(inode, iblock, NULL); | 3359 | path = ext4_ext_find_extent(inode, map->m_lblk, NULL); |
| 3369 | if (IS_ERR(path)) { | 3360 | if (IS_ERR(path)) { |
| 3370 | err = PTR_ERR(path); | 3361 | err = PTR_ERR(path); |
| 3371 | path = NULL; | 3362 | path = NULL; |
| @@ -3382,7 +3373,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3382 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { | 3373 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { |
| 3383 | EXT4_ERROR_INODE(inode, "bad extent address " | 3374 | EXT4_ERROR_INODE(inode, "bad extent address " |
| 3384 | "iblock: %d, depth: %d pblock %lld", | 3375 | "iblock: %d, depth: %d pblock %lld", |
| 3385 | iblock, depth, path[depth].p_block); | 3376 | map->m_lblk, depth, path[depth].p_block); |
| 3386 | err = -EIO; | 3377 | err = -EIO; |
| 3387 | goto out2; | 3378 | goto out2; |
| 3388 | } | 3379 | } |
| @@ -3400,12 +3391,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3400 | */ | 3391 | */ |
| 3401 | ee_len = ext4_ext_get_actual_len(ex); | 3392 | ee_len = ext4_ext_get_actual_len(ex); |
| 3402 | /* if found extent covers block, simply return it */ | 3393 | /* if found extent covers block, simply return it */ |
| 3403 | if (in_range(iblock, ee_block, ee_len)) { | 3394 | if (in_range(map->m_lblk, ee_block, ee_len)) { |
| 3404 | newblock = iblock - ee_block + ee_start; | 3395 | newblock = map->m_lblk - ee_block + ee_start; |
| 3405 | /* number of remaining blocks in the extent */ | 3396 | /* number of remaining blocks in the extent */ |
| 3406 | allocated = ee_len - (iblock - ee_block); | 3397 | allocated = ee_len - (map->m_lblk - ee_block); |
| 3407 | ext_debug("%u fit into %u:%d -> %llu\n", iblock, | 3398 | ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, |
| 3408 | ee_block, ee_len, newblock); | 3399 | ee_block, ee_len, newblock); |
| 3409 | 3400 | ||
| 3410 | /* Do not put uninitialized extent in the cache */ | 3401 | /* Do not put uninitialized extent in the cache */ |
| 3411 | if (!ext4_ext_is_uninitialized(ex)) { | 3402 | if (!ext4_ext_is_uninitialized(ex)) { |
| @@ -3415,8 +3406,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3415 | goto out; | 3406 | goto out; |
| 3416 | } | 3407 | } |
| 3417 | ret = ext4_ext_handle_uninitialized_extents(handle, | 3408 | ret = ext4_ext_handle_uninitialized_extents(handle, |
| 3418 | inode, iblock, max_blocks, path, | 3409 | inode, map, path, flags, allocated, |
| 3419 | flags, allocated, bh_result, newblock); | 3410 | newblock); |
| 3420 | return ret; | 3411 | return ret; |
| 3421 | } | 3412 | } |
| 3422 | } | 3413 | } |
| @@ -3430,7 +3421,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3430 | * put just found gap into cache to speed up | 3421 | * put just found gap into cache to speed up |
| 3431 | * subsequent requests | 3422 | * subsequent requests |
| 3432 | */ | 3423 | */ |
| 3433 | ext4_ext_put_gap_in_cache(inode, path, iblock); | 3424 | ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); |
| 3434 | goto out2; | 3425 | goto out2; |
| 3435 | } | 3426 | } |
| 3436 | /* | 3427 | /* |
| @@ -3438,11 +3429,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3438 | */ | 3429 | */ |
| 3439 | 3430 | ||
| 3440 | /* find neighbour allocated blocks */ | 3431 | /* find neighbour allocated blocks */ |
| 3441 | ar.lleft = iblock; | 3432 | ar.lleft = map->m_lblk; |
| 3442 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); | 3433 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); |
| 3443 | if (err) | 3434 | if (err) |
| 3444 | goto out2; | 3435 | goto out2; |
| 3445 | ar.lright = iblock; | 3436 | ar.lright = map->m_lblk; |
| 3446 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); | 3437 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); |
| 3447 | if (err) | 3438 | if (err) |
| 3448 | goto out2; | 3439 | goto out2; |
| @@ -3453,26 +3444,26 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3453 | * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is | 3444 | * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is |
| 3454 | * EXT_UNINIT_MAX_LEN. | 3445 | * EXT_UNINIT_MAX_LEN. |
| 3455 | */ | 3446 | */ |
| 3456 | if (max_blocks > EXT_INIT_MAX_LEN && | 3447 | if (map->m_len > EXT_INIT_MAX_LEN && |
| 3457 | !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) | 3448 | !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
| 3458 | max_blocks = EXT_INIT_MAX_LEN; | 3449 | map->m_len = EXT_INIT_MAX_LEN; |
| 3459 | else if (max_blocks > EXT_UNINIT_MAX_LEN && | 3450 | else if (map->m_len > EXT_UNINIT_MAX_LEN && |
| 3460 | (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) | 3451 | (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
| 3461 | max_blocks = EXT_UNINIT_MAX_LEN; | 3452 | map->m_len = EXT_UNINIT_MAX_LEN; |
| 3462 | 3453 | ||
| 3463 | /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ | 3454 | /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ |
| 3464 | newex.ee_block = cpu_to_le32(iblock); | 3455 | newex.ee_block = cpu_to_le32(map->m_lblk); |
| 3465 | newex.ee_len = cpu_to_le16(max_blocks); | 3456 | newex.ee_len = cpu_to_le16(map->m_len); |
| 3466 | err = ext4_ext_check_overlap(inode, &newex, path); | 3457 | err = ext4_ext_check_overlap(inode, &newex, path); |
| 3467 | if (err) | 3458 | if (err) |
| 3468 | allocated = ext4_ext_get_actual_len(&newex); | 3459 | allocated = ext4_ext_get_actual_len(&newex); |
| 3469 | else | 3460 | else |
| 3470 | allocated = max_blocks; | 3461 | allocated = map->m_len; |
| 3471 | 3462 | ||
| 3472 | /* allocate new block */ | 3463 | /* allocate new block */ |
| 3473 | ar.inode = inode; | 3464 | ar.inode = inode; |
| 3474 | ar.goal = ext4_ext_find_goal(inode, path, iblock); | 3465 | ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); |
| 3475 | ar.logical = iblock; | 3466 | ar.logical = map->m_lblk; |
| 3476 | ar.len = allocated; | 3467 | ar.len = allocated; |
| 3477 | if (S_ISREG(inode->i_mode)) | 3468 | if (S_ISREG(inode->i_mode)) |
| 3478 | ar.flags = EXT4_MB_HINT_DATA; | 3469 | ar.flags = EXT4_MB_HINT_DATA; |
| @@ -3506,7 +3497,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3506 | EXT4_STATE_DIO_UNWRITTEN); | 3497 | EXT4_STATE_DIO_UNWRITTEN); |
| 3507 | } | 3498 | } |
| 3508 | if (ext4_should_dioread_nolock(inode)) | 3499 | if (ext4_should_dioread_nolock(inode)) |
| 3509 | set_buffer_uninit(bh_result); | 3500 | map->m_flags |= EXT4_MAP_UNINIT; |
| 3510 | } | 3501 | } |
| 3511 | 3502 | ||
| 3512 | if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { | 3503 | if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { |
| @@ -3518,7 +3509,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3518 | goto out2; | 3509 | goto out2; |
| 3519 | } | 3510 | } |
| 3520 | last_ex = EXT_LAST_EXTENT(eh); | 3511 | last_ex = EXT_LAST_EXTENT(eh); |
| 3521 | if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) | 3512 | if (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) |
| 3522 | + ext4_ext_get_actual_len(last_ex)) | 3513 | + ext4_ext_get_actual_len(last_ex)) |
| 3523 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; | 3514 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; |
| 3524 | } | 3515 | } |
| @@ -3536,9 +3527,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3536 | /* previous routine could use block we allocated */ | 3527 | /* previous routine could use block we allocated */ |
| 3537 | newblock = ext_pblock(&newex); | 3528 | newblock = ext_pblock(&newex); |
| 3538 | allocated = ext4_ext_get_actual_len(&newex); | 3529 | allocated = ext4_ext_get_actual_len(&newex); |
| 3539 | if (allocated > max_blocks) | 3530 | if (allocated > map->m_len) |
| 3540 | allocated = max_blocks; | 3531 | allocated = map->m_len; |
| 3541 | set_buffer_new(bh_result); | 3532 | map->m_flags |= EXT4_MAP_NEW; |
| 3542 | 3533 | ||
| 3543 | /* | 3534 | /* |
| 3544 | * Update reserved blocks/metadata blocks after successful | 3535 | * Update reserved blocks/metadata blocks after successful |
| @@ -3552,18 +3543,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3552 | * when it is _not_ an uninitialized extent. | 3543 | * when it is _not_ an uninitialized extent. |
| 3553 | */ | 3544 | */ |
| 3554 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { | 3545 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { |
| 3555 | ext4_ext_put_in_cache(inode, iblock, allocated, newblock, | 3546 | ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock, |
| 3556 | EXT4_EXT_CACHE_EXTENT); | 3547 | EXT4_EXT_CACHE_EXTENT); |
| 3557 | ext4_update_inode_fsync_trans(handle, inode, 1); | 3548 | ext4_update_inode_fsync_trans(handle, inode, 1); |
| 3558 | } else | 3549 | } else |
| 3559 | ext4_update_inode_fsync_trans(handle, inode, 0); | 3550 | ext4_update_inode_fsync_trans(handle, inode, 0); |
| 3560 | out: | 3551 | out: |
| 3561 | if (allocated > max_blocks) | 3552 | if (allocated > map->m_len) |
| 3562 | allocated = max_blocks; | 3553 | allocated = map->m_len; |
| 3563 | ext4_ext_show_leaf(inode, path); | 3554 | ext4_ext_show_leaf(inode, path); |
| 3564 | set_buffer_mapped(bh_result); | 3555 | map->m_flags |= EXT4_MAP_MAPPED; |
| 3565 | bh_result->b_bdev = inode->i_sb->s_bdev; | 3556 | map->m_pblk = newblock; |
| 3566 | bh_result->b_blocknr = newblock; | 3557 | map->m_len = allocated; |
| 3567 | out2: | 3558 | out2: |
| 3568 | if (path) { | 3559 | if (path) { |
| 3569 | ext4_ext_drop_refs(path); | 3560 | ext4_ext_drop_refs(path); |
| @@ -3729,7 +3720,7 @@ retry: | |||
| 3729 | if (ret <= 0) { | 3720 | if (ret <= 0) { |
| 3730 | #ifdef EXT4FS_DEBUG | 3721 | #ifdef EXT4FS_DEBUG |
| 3731 | WARN_ON(ret <= 0); | 3722 | WARN_ON(ret <= 0); |
| 3732 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | 3723 | printk(KERN_ERR "%s: ext4_ext_map_blocks " |
| 3733 | "returned error inode#%lu, block=%u, " | 3724 | "returned error inode#%lu, block=%u, " |
| 3734 | "max_blocks=%u", __func__, | 3725 | "max_blocks=%u", __func__, |
| 3735 | inode->i_ino, block, max_blocks); | 3726 | inode->i_ino, block, max_blocks); |
| @@ -3806,7 +3797,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | |||
| 3806 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); | 3797 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); |
| 3807 | if (ret <= 0) { | 3798 | if (ret <= 0) { |
| 3808 | WARN_ON(ret <= 0); | 3799 | WARN_ON(ret <= 0); |
| 3809 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | 3800 | printk(KERN_ERR "%s: ext4_ext_map_blocks " |
| 3810 | "returned error inode#%lu, block=%u, " | 3801 | "returned error inode#%lu, block=%u, " |
| 3811 | "max_blocks=%u", __func__, | 3802 | "max_blocks=%u", __func__, |
| 3812 | inode->i_ino, block, max_blocks); | 3803 | inode->i_ino, block, max_blocks); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 830336d3911b..ff2f5fd681b5 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -149,7 +149,7 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, | |||
| 149 | int ret; | 149 | int ret; |
| 150 | 150 | ||
| 151 | /* | 151 | /* |
| 152 | * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this | 152 | * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this |
| 153 | * moment, get_block can be called only for blocks inside i_size since | 153 | * moment, get_block can be called only for blocks inside i_size since |
| 154 | * page cache has been already dropped and writes are blocked by | 154 | * page cache has been already dropped and writes are blocked by |
| 155 | * i_mutex. So we can safely drop the i_data_sem here. | 155 | * i_mutex. So we can safely drop the i_data_sem here. |
| @@ -890,9 +890,9 @@ err_out: | |||
| 890 | } | 890 | } |
| 891 | 891 | ||
| 892 | /* | 892 | /* |
| 893 | * The ext4_ind_get_blocks() function handles non-extents inodes | 893 | * The ext4_ind_map_blocks() function handles non-extents inodes |
| 894 | * (i.e., using the traditional indirect/double-indirect i_blocks | 894 | * (i.e., using the traditional indirect/double-indirect i_blocks |
| 895 | * scheme) for ext4_get_blocks(). | 895 | * scheme) for ext4_map_blocks(). |
| 896 | * | 896 | * |
| 897 | * Allocation strategy is simple: if we have to allocate something, we will | 897 | * Allocation strategy is simple: if we have to allocate something, we will |
| 898 | * have to go the whole way to leaf. So let's do it before attaching anything | 898 | * have to go the whole way to leaf. So let's do it before attaching anything |
| @@ -917,9 +917,8 @@ err_out: | |||
| 917 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system | 917 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system |
| 918 | * blocks. | 918 | * blocks. |
| 919 | */ | 919 | */ |
| 920 | static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | 920 | static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, |
| 921 | ext4_lblk_t iblock, unsigned int maxblocks, | 921 | struct ext4_map_blocks *map, |
| 922 | struct buffer_head *bh_result, | ||
| 923 | int flags) | 922 | int flags) |
| 924 | { | 923 | { |
| 925 | int err = -EIO; | 924 | int err = -EIO; |
| @@ -935,7 +934,7 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | |||
| 935 | 934 | ||
| 936 | J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)); | 935 | J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)); |
| 937 | J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); | 936 | J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); |
| 938 | depth = ext4_block_to_path(inode, iblock, offsets, | 937 | depth = ext4_block_to_path(inode, map->m_lblk, offsets, |
| 939 | &blocks_to_boundary); | 938 | &blocks_to_boundary); |
| 940 | 939 | ||
| 941 | if (depth == 0) | 940 | if (depth == 0) |
| @@ -946,10 +945,9 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | |||
| 946 | /* Simplest case - block found, no allocation needed */ | 945 | /* Simplest case - block found, no allocation needed */ |
| 947 | if (!partial) { | 946 | if (!partial) { |
| 948 | first_block = le32_to_cpu(chain[depth - 1].key); | 947 | first_block = le32_to_cpu(chain[depth - 1].key); |
| 949 | clear_buffer_new(bh_result); | ||
| 950 | count++; | 948 | count++; |
| 951 | /*map more blocks*/ | 949 | /*map more blocks*/ |
| 952 | while (count < maxblocks && count <= blocks_to_boundary) { | 950 | while (count < map->m_len && count <= blocks_to_boundary) { |
| 953 | ext4_fsblk_t blk; | 951 | ext4_fsblk_t blk; |
| 954 | 952 | ||
| 955 | blk = le32_to_cpu(*(chain[depth-1].p + count)); | 953 | blk = le32_to_cpu(*(chain[depth-1].p + count)); |
| @@ -969,7 +967,7 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | |||
| 969 | /* | 967 | /* |
| 970 | * Okay, we need to do block allocation. | 968 | * Okay, we need to do block allocation. |
| 971 | */ | 969 | */ |
| 972 | goal = ext4_find_goal(inode, iblock, partial); | 970 | goal = ext4_find_goal(inode, map->m_lblk, partial); |
| 973 | 971 | ||
| 974 | /* the number of blocks need to allocate for [d,t]indirect blocks */ | 972 | /* the number of blocks need to allocate for [d,t]indirect blocks */ |
| 975 | indirect_blks = (chain + depth) - partial - 1; | 973 | indirect_blks = (chain + depth) - partial - 1; |
| @@ -979,11 +977,11 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | |||
| 979 | * direct blocks to allocate for this branch. | 977 | * direct blocks to allocate for this branch. |
| 980 | */ | 978 | */ |
| 981 | count = ext4_blks_to_allocate(partial, indirect_blks, | 979 | count = ext4_blks_to_allocate(partial, indirect_blks, |
| 982 | maxblocks, blocks_to_boundary); | 980 | map->m_len, blocks_to_boundary); |
| 983 | /* | 981 | /* |
| 984 | * Block out ext4_truncate while we alter the tree | 982 | * Block out ext4_truncate while we alter the tree |
| 985 | */ | 983 | */ |
| 986 | err = ext4_alloc_branch(handle, inode, iblock, indirect_blks, | 984 | err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, |
| 987 | &count, goal, | 985 | &count, goal, |
| 988 | offsets + (partial - chain), partial); | 986 | offsets + (partial - chain), partial); |
| 989 | 987 | ||
| @@ -995,18 +993,20 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | |||
| 995 | * may need to return -EAGAIN upwards in the worst case. --sct | 993 | * may need to return -EAGAIN upwards in the worst case. --sct |
| 996 | */ | 994 | */ |
| 997 | if (!err) | 995 | if (!err) |
| 998 | err = ext4_splice_branch(handle, inode, iblock, | 996 | err = ext4_splice_branch(handle, inode, map->m_lblk, |
| 999 | partial, indirect_blks, count); | 997 | partial, indirect_blks, count); |
| 1000 | if (err) | 998 | if (err) |
| 1001 | goto cleanup; | 999 | goto cleanup; |
| 1002 | 1000 | ||
| 1003 | set_buffer_new(bh_result); | 1001 | map->m_flags |= EXT4_MAP_NEW; |
| 1004 | 1002 | ||
| 1005 | ext4_update_inode_fsync_trans(handle, inode, 1); | 1003 | ext4_update_inode_fsync_trans(handle, inode, 1); |
| 1006 | got_it: | 1004 | got_it: |
| 1007 | map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); | 1005 | map->m_flags |= EXT4_MAP_MAPPED; |
| 1006 | map->m_pblk = le32_to_cpu(chain[depth-1].key); | ||
| 1007 | map->m_len = count; | ||
| 1008 | if (count > blocks_to_boundary) | 1008 | if (count > blocks_to_boundary) |
| 1009 | set_buffer_boundary(bh_result); | 1009 | map->m_flags |= EXT4_MAP_BOUNDARY; |
| 1010 | err = count; | 1010 | err = count; |
| 1011 | /* Clean up and exit */ | 1011 | /* Clean up and exit */ |
| 1012 | partial = chain + depth - 1; /* the whole chain */ | 1012 | partial = chain + depth - 1; /* the whole chain */ |
| @@ -1016,7 +1016,6 @@ cleanup: | |||
| 1016 | brelse(partial->bh); | 1016 | brelse(partial->bh); |
| 1017 | partial--; | 1017 | partial--; |
| 1018 | } | 1018 | } |
| 1019 | BUFFER_TRACE(bh_result, "returned"); | ||
| 1020 | out: | 1019 | out: |
| 1021 | return err; | 1020 | return err; |
| 1022 | } | 1021 | } |
| @@ -1203,15 +1202,15 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, | |||
| 1203 | } | 1202 | } |
| 1204 | 1203 | ||
| 1205 | /* | 1204 | /* |
| 1206 | * The ext4_get_blocks() function tries to look up the requested blocks, | 1205 | * The ext4_map_blocks() function tries to look up the requested blocks, |
| 1207 | * and returns if the blocks are already mapped. | 1206 | * and returns if the blocks are already mapped. |
| 1208 | * | 1207 | * |
| 1209 | * Otherwise it takes the write lock of the i_data_sem and allocate blocks | 1208 | * Otherwise it takes the write lock of the i_data_sem and allocate blocks |
| 1210 | * and store the allocated blocks in the result buffer head and mark it | 1209 | * and store the allocated blocks in the result buffer head and mark it |
| 1211 | * mapped. | 1210 | * mapped. |
| 1212 | * | 1211 | * |
| 1213 | * If file type is extents based, it will call ext4_ext_get_blocks(), | 1212 | * If file type is extents based, it will call ext4_ext_map_blocks(), |
| 1214 | * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping | 1213 | * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping |
| 1215 | * based files | 1214 | * based files |
| 1216 | * | 1215 | * |
| 1217 | * On success, it returns the number of blocks being mapped or allocate. | 1216 | * On success, it returns the number of blocks being mapped or allocate. |
| @@ -1224,35 +1223,30 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, | |||
| 1224 | * | 1223 | * |
| 1225 | * It returns the error in case of allocation failure. | 1224 | * It returns the error in case of allocation failure. |
| 1226 | */ | 1225 | */ |
| 1227 | int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | 1226 | int ext4_map_blocks(handle_t *handle, struct inode *inode, |
| 1228 | unsigned int max_blocks, struct buffer_head *bh, | 1227 | struct ext4_map_blocks *map, int flags) |
| 1229 | int flags) | ||
| 1230 | { | 1228 | { |
| 1231 | int retval; | 1229 | int retval; |
| 1232 | 1230 | ||
| 1233 | clear_buffer_mapped(bh); | 1231 | map->m_flags = 0; |
| 1234 | clear_buffer_unwritten(bh); | 1232 | ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," |
| 1235 | 1233 | "logical block %lu\n", inode->i_ino, flags, map->m_len, | |
| 1236 | ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u," | 1234 | (unsigned long) map->m_lblk); |
| 1237 | "logical block %lu\n", inode->i_ino, flags, max_blocks, | ||
| 1238 | (unsigned long)block); | ||
| 1239 | /* | 1235 | /* |
| 1240 | * Try to see if we can get the block without requesting a new | 1236 | * Try to see if we can get the block without requesting a new |
| 1241 | * file system block. | 1237 | * file system block. |
| 1242 | */ | 1238 | */ |
| 1243 | down_read((&EXT4_I(inode)->i_data_sem)); | 1239 | down_read((&EXT4_I(inode)->i_data_sem)); |
| 1244 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { | 1240 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { |
| 1245 | retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, | 1241 | retval = ext4_ext_map_blocks(handle, inode, map, 0); |
| 1246 | bh, 0); | ||
| 1247 | } else { | 1242 | } else { |
| 1248 | retval = ext4_ind_get_blocks(handle, inode, block, max_blocks, | 1243 | retval = ext4_ind_map_blocks(handle, inode, map, 0); |
| 1249 | bh, 0); | ||
| 1250 | } | 1244 | } |
| 1251 | up_read((&EXT4_I(inode)->i_data_sem)); | 1245 | up_read((&EXT4_I(inode)->i_data_sem)); |
| 1252 | 1246 | ||
| 1253 | if (retval > 0 && buffer_mapped(bh)) { | 1247 | if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { |
| 1254 | int ret = check_block_validity(inode, "file system corruption", | 1248 | int ret = check_block_validity(inode, "file system corruption", |
| 1255 | block, bh->b_blocknr, retval); | 1249 | map->m_lblk, map->m_pblk, retval); |
| 1256 | if (ret != 0) | 1250 | if (ret != 0) |
| 1257 | return ret; | 1251 | return ret; |
| 1258 | } | 1252 | } |
| @@ -1268,7 +1262,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | |||
| 1268 | * ext4_ext_get_block() returns th create = 0 | 1262 | * ext4_ext_get_block() returns th create = 0 |
| 1269 | * with buffer head unmapped. | 1263 | * with buffer head unmapped. |
| 1270 | */ | 1264 | */ |
| 1271 | if (retval > 0 && buffer_mapped(bh)) | 1265 | if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) |
| 1272 | return retval; | 1266 | return retval; |
| 1273 | 1267 | ||
| 1274 | /* | 1268 | /* |
| @@ -1281,7 +1275,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | |||
| 1281 | * of BH_Unwritten and BH_Mapped flags being simultaneously | 1275 | * of BH_Unwritten and BH_Mapped flags being simultaneously |
| 1282 | * set on the buffer_head. | 1276 | * set on the buffer_head. |
| 1283 | */ | 1277 | */ |
| 1284 | clear_buffer_unwritten(bh); | 1278 | map->m_flags &= ~EXT4_MAP_UNWRITTEN; |
| 1285 | 1279 | ||
| 1286 | /* | 1280 | /* |
| 1287 | * New blocks allocate and/or writing to uninitialized extent | 1281 | * New blocks allocate and/or writing to uninitialized extent |
| @@ -1304,13 +1298,11 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | |||
| 1304 | * could have changed the inode type in between | 1298 | * could have changed the inode type in between |
| 1305 | */ | 1299 | */ |
| 1306 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { | 1300 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { |
| 1307 | retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, | 1301 | retval = ext4_ext_map_blocks(handle, inode, map, flags); |
| 1308 | bh, flags); | ||
| 1309 | } else { | 1302 | } else { |
| 1310 | retval = ext4_ind_get_blocks(handle, inode, block, | 1303 | retval = ext4_ind_map_blocks(handle, inode, map, flags); |
| 1311 | max_blocks, bh, flags); | ||
| 1312 | 1304 | ||
| 1313 | if (retval > 0 && buffer_new(bh)) { | 1305 | if (retval > 0 && map->m_flags & EXT4_MAP_NEW) { |
| 1314 | /* | 1306 | /* |
| 1315 | * We allocated new blocks which will result in | 1307 | * We allocated new blocks which will result in |
| 1316 | * i_data's format changing. Force the migrate | 1308 | * i_data's format changing. Force the migrate |
| @@ -1333,16 +1325,38 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | |||
| 1333 | EXT4_I(inode)->i_delalloc_reserved_flag = 0; | 1325 | EXT4_I(inode)->i_delalloc_reserved_flag = 0; |
| 1334 | 1326 | ||
| 1335 | up_write((&EXT4_I(inode)->i_data_sem)); | 1327 | up_write((&EXT4_I(inode)->i_data_sem)); |
| 1336 | if (retval > 0 && buffer_mapped(bh)) { | 1328 | if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { |
| 1337 | int ret = check_block_validity(inode, "file system " | 1329 | int ret = check_block_validity(inode, "file system " |
| 1338 | "corruption after allocation", | 1330 | "corruption after allocation", |
| 1339 | block, bh->b_blocknr, retval); | 1331 | map->m_lblk, map->m_pblk, |
| 1332 | retval); | ||
| 1340 | if (ret != 0) | 1333 | if (ret != 0) |
| 1341 | return ret; | 1334 | return ret; |
| 1342 | } | 1335 | } |
| 1343 | return retval; | 1336 | return retval; |
| 1344 | } | 1337 | } |
| 1345 | 1338 | ||
| 1339 | int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | ||
| 1340 | unsigned int max_blocks, struct buffer_head *bh, | ||
| 1341 | int flags) | ||
| 1342 | { | ||
| 1343 | struct ext4_map_blocks map; | ||
| 1344 | int ret; | ||
| 1345 | |||
| 1346 | map.m_lblk = block; | ||
| 1347 | map.m_len = max_blocks; | ||
| 1348 | |||
| 1349 | ret = ext4_map_blocks(handle, inode, &map, flags); | ||
| 1350 | if (ret < 0) | ||
| 1351 | return ret; | ||
| 1352 | |||
| 1353 | bh->b_blocknr = map.m_pblk; | ||
| 1354 | bh->b_size = inode->i_sb->s_blocksize * map.m_len; | ||
| 1355 | bh->b_bdev = inode->i_sb->s_bdev; | ||
| 1356 | bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; | ||
| 1357 | return ret; | ||
| 1358 | } | ||
| 1359 | |||
| 1346 | /* Maximum number of blocks we map for direct IO at once. */ | 1360 | /* Maximum number of blocks we map for direct IO at once. */ |
| 1347 | #define DIO_MAX_BLOCKS 4096 | 1361 | #define DIO_MAX_BLOCKS 4096 |
| 1348 | 1362 | ||
