aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ext4/extents.c
diff options
context:
space:
mode:
authorTheodore Ts'o <tytso@mit.edu>2010-05-16 19:00:00 -0400
committerTheodore Ts'o <tytso@mit.edu>2010-05-16 19:00:00 -0400
commite35fd6609b2fee54484d520deccb8f18bf7d38f3 (patch)
tree9b786445602819074f599c282b31bead166e8c03 /fs/ext4/extents.c
parent8e48dcfbd7c0892b4cfd064d682cc4c95a29df32 (diff)
ext4: Add new abstraction ext4_map_blocks() underneath ext4_get_blocks()
Jack up ext4_get_blocks() and add a new function, ext4_map_blocks(), which uses a much smaller structure, struct ext4_map_blocks, which is 20 bytes — as opposed to a struct buffer_head, which is nearly 5 times bigger on an x86_64 machine. By switching things to use ext4_map_blocks(), we can save stack space, since we can avoid allocating a struct buffer_head on the stack. Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--fs/ext4/extents.c237
1 files changed, 114 insertions, 123 deletions
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 8a8f9f0be911..37f938789344 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2611,7 +2611,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2611 2611
2612#define EXT4_EXT_ZERO_LEN 7 2612#define EXT4_EXT_ZERO_LEN 7
2613/* 2613/*
2614 * This function is called by ext4_ext_get_blocks() if someone tries to write 2614 * This function is called by ext4_ext_map_blocks() if someone tries to write
2615 * to an uninitialized extent. It may result in splitting the uninitialized 2615 * to an uninitialized extent. It may result in splitting the uninitialized
2616 * extent into multiple extents (upto three - one initialized and two 2616 * extent into multiple extents (upto three - one initialized and two
2617 * uninitialized). 2617 * uninitialized).
@@ -2621,10 +2621,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2621 * c> Splits in three extents: Somone is writing in middle of the extent 2621 * c> Splits in three extents: Somone is writing in middle of the extent
2622 */ 2622 */
2623static int ext4_ext_convert_to_initialized(handle_t *handle, 2623static int ext4_ext_convert_to_initialized(handle_t *handle,
2624 struct inode *inode, 2624 struct inode *inode,
2625 struct ext4_ext_path *path, 2625 struct ext4_map_blocks *map,
2626 ext4_lblk_t iblock, 2626 struct ext4_ext_path *path)
2627 unsigned int max_blocks)
2628{ 2627{
2629 struct ext4_extent *ex, newex, orig_ex; 2628 struct ext4_extent *ex, newex, orig_ex;
2630 struct ext4_extent *ex1 = NULL; 2629 struct ext4_extent *ex1 = NULL;
@@ -2640,20 +2639,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2640 2639
2641 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 2640 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
2642 "block %llu, max_blocks %u\n", inode->i_ino, 2641 "block %llu, max_blocks %u\n", inode->i_ino,
2643 (unsigned long long)iblock, max_blocks); 2642 (unsigned long long)map->m_lblk, map->m_len);
2644 2643
2645 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 2644 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2646 inode->i_sb->s_blocksize_bits; 2645 inode->i_sb->s_blocksize_bits;
2647 if (eof_block < iblock + max_blocks) 2646 if (eof_block < map->m_lblk + map->m_len)
2648 eof_block = iblock + max_blocks; 2647 eof_block = map->m_lblk + map->m_len;
2649 2648
2650 depth = ext_depth(inode); 2649 depth = ext_depth(inode);
2651 eh = path[depth].p_hdr; 2650 eh = path[depth].p_hdr;
2652 ex = path[depth].p_ext; 2651 ex = path[depth].p_ext;
2653 ee_block = le32_to_cpu(ex->ee_block); 2652 ee_block = le32_to_cpu(ex->ee_block);
2654 ee_len = ext4_ext_get_actual_len(ex); 2653 ee_len = ext4_ext_get_actual_len(ex);
2655 allocated = ee_len - (iblock - ee_block); 2654 allocated = ee_len - (map->m_lblk - ee_block);
2656 newblock = iblock - ee_block + ext_pblock(ex); 2655 newblock = map->m_lblk - ee_block + ext_pblock(ex);
2657 2656
2658 ex2 = ex; 2657 ex2 = ex;
2659 orig_ex.ee_block = ex->ee_block; 2658 orig_ex.ee_block = ex->ee_block;
@@ -2683,10 +2682,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2683 return allocated; 2682 return allocated;
2684 } 2683 }
2685 2684
2686 /* ex1: ee_block to iblock - 1 : uninitialized */ 2685 /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
2687 if (iblock > ee_block) { 2686 if (map->m_lblk > ee_block) {
2688 ex1 = ex; 2687 ex1 = ex;
2689 ex1->ee_len = cpu_to_le16(iblock - ee_block); 2688 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2690 ext4_ext_mark_uninitialized(ex1); 2689 ext4_ext_mark_uninitialized(ex1);
2691 ex2 = &newex; 2690 ex2 = &newex;
2692 } 2691 }
@@ -2695,15 +2694,15 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2695 * we insert ex3, if ex1 is NULL. This is to avoid temporary 2694 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2696 * overlap of blocks. 2695 * overlap of blocks.
2697 */ 2696 */
2698 if (!ex1 && allocated > max_blocks) 2697 if (!ex1 && allocated > map->m_len)
2699 ex2->ee_len = cpu_to_le16(max_blocks); 2698 ex2->ee_len = cpu_to_le16(map->m_len);
2700 /* ex3: to ee_block + ee_len : uninitialised */ 2699 /* ex3: to ee_block + ee_len : uninitialised */
2701 if (allocated > max_blocks) { 2700 if (allocated > map->m_len) {
2702 unsigned int newdepth; 2701 unsigned int newdepth;
2703 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ 2702 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
2704 if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) { 2703 if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
2705 /* 2704 /*
2706 * iblock == ee_block is handled by the zerouout 2705 * map->m_lblk == ee_block is handled by the zerouout
2707 * at the beginning. 2706 * at the beginning.
2708 * Mark first half uninitialized. 2707 * Mark first half uninitialized.
2709 * Mark second half initialized and zero out the 2708 * Mark second half initialized and zero out the
@@ -2716,7 +2715,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2716 ext4_ext_dirty(handle, inode, path + depth); 2715 ext4_ext_dirty(handle, inode, path + depth);
2717 2716
2718 ex3 = &newex; 2717 ex3 = &newex;
2719 ex3->ee_block = cpu_to_le32(iblock); 2718 ex3->ee_block = cpu_to_le32(map->m_lblk);
2720 ext4_ext_store_pblock(ex3, newblock); 2719 ext4_ext_store_pblock(ex3, newblock);
2721 ex3->ee_len = cpu_to_le16(allocated); 2720 ex3->ee_len = cpu_to_le16(allocated);
2722 err = ext4_ext_insert_extent(handle, inode, path, 2721 err = ext4_ext_insert_extent(handle, inode, path,
@@ -2729,7 +2728,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2729 ex->ee_len = orig_ex.ee_len; 2728 ex->ee_len = orig_ex.ee_len;
2730 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2729 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2731 ext4_ext_dirty(handle, inode, path + depth); 2730 ext4_ext_dirty(handle, inode, path + depth);
2732 /* blocks available from iblock */ 2731 /* blocks available from map->m_lblk */
2733 return allocated; 2732 return allocated;
2734 2733
2735 } else if (err) 2734 } else if (err)
@@ -2751,8 +2750,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2751 */ 2750 */
2752 depth = ext_depth(inode); 2751 depth = ext_depth(inode);
2753 ext4_ext_drop_refs(path); 2752 ext4_ext_drop_refs(path);
2754 path = ext4_ext_find_extent(inode, 2753 path = ext4_ext_find_extent(inode, map->m_lblk,
2755 iblock, path); 2754 path);
2756 if (IS_ERR(path)) { 2755 if (IS_ERR(path)) {
2757 err = PTR_ERR(path); 2756 err = PTR_ERR(path);
2758 return err; 2757 return err;
@@ -2772,9 +2771,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2772 return allocated; 2771 return allocated;
2773 } 2772 }
2774 ex3 = &newex; 2773 ex3 = &newex;
2775 ex3->ee_block = cpu_to_le32(iblock + max_blocks); 2774 ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
2776 ext4_ext_store_pblock(ex3, newblock + max_blocks); 2775 ext4_ext_store_pblock(ex3, newblock + map->m_len);
2777 ex3->ee_len = cpu_to_le16(allocated - max_blocks); 2776 ex3->ee_len = cpu_to_le16(allocated - map->m_len);
2778 ext4_ext_mark_uninitialized(ex3); 2777 ext4_ext_mark_uninitialized(ex3);
2779 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); 2778 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2780 if (err == -ENOSPC && may_zeroout) { 2779 if (err == -ENOSPC && may_zeroout) {
@@ -2787,7 +2786,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2787 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2786 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2788 ext4_ext_dirty(handle, inode, path + depth); 2787 ext4_ext_dirty(handle, inode, path + depth);
2789 /* zeroed the full extent */ 2788 /* zeroed the full extent */
2790 /* blocks available from iblock */ 2789 /* blocks available from map->m_lblk */
2791 return allocated; 2790 return allocated;
2792 2791
2793 } else if (err) 2792 } else if (err)
@@ -2807,7 +2806,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2807 2806
2808 depth = newdepth; 2807 depth = newdepth;
2809 ext4_ext_drop_refs(path); 2808 ext4_ext_drop_refs(path);
2810 path = ext4_ext_find_extent(inode, iblock, path); 2809 path = ext4_ext_find_extent(inode, map->m_lblk, path);
2811 if (IS_ERR(path)) { 2810 if (IS_ERR(path)) {
2812 err = PTR_ERR(path); 2811 err = PTR_ERR(path);
2813 goto out; 2812 goto out;
@@ -2821,14 +2820,14 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2821 if (err) 2820 if (err)
2822 goto out; 2821 goto out;
2823 2822
2824 allocated = max_blocks; 2823 allocated = map->m_len;
2825 2824
2826 /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying 2825 /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
2827 * to insert a extent in the middle zerout directly 2826 * to insert a extent in the middle zerout directly
2828 * otherwise give the extent a chance to merge to left 2827 * otherwise give the extent a chance to merge to left
2829 */ 2828 */
2830 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && 2829 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2831 iblock != ee_block && may_zeroout) { 2830 map->m_lblk != ee_block && may_zeroout) {
2832 err = ext4_ext_zeroout(inode, &orig_ex); 2831 err = ext4_ext_zeroout(inode, &orig_ex);
2833 if (err) 2832 if (err)
2834 goto fix_extent_len; 2833 goto fix_extent_len;
@@ -2838,7 +2837,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2838 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2837 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2839 ext4_ext_dirty(handle, inode, path + depth); 2838 ext4_ext_dirty(handle, inode, path + depth);
2840 /* zero out the first half */ 2839 /* zero out the first half */
2841 /* blocks available from iblock */ 2840 /* blocks available from map->m_lblk */
2842 return allocated; 2841 return allocated;
2843 } 2842 }
2844 } 2843 }
@@ -2849,12 +2848,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2849 */ 2848 */
2850 if (ex1 && ex1 != ex) { 2849 if (ex1 && ex1 != ex) {
2851 ex1 = ex; 2850 ex1 = ex;
2852 ex1->ee_len = cpu_to_le16(iblock - ee_block); 2851 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2853 ext4_ext_mark_uninitialized(ex1); 2852 ext4_ext_mark_uninitialized(ex1);
2854 ex2 = &newex; 2853 ex2 = &newex;
2855 } 2854 }
2856 /* ex2: iblock to iblock + maxblocks-1 : initialised */ 2855 /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
2857 ex2->ee_block = cpu_to_le32(iblock); 2856 ex2->ee_block = cpu_to_le32(map->m_lblk);
2858 ext4_ext_store_pblock(ex2, newblock); 2857 ext4_ext_store_pblock(ex2, newblock);
2859 ex2->ee_len = cpu_to_le16(allocated); 2858 ex2->ee_len = cpu_to_le16(allocated);
2860 if (ex2 != ex) 2859 if (ex2 != ex)
@@ -2924,7 +2923,7 @@ fix_extent_len:
2924} 2923}
2925 2924
2926/* 2925/*
2927 * This function is called by ext4_ext_get_blocks() from 2926 * This function is called by ext4_ext_map_blocks() from
2928 * ext4_get_blocks_dio_write() when DIO to write 2927 * ext4_get_blocks_dio_write() when DIO to write
2929 * to an uninitialized extent. 2928 * to an uninitialized extent.
2930 * 2929 *
@@ -2947,9 +2946,8 @@ fix_extent_len:
2947 */ 2946 */
2948static int ext4_split_unwritten_extents(handle_t *handle, 2947static int ext4_split_unwritten_extents(handle_t *handle,
2949 struct inode *inode, 2948 struct inode *inode,
2949 struct ext4_map_blocks *map,
2950 struct ext4_ext_path *path, 2950 struct ext4_ext_path *path,
2951 ext4_lblk_t iblock,
2952 unsigned int max_blocks,
2953 int flags) 2951 int flags)
2954{ 2952{
2955 struct ext4_extent *ex, newex, orig_ex; 2953 struct ext4_extent *ex, newex, orig_ex;
@@ -2965,20 +2963,20 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2965 2963
2966 ext_debug("ext4_split_unwritten_extents: inode %lu, logical" 2964 ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
2967 "block %llu, max_blocks %u\n", inode->i_ino, 2965 "block %llu, max_blocks %u\n", inode->i_ino,
2968 (unsigned long long)iblock, max_blocks); 2966 (unsigned long long)map->m_lblk, map->m_len);
2969 2967
2970 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 2968 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2971 inode->i_sb->s_blocksize_bits; 2969 inode->i_sb->s_blocksize_bits;
2972 if (eof_block < iblock + max_blocks) 2970 if (eof_block < map->m_lblk + map->m_len)
2973 eof_block = iblock + max_blocks; 2971 eof_block = map->m_lblk + map->m_len;
2974 2972
2975 depth = ext_depth(inode); 2973 depth = ext_depth(inode);
2976 eh = path[depth].p_hdr; 2974 eh = path[depth].p_hdr;
2977 ex = path[depth].p_ext; 2975 ex = path[depth].p_ext;
2978 ee_block = le32_to_cpu(ex->ee_block); 2976 ee_block = le32_to_cpu(ex->ee_block);
2979 ee_len = ext4_ext_get_actual_len(ex); 2977 ee_len = ext4_ext_get_actual_len(ex);
2980 allocated = ee_len - (iblock - ee_block); 2978 allocated = ee_len - (map->m_lblk - ee_block);
2981 newblock = iblock - ee_block + ext_pblock(ex); 2979 newblock = map->m_lblk - ee_block + ext_pblock(ex);
2982 2980
2983 ex2 = ex; 2981 ex2 = ex;
2984 orig_ex.ee_block = ex->ee_block; 2982 orig_ex.ee_block = ex->ee_block;
@@ -2996,16 +2994,16 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2996 * block where the write begins, and the write completely 2994 * block where the write begins, and the write completely
2997 * covers the extent, then we don't need to split it. 2995 * covers the extent, then we don't need to split it.
2998 */ 2996 */
2999 if ((iblock == ee_block) && (allocated <= max_blocks)) 2997 if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
3000 return allocated; 2998 return allocated;
3001 2999
3002 err = ext4_ext_get_access(handle, inode, path + depth); 3000 err = ext4_ext_get_access(handle, inode, path + depth);
3003 if (err) 3001 if (err)
3004 goto out; 3002 goto out;
3005 /* ex1: ee_block to iblock - 1 : uninitialized */ 3003 /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
3006 if (iblock > ee_block) { 3004 if (map->m_lblk > ee_block) {
3007 ex1 = ex; 3005 ex1 = ex;
3008 ex1->ee_len = cpu_to_le16(iblock - ee_block); 3006 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
3009 ext4_ext_mark_uninitialized(ex1); 3007 ext4_ext_mark_uninitialized(ex1);
3010 ex2 = &newex; 3008 ex2 = &newex;
3011 } 3009 }
@@ -3014,15 +3012,15 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3014 * we insert ex3, if ex1 is NULL. This is to avoid temporary 3012 * we insert ex3, if ex1 is NULL. This is to avoid temporary
3015 * overlap of blocks. 3013 * overlap of blocks.
3016 */ 3014 */
3017 if (!ex1 && allocated > max_blocks) 3015 if (!ex1 && allocated > map->m_len)
3018 ex2->ee_len = cpu_to_le16(max_blocks); 3016 ex2->ee_len = cpu_to_le16(map->m_len);
3019 /* ex3: to ee_block + ee_len : uninitialised */ 3017 /* ex3: to ee_block + ee_len : uninitialised */
3020 if (allocated > max_blocks) { 3018 if (allocated > map->m_len) {
3021 unsigned int newdepth; 3019 unsigned int newdepth;
3022 ex3 = &newex; 3020 ex3 = &newex;
3023 ex3->ee_block = cpu_to_le32(iblock + max_blocks); 3021 ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
3024 ext4_ext_store_pblock(ex3, newblock + max_blocks); 3022 ext4_ext_store_pblock(ex3, newblock + map->m_len);
3025 ex3->ee_len = cpu_to_le16(allocated - max_blocks); 3023 ex3->ee_len = cpu_to_le16(allocated - map->m_len);
3026 ext4_ext_mark_uninitialized(ex3); 3024 ext4_ext_mark_uninitialized(ex3);
3027 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); 3025 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
3028 if (err == -ENOSPC && may_zeroout) { 3026 if (err == -ENOSPC && may_zeroout) {
@@ -3035,7 +3033,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3035 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 3033 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3036 ext4_ext_dirty(handle, inode, path + depth); 3034 ext4_ext_dirty(handle, inode, path + depth);
3037 /* zeroed the full extent */ 3035 /* zeroed the full extent */
3038 /* blocks available from iblock */ 3036 /* blocks available from map->m_lblk */
3039 return allocated; 3037 return allocated;
3040 3038
3041 } else if (err) 3039 } else if (err)
@@ -3055,7 +3053,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3055 3053
3056 depth = newdepth; 3054 depth = newdepth;
3057 ext4_ext_drop_refs(path); 3055 ext4_ext_drop_refs(path);
3058 path = ext4_ext_find_extent(inode, iblock, path); 3056 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3059 if (IS_ERR(path)) { 3057 if (IS_ERR(path)) {
3060 err = PTR_ERR(path); 3058 err = PTR_ERR(path);
3061 goto out; 3059 goto out;
@@ -3069,7 +3067,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3069 if (err) 3067 if (err)
3070 goto out; 3068 goto out;
3071 3069
3072 allocated = max_blocks; 3070 allocated = map->m_len;
3073 } 3071 }
3074 /* 3072 /*
3075 * If there was a change of depth as part of the 3073 * If there was a change of depth as part of the
@@ -3078,15 +3076,15 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3078 */ 3076 */
3079 if (ex1 && ex1 != ex) { 3077 if (ex1 && ex1 != ex) {
3080 ex1 = ex; 3078 ex1 = ex;
3081 ex1->ee_len = cpu_to_le16(iblock - ee_block); 3079 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
3082 ext4_ext_mark_uninitialized(ex1); 3080 ext4_ext_mark_uninitialized(ex1);
3083 ex2 = &newex; 3081 ex2 = &newex;
3084 } 3082 }
3085 /* 3083 /*
3086 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written, 3084 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
3087 * uninitialised still. 3085 * using direct I/O, uninitialised still.
3088 */ 3086 */
3089 ex2->ee_block = cpu_to_le32(iblock); 3087 ex2->ee_block = cpu_to_le32(map->m_lblk);
3090 ext4_ext_store_pblock(ex2, newblock); 3088 ext4_ext_store_pblock(ex2, newblock);
3091 ex2->ee_len = cpu_to_le16(allocated); 3089 ex2->ee_len = cpu_to_le16(allocated);
3092 ext4_ext_mark_uninitialized(ex2); 3090 ext4_ext_mark_uninitialized(ex2);
@@ -3188,10 +3186,9 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3188 3186
3189static int 3187static int
3190ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3188ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3191 ext4_lblk_t iblock, unsigned int max_blocks, 3189 struct ext4_map_blocks *map,
3192 struct ext4_ext_path *path, int flags, 3190 struct ext4_ext_path *path, int flags,
3193 unsigned int allocated, struct buffer_head *bh_result, 3191 unsigned int allocated, ext4_fsblk_t newblock)
3194 ext4_fsblk_t newblock)
3195{ 3192{
3196 int ret = 0; 3193 int ret = 0;
3197 int err = 0; 3194 int err = 0;
@@ -3199,15 +3196,14 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3199 3196
3200 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" 3197 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
3201 "block %llu, max_blocks %u, flags %d, allocated %u", 3198 "block %llu, max_blocks %u, flags %d, allocated %u",
3202 inode->i_ino, (unsigned long long)iblock, max_blocks, 3199 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3203 flags, allocated); 3200 flags, allocated);
3204 ext4_ext_show_leaf(inode, path); 3201 ext4_ext_show_leaf(inode, path);
3205 3202
3206 /* get_block() before submit the IO, split the extent */ 3203 /* get_block() before submit the IO, split the extent */
3207 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3204 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3208 ret = ext4_split_unwritten_extents(handle, 3205 ret = ext4_split_unwritten_extents(handle, inode, map,
3209 inode, path, iblock, 3206 path, flags);
3210 max_blocks, flags);
3211 /* 3207 /*
3212 * Flag the inode(non aio case) or end_io struct (aio case) 3208 * Flag the inode(non aio case) or end_io struct (aio case)
3213 * that this IO needs to convertion to written when IO is 3209 * that this IO needs to convertion to written when IO is
@@ -3218,7 +3214,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3218 else 3214 else
3219 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3215 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3220 if (ext4_should_dioread_nolock(inode)) 3216 if (ext4_should_dioread_nolock(inode))
3221 set_buffer_uninit(bh_result); 3217 map->m_flags |= EXT4_MAP_UNINIT;
3222 goto out; 3218 goto out;
3223 } 3219 }
3224 /* IO end_io complete, convert the filled extent to written */ 3220 /* IO end_io complete, convert the filled extent to written */
@@ -3246,14 +3242,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3246 * the buffer head will be unmapped so that 3242 * the buffer head will be unmapped so that
3247 * a read from the block returns 0s. 3243 * a read from the block returns 0s.
3248 */ 3244 */
3249 set_buffer_unwritten(bh_result); 3245 map->m_flags |= EXT4_MAP_UNWRITTEN;
3250 goto out1; 3246 goto out1;
3251 } 3247 }
3252 3248
3253 /* buffered write, writepage time, convert*/ 3249 /* buffered write, writepage time, convert*/
3254 ret = ext4_ext_convert_to_initialized(handle, inode, 3250 ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3255 path, iblock,
3256 max_blocks);
3257 if (ret >= 0) 3251 if (ret >= 0)
3258 ext4_update_inode_fsync_trans(handle, inode, 1); 3252 ext4_update_inode_fsync_trans(handle, inode, 1);
3259out: 3253out:
@@ -3262,7 +3256,7 @@ out:
3262 goto out2; 3256 goto out2;
3263 } else 3257 } else
3264 allocated = ret; 3258 allocated = ret;
3265 set_buffer_new(bh_result); 3259 map->m_flags |= EXT4_MAP_NEW;
3266 /* 3260 /*
3267 * if we allocated more blocks than requested 3261 * if we allocated more blocks than requested
3268 * we need to make sure we unmap the extra block 3262 * we need to make sure we unmap the extra block
@@ -3270,11 +3264,11 @@ out:
3270 * unmapped later when we find the buffer_head marked 3264 * unmapped later when we find the buffer_head marked
3271 * new. 3265 * new.
3272 */ 3266 */
3273 if (allocated > max_blocks) { 3267 if (allocated > map->m_len) {
3274 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3268 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3275 newblock + max_blocks, 3269 newblock + map->m_len,
3276 allocated - max_blocks); 3270 allocated - map->m_len);
3277 allocated = max_blocks; 3271 allocated = map->m_len;
3278 } 3272 }
3279 3273
3280 /* 3274 /*
@@ -3288,13 +3282,13 @@ out:
3288 ext4_da_update_reserve_space(inode, allocated, 0); 3282 ext4_da_update_reserve_space(inode, allocated, 0);
3289 3283
3290map_out: 3284map_out:
3291 set_buffer_mapped(bh_result); 3285 map->m_flags |= EXT4_MAP_MAPPED;
3292out1: 3286out1:
3293 if (allocated > max_blocks) 3287 if (allocated > map->m_len)
3294 allocated = max_blocks; 3288 allocated = map->m_len;
3295 ext4_ext_show_leaf(inode, path); 3289 ext4_ext_show_leaf(inode, path);
3296 bh_result->b_bdev = inode->i_sb->s_bdev; 3290 map->m_pblk = newblock;
3297 bh_result->b_blocknr = newblock; 3291 map->m_len = allocated;
3298out2: 3292out2:
3299 if (path) { 3293 if (path) {
3300 ext4_ext_drop_refs(path); 3294 ext4_ext_drop_refs(path);
@@ -3320,10 +3314,8 @@ out2:
3320 * 3314 *
3321 * return < 0, error case. 3315 * return < 0, error case.
3322 */ 3316 */
3323int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 3317int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3324 ext4_lblk_t iblock, 3318 struct ext4_map_blocks *map, int flags)
3325 unsigned int max_blocks, struct buffer_head *bh_result,
3326 int flags)
3327{ 3319{
3328 struct ext4_ext_path *path = NULL; 3320 struct ext4_ext_path *path = NULL;
3329 struct ext4_extent_header *eh; 3321 struct ext4_extent_header *eh;
@@ -3334,12 +3326,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3334 struct ext4_allocation_request ar; 3326 struct ext4_allocation_request ar;
3335 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 3327 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3336 3328
3337 __clear_bit(BH_New, &bh_result->b_state);
3338 ext_debug("blocks %u/%u requested for inode %lu\n", 3329 ext_debug("blocks %u/%u requested for inode %lu\n",
3339 iblock, max_blocks, inode->i_ino); 3330 map->m_lblk, map->m_len, inode->i_ino);
3340 3331
3341 /* check in cache */ 3332 /* check in cache */
3342 cache_type = ext4_ext_in_cache(inode, iblock, &newex); 3333 cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
3343 if (cache_type) { 3334 if (cache_type) {
3344 if (cache_type == EXT4_EXT_CACHE_GAP) { 3335 if (cache_type == EXT4_EXT_CACHE_GAP) {
3345 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3336 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -3352,12 +3343,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3352 /* we should allocate requested block */ 3343 /* we should allocate requested block */
3353 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { 3344 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
3354 /* block is already allocated */ 3345 /* block is already allocated */
3355 newblock = iblock 3346 newblock = map->m_lblk
3356 - le32_to_cpu(newex.ee_block) 3347 - le32_to_cpu(newex.ee_block)
3357 + ext_pblock(&newex); 3348 + ext_pblock(&newex);
3358 /* number of remaining blocks in the extent */ 3349 /* number of remaining blocks in the extent */
3359 allocated = ext4_ext_get_actual_len(&newex) - 3350 allocated = ext4_ext_get_actual_len(&newex) -
3360 (iblock - le32_to_cpu(newex.ee_block)); 3351 (map->m_lblk - le32_to_cpu(newex.ee_block));
3361 goto out; 3352 goto out;
3362 } else { 3353 } else {
3363 BUG(); 3354 BUG();
@@ -3365,7 +3356,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3365 } 3356 }
3366 3357
3367 /* find extent for this block */ 3358 /* find extent for this block */
3368 path = ext4_ext_find_extent(inode, iblock, NULL); 3359 path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3369 if (IS_ERR(path)) { 3360 if (IS_ERR(path)) {
3370 err = PTR_ERR(path); 3361 err = PTR_ERR(path);
3371 path = NULL; 3362 path = NULL;
@@ -3382,7 +3373,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3382 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 3373 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3383 EXT4_ERROR_INODE(inode, "bad extent address " 3374 EXT4_ERROR_INODE(inode, "bad extent address "
3384 "iblock: %d, depth: %d pblock %lld", 3375 "iblock: %d, depth: %d pblock %lld",
3385 iblock, depth, path[depth].p_block); 3376 map->m_lblk, depth, path[depth].p_block);
3386 err = -EIO; 3377 err = -EIO;
3387 goto out2; 3378 goto out2;
3388 } 3379 }
@@ -3400,12 +3391,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3400 */ 3391 */
3401 ee_len = ext4_ext_get_actual_len(ex); 3392 ee_len = ext4_ext_get_actual_len(ex);
3402 /* if found extent covers block, simply return it */ 3393 /* if found extent covers block, simply return it */
3403 if (in_range(iblock, ee_block, ee_len)) { 3394 if (in_range(map->m_lblk, ee_block, ee_len)) {
3404 newblock = iblock - ee_block + ee_start; 3395 newblock = map->m_lblk - ee_block + ee_start;
3405 /* number of remaining blocks in the extent */ 3396 /* number of remaining blocks in the extent */
3406 allocated = ee_len - (iblock - ee_block); 3397 allocated = ee_len - (map->m_lblk - ee_block);
3407 ext_debug("%u fit into %u:%d -> %llu\n", iblock, 3398 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3408 ee_block, ee_len, newblock); 3399 ee_block, ee_len, newblock);
3409 3400
3410 /* Do not put uninitialized extent in the cache */ 3401 /* Do not put uninitialized extent in the cache */
3411 if (!ext4_ext_is_uninitialized(ex)) { 3402 if (!ext4_ext_is_uninitialized(ex)) {
@@ -3415,8 +3406,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3415 goto out; 3406 goto out;
3416 } 3407 }
3417 ret = ext4_ext_handle_uninitialized_extents(handle, 3408 ret = ext4_ext_handle_uninitialized_extents(handle,
3418 inode, iblock, max_blocks, path, 3409 inode, map, path, flags, allocated,
3419 flags, allocated, bh_result, newblock); 3410 newblock);
3420 return ret; 3411 return ret;
3421 } 3412 }
3422 } 3413 }
@@ -3430,7 +3421,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3430 * put just found gap into cache to speed up 3421 * put just found gap into cache to speed up
3431 * subsequent requests 3422 * subsequent requests
3432 */ 3423 */
3433 ext4_ext_put_gap_in_cache(inode, path, iblock); 3424 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
3434 goto out2; 3425 goto out2;
3435 } 3426 }
3436 /* 3427 /*
@@ -3438,11 +3429,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3438 */ 3429 */
3439 3430
3440 /* find neighbour allocated blocks */ 3431 /* find neighbour allocated blocks */
3441 ar.lleft = iblock; 3432 ar.lleft = map->m_lblk;
3442 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 3433 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3443 if (err) 3434 if (err)
3444 goto out2; 3435 goto out2;
3445 ar.lright = iblock; 3436 ar.lright = map->m_lblk;
3446 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); 3437 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
3447 if (err) 3438 if (err)
3448 goto out2; 3439 goto out2;
@@ -3453,26 +3444,26 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3453 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 3444 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
3454 * EXT_UNINIT_MAX_LEN. 3445 * EXT_UNINIT_MAX_LEN.
3455 */ 3446 */
3456 if (max_blocks > EXT_INIT_MAX_LEN && 3447 if (map->m_len > EXT_INIT_MAX_LEN &&
3457 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3448 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3458 max_blocks = EXT_INIT_MAX_LEN; 3449 map->m_len = EXT_INIT_MAX_LEN;
3459 else if (max_blocks > EXT_UNINIT_MAX_LEN && 3450 else if (map->m_len > EXT_UNINIT_MAX_LEN &&
3460 (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3451 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3461 max_blocks = EXT_UNINIT_MAX_LEN; 3452 map->m_len = EXT_UNINIT_MAX_LEN;
3462 3453
3463 /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ 3454 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
3464 newex.ee_block = cpu_to_le32(iblock); 3455 newex.ee_block = cpu_to_le32(map->m_lblk);
3465 newex.ee_len = cpu_to_le16(max_blocks); 3456 newex.ee_len = cpu_to_le16(map->m_len);
3466 err = ext4_ext_check_overlap(inode, &newex, path); 3457 err = ext4_ext_check_overlap(inode, &newex, path);
3467 if (err) 3458 if (err)
3468 allocated = ext4_ext_get_actual_len(&newex); 3459 allocated = ext4_ext_get_actual_len(&newex);
3469 else 3460 else
3470 allocated = max_blocks; 3461 allocated = map->m_len;
3471 3462
3472 /* allocate new block */ 3463 /* allocate new block */
3473 ar.inode = inode; 3464 ar.inode = inode;
3474 ar.goal = ext4_ext_find_goal(inode, path, iblock); 3465 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
3475 ar.logical = iblock; 3466 ar.logical = map->m_lblk;
3476 ar.len = allocated; 3467 ar.len = allocated;
3477 if (S_ISREG(inode->i_mode)) 3468 if (S_ISREG(inode->i_mode))
3478 ar.flags = EXT4_MB_HINT_DATA; 3469 ar.flags = EXT4_MB_HINT_DATA;
@@ -3506,7 +3497,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3506 EXT4_STATE_DIO_UNWRITTEN); 3497 EXT4_STATE_DIO_UNWRITTEN);
3507 } 3498 }
3508 if (ext4_should_dioread_nolock(inode)) 3499 if (ext4_should_dioread_nolock(inode))
3509 set_buffer_uninit(bh_result); 3500 map->m_flags |= EXT4_MAP_UNINIT;
3510 } 3501 }
3511 3502
3512 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { 3503 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
@@ -3518,7 +3509,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3518 goto out2; 3509 goto out2;
3519 } 3510 }
3520 last_ex = EXT_LAST_EXTENT(eh); 3511 last_ex = EXT_LAST_EXTENT(eh);
3521 if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) 3512 if (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block)
3522 + ext4_ext_get_actual_len(last_ex)) 3513 + ext4_ext_get_actual_len(last_ex))
3523 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; 3514 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
3524 } 3515 }
@@ -3536,9 +3527,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3536 /* previous routine could use block we allocated */ 3527 /* previous routine could use block we allocated */
3537 newblock = ext_pblock(&newex); 3528 newblock = ext_pblock(&newex);
3538 allocated = ext4_ext_get_actual_len(&newex); 3529 allocated = ext4_ext_get_actual_len(&newex);
3539 if (allocated > max_blocks) 3530 if (allocated > map->m_len)
3540 allocated = max_blocks; 3531 allocated = map->m_len;
3541 set_buffer_new(bh_result); 3532 map->m_flags |= EXT4_MAP_NEW;
3542 3533
3543 /* 3534 /*
3544 * Update reserved blocks/metadata blocks after successful 3535 * Update reserved blocks/metadata blocks after successful
@@ -3552,18 +3543,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3552 * when it is _not_ an uninitialized extent. 3543 * when it is _not_ an uninitialized extent.
3553 */ 3544 */
3554 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { 3545 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
3555 ext4_ext_put_in_cache(inode, iblock, allocated, newblock, 3546 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
3556 EXT4_EXT_CACHE_EXTENT); 3547 EXT4_EXT_CACHE_EXTENT);
3557 ext4_update_inode_fsync_trans(handle, inode, 1); 3548 ext4_update_inode_fsync_trans(handle, inode, 1);
3558 } else 3549 } else
3559 ext4_update_inode_fsync_trans(handle, inode, 0); 3550 ext4_update_inode_fsync_trans(handle, inode, 0);
3560out: 3551out:
3561 if (allocated > max_blocks) 3552 if (allocated > map->m_len)
3562 allocated = max_blocks; 3553 allocated = map->m_len;
3563 ext4_ext_show_leaf(inode, path); 3554 ext4_ext_show_leaf(inode, path);
3564 set_buffer_mapped(bh_result); 3555 map->m_flags |= EXT4_MAP_MAPPED;
3565 bh_result->b_bdev = inode->i_sb->s_bdev; 3556 map->m_pblk = newblock;
3566 bh_result->b_blocknr = newblock; 3557 map->m_len = allocated;
3567out2: 3558out2:
3568 if (path) { 3559 if (path) {
3569 ext4_ext_drop_refs(path); 3560 ext4_ext_drop_refs(path);
@@ -3729,7 +3720,7 @@ retry:
3729 if (ret <= 0) { 3720 if (ret <= 0) {
3730#ifdef EXT4FS_DEBUG 3721#ifdef EXT4FS_DEBUG
3731 WARN_ON(ret <= 0); 3722 WARN_ON(ret <= 0);
3732 printk(KERN_ERR "%s: ext4_ext_get_blocks " 3723 printk(KERN_ERR "%s: ext4_ext_map_blocks "
3733 "returned error inode#%lu, block=%u, " 3724 "returned error inode#%lu, block=%u, "
3734 "max_blocks=%u", __func__, 3725 "max_blocks=%u", __func__,
3735 inode->i_ino, block, max_blocks); 3726 inode->i_ino, block, max_blocks);
@@ -3806,7 +3797,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3806 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 3797 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
3807 if (ret <= 0) { 3798 if (ret <= 0) {
3808 WARN_ON(ret <= 0); 3799 WARN_ON(ret <= 0);
3809 printk(KERN_ERR "%s: ext4_ext_get_blocks " 3800 printk(KERN_ERR "%s: ext4_ext_map_blocks "
3810 "returned error inode#%lu, block=%u, " 3801 "returned error inode#%lu, block=%u, "
3811 "max_blocks=%u", __func__, 3802 "max_blocks=%u", __func__,
3812 inode->i_ino, block, max_blocks); 3803 inode->i_ino, block, max_blocks);