author | Mingming Cao <cmm@us.ibm.com> | 2009-09-28 15:49:08 -0400
committer | Theodore Ts'o <tytso@mit.edu> | 2009-09-28 15:49:08 -0400
commit | 0031462b5b392f90d17f1d75abb795883c44e969 (patch)
tree | e8323861b8dede0f3ddbfc8324d650bf1f4fd74b /fs/ext4/extents.c
parent | 9f0ccfd8e07d61b413e6536ffa02fbf60d2e20d8 (diff)
ext4: Split uninitialized extents for direct I/O
When writing into an uninitialized extent via direct I/O, if the direct
I/O doesn't exactly cover the uninitialized extent, split the extent
into uninitialized and initialized extents before submitting the I/O.
This avoids needing to deal with an ENOSPC error in the end_io
callback that gets used for direct I/O.
When the IO is complete, the written extent will be marked as initialized.
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r-- | fs/ext4/extents.c | 423 |
1 file changed, 386 insertions, 37 deletions
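In outline, the patch introduces a two-phase scheme. The sketch below is a simplified illustration, not the actual kernel code: dio_write_prepare() and dio_write_complete() are hypothetical wrappers standing in for ext4_ext_get_blocks() (called with EXT4_GET_BLOCKS_DIO_CREATE_EXT) and the DIO end_io path; only ext4_split_unwritten_extents(), ext4_convert_unwritten_extents(), and their signatures come from this patch.

/*
 * Simplified, hypothetical sketch of the two-phase flow added by this
 * patch; not the real call chain.
 */
static int dio_write_prepare(handle_t *handle, struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t iblock, unsigned int max_blocks)
{
	/*
	 * Phase 1, before the IO is submitted: carve the blocks that the
	 * DIO will cover out of the uninitialized extent, leaving them
	 * uninitialized.  Any extent-tree growth (and hence any ENOSPC)
	 * happens here, under the caller's transaction, not in the
	 * completion path.
	 */
	return ext4_split_unwritten_extents(handle, inode, path, iblock,
					    max_blocks,
					    EXT4_GET_BLOCKS_DIO_CREATE_EXT);
}

static int dio_write_complete(struct inode *inode, loff_t offset, loff_t len)
{
	/*
	 * Phase 2, from the end_io callback: the data is on disk, so the
	 * already-split extent covering [offset, offset + len) only needs
	 * to be flipped from unwritten to written and merged with its
	 * neighbours; no new extents are inserted.
	 */
	return ext4_convert_unwritten_extents(inode, offset, len);
}

Splitting up front means the completion path never has to allocate extent-tree index blocks, which is what keeps ENOSPC out of the end_io callback.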
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7a3832577923..a38e651c004e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -723,7 +723,7 @@ err: | |||
723 | * insert new index [@logical;@ptr] into the block at @curp; | 723 | * insert new index [@logical;@ptr] into the block at @curp; |
724 | * check where to insert: before @curp or after @curp | 724 | * check where to insert: before @curp or after @curp |
725 | */ | 725 | */ |
726 | static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, | 726 | int ext4_ext_insert_index(handle_t *handle, struct inode *inode, |
727 | struct ext4_ext_path *curp, | 727 | struct ext4_ext_path *curp, |
728 | int logical, ext4_fsblk_t ptr) | 728 | int logical, ext4_fsblk_t ptr) |
729 | { | 729 | { |
@@ -1586,7 +1586,7 @@ out: | |||
1586 | */ | 1586 | */ |
1587 | int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | 1587 | int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, |
1588 | struct ext4_ext_path *path, | 1588 | struct ext4_ext_path *path, |
1589 | struct ext4_extent *newext) | 1589 | struct ext4_extent *newext, int flag) |
1590 | { | 1590 | { |
1591 | struct ext4_extent_header *eh; | 1591 | struct ext4_extent_header *eh; |
1592 | struct ext4_extent *ex, *fex; | 1592 | struct ext4_extent *ex, *fex; |
@@ -1602,7 +1602,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | |||
1602 | BUG_ON(path[depth].p_hdr == NULL); | 1602 | BUG_ON(path[depth].p_hdr == NULL); |
1603 | 1603 | ||
1604 | /* try to insert block into found extent and return */ | 1604 | /* try to insert block into found extent and return */ |
1605 | if (ex && ext4_can_extents_be_merged(inode, ex, newext)) { | 1605 | if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) |
1606 | && ext4_can_extents_be_merged(inode, ex, newext)) { | ||
1606 | ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", | 1607 | ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", |
1607 | ext4_ext_is_uninitialized(newext), | 1608 | ext4_ext_is_uninitialized(newext), |
1608 | ext4_ext_get_actual_len(newext), | 1609 | ext4_ext_get_actual_len(newext), |
@@ -1722,7 +1723,8 @@ has_space: | |||
1722 | 1723 | ||
1723 | merge: | 1724 | merge: |
1724 | /* try to merge extents to the right */ | 1725 | /* try to merge extents to the right */ |
1725 | ext4_ext_try_to_merge(inode, path, nearex); | 1726 | if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) |
1727 | ext4_ext_try_to_merge(inode, path, nearex); | ||
1726 | 1728 | ||
1727 | /* try to merge extents to the left */ | 1729 | /* try to merge extents to the left */ |
1728 | 1730 | ||
@@ -2490,7 +2492,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |||
2490 | } | 2492 | } |
2491 | 2493 | ||
2492 | #define EXT4_EXT_ZERO_LEN 7 | 2494 | #define EXT4_EXT_ZERO_LEN 7 |
2493 | |||
2494 | /* | 2495 | /* |
2495 | * This function is called by ext4_ext_get_blocks() if someone tries to write | 2496 | * This function is called by ext4_ext_get_blocks() if someone tries to write |
2496 | * to an uninitialized extent. It may result in splitting the uninitialized | 2497 | * to an uninitialized extent. It may result in splitting the uninitialized |
@@ -2583,7 +2584,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2583 | ex3->ee_block = cpu_to_le32(iblock); | 2584 | ex3->ee_block = cpu_to_le32(iblock); |
2584 | ext4_ext_store_pblock(ex3, newblock); | 2585 | ext4_ext_store_pblock(ex3, newblock); |
2585 | ex3->ee_len = cpu_to_le16(allocated); | 2586 | ex3->ee_len = cpu_to_le16(allocated); |
2586 | err = ext4_ext_insert_extent(handle, inode, path, ex3); | 2587 | err = ext4_ext_insert_extent(handle, inode, path, |
2588 | ex3, 0); | ||
2587 | if (err == -ENOSPC) { | 2589 | if (err == -ENOSPC) { |
2588 | err = ext4_ext_zeroout(inode, &orig_ex); | 2590 | err = ext4_ext_zeroout(inode, &orig_ex); |
2589 | if (err) | 2591 | if (err) |
@@ -2639,7 +2641,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2639 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | 2641 | ext4_ext_store_pblock(ex3, newblock + max_blocks); |
2640 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | 2642 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); |
2641 | ext4_ext_mark_uninitialized(ex3); | 2643 | ext4_ext_mark_uninitialized(ex3); |
2642 | err = ext4_ext_insert_extent(handle, inode, path, ex3); | 2644 | err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); |
2643 | if (err == -ENOSPC) { | 2645 | if (err == -ENOSPC) { |
2644 | err = ext4_ext_zeroout(inode, &orig_ex); | 2646 | err = ext4_ext_zeroout(inode, &orig_ex); |
2645 | if (err) | 2647 | if (err) |
@@ -2757,7 +2759,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2757 | err = ext4_ext_dirty(handle, inode, path + depth); | 2759 | err = ext4_ext_dirty(handle, inode, path + depth); |
2758 | goto out; | 2760 | goto out; |
2759 | insert: | 2761 | insert: |
2760 | err = ext4_ext_insert_extent(handle, inode, path, &newex); | 2762 | err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); |
2761 | if (err == -ENOSPC) { | 2763 | if (err == -ENOSPC) { |
2762 | err = ext4_ext_zeroout(inode, &orig_ex); | 2764 | err = ext4_ext_zeroout(inode, &orig_ex); |
2763 | if (err) | 2765 | if (err) |
@@ -2785,6 +2787,320 @@ fix_extent_len: | |||
2785 | } | 2787 | } |
2786 | 2788 | ||
2787 | /* | 2789 | /* |
2790 | * This function is called by ext4_ext_get_blocks() from | ||
2791 | * ext4_get_blocks_dio_write() when DIO needs to write | ||
2792 | * to an uninitialized extent. | ||
2793 | * | ||
2794 | * Writing to an uninitialized extent may result in splitting the | ||
2795 | * uninitialized extent into multiple initialized/uninitialized extents | ||
2796 | * (up to three). There are three possibilities: | ||
2797 | * a> No split required: the entire extent remains uninitialized | ||
2798 | * b> Split into two extents: the write happens at either end of the extent | ||
2799 | * c> Split into three extents: someone is writing in the middle of the extent | ||
2800 | * | ||
2801 | * One or more index blocks may be needed if the extent tree grows after | ||
2802 | * the uninitialized extent is split. To prevent ENOSPC from occurring at | ||
2803 | * IO completion time, we need to split the uninitialized extent before | ||
2804 | * submitting the IO. The uninitialized extent will be split into (at | ||
2805 | * most) three uninitialized extents. After the IO completes, the part | ||
2806 | * being written will be converted to initialized by the end_io callback | ||
2807 | * via ext4_convert_unwritten_extents(). | ||
2808 | */ | ||
2809 | static int ext4_split_unwritten_extents(handle_t *handle, | ||
2810 | struct inode *inode, | ||
2811 | struct ext4_ext_path *path, | ||
2812 | ext4_lblk_t iblock, | ||
2813 | unsigned int max_blocks, | ||
2814 | int flags) | ||
2815 | { | ||
2816 | struct ext4_extent *ex, newex, orig_ex; | ||
2817 | struct ext4_extent *ex1 = NULL; | ||
2818 | struct ext4_extent *ex2 = NULL; | ||
2819 | struct ext4_extent *ex3 = NULL; | ||
2820 | struct ext4_extent_header *eh; | ||
2821 | ext4_lblk_t ee_block; | ||
2822 | unsigned int allocated, ee_len, depth; | ||
2823 | ext4_fsblk_t newblock; | ||
2824 | int err = 0; | ||
2825 | int ret = 0; | ||
2826 | |||
2827 | ext_debug("ext4_split_unwritten_extents: inode %lu," | ||
2828 | "iblock %llu, max_blocks %u\n", inode->i_ino, | ||
2829 | (unsigned long long)iblock, max_blocks); | ||
2830 | depth = ext_depth(inode); | ||
2831 | eh = path[depth].p_hdr; | ||
2832 | ex = path[depth].p_ext; | ||
2833 | ee_block = le32_to_cpu(ex->ee_block); | ||
2834 | ee_len = ext4_ext_get_actual_len(ex); | ||
2835 | allocated = ee_len - (iblock - ee_block); | ||
2836 | newblock = iblock - ee_block + ext_pblock(ex); | ||
2837 | ex2 = ex; | ||
2838 | orig_ex.ee_block = ex->ee_block; | ||
2839 | orig_ex.ee_len = cpu_to_le16(ee_len); | ||
2840 | ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); | ||
2841 | |||
2842 | /* | ||
2843 | * If the entire uninitialized extent length is less than | ||
2844 | * the size of the extent to write, there is no need to split the | ||
2845 | * uninitialized extent | ||
2846 | */ | ||
2847 | if (allocated <= max_blocks) | ||
2848 | return ret; | ||
2849 | |||
2850 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2851 | if (err) | ||
2852 | goto out; | ||
2853 | /* ex1: ee_block to iblock - 1 : uninitialized */ | ||
2854 | if (iblock > ee_block) { | ||
2855 | ex1 = ex; | ||
2856 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | ||
2857 | ext4_ext_mark_uninitialized(ex1); | ||
2858 | ex2 = &newex; | ||
2859 | } | ||
2860 | /* | ||
2861 | * for sanity, update the length of the ex2 extent before | ||
2862 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | ||
2863 | * overlap of blocks. | ||
2864 | */ | ||
2865 | if (!ex1 && allocated > max_blocks) | ||
2866 | ex2->ee_len = cpu_to_le16(max_blocks); | ||
2867 | /* ex3: to ee_block + ee_len : uninitialised */ | ||
2868 | if (allocated > max_blocks) { | ||
2869 | unsigned int newdepth; | ||
2870 | ex3 = &newex; | ||
2871 | ex3->ee_block = cpu_to_le32(iblock + max_blocks); | ||
2872 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | ||
2873 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | ||
2874 | ext4_ext_mark_uninitialized(ex3); | ||
2875 | err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); | ||
2876 | if (err == -ENOSPC) { | ||
2877 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2878 | if (err) | ||
2879 | goto fix_extent_len; | ||
2880 | /* update the extent length and mark as initialized */ | ||
2881 | ex->ee_block = orig_ex.ee_block; | ||
2882 | ex->ee_len = orig_ex.ee_len; | ||
2883 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | ||
2884 | ext4_ext_dirty(handle, inode, path + depth); | ||
2885 | /* zeroed the full extent */ | ||
2886 | /* blocks available from iblock */ | ||
2887 | return allocated; | ||
2888 | |||
2889 | } else if (err) | ||
2890 | goto fix_extent_len; | ||
2891 | /* | ||
2892 | * The depth, and hence eh & ex might change | ||
2893 | * as part of the insert above. | ||
2894 | */ | ||
2895 | newdepth = ext_depth(inode); | ||
2896 | /* | ||
2897 | * update the extent length after successful insert of the | ||
2898 | * split extent | ||
2899 | */ | ||
2900 | orig_ex.ee_len = cpu_to_le16(ee_len - | ||
2901 | ext4_ext_get_actual_len(ex3)); | ||
2902 | depth = newdepth; | ||
2903 | ext4_ext_drop_refs(path); | ||
2904 | path = ext4_ext_find_extent(inode, iblock, path); | ||
2905 | if (IS_ERR(path)) { | ||
2906 | err = PTR_ERR(path); | ||
2907 | goto out; | ||
2908 | } | ||
2909 | eh = path[depth].p_hdr; | ||
2910 | ex = path[depth].p_ext; | ||
2911 | if (ex2 != &newex) | ||
2912 | ex2 = ex; | ||
2913 | |||
2914 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2915 | if (err) | ||
2916 | goto out; | ||
2917 | |||
2918 | allocated = max_blocks; | ||
2919 | } | ||
2920 | /* | ||
2921 | * If there was a change of depth as part of the | ||
2922 | * insertion of ex3 above, we need to update the length | ||
2923 | * of the ex1 extent again here | ||
2924 | */ | ||
2925 | if (ex1 && ex1 != ex) { | ||
2926 | ex1 = ex; | ||
2927 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | ||
2928 | ext4_ext_mark_uninitialized(ex1); | ||
2929 | ex2 = &newex; | ||
2930 | } | ||
2931 | /* | ||
2932 | * ex2: iblock to iblock + maxblocks-1 : to be direct IO written, | ||
2933 | * uninitialised still. | ||
2934 | */ | ||
2935 | ex2->ee_block = cpu_to_le32(iblock); | ||
2936 | ext4_ext_store_pblock(ex2, newblock); | ||
2937 | ex2->ee_len = cpu_to_le16(allocated); | ||
2938 | ext4_ext_mark_uninitialized(ex2); | ||
2939 | if (ex2 != ex) | ||
2940 | goto insert; | ||
2941 | /* Mark modified extent as dirty */ | ||
2942 | err = ext4_ext_dirty(handle, inode, path + depth); | ||
2943 | ext_debug("out here\n"); | ||
2944 | goto out; | ||
2945 | insert: | ||
2946 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | ||
2947 | if (err == -ENOSPC) { | ||
2948 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2949 | if (err) | ||
2950 | goto fix_extent_len; | ||
2951 | /* update the extent length and mark as initialized */ | ||
2952 | ex->ee_block = orig_ex.ee_block; | ||
2953 | ex->ee_len = orig_ex.ee_len; | ||
2954 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | ||
2955 | ext4_ext_dirty(handle, inode, path + depth); | ||
2956 | /* zero out the first half */ | ||
2957 | return allocated; | ||
2958 | } else if (err) | ||
2959 | goto fix_extent_len; | ||
2960 | out: | ||
2961 | ext4_ext_show_leaf(inode, path); | ||
2962 | return err ? err : allocated; | ||
2963 | |||
2964 | fix_extent_len: | ||
2965 | ex->ee_block = orig_ex.ee_block; | ||
2966 | ex->ee_len = orig_ex.ee_len; | ||
2967 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | ||
2968 | ext4_ext_mark_uninitialized(ex); | ||
2969 | ext4_ext_dirty(handle, inode, path + depth); | ||
2970 | return err; | ||
2971 | } | ||
2972 | static int ext4_convert_unwritten_extents_dio(handle_t *handle, | ||
2973 | struct inode *inode, | ||
2974 | struct ext4_ext_path *path) | ||
2975 | { | ||
2976 | struct ext4_extent *ex; | ||
2977 | struct ext4_extent_header *eh; | ||
2978 | int depth; | ||
2979 | int err = 0; | ||
2980 | int ret = 0; | ||
2981 | |||
2982 | depth = ext_depth(inode); | ||
2983 | eh = path[depth].p_hdr; | ||
2984 | ex = path[depth].p_ext; | ||
2985 | |||
2986 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2987 | if (err) | ||
2988 | goto out; | ||
2989 | /* first mark the extent as initialized */ | ||
2990 | ext4_ext_mark_initialized(ex); | ||
2991 | |||
2992 | /* | ||
2993 | * We have to see if it can be merged with the extent | ||
2994 | * on the left. | ||
2995 | */ | ||
2996 | if (ex > EXT_FIRST_EXTENT(eh)) { | ||
2997 | /* | ||
2998 | * To merge left, pass "ex - 1" to try_to_merge(), | ||
2999 | * since it merges towards right _only_. | ||
3000 | */ | ||
3001 | ret = ext4_ext_try_to_merge(inode, path, ex - 1); | ||
3002 | if (ret) { | ||
3003 | err = ext4_ext_correct_indexes(handle, inode, path); | ||
3004 | if (err) | ||
3005 | goto out; | ||
3006 | depth = ext_depth(inode); | ||
3007 | ex--; | ||
3008 | } | ||
3009 | } | ||
3010 | /* | ||
3011 | * Try to merge towards the right. | ||
3012 | */ | ||
3013 | ret = ext4_ext_try_to_merge(inode, path, ex); | ||
3014 | if (ret) { | ||
3015 | err = ext4_ext_correct_indexes(handle, inode, path); | ||
3016 | if (err) | ||
3017 | goto out; | ||
3018 | depth = ext_depth(inode); | ||
3019 | } | ||
3020 | /* Mark modified extent as dirty */ | ||
3021 | err = ext4_ext_dirty(handle, inode, path + depth); | ||
3022 | out: | ||
3023 | ext4_ext_show_leaf(inode, path); | ||
3024 | return err; | ||
3025 | } | ||
3026 | |||
3027 | static int | ||
3028 | ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | ||
3029 | ext4_lblk_t iblock, unsigned int max_blocks, | ||
3030 | struct ext4_ext_path *path, int flags, | ||
3031 | unsigned int allocated, struct buffer_head *bh_result, | ||
3032 | ext4_fsblk_t newblock) | ||
3033 | { | ||
3034 | int ret = 0; | ||
3035 | int err = 0; | ||
3036 | |||
3037 | ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" | ||
3038 | "block %llu, max_blocks %u, flags %d, allocated %u", | ||
3039 | inode->i_ino, (unsigned long long)iblock, max_blocks, | ||
3040 | flags, allocated); | ||
3041 | ext4_ext_show_leaf(inode, path); | ||
3042 | |||
3043 | /* DIO get_block() before submitting the IO: split the extent */ | ||
3044 | if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { | ||
3045 | ret = ext4_split_unwritten_extents(handle, | ||
3046 | inode, path, iblock, | ||
3047 | max_blocks, flags); | ||
3048 | goto out; | ||
3049 | } | ||
3050 | /* DIO end_io completion: convert the filled extent to written */ | ||
3051 | if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { | ||
3052 | ret = ext4_convert_unwritten_extents_dio(handle, inode, | ||
3053 | path); | ||
3054 | goto out2; | ||
3055 | } | ||
3056 | /* buffered IO case */ | ||
3057 | /* | ||
3058 | * repeat fallocate creation request | ||
3059 | * we already have an unwritten extent | ||
3060 | */ | ||
3061 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) | ||
3062 | goto map_out; | ||
3063 | |||
3064 | /* buffered READ or buffered write_begin() lookup */ | ||
3065 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | ||
3066 | /* | ||
3067 | * We have blocks reserved already. We | ||
3068 | * return allocated blocks so that delalloc | ||
3069 | * won't do block reservation for us. But | ||
3070 | * the buffer head will be unmapped so that | ||
3071 | * a read from the block returns 0s. | ||
3072 | */ | ||
3073 | set_buffer_unwritten(bh_result); | ||
3074 | goto out1; | ||
3075 | } | ||
3076 | |||
3077 | /* buffered write, writepage time, convert */ | ||
3078 | ret = ext4_ext_convert_to_initialized(handle, inode, | ||
3079 | path, iblock, | ||
3080 | max_blocks); | ||
3081 | out: | ||
3082 | if (ret <= 0) { | ||
3083 | err = ret; | ||
3084 | goto out2; | ||
3085 | } else | ||
3086 | allocated = ret; | ||
3087 | set_buffer_new(bh_result); | ||
3088 | map_out: | ||
3089 | set_buffer_mapped(bh_result); | ||
3090 | out1: | ||
3091 | if (allocated > max_blocks) | ||
3092 | allocated = max_blocks; | ||
3093 | ext4_ext_show_leaf(inode, path); | ||
3094 | bh_result->b_bdev = inode->i_sb->s_bdev; | ||
3095 | bh_result->b_blocknr = newblock; | ||
3096 | out2: | ||
3097 | if (path) { | ||
3098 | ext4_ext_drop_refs(path); | ||
3099 | kfree(path); | ||
3100 | } | ||
3101 | return err ? err : allocated; | ||
3102 | } | ||
3103 | /* | ||
2788 | * Block allocation/map/preallocation routine for extents based files | 3104 | * Block allocation/map/preallocation routine for extents based files |
2789 | * | 3105 | * |
2790 | * | 3106 | * |
@@ -2889,33 +3205,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
2889 | EXT4_EXT_CACHE_EXTENT); | 3205 | EXT4_EXT_CACHE_EXTENT); |
2890 | goto out; | 3206 | goto out; |
2891 | } | 3207 | } |
2892 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) | 3208 | ret = ext4_ext_handle_uninitialized_extents(handle, |
2893 | goto out; | 3209 | inode, iblock, max_blocks, path, |
2894 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | 3210 | flags, allocated, bh_result, newblock); |
2895 | if (allocated > max_blocks) | 3211 | return ret; |
2896 | allocated = max_blocks; | ||
2897 | /* | ||
2898 | * We have blocks reserved already. We | ||
2899 | * return allocated blocks so that delalloc | ||
2900 | * won't do block reservation for us. But | ||
2901 | * the buffer head will be unmapped so that | ||
2902 | * a read from the block returns 0s. | ||
2903 | */ | ||
2904 | set_buffer_unwritten(bh_result); | ||
2905 | bh_result->b_bdev = inode->i_sb->s_bdev; | ||
2906 | bh_result->b_blocknr = newblock; | ||
2907 | goto out2; | ||
2908 | } | ||
2909 | |||
2910 | ret = ext4_ext_convert_to_initialized(handle, inode, | ||
2911 | path, iblock, | ||
2912 | max_blocks); | ||
2913 | if (ret <= 0) { | ||
2914 | err = ret; | ||
2915 | goto out2; | ||
2916 | } else | ||
2917 | allocated = ret; | ||
2918 | goto outnew; | ||
2919 | } | 3212 | } |
2920 | } | 3213 | } |
2921 | 3214 | ||
@@ -2988,7 +3281,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
2988 | newex.ee_len = cpu_to_le16(ar.len); | 3281 | newex.ee_len = cpu_to_le16(ar.len); |
2989 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) /* Mark uninitialized */ | 3282 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) /* Mark uninitialized */ |
2990 | ext4_ext_mark_uninitialized(&newex); | 3283 | ext4_ext_mark_uninitialized(&newex); |
2991 | err = ext4_ext_insert_extent(handle, inode, path, &newex); | 3284 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
2992 | if (err) { | 3285 | if (err) { |
2993 | /* free data blocks we just allocated */ | 3286 | /* free data blocks we just allocated */ |
2994 | /* not a good idea to call discard here directly, | 3287 | /* not a good idea to call discard here directly, |
@@ -3002,7 +3295,6 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3002 | /* previous routine could use block we allocated */ | 3295 | /* previous routine could use block we allocated */ |
3003 | newblock = ext_pblock(&newex); | 3296 | newblock = ext_pblock(&newex); |
3004 | allocated = ext4_ext_get_actual_len(&newex); | 3297 | allocated = ext4_ext_get_actual_len(&newex); |
3005 | outnew: | ||
3006 | set_buffer_new(bh_result); | 3298 | set_buffer_new(bh_result); |
3007 | 3299 | ||
3008 | /* Cache only when it is _not_ an uninitialized extent */ | 3300 | /* Cache only when it is _not_ an uninitialized extent */ |
@@ -3201,6 +3493,63 @@ retry: | |||
3201 | } | 3493 | } |
3202 | 3494 | ||
3203 | /* | 3495 | /* |
3496 | * This function converts a range of blocks to written extents. | ||
3497 | * The caller of this function will pass the start offset and the size; | ||
3498 | * all unwritten extents within this range will be converted to | ||
3499 | * written extents. | ||
3500 | * | ||
3501 | * This function is called from the direct IO end_io callback | ||
3502 | * function, to convert the fallocated extents after the IO is completed. | ||
3503 | */ | ||
3504 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | ||
3505 | loff_t len) | ||
3506 | { | ||
3507 | handle_t *handle; | ||
3508 | ext4_lblk_t block; | ||
3509 | unsigned int max_blocks; | ||
3510 | int ret = 0; | ||
3511 | int ret2 = 0; | ||
3512 | struct buffer_head map_bh; | ||
3513 | unsigned int credits, blkbits = inode->i_blkbits; | ||
3514 | |||
3515 | block = offset >> blkbits; | ||
3516 | /* | ||
3517 | * We can't just convert len to max_blocks because the range may | ||
3518 | * straddle a block boundary: e.g. blocksize = 4096, offset = 3072, len = 2048 | ||
3519 | */ | ||
3520 | max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) | ||
3521 | - block; | ||
3522 | /* | ||
3523 | * credits to insert 1 extent into extent tree | ||
3524 | */ | ||
3525 | credits = ext4_chunk_trans_blocks(inode, max_blocks); | ||
3526 | while (ret >= 0 && ret < max_blocks) { | ||
3527 | block = block + ret; | ||
3528 | max_blocks = max_blocks - ret; | ||
3529 | handle = ext4_journal_start(inode, credits); | ||
3530 | if (IS_ERR(handle)) { | ||
3531 | ret = PTR_ERR(handle); | ||
3532 | break; | ||
3533 | } | ||
3534 | map_bh.b_state = 0; | ||
3535 | ret = ext4_get_blocks(handle, inode, block, | ||
3536 | max_blocks, &map_bh, | ||
3537 | EXT4_GET_BLOCKS_DIO_CONVERT_EXT); | ||
3538 | if (ret <= 0) { | ||
3539 | WARN_ON(ret <= 0); | ||
3540 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | ||
3541 | "returned error inode#%lu, block=%u, " | ||
3542 | "max_blocks=%u", __func__, | ||
3543 | inode->i_ino, block, max_blocks); | ||
3544 | } | ||
3545 | ext4_mark_inode_dirty(handle, inode); | ||
3546 | ret2 = ext4_journal_stop(handle); | ||
3547 | if (ret <= 0 || ret2 ) | ||
3548 | break; | ||
3549 | } | ||
3550 | return ret > 0 ? ret2 : ret; | ||
3551 | } | ||
3552 | /* | ||
3204 | * Callback function called for each extent to gather FIEMAP information. | 3553 | * Callback function called for each extent to gather FIEMAP information. |
3205 | */ | 3554 | */ |
3206 | static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, | 3555 | static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, |