Diffstat (limited to 'fs/ext4/extents.c')
 fs/ext4/extents.c | 330
 1 file changed, 126 insertions(+), 204 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 74f23c292e1b..1421938e6792 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -44,6 +44,14 @@
 
 #include <trace/events/ext4.h>
 
+/*
+ * used by extent splitting.
+ */
+#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
+					due to ENOSPC */
+#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
+#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
+
 static int ext4_split_extent(handle_t *handle,
 				struct inode *inode,
 				struct ext4_ext_path *path,
@@ -51,6 +59,13 @@ static int ext4_split_extent(handle_t *handle,
 				int split_flag,
 				int flags);
 
+static int ext4_split_extent_at(handle_t *handle,
+			     struct inode *inode,
+			     struct ext4_ext_path *path,
+			     ext4_lblk_t split,
+			     int split_flag,
+			     int flags);
+
 static int ext4_ext_truncate_extend_restart(handle_t *handle,
 					    struct inode *inode,
 					    int needed)
@@ -300,6 +315,8 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
 	ext4_fsblk_t block = ext4_ext_pblock(ext);
 	int len = ext4_ext_get_actual_len(ext);
 
+	if (len == 0)
+		return 0;
 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
 }
 
@@ -2308,7 +2325,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 	struct ext4_extent *ex;
 
 	/* the header must be checked already in ext4_ext_remove_space() */
-	ext_debug("truncate since %u in leaf\n", start);
+	ext_debug("truncate since %u in leaf to %u\n", start, end);
 	if (!path[depth].p_hdr)
 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
 	eh = path[depth].p_hdr;
@@ -2343,14 +2360,17 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 		ext_debug("  border %u:%u\n", a, b);
 
 		/* If this extent is beyond the end of the hole, skip it */
-		if (end <= ex_ee_block) {
+		if (end < ex_ee_block) {
 			ex--;
 			ex_ee_block = le32_to_cpu(ex->ee_block);
 			ex_ee_len = ext4_ext_get_actual_len(ex);
 			continue;
 		} else if (b != ex_ee_block + ex_ee_len - 1) {
-			EXT4_ERROR_INODE(inode," bad truncate %u:%u\n",
-					 start, end);
+			EXT4_ERROR_INODE(inode,
+					 "can not handle truncate %u:%u "
+					 "on extent %u:%u",
+					 start, end, ex_ee_block,
+					 ex_ee_block + ex_ee_len - 1);
 			err = -EIO;
 			goto out;
 		} else if (a != ex_ee_block) {
@@ -2482,7 +2502,8 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
 	return 1;
 }
 
-static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
+static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
+				 ext4_lblk_t end)
 {
 	struct super_block *sb = inode->i_sb;
 	int depth = ext_depth(inode);
@@ -2491,7 +2512,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
 	handle_t *handle;
 	int i, err;
 
-	ext_debug("truncate since %u\n", start);
+	ext_debug("truncate since %u to %u\n", start, end);
 
 	/* probably first extent we're gonna free will be last in block */
 	handle = ext4_journal_start(inode, depth + 1);
@@ -2504,6 +2525,61 @@ again:
 	trace_ext4_ext_remove_space(inode, start, depth);
 
 	/*
+	 * Check if we are removing extents inside the extent tree. If that
+	 * is the case, we are going to punch a hole inside the extent tree
+	 * so we have to check whether we need to split the extent covering
+	 * the last block to remove so we can easily remove the part of it
+	 * in ext4_ext_rm_leaf().
+	 */
+	if (end < EXT_MAX_BLOCKS - 1) {
+		struct ext4_extent *ex;
+		ext4_lblk_t ee_block;
+
+		/* find extent for this block */
+		path = ext4_ext_find_extent(inode, end, NULL);
+		if (IS_ERR(path)) {
+			ext4_journal_stop(handle);
+			return PTR_ERR(path);
+		}
+		depth = ext_depth(inode);
+		ex = path[depth].p_ext;
+		if (!ex)
+			goto cont;
+
+		ee_block = le32_to_cpu(ex->ee_block);
+
+		/*
+		 * See if the last block is inside the extent, if so split
+		 * the extent at 'end' block so we can easily remove the
+		 * tail of the first part of the split extent in
+		 * ext4_ext_rm_leaf().
+		 */
+		if (end >= ee_block &&
+		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
+			int split_flag = 0;
+
+			if (ext4_ext_is_uninitialized(ex))
+				split_flag = EXT4_EXT_MARK_UNINIT1 |
+					     EXT4_EXT_MARK_UNINIT2;
+
+			/*
+			 * Split the extent in two so that 'end' is the last
+			 * block in the first new extent
+			 */
+			err = ext4_split_extent_at(handle, inode, path,
+						   end + 1, split_flag,
+						   EXT4_GET_BLOCKS_PRE_IO |
+						   EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
+
+			if (err < 0)
+				goto out;
+		}
+		ext4_ext_drop_refs(path);
+		kfree(path);
+	}
+cont:
+
+	/*
 	 * We start scanning from right side, freeing all the blocks
 	 * after i_size and walking into the tree depth-wise.
 	 */
@@ -2515,6 +2591,7 @@ again:
 	}
 	path[0].p_depth = depth;
 	path[0].p_hdr = ext_inode_hdr(inode);
+
 	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
 		err = -EIO;
 		goto out;
@@ -2526,7 +2603,7 @@ again:
 			/* this is leaf block */
 			err = ext4_ext_rm_leaf(handle, inode, path,
 					       &partial_cluster, start,
-					       EXT_MAX_BLOCKS - 1);
+					       end);
 			/* root level has p_bh == NULL, brelse() eats this */
 			brelse(path[i].p_bh);
 			path[i].p_bh = NULL;
@@ -2651,17 +2728,17 @@ void ext4_ext_init(struct super_block *sb)
 
 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
-		printk(KERN_INFO "EXT4-fs: file extents enabled");
+		printk(KERN_INFO "EXT4-fs: file extents enabled"
 #ifdef AGGRESSIVE_TEST
-		printk(", aggressive tests");
+		       ", aggressive tests"
 #endif
 #ifdef CHECK_BINSEARCH
-		printk(", check binsearch");
+		       ", check binsearch"
 #endif
 #ifdef EXTENTS_STATS
-		printk(", stats");
+		       ", stats"
 #endif
-		printk("\n");
+		       "\n");
 #endif
 #ifdef EXTENTS_STATS
 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
@@ -2709,14 +2786,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
 }
 
 /*
- * used by extent splitting.
- */
-#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
-					due to ENOSPC */
-#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
-#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
-
-/*
  * ext4_split_extent_at() splits an extent at given block.
  *
  * @handle: the journal handle
@@ -3224,11 +3293,13 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
 	depth = ext_depth(inode);
 	eh = path[depth].p_hdr;
 
-	if (unlikely(!eh->eh_entries)) {
-		EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
-				 "EOFBLOCKS_FL set");
-		return -EIO;
-	}
+	/*
+	 * We're going to remove EOFBLOCKS_FL entirely in future so we
+	 * do not care for this case anymore. Simply remove the flag
+	 * if there are no extents.
+	 */
+	if (unlikely(!eh->eh_entries))
+		goto out;
 	last_ex = EXT_LAST_EXTENT(eh);
 	/*
 	 * We should clear the EOFBLOCKS_FL flag if we are writing the
@@ -3252,6 +3323,7 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
 	for (i = depth-1; i >= 0; i--)
 		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
 			return 0;
+out:
 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
 	return ext4_mark_inode_dirty(handle, inode);
 }
@@ -3710,8 +3782,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	int free_on_err = 0, err = 0, depth, ret;
 	unsigned int allocated = 0, offset = 0;
 	unsigned int allocated_clusters = 0;
-	unsigned int punched_out = 0;
-	unsigned int result = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
 	ext4_lblk_t cluster_offset;
@@ -3721,8 +3791,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
 	/* check in cache */
-	if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
-		ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
 		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((sbi->s_cluster_ratio > 1) &&
 				ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
@@ -3790,113 +3859,25 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 
 	/* if found extent covers block, simply return it */
 	if (in_range(map->m_lblk, ee_block, ee_len)) {
-		struct ext4_map_blocks punch_map;
-		ext4_fsblk_t partial_cluster = 0;
-
 		newblock = map->m_lblk - ee_block + ee_start;
 		/* number of remaining blocks in the extent */
 		allocated = ee_len - (map->m_lblk - ee_block);
 		ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
 			  ee_block, ee_len, newblock);
 
-		if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
-			/*
-			 * Do not put uninitialized extent
-			 * in the cache
-			 */
-			if (!ext4_ext_is_uninitialized(ex)) {
-				ext4_ext_put_in_cache(inode, ee_block,
-					ee_len, ee_start);
-				goto out;
-			}
-			ret = ext4_ext_handle_uninitialized_extents(
-				handle, inode, map, path, flags,
-				allocated, newblock);
-			return ret;
-		}
-
-		/*
-		 * Punch out the map length, but only to the
-		 * end of the extent
-		 */
-		punched_out = allocated < map->m_len ?
-			allocated : map->m_len;
-
 		/*
-		 * Sense extents need to be converted to
-		 * uninitialized, they must fit in an
-		 * uninitialized extent
+		 * Do not put uninitialized extent
+		 * in the cache
 		 */
-		if (punched_out > EXT_UNINIT_MAX_LEN)
-			punched_out = EXT_UNINIT_MAX_LEN;
-
-		punch_map.m_lblk = map->m_lblk;
-		punch_map.m_pblk = newblock;
-		punch_map.m_len = punched_out;
-		punch_map.m_flags = 0;
-
-		/* Check to see if the extent needs to be split */
-		if (punch_map.m_len != ee_len ||
-			punch_map.m_lblk != ee_block) {
-
-			ret = ext4_split_extent(handle, inode,
-						path, &punch_map, 0,
-						EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
-						EXT4_GET_BLOCKS_PRE_IO);
-
-			if (ret < 0) {
-				err = ret;
-				goto out2;
-			}
-			/*
-			 * find extent for the block at
-			 * the start of the hole
-			 */
-			ext4_ext_drop_refs(path);
-			kfree(path);
-
-			path = ext4_ext_find_extent(inode,
-						    map->m_lblk, NULL);
-			if (IS_ERR(path)) {
-				err = PTR_ERR(path);
-				path = NULL;
-				goto out2;
-			}
-
-			depth = ext_depth(inode);
-			ex = path[depth].p_ext;
-			ee_len = ext4_ext_get_actual_len(ex);
-			ee_block = le32_to_cpu(ex->ee_block);
-			ee_start = ext4_ext_pblock(ex);
-
-		}
-
-		ext4_ext_mark_uninitialized(ex);
-
-		ext4_ext_invalidate_cache(inode);
-
-		err = ext4_ext_rm_leaf(handle, inode, path,
-				       &partial_cluster, map->m_lblk,
-				       map->m_lblk + punched_out);
-
-		if (!err && path->p_hdr->eh_entries == 0) {
-			/*
-			 * Punch hole freed all of this sub tree,
-			 * so we need to correct eh_depth
-			 */
-			err = ext4_ext_get_access(handle, inode, path);
-			if (err == 0) {
-				ext_inode_hdr(inode)->eh_depth = 0;
-				ext_inode_hdr(inode)->eh_max =
-					cpu_to_le16(ext4_ext_space_root(
-						inode, 0));
-
-				err = ext4_ext_dirty(
-					handle, inode, path);
-			}
+		if (!ext4_ext_is_uninitialized(ex)) {
+			ext4_ext_put_in_cache(inode, ee_block,
+					      ee_len, ee_start);
+			goto out;
 		}
-
-		goto out2;
+		ret = ext4_ext_handle_uninitialized_extents(
+			handle, inode, map, path, flags,
+			allocated, newblock);
+		return ret;
 	}
 }
 
@@ -4165,13 +4146,11 @@ out2:
 		ext4_ext_drop_refs(path);
 		kfree(path);
 	}
-	result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
-			punched_out : allocated;
 
 	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
-		newblock, map->m_len, err ? err : result);
+		newblock, map->m_len, err ? err : allocated);
 
-	return err ? err : result;
+	return err ? err : allocated;
 }
 
 void ext4_ext_truncate(struct inode *inode)
@@ -4228,7 +4207,7 @@ void ext4_ext_truncate(struct inode *inode)
 
 	last_block = (inode->i_size + sb->s_blocksize - 1)
 			>> EXT4_BLOCK_SIZE_BITS(sb);
-	err = ext4_ext_remove_space(inode, last_block);
+	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
 
 	/* In a multi-transaction truncate, we only make the final
 	 * transaction synchronous.
@@ -4436,10 +4415,11 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 					  EXT4_GET_BLOCKS_IO_CONVERT_EXT);
 		if (ret <= 0) {
 			WARN_ON(ret <= 0);
-			printk(KERN_ERR "%s: ext4_ext_map_blocks "
-				    "returned error inode#%lu, block=%u, "
-				    "max_blocks=%u", __func__,
-				    inode->i_ino, map.m_lblk, map.m_len);
+			ext4_msg(inode->i_sb, KERN_ERR,
+				 "%s:%d: inode #%lu: block %u: len %u: "
+				 "ext4_ext_map_blocks returned %d",
+				 __func__, __LINE__, inode->i_ino, map.m_lblk,
+				 map.m_len, ret);
 		}
 		ext4_mark_inode_dirty(handle, inode);
 		ret2 = ext4_journal_stop(handle);
@@ -4705,14 +4685,12 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
 {
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct super_block *sb = inode->i_sb;
-	struct ext4_ext_cache cache_ex;
-	ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
+	ext4_lblk_t first_block, stop_block;
 	struct address_space *mapping = inode->i_mapping;
-	struct ext4_map_blocks map;
 	handle_t *handle;
 	loff_t first_page, last_page, page_len;
 	loff_t first_page_offset, last_page_offset;
-	int ret, credits, blocks_released, err = 0;
+	int credits, err = 0;
 
 	/* No need to punch hole beyond i_size */
 	if (offset >= inode->i_size)
@@ -4728,10 +4706,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
 			offset;
 	}
 
-	first_block = (offset + sb->s_blocksize - 1) >>
-		EXT4_BLOCK_SIZE_BITS(sb);
-	last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
-
 	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	last_page = (offset + length) >> PAGE_CACHE_SHIFT;
 
@@ -4810,7 +4784,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
 		}
 	}
 
-
 	/*
 	 * If i_size is contained in the last page, we need to
 	 * unmap and zero the partial page after i_size
@@ -4830,73 +4803,22 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
 		}
 	}
 
+	first_block = (offset + sb->s_blocksize - 1) >>
+		EXT4_BLOCK_SIZE_BITS(sb);
+	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+
 	/* If there are no blocks to remove, return now */
-	if (first_block >= last_block)
+	if (first_block >= stop_block)
 		goto out;
 
 	down_write(&EXT4_I(inode)->i_data_sem);
 	ext4_ext_invalidate_cache(inode);
 	ext4_discard_preallocations(inode);
 
-	/*
-	 * Loop over all the blocks and identify blocks
-	 * that need to be punched out
-	 */
-	iblock = first_block;
-	blocks_released = 0;
-	while (iblock < last_block) {
-		max_blocks = last_block - iblock;
-		num_blocks = 1;
-		memset(&map, 0, sizeof(map));
-		map.m_lblk = iblock;
-		map.m_len = max_blocks;
-		ret = ext4_ext_map_blocks(handle, inode, &map,
-			EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
-
-		if (ret > 0) {
-			blocks_released += ret;
-			num_blocks = ret;
-		} else if (ret == 0) {
-			/*
-			 * If map blocks could not find the block,
-			 * then it is in a hole. If the hole was
-			 * not already cached, then map blocks should
-			 * put it in the cache. So we can get the hole
-			 * out of the cache
-			 */
-			memset(&cache_ex, 0, sizeof(cache_ex));
-			if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
-				!cache_ex.ec_start) {
-
-				/* The hole is cached */
-				num_blocks = cache_ex.ec_block +
-					cache_ex.ec_len - iblock;
-
-			} else {
-				/* The block could not be identified */
-				err = -EIO;
-				break;
-			}
-		} else {
-			/* Map blocks error */
-			err = ret;
-			break;
-		}
-
-		if (num_blocks == 0) {
-			/* This condition should never happen */
-			ext_debug("Block lookup failed");
-			err = -EIO;
-			break;
-		}
-
-		iblock += num_blocks;
-	}
+	err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
 
-	if (blocks_released > 0) {
-		ext4_ext_invalidate_cache(inode);
-		ext4_discard_preallocations(inode);
-	}
+	ext4_ext_invalidate_cache(inode);
+	ext4_discard_preallocations(inode);
 
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
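
For context, the rewritten punch-hole path can be summarized with a short illustrative sketch. This is not code from the patch: the wrapper name below is hypothetical, and the sketch assumes the surrounding ext4 kernel context (ext4_ext_remove_space(), EXT4_BLOCK_SIZE_BITS(), ext4_lblk_t); only the block arithmetic and the single remove-space call are taken from the hunks above.

/*
 * Illustrative sketch only (hypothetical helper, same file context assumed).
 * With this patch, ext4_ext_punch_hole() no longer loops over
 * ext4_ext_map_blocks() with EXT4_GET_BLOCKS_PUNCH_OUT_EXT; it converts the
 * byte range to a block range and removes it in one call to the extended
 * ext4_ext_remove_space(inode, start, end), which internally splits the
 * extent containing 'end' so ext4_ext_rm_leaf() can drop the exact range.
 */
static int punch_hole_blocks_sketch(struct inode *inode, loff_t offset,
				    loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;

	/* first block fully inside the hole (offset rounded up) */
	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	/* first block past the hole (end rounded down) */
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* hole smaller than one block: nothing block-aligned to remove */
	if (first_block >= stop_block)
		return 0;

	/* remove all extents (or extent parts) in [first_block, stop_block - 1] */
	return ext4_ext_remove_space(inode, first_block, stop_block - 1);
}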