Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--  fs/ext4/extents.c | 223
1 file changed, 116 insertions(+), 107 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0b16fb4c06d3..e5d3eadf47b1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2306,16 +2306,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 				ext4_lblk_t block)
 {
 	int depth = ext_depth(inode);
-	unsigned long len = 0;
-	ext4_lblk_t lblock = 0;
+	ext4_lblk_t len;
+	ext4_lblk_t lblock;
 	struct ext4_extent *ex;
+	struct extent_status es;
 
 	ex = path[depth].p_ext;
 	if (ex == NULL) {
-		/*
-		 * there is no extent yet, so gap is [0;-] and we
-		 * don't cache it
-		 */
+		/* there is no extent yet, so gap is [0;-] */
+		lblock = 0;
+		len = EXT_MAX_BLOCKS;
 		ext_debug("cache gap(whole file):");
 	} else if (block < le32_to_cpu(ex->ee_block)) {
 		lblock = block;
@@ -2324,9 +2324,6 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 				block,
 				le32_to_cpu(ex->ee_block),
 				ext4_ext_get_actual_len(ex));
-		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
-			ext4_es_insert_extent(inode, lblock, len, ~0,
-					      EXTENT_STATUS_HOLE);
 	} else if (block >= le32_to_cpu(ex->ee_block)
 			+ ext4_ext_get_actual_len(ex)) {
 		ext4_lblk_t next;
@@ -2340,14 +2337,19 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 				block);
 		BUG_ON(next == lblock);
 		len = next - lblock;
-		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
-			ext4_es_insert_extent(inode, lblock, len, ~0,
-					      EXTENT_STATUS_HOLE);
 	} else {
 		BUG();
 	}
 
-	ext_debug(" -> %u:%lu\n", lblock, len);
+	ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
+	if (es.es_len) {
+		/* There's delayed extent containing lblock? */
+		if (es.es_lblk <= lblock)
+			return;
+		len = min(es.es_lblk - lblock, len);
+	}
+	ext_debug(" -> %u:%u\n", lblock, len);
+	ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
 }
 
 /*
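
Note on the ext4_ext_put_gap_in_cache() change above: the hole being cached is now clipped against the first delayed extent reported by ext4_es_find_delayed_extent_range(). If a delayed extent already covers lblock, nothing is cached; otherwise the cached hole is shortened so it ends where the delayed extent begins. Below is a minimal userspace sketch of that clipping rule, using simplified stand-in types rather than the kernel's extent_status machinery (clip_hole() and struct delayed_range are illustrative names, not kernel APIs):

	#include <stdio.h>
	#include <stdint.h>

	typedef uint32_t lblk_t;

	/* Simplified stand-in for struct extent_status; len == 0 means "no delayed extent". */
	struct delayed_range {
		lblk_t lblk;
		lblk_t len;
	};

	/*
	 * Clip a candidate hole [lblock, lblock + *len - 1] against the first
	 * delayed range found at or after lblock.  Returns 0 if the hole must
	 * not be cached (a delayed extent already covers lblock), 1 otherwise.
	 */
	static int clip_hole(lblk_t lblock, lblk_t *len, const struct delayed_range *es)
	{
		if (es->len) {
			if (es->lblk <= lblock)
				return 0;			/* lblock itself is delayed */
			if (es->lblk - lblock < *len)
				*len = es->lblk - lblock;	/* stop at the delayed extent */
		}
		return 1;
	}

	int main(void)
	{
		struct delayed_range es = { .lblk = 100, .len = 8 };
		lblk_t len = 50;

		if (clip_hole(90, &len, &es))
			printf("cache hole 90..%u\n", 90 + len - 1);	/* prints 90..99 */
		return 0;
	}
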
@@ -2481,7 +2483,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 			      ext4_lblk_t from, ext4_lblk_t to)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
+	unsigned short ee_len = ext4_ext_get_actual_len(ex);
 	ext4_fsblk_t pblk;
 	int flags = get_default_free_blocks_flags(inode);
 
@@ -2490,7 +2492,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 	 * at the beginning of the extent. Instead, we make a note
 	 * that we tried freeing the cluster, and check to see if we
 	 * need to free it on a subsequent call to ext4_remove_blocks,
-	 * or at the end of the ext4_truncate() operation.
+	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
 	 */
 	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
 
@@ -2501,8 +2503,8 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 	 * partial cluster here.
 	 */
 	pblk = ext4_ext_pblock(ex) + ee_len - 1;
-	if ((*partial_cluster > 0) &&
-	    (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
+	if (*partial_cluster > 0 &&
+	    *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
 		ext4_free_blocks(handle, inode, NULL,
 				 EXT4_C2B(sbi, *partial_cluster),
 				 sbi->s_cluster_ratio, flags);
@@ -2528,7 +2530,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
 		/* tail removal */
 		ext4_lblk_t num;
-		unsigned int unaligned;
+		long long first_cluster;
 
 		num = le32_to_cpu(ex->ee_block) + ee_len - from;
 		pblk = ext4_ext_pblock(ex) + ee_len - num;
@@ -2538,7 +2540,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 		 * used by any other extent (partial_cluster is negative).
 		 */
 		if (*partial_cluster < 0 &&
-		    -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
+		    *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
 			flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
 
 		ext_debug("free last %u blocks starting %llu partial %lld\n",
@@ -2549,21 +2551,24 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 		 * beginning of a cluster, and we removed the entire
 		 * extent and the cluster is not used by any other extent,
 		 * save the partial cluster here, since we might need to
-		 * delete if we determine that the truncate operation has
-		 * removed all of the blocks in the cluster.
+		 * delete if we determine that the truncate or punch hole
+		 * operation has removed all of the blocks in the cluster.
+		 * If that cluster is used by another extent, preserve its
+		 * negative value so it isn't freed later on.
 		 *
-		 * On the other hand, if we did not manage to free the whole
-		 * extent, we have to mark the cluster as used (store negative
-		 * cluster number in partial_cluster).
+		 * If the whole extent wasn't freed, we've reached the
+		 * start of the truncated/punched region and have finished
+		 * removing blocks. If there's a partial cluster here it's
+		 * shared with the remainder of the extent and is no longer
+		 * a candidate for removal.
 		 */
-		unaligned = EXT4_PBLK_COFF(sbi, pblk);
-		if (unaligned && (ee_len == num) &&
-		    (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
-			*partial_cluster = EXT4_B2C(sbi, pblk);
-		else if (unaligned)
-			*partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
-		else if (*partial_cluster > 0)
+		if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
+			first_cluster = (long long) EXT4_B2C(sbi, pblk);
+			if (first_cluster != -*partial_cluster)
+				*partial_cluster = first_cluster;
+		} else {
 			*partial_cluster = 0;
+		}
 	} else
 		ext4_error(sbi->s_sb, "strange request: removal(2) "
 			   "%u-%u from %u:%u\n",
@@ -2574,15 +2579,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 
 /*
  * ext4_ext_rm_leaf() Removes the extents associated with the
- * blocks appearing between "start" and "end", and splits the extents
- * if "start" and "end" appear in the same extent
+ * blocks appearing between "start" and "end". Both "start"
+ * and "end" must appear in the same extent or EIO is returned.
  *
  * @handle: The journal handle
  * @inode:  The files inode
  * @path:   The path to the leaf
  * @partial_cluster: The cluster which we'll have to free if all extents
- *                   has been released from it. It gets negative in case
- *                   that the cluster is still used.
+ *                   has been released from it. However, if this value is
+ *                   negative, it's a cluster just to the right of the
+ *                   punched region and it must not be freed.
  * @start:  The first block to remove
  * @end:    The last block to remove
  */
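
Note on the partial_cluster convention documented above: a positive value names a cluster that becomes a candidate for freeing once every extent referencing it has been removed, while a negative value marks a cluster known to be shared (for punch hole, the cluster immediately to the right of the punched region) that must never be freed. A rough userspace sketch of the convention, with a hypothetical cluster_of() helper standing in for EXT4_B2C():

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t fsblk_t;

	/* Hypothetical stand-in for EXT4_B2C(): blocks per cluster is 2^cluster_bits. */
	static long long cluster_of(fsblk_t pblk, unsigned cluster_bits)
	{
		return (long long)(pblk >> cluster_bits);
	}

	int main(void)
	{
		unsigned cluster_bits = 4;		/* 16 blocks per cluster */
		long long partial_cluster = 0;
		fsblk_t first_freed_pblk = 0x130;	/* first block freed from an extent */

		/* Cluster shared with a neighbouring extent: record it as negative. */
		partial_cluster = -cluster_of(0x127, cluster_bits);

		/*
		 * Later, a tail removal begins mid-cluster; only promote that
		 * cluster to a free candidate if it isn't the protected one.
		 */
		if (cluster_of(first_freed_pblk, cluster_bits) != -partial_cluster)
			partial_cluster = cluster_of(first_freed_pblk, cluster_bits);

		printf("partial_cluster = %lld (%s)\n", partial_cluster,
		       partial_cluster > 0 ? "free candidate" : "protected/none");
		return 0;
	}
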
@@ -2621,27 +2627,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 	ex_ee_block = le32_to_cpu(ex->ee_block);
 	ex_ee_len = ext4_ext_get_actual_len(ex);
 
-	/*
-	 * If we're starting with an extent other than the last one in the
-	 * node, we need to see if it shares a cluster with the extent to
-	 * the right (towards the end of the file). If its leftmost cluster
-	 * is this extent's rightmost cluster and it is not cluster aligned,
-	 * we'll mark it as a partial that is not to be deallocated.
-	 */
-
-	if (ex != EXT_LAST_EXTENT(eh)) {
-		ext4_fsblk_t current_pblk, right_pblk;
-		long long current_cluster, right_cluster;
-
-		current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
-		current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
-		right_pblk = ext4_ext_pblock(ex + 1);
-		right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
-		if (current_cluster == right_cluster &&
-			EXT4_PBLK_COFF(sbi, right_pblk))
-			*partial_cluster = -right_cluster;
-	}
-
 	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
 
 	while (ex >= EXT_FIRST_EXTENT(eh) &&
@@ -2666,14 +2651,16 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 		if (end < ex_ee_block) {
 			/*
 			 * We're going to skip this extent and move to another,
-			 * so if this extent is not cluster aligned we have
-			 * to mark the current cluster as used to avoid
-			 * accidentally freeing it later on
+			 * so note that its first cluster is in use to avoid
+			 * freeing it when removing blocks. Eventually, the
+			 * right edge of the truncated/punched region will
+			 * be just to the left.
 			 */
-			pblk = ext4_ext_pblock(ex);
-			if (EXT4_PBLK_COFF(sbi, pblk))
-				*partial_cluster =
-					-((long long)EXT4_B2C(sbi, pblk));
+			if (sbi->s_cluster_ratio > 1) {
+				pblk = ext4_ext_pblock(ex);
+				*partial_cluster =
+					-(long long) EXT4_B2C(sbi, pblk);
+			}
 			ex--;
 			ex_ee_block = le32_to_cpu(ex->ee_block);
 			ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2749,8 +2736,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 					sizeof(struct ext4_extent));
 			}
 			le16_add_cpu(&eh->eh_entries, -1);
-		} else if (*partial_cluster > 0)
-			*partial_cluster = 0;
+		}
 
 		err = ext4_ext_dirty(handle, inode, path + depth);
 		if (err)
@@ -2769,20 +2755,18 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 	/*
 	 * If there's a partial cluster and at least one extent remains in
 	 * the leaf, free the partial cluster if it isn't shared with the
-	 * current extent. If there's a partial cluster and no extents
-	 * remain in the leaf, it can't be freed here. It can only be
-	 * freed when it's possible to determine if it's not shared with
-	 * any other extent - when the next leaf is processed or when space
-	 * removal is complete.
+	 * current extent. If it is shared with the current extent
+	 * we zero partial_cluster because we've reached the start of the
+	 * truncated/punched region and we're done removing blocks.
 	 */
-	if (*partial_cluster > 0 && eh->eh_entries &&
-	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
-	     *partial_cluster)) {
-		int flags = get_default_free_blocks_flags(inode);
-
-		ext4_free_blocks(handle, inode, NULL,
-				 EXT4_C2B(sbi, *partial_cluster),
-				 sbi->s_cluster_ratio, flags);
+	if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
+		pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
+		if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
+			ext4_free_blocks(handle, inode, NULL,
+					 EXT4_C2B(sbi, *partial_cluster),
+					 sbi->s_cluster_ratio,
+					 get_default_free_blocks_flags(inode));
+		}
 		*partial_cluster = 0;
 	}
 
@@ -2819,7 +2803,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
 			  ext4_lblk_t end)
 {
-	struct super_block *sb = inode->i_sb;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	int depth = ext_depth(inode);
 	struct ext4_ext_path *path = NULL;
 	long long partial_cluster = 0;
@@ -2845,9 +2829,10 @@ again:
 	 */
 	if (end < EXT_MAX_BLOCKS - 1) {
 		struct ext4_extent *ex;
-		ext4_lblk_t ee_block;
+		ext4_lblk_t ee_block, ex_end, lblk;
+		ext4_fsblk_t pblk;
 
-		/* find extent for this block */
+		/* find extent for or closest extent to this block */
 		path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
 		if (IS_ERR(path)) {
 			ext4_journal_stop(handle);
@@ -2867,6 +2852,7 @@ again:
 		}
 
 		ee_block = le32_to_cpu(ex->ee_block);
+		ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
 
 		/*
 		 * See if the last block is inside the extent, if so split
@@ -2874,8 +2860,19 @@ again:
 		 * tail of the first part of the split extent in
 		 * ext4_ext_rm_leaf().
 		 */
-		if (end >= ee_block &&
-		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
+		if (end >= ee_block && end < ex_end) {
+
+			/*
+			 * If we're going to split the extent, note that
+			 * the cluster containing the block after 'end' is
+			 * in use to avoid freeing it when removing blocks.
+			 */
+			if (sbi->s_cluster_ratio > 1) {
+				pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
+				partial_cluster =
+					-(long long) EXT4_B2C(sbi, pblk);
+			}
+
 			/*
 			 * Split the extent in two so that 'end' is the last
 			 * block in the first new extent. Also we should not
@@ -2886,6 +2883,24 @@ again:
 						 end + 1, 1);
 			if (err < 0)
 				goto out;
+
+		} else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
+			/*
+			 * If there's an extent to the right its first cluster
+			 * contains the immediate right boundary of the
+			 * truncated/punched region. Set partial_cluster to
+			 * its negative value so it won't be freed if shared
+			 * with the current extent. The end < ee_block case
+			 * is handled in ext4_ext_rm_leaf().
+			 */
+			lblk = ex_end + 1;
+			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
+						    &ex);
+			if (err)
+				goto out;
+			if (pblk)
+				partial_cluster =
+					-(long long) EXT4_B2C(sbi, pblk);
 		}
 	}
 	/*
@@ -2996,16 +3011,18 @@ again:
 	trace_ext4_ext_remove_space_done(inode, start, end, depth,
 					 partial_cluster, path->p_hdr->eh_entries);
 
-	/* If we still have something in the partial cluster and we have removed
+	/*
+	 * If we still have something in the partial cluster and we have removed
 	 * even the first extent, then we should free the blocks in the partial
-	 * cluster as well. */
-	if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
-		int flags = get_default_free_blocks_flags(inode);
-
+	 * cluster as well. (This code will only run when there are no leaves
+	 * to the immediate left of the truncated/punched region.)
+	 */
+	if (partial_cluster > 0 && err == 0) {
+		/* don't zero partial_cluster since it's not used afterwards */
 		ext4_free_blocks(handle, inode, NULL,
-				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
-				 EXT4_SB(sb)->s_cluster_ratio, flags);
-		partial_cluster = 0;
+				 EXT4_C2B(sbi, partial_cluster),
+				 sbi->s_cluster_ratio,
+				 get_default_free_blocks_flags(inode));
 	}
 
 	/* TODO: flexible tree reduction should be here */
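
Note on the final partial-cluster free above: on a bigalloc file system a cluster is s_cluster_ratio (a power of two) blocks, so freeing the leftover cluster means converting the cluster number back to its first block with EXT4_C2B() and releasing s_cluster_ratio blocks. A small standalone sketch of that block/cluster arithmetic, using simplified helpers (b2c(), c2b(), pblk_coff()) in place of the kernel macros:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t fsblk_t;

	/* Simplified bigalloc geometry: 1 << cluster_bits blocks per cluster. */
	struct geom {
		unsigned cluster_bits;
	};

	static fsblk_t b2c(const struct geom *g, fsblk_t pblk)		/* like EXT4_B2C() */
	{
		return pblk >> g->cluster_bits;
	}

	static fsblk_t c2b(const struct geom *g, fsblk_t cluster)	/* like EXT4_C2B() */
	{
		return cluster << g->cluster_bits;
	}

	static unsigned pblk_coff(const struct geom *g, fsblk_t pblk)	/* like EXT4_PBLK_COFF() */
	{
		return (unsigned)(pblk & ((1u << g->cluster_bits) - 1));
	}

	int main(void)
	{
		struct geom g = { .cluster_bits = 4 };	/* 16 blocks per cluster */
		fsblk_t partial_cluster = 19;		/* leftover free candidate */

		printf("free %u blocks starting at block %llu\n",
		       1u << g.cluster_bits,
		       (unsigned long long)c2b(&g, partial_cluster));
		printf("block 0x137 is block %u of cluster %llu\n",
		       pblk_coff(&g, 0x137),
		       (unsigned long long)b2c(&g, 0x137));
		return 0;
	}
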
@@ -4267,6 +4284,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	ext4_io_end_t *io = ext4_inode_aio(inode);
 	ext4_lblk_t cluster_offset;
 	int set_unwritten = 0;
+	bool map_from_cluster = false;
 
 	ext_debug("blocks %u/%u requested for inode %lu\n",
 		  map->m_lblk, map->m_len, inode->i_ino);
@@ -4343,10 +4361,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		}
 	}
 
-	if ((sbi->s_cluster_ratio > 1) &&
-	    ext4_find_delalloc_cluster(inode, map->m_lblk))
-		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
-
 	/*
 	 * requested block isn't allocated yet;
 	 * we couldn't try to create block if create flag is zero
@@ -4356,15 +4370,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		 * put just found gap into cache to speed up
 		 * subsequent requests
 		 */
-		if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
-			ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
 		goto out2;
 	}
 
 	/*
 	 * Okay, we need to do block allocation.
 	 */
-	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
 	newex.ee_block = cpu_to_le32(map->m_lblk);
 	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
 
@@ -4376,7 +4388,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
 		ar.len = allocated = map->m_len;
 		newblock = map->m_pblk;
-		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
+		map_from_cluster = true;
 		goto got_allocated_blocks;
 	}
 
@@ -4397,7 +4409,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
 		ar.len = allocated = map->m_len;
 		newblock = map->m_pblk;
-		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
+		map_from_cluster = true;
 		goto got_allocated_blocks;
 	}
 
@@ -4523,7 +4535,7 @@ got_allocated_blocks:
 		 */
 		reserved_clusters = get_reserved_cluster_alloc(inode,
 						map->m_lblk, allocated);
-		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
+		if (map_from_cluster) {
 			if (reserved_clusters) {
 				/*
 				 * We have clusters reserved for this range.
@@ -4620,7 +4632,6 @@ out2:
 
 	trace_ext4_ext_map_blocks_exit(inode, flags, map,
 				       err ? err : allocated);
-	ext4_es_lru_add(inode);
 	return err ? err : allocated;
 }
 
@@ -5140,7 +5151,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	if (ext4_has_inline_data(inode)) {
 		int has_inline = 1;
 
-		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
+		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
+						start, len);
 
 		if (has_inline)
 			return error;
@@ -5154,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
 	/* fallback to generic here if not in extents fmt */
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-		return generic_block_fiemap(inode, fieinfo, start, len,
-			ext4_get_block);
+		return __generic_block_fiemap(inode, fieinfo, start, len,
+					      ext4_get_block);
 
 	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
 		return -EBADR;
@@ -5179,7 +5191,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		error = ext4_fill_fiemap_extents(inode, start_blk,
 						 len_blks, fieinfo);
 	}
-	ext4_es_lru_add(inode);
 	return error;
 }
 
@@ -5239,8 +5250,6 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
 			return -EIO;
 
 		ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
-		if (!ex_last)
-			return -EIO;
 
 		err = ext4_access_path(handle, inode, path + depth);
 		if (err)