author | Jiri Kosina <jkosina@suse.cz> | 2010-06-16 12:08:13 -0400
committer | Jiri Kosina <jkosina@suse.cz> | 2010-06-16 12:08:13 -0400
commit | f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b (patch)
tree | c2c130a74be25b0b2dff992e1a195e2728bdaadd /fs/ext4/extents.c
parent | fd0961ff67727482bb20ca7e8ea97b83e9de2ddb (diff)
parent | 7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r-- | fs/ext4/extents.c | 417 |
1 files changed, 228 insertions, 189 deletions
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 146f1f6a9203..bf029c7d5518 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -107,11 +107,8 @@ static int ext4_ext_truncate_extend_restart(handle_t *handle, | |||
107 | if (err <= 0) | 107 | if (err <= 0) |
108 | return err; | 108 | return err; |
109 | err = ext4_truncate_restart_trans(handle, inode, needed); | 109 | err = ext4_truncate_restart_trans(handle, inode, needed); |
110 | /* | 110 | if (err == 0) |
111 | * We have dropped i_data_sem so someone might have cached again | 111 | err = -EAGAIN; |
112 | * an extent we are going to truncate. | ||
113 | */ | ||
114 | ext4_ext_invalidate_cache(inode); | ||
115 | 112 | ||
116 | return err; | 113 | return err; |
117 | } | 114 | } |
@@ -185,10 +182,10 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, | |||
185 | if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { | 182 | if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { |
186 | /* | 183 | /* |
187 | * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME | 184 | * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME |
188 | * block groups per flexgroup, reserve the first block | 185 | * block groups per flexgroup, reserve the first block |
189 | * group for directories and special files. Regular | 186 | * group for directories and special files. Regular |
190 | * files will start at the second block group. This | 187 | * files will start at the second block group. This |
191 | * tends to speed up directory access and improves | 188 | * tends to speed up directory access and improves |
192 | * fsck times. | 189 | * fsck times. |
193 | */ | 190 | */ |
194 | block_group &= ~(flex_size-1); | 191 | block_group &= ~(flex_size-1); |
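The flexgroup goal selection above relies on flex_size being a power of two: masking off the low bits of block_group rounds it down to the first group of its flexgroup, the group reserved for directories and special files. A minimal standalone sketch of that arithmetic (the helper name and sample numbers are illustrative, not taken from the kernel):

    #include <stdio.h>

    /* Round a block group down to the first group of its flexgroup.
     * Assumes flex_size is a power of two, as ext4 uses. */
    static unsigned int flex_first_group(unsigned int block_group,
                                         unsigned int flex_size)
    {
        return block_group & ~(flex_size - 1);
    }

    int main(void)
    {
        /* With 16 groups per flexgroup, group 37 rounds down to group 32. */
        printf("%u\n", flex_first_group(37, 16));
        return 0;
    }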
@@ -439,10 +436,10 @@ static int __ext4_ext_check(const char *function, struct inode *inode, | |||
439 | return 0; | 436 | return 0; |
440 | 437 | ||
441 | corrupted: | 438 | corrupted: |
442 | __ext4_error(inode->i_sb, function, | 439 | ext4_error_inode(function, inode, |
443 | "bad header/extent in inode #%lu: %s - magic %x, " | 440 | "bad header/extent: %s - magic %x, " |
444 | "entries %u, max %u(%u), depth %u(%u)", | 441 | "entries %u, max %u(%u), depth %u(%u)", |
445 | inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), | 442 | error_msg, le16_to_cpu(eh->eh_magic), |
446 | le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), | 443 | le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), |
447 | max, le16_to_cpu(eh->eh_depth), depth); | 444 | max, le16_to_cpu(eh->eh_depth), depth); |
448 | 445 | ||
@@ -1622,9 +1619,7 @@ int ext4_ext_try_to_merge(struct inode *inode, | |||
1622 | merge_done = 1; | 1619 | merge_done = 1; |
1623 | WARN_ON(eh->eh_entries == 0); | 1620 | WARN_ON(eh->eh_entries == 0); |
1624 | if (!eh->eh_entries) | 1621 | if (!eh->eh_entries) |
1625 | ext4_error(inode->i_sb, | 1622 | EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); |
1626 | "inode#%lu, eh->eh_entries = 0!", | ||
1627 | inode->i_ino); | ||
1628 | } | 1623 | } |
1629 | 1624 | ||
1630 | return merge_done; | 1625 | return merge_done; |
@@ -2039,7 +2034,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | |||
2039 | struct ext4_ext_cache *cex; | 2034 | struct ext4_ext_cache *cex; |
2040 | int ret = EXT4_EXT_CACHE_NO; | 2035 | int ret = EXT4_EXT_CACHE_NO; |
2041 | 2036 | ||
2042 | /* | 2037 | /* |
2043 | * We borrow i_block_reservation_lock to protect i_cached_extent | 2038 | * We borrow i_block_reservation_lock to protect i_cached_extent |
2044 | */ | 2039 | */ |
2045 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 2040 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
@@ -2361,7 +2356,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) | |||
2361 | int depth = ext_depth(inode); | 2356 | int depth = ext_depth(inode); |
2362 | struct ext4_ext_path *path; | 2357 | struct ext4_ext_path *path; |
2363 | handle_t *handle; | 2358 | handle_t *handle; |
2364 | int i = 0, err = 0; | 2359 | int i, err; |
2365 | 2360 | ||
2366 | ext_debug("truncate since %u\n", start); | 2361 | ext_debug("truncate since %u\n", start); |
2367 | 2362 | ||
@@ -2370,23 +2365,26 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) | |||
2370 | if (IS_ERR(handle)) | 2365 | if (IS_ERR(handle)) |
2371 | return PTR_ERR(handle); | 2366 | return PTR_ERR(handle); |
2372 | 2367 | ||
2368 | again: | ||
2373 | ext4_ext_invalidate_cache(inode); | 2369 | ext4_ext_invalidate_cache(inode); |
2374 | 2370 | ||
2375 | /* | 2371 | /* |
2376 | * We start scanning from right side, freeing all the blocks | 2372 | * We start scanning from right side, freeing all the blocks |
2377 | * after i_size and walking into the tree depth-wise. | 2373 | * after i_size and walking into the tree depth-wise. |
2378 | */ | 2374 | */ |
2375 | depth = ext_depth(inode); | ||
2379 | path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); | 2376 | path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); |
2380 | if (path == NULL) { | 2377 | if (path == NULL) { |
2381 | ext4_journal_stop(handle); | 2378 | ext4_journal_stop(handle); |
2382 | return -ENOMEM; | 2379 | return -ENOMEM; |
2383 | } | 2380 | } |
2381 | path[0].p_depth = depth; | ||
2384 | path[0].p_hdr = ext_inode_hdr(inode); | 2382 | path[0].p_hdr = ext_inode_hdr(inode); |
2385 | if (ext4_ext_check(inode, path[0].p_hdr, depth)) { | 2383 | if (ext4_ext_check(inode, path[0].p_hdr, depth)) { |
2386 | err = -EIO; | 2384 | err = -EIO; |
2387 | goto out; | 2385 | goto out; |
2388 | } | 2386 | } |
2389 | path[0].p_depth = depth; | 2387 | i = err = 0; |
2390 | 2388 | ||
2391 | while (i >= 0 && err == 0) { | 2389 | while (i >= 0 && err == 0) { |
2392 | if (i == depth) { | 2390 | if (i == depth) { |
@@ -2480,6 +2478,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) | |||
2480 | out: | 2478 | out: |
2481 | ext4_ext_drop_refs(path); | 2479 | ext4_ext_drop_refs(path); |
2482 | kfree(path); | 2480 | kfree(path); |
2481 | if (err == -EAGAIN) | ||
2482 | goto again; | ||
2483 | ext4_journal_stop(handle); | 2483 | ext4_journal_stop(handle); |
2484 | 2484 | ||
2485 | return err; | 2485 | return err; |
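The two changes above cooperate: ext4_ext_truncate_extend_restart now reports a restarted transaction as -EAGAIN instead of invalidating the extent cache itself, and ext4_ext_remove_space reacts to -EAGAIN by jumping back to the new again: label, where the cache is invalidated and the path is rebuilt with a freshly read depth. A condensed, compilable sketch of that retry shape, using stand-in types and helpers rather than the real ext4 functions:

    #include <errno.h>

    struct toy_inode { int unused; };

    static void invalidate_extent_cache(struct toy_inode *inode) { (void)inode; }

    /* Stand-in for the depth-wise scan; pretend it hits one transaction
     * restart (-EAGAIN) before completing. */
    static int scan_and_free_extents(struct toy_inode *inode, unsigned int start)
    {
        static int restarted;
        (void)inode; (void)start;
        return restarted++ ? 0 : -EAGAIN;
    }

    static int remove_space_shape(struct toy_inode *inode, unsigned int start)
    {
        int err;
    again:
        invalidate_extent_cache(inode);   /* i_data_sem was dropped; cache may be stale */
        err = scan_and_free_extents(inode, start);
        if (err == -EAGAIN)
            goto again;                   /* rebuild the path and rescan from the top */
        return err;
    }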
@@ -2544,7 +2544,7 @@ static void bi_complete(struct bio *bio, int error) | |||
2544 | /* FIXME!! we need to try to merge to left or right after zero-out */ | 2544 | /* FIXME!! we need to try to merge to left or right after zero-out */ |
2545 | static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | 2545 | static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) |
2546 | { | 2546 | { |
2547 | int ret = -EIO; | 2547 | int ret; |
2548 | struct bio *bio; | 2548 | struct bio *bio; |
2549 | int blkbits, blocksize; | 2549 | int blkbits, blocksize; |
2550 | sector_t ee_pblock; | 2550 | sector_t ee_pblock; |
@@ -2568,6 +2568,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |||
2568 | len = ee_len; | 2568 | len = ee_len; |
2569 | 2569 | ||
2570 | bio = bio_alloc(GFP_NOIO, len); | 2570 | bio = bio_alloc(GFP_NOIO, len); |
2571 | if (!bio) | ||
2572 | return -ENOMEM; | ||
2573 | |||
2571 | bio->bi_sector = ee_pblock; | 2574 | bio->bi_sector = ee_pblock; |
2572 | bio->bi_bdev = inode->i_sb->s_bdev; | 2575 | bio->bi_bdev = inode->i_sb->s_bdev; |
2573 | 2576 | ||
@@ -2595,22 +2598,20 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |||
2595 | submit_bio(WRITE, bio); | 2598 | submit_bio(WRITE, bio); |
2596 | wait_for_completion(&event); | 2599 | wait_for_completion(&event); |
2597 | 2600 | ||
2598 | if (test_bit(BIO_UPTODATE, &bio->bi_flags)) | 2601 | if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { |
2599 | ret = 0; | 2602 | bio_put(bio); |
2600 | else { | 2603 | return -EIO; |
2601 | ret = -EIO; | ||
2602 | break; | ||
2603 | } | 2604 | } |
2604 | bio_put(bio); | 2605 | bio_put(bio); |
2605 | ee_len -= done; | 2606 | ee_len -= done; |
2606 | ee_pblock += done << (blkbits - 9); | 2607 | ee_pblock += done << (blkbits - 9); |
2607 | } | 2608 | } |
2608 | return ret; | 2609 | return 0; |
2609 | } | 2610 | } |
2610 | 2611 | ||
2611 | #define EXT4_EXT_ZERO_LEN 7 | 2612 | #define EXT4_EXT_ZERO_LEN 7 |
2612 | /* | 2613 | /* |
2613 | * This function is called by ext4_ext_get_blocks() if someone tries to write | 2614 | * This function is called by ext4_ext_map_blocks() if someone tries to write |
2614 | * to an uninitialized extent. It may result in splitting the uninitialized | 2615 | * to an uninitialized extent. It may result in splitting the uninitialized |
2615 | * extent into multiple extents (upto three - one initialized and two | 2616 | * extent into multiple extents (upto three - one initialized and two |
2616 | * uninitialized). | 2617 | * uninitialized). |
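The zero-out rework in the hunks above tightens error handling: a failed bio_alloc() now returns -ENOMEM immediately, and a bio that completes without BIO_UPTODATE releases the bio and returns -EIO instead of breaking out with a stale ret. A simplified sketch of the resulting loop shape, using placeholder helpers rather than the block-layer API:

    #include <errno.h>

    struct toy_bio { int uptodate; };

    static struct toy_bio bio_storage;

    static struct toy_bio *toy_bio_alloc(unsigned int len)
    {
        (void)len;
        bio_storage.uptodate = 1;    /* pretend the write always succeeds */
        return &bio_storage;         /* a real allocator may return NULL */
    }

    static void toy_submit_and_wait(struct toy_bio *bio) { (void)bio; }
    static void toy_bio_put(struct toy_bio *bio) { (void)bio; }

    static int zeroout_shape(unsigned int total_blocks)
    {
        while (total_blocks) {
            unsigned int chunk = total_blocks > 256 ? 256 : total_blocks;
            struct toy_bio *bio = toy_bio_alloc(chunk);

            if (!bio)
                return -ENOMEM;      /* new: allocation failure is fatal */
            toy_submit_and_wait(bio);
            if (!bio->uptodate) {
                toy_bio_put(bio);
                return -EIO;         /* new: fail fast on an I/O error */
            }
            toy_bio_put(bio);
            total_blocks -= chunk;
        }
        return 0;                    /* falling out of the loop is success */
    }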
@@ -2620,39 +2621,55 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |||
2620 | * c> Splits in three extents: Somone is writing in middle of the extent | 2621 | * c> Splits in three extents: Somone is writing in middle of the extent |
2621 | */ | 2622 | */ |
2622 | static int ext4_ext_convert_to_initialized(handle_t *handle, | 2623 | static int ext4_ext_convert_to_initialized(handle_t *handle, |
2623 | struct inode *inode, | 2624 | struct inode *inode, |
2624 | struct ext4_ext_path *path, | 2625 | struct ext4_map_blocks *map, |
2625 | ext4_lblk_t iblock, | 2626 | struct ext4_ext_path *path) |
2626 | unsigned int max_blocks) | ||
2627 | { | 2627 | { |
2628 | struct ext4_extent *ex, newex, orig_ex; | 2628 | struct ext4_extent *ex, newex, orig_ex; |
2629 | struct ext4_extent *ex1 = NULL; | 2629 | struct ext4_extent *ex1 = NULL; |
2630 | struct ext4_extent *ex2 = NULL; | 2630 | struct ext4_extent *ex2 = NULL; |
2631 | struct ext4_extent *ex3 = NULL; | 2631 | struct ext4_extent *ex3 = NULL; |
2632 | struct ext4_extent_header *eh; | 2632 | struct ext4_extent_header *eh; |
2633 | ext4_lblk_t ee_block; | 2633 | ext4_lblk_t ee_block, eof_block; |
2634 | unsigned int allocated, ee_len, depth; | 2634 | unsigned int allocated, ee_len, depth; |
2635 | ext4_fsblk_t newblock; | 2635 | ext4_fsblk_t newblock; |
2636 | int err = 0; | 2636 | int err = 0; |
2637 | int ret = 0; | 2637 | int ret = 0; |
2638 | int may_zeroout; | ||
2639 | |||
2640 | ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" | ||
2641 | "block %llu, max_blocks %u\n", inode->i_ino, | ||
2642 | (unsigned long long)map->m_lblk, map->m_len); | ||
2643 | |||
2644 | eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> | ||
2645 | inode->i_sb->s_blocksize_bits; | ||
2646 | if (eof_block < map->m_lblk + map->m_len) | ||
2647 | eof_block = map->m_lblk + map->m_len; | ||
2638 | 2648 | ||
2639 | depth = ext_depth(inode); | 2649 | depth = ext_depth(inode); |
2640 | eh = path[depth].p_hdr; | 2650 | eh = path[depth].p_hdr; |
2641 | ex = path[depth].p_ext; | 2651 | ex = path[depth].p_ext; |
2642 | ee_block = le32_to_cpu(ex->ee_block); | 2652 | ee_block = le32_to_cpu(ex->ee_block); |
2643 | ee_len = ext4_ext_get_actual_len(ex); | 2653 | ee_len = ext4_ext_get_actual_len(ex); |
2644 | allocated = ee_len - (iblock - ee_block); | 2654 | allocated = ee_len - (map->m_lblk - ee_block); |
2645 | newblock = iblock - ee_block + ext_pblock(ex); | 2655 | newblock = map->m_lblk - ee_block + ext_pblock(ex); |
2656 | |||
2646 | ex2 = ex; | 2657 | ex2 = ex; |
2647 | orig_ex.ee_block = ex->ee_block; | 2658 | orig_ex.ee_block = ex->ee_block; |
2648 | orig_ex.ee_len = cpu_to_le16(ee_len); | 2659 | orig_ex.ee_len = cpu_to_le16(ee_len); |
2649 | ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); | 2660 | ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); |
2650 | 2661 | ||
2662 | /* | ||
2663 | * It is safe to convert extent to initialized via explicit | ||
2664 | * zeroout only if extent is fully insde i_size or new_size. | ||
2665 | */ | ||
2666 | may_zeroout = ee_block + ee_len <= eof_block; | ||
2667 | |||
2651 | err = ext4_ext_get_access(handle, inode, path + depth); | 2668 | err = ext4_ext_get_access(handle, inode, path + depth); |
2652 | if (err) | 2669 | if (err) |
2653 | goto out; | 2670 | goto out; |
2654 | /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */ | 2671 | /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */ |
2655 | if (ee_len <= 2*EXT4_EXT_ZERO_LEN) { | 2672 | if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) { |
2656 | err = ext4_ext_zeroout(inode, &orig_ex); | 2673 | err = ext4_ext_zeroout(inode, &orig_ex); |
2657 | if (err) | 2674 | if (err) |
2658 | goto fix_extent_len; | 2675 | goto fix_extent_len; |
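The new eof_block/may_zeroout logic above only allows converting an uninitialized extent by zeroing it out when the whole extent lies at or below the end of file, rounded up to a block boundary and extended to cover the current write. A small worked example of that arithmetic, with sample values chosen purely for illustration:

    #include <stdio.h>

    /* Round i_size up to whole blocks, mirroring the shift in the diff. */
    static unsigned long long size_to_eof_block(unsigned long long i_size,
                                                unsigned int blkbits)
    {
        unsigned long long blocksize = 1ULL << blkbits;
        return (i_size + blocksize - 1) >> blkbits;
    }

    int main(void)
    {
        unsigned long long eof_block = size_to_eof_block(10000, 12); /* 4 KiB blocks -> 3 */
        unsigned int m_lblk = 1, m_len = 4;      /* sample write: logical blocks 1..4 */
        unsigned int ee_block = 2, ee_len = 2;   /* sample extent: logical blocks 2..3 */

        if (eof_block < m_lblk + m_len)
            eof_block = m_lblk + m_len;          /* extend to cover the write itself */

        printf("may_zeroout = %d\n", ee_block + ee_len <= eof_block);
        return 0;
    }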
@@ -2665,10 +2682,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2665 | return allocated; | 2682 | return allocated; |
2666 | } | 2683 | } |
2667 | 2684 | ||
2668 | /* ex1: ee_block to iblock - 1 : uninitialized */ | 2685 | /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ |
2669 | if (iblock > ee_block) { | 2686 | if (map->m_lblk > ee_block) { |
2670 | ex1 = ex; | 2687 | ex1 = ex; |
2671 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 2688 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
2672 | ext4_ext_mark_uninitialized(ex1); | 2689 | ext4_ext_mark_uninitialized(ex1); |
2673 | ex2 = &newex; | 2690 | ex2 = &newex; |
2674 | } | 2691 | } |
@@ -2677,15 +2694,15 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2677 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | 2694 | * we insert ex3, if ex1 is NULL. This is to avoid temporary |
2678 | * overlap of blocks. | 2695 | * overlap of blocks. |
2679 | */ | 2696 | */ |
2680 | if (!ex1 && allocated > max_blocks) | 2697 | if (!ex1 && allocated > map->m_len) |
2681 | ex2->ee_len = cpu_to_le16(max_blocks); | 2698 | ex2->ee_len = cpu_to_le16(map->m_len); |
2682 | /* ex3: to ee_block + ee_len : uninitialised */ | 2699 | /* ex3: to ee_block + ee_len : uninitialised */ |
2683 | if (allocated > max_blocks) { | 2700 | if (allocated > map->m_len) { |
2684 | unsigned int newdepth; | 2701 | unsigned int newdepth; |
2685 | /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ | 2702 | /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ |
2686 | if (allocated <= EXT4_EXT_ZERO_LEN) { | 2703 | if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) { |
2687 | /* | 2704 | /* |
2688 | * iblock == ee_block is handled by the zerouout | 2705 | * map->m_lblk == ee_block is handled by the zerouout |
2689 | * at the beginning. | 2706 | * at the beginning. |
2690 | * Mark first half uninitialized. | 2707 | * Mark first half uninitialized. |
2691 | * Mark second half initialized and zero out the | 2708 | * Mark second half initialized and zero out the |
@@ -2698,7 +2715,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2698 | ext4_ext_dirty(handle, inode, path + depth); | 2715 | ext4_ext_dirty(handle, inode, path + depth); |
2699 | 2716 | ||
2700 | ex3 = &newex; | 2717 | ex3 = &newex; |
2701 | ex3->ee_block = cpu_to_le32(iblock); | 2718 | ex3->ee_block = cpu_to_le32(map->m_lblk); |
2702 | ext4_ext_store_pblock(ex3, newblock); | 2719 | ext4_ext_store_pblock(ex3, newblock); |
2703 | ex3->ee_len = cpu_to_le16(allocated); | 2720 | ex3->ee_len = cpu_to_le16(allocated); |
2704 | err = ext4_ext_insert_extent(handle, inode, path, | 2721 | err = ext4_ext_insert_extent(handle, inode, path, |
@@ -2711,7 +2728,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2711 | ex->ee_len = orig_ex.ee_len; | 2728 | ex->ee_len = orig_ex.ee_len; |
2712 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 2729 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
2713 | ext4_ext_dirty(handle, inode, path + depth); | 2730 | ext4_ext_dirty(handle, inode, path + depth); |
2714 | /* blocks available from iblock */ | 2731 | /* blocks available from map->m_lblk */ |
2715 | return allocated; | 2732 | return allocated; |
2716 | 2733 | ||
2717 | } else if (err) | 2734 | } else if (err) |
@@ -2733,8 +2750,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2733 | */ | 2750 | */ |
2734 | depth = ext_depth(inode); | 2751 | depth = ext_depth(inode); |
2735 | ext4_ext_drop_refs(path); | 2752 | ext4_ext_drop_refs(path); |
2736 | path = ext4_ext_find_extent(inode, | 2753 | path = ext4_ext_find_extent(inode, map->m_lblk, |
2737 | iblock, path); | 2754 | path); |
2738 | if (IS_ERR(path)) { | 2755 | if (IS_ERR(path)) { |
2739 | err = PTR_ERR(path); | 2756 | err = PTR_ERR(path); |
2740 | return err; | 2757 | return err; |
@@ -2754,12 +2771,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2754 | return allocated; | 2771 | return allocated; |
2755 | } | 2772 | } |
2756 | ex3 = &newex; | 2773 | ex3 = &newex; |
2757 | ex3->ee_block = cpu_to_le32(iblock + max_blocks); | 2774 | ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); |
2758 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | 2775 | ext4_ext_store_pblock(ex3, newblock + map->m_len); |
2759 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | 2776 | ex3->ee_len = cpu_to_le16(allocated - map->m_len); |
2760 | ext4_ext_mark_uninitialized(ex3); | 2777 | ext4_ext_mark_uninitialized(ex3); |
2761 | err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); | 2778 | err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); |
2762 | if (err == -ENOSPC) { | 2779 | if (err == -ENOSPC && may_zeroout) { |
2763 | err = ext4_ext_zeroout(inode, &orig_ex); | 2780 | err = ext4_ext_zeroout(inode, &orig_ex); |
2764 | if (err) | 2781 | if (err) |
2765 | goto fix_extent_len; | 2782 | goto fix_extent_len; |
@@ -2769,7 +2786,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2769 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 2786 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
2770 | ext4_ext_dirty(handle, inode, path + depth); | 2787 | ext4_ext_dirty(handle, inode, path + depth); |
2771 | /* zeroed the full extent */ | 2788 | /* zeroed the full extent */ |
2772 | /* blocks available from iblock */ | 2789 | /* blocks available from map->m_lblk */ |
2773 | return allocated; | 2790 | return allocated; |
2774 | 2791 | ||
2775 | } else if (err) | 2792 | } else if (err) |
@@ -2783,11 +2800,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2783 | * update the extent length after successful insert of the | 2800 | * update the extent length after successful insert of the |
2784 | * split extent | 2801 | * split extent |
2785 | */ | 2802 | */ |
2786 | orig_ex.ee_len = cpu_to_le16(ee_len - | 2803 | ee_len -= ext4_ext_get_actual_len(ex3); |
2787 | ext4_ext_get_actual_len(ex3)); | 2804 | orig_ex.ee_len = cpu_to_le16(ee_len); |
2805 | may_zeroout = ee_block + ee_len <= eof_block; | ||
2806 | |||
2788 | depth = newdepth; | 2807 | depth = newdepth; |
2789 | ext4_ext_drop_refs(path); | 2808 | ext4_ext_drop_refs(path); |
2790 | path = ext4_ext_find_extent(inode, iblock, path); | 2809 | path = ext4_ext_find_extent(inode, map->m_lblk, path); |
2791 | if (IS_ERR(path)) { | 2810 | if (IS_ERR(path)) { |
2792 | err = PTR_ERR(path); | 2811 | err = PTR_ERR(path); |
2793 | goto out; | 2812 | goto out; |
@@ -2801,14 +2820,14 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2801 | if (err) | 2820 | if (err) |
2802 | goto out; | 2821 | goto out; |
2803 | 2822 | ||
2804 | allocated = max_blocks; | 2823 | allocated = map->m_len; |
2805 | 2824 | ||
2806 | /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying | 2825 | /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying |
2807 | * to insert a extent in the middle zerout directly | 2826 | * to insert a extent in the middle zerout directly |
2808 | * otherwise give the extent a chance to merge to left | 2827 | * otherwise give the extent a chance to merge to left |
2809 | */ | 2828 | */ |
2810 | if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && | 2829 | if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && |
2811 | iblock != ee_block) { | 2830 | map->m_lblk != ee_block && may_zeroout) { |
2812 | err = ext4_ext_zeroout(inode, &orig_ex); | 2831 | err = ext4_ext_zeroout(inode, &orig_ex); |
2813 | if (err) | 2832 | if (err) |
2814 | goto fix_extent_len; | 2833 | goto fix_extent_len; |
@@ -2818,7 +2837,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2818 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 2837 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
2819 | ext4_ext_dirty(handle, inode, path + depth); | 2838 | ext4_ext_dirty(handle, inode, path + depth); |
2820 | /* zero out the first half */ | 2839 | /* zero out the first half */ |
2821 | /* blocks available from iblock */ | 2840 | /* blocks available from map->m_lblk */ |
2822 | return allocated; | 2841 | return allocated; |
2823 | } | 2842 | } |
2824 | } | 2843 | } |
@@ -2829,12 +2848,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2829 | */ | 2848 | */ |
2830 | if (ex1 && ex1 != ex) { | 2849 | if (ex1 && ex1 != ex) { |
2831 | ex1 = ex; | 2850 | ex1 = ex; |
2832 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 2851 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
2833 | ext4_ext_mark_uninitialized(ex1); | 2852 | ext4_ext_mark_uninitialized(ex1); |
2834 | ex2 = &newex; | 2853 | ex2 = &newex; |
2835 | } | 2854 | } |
2836 | /* ex2: iblock to iblock + maxblocks-1 : initialised */ | 2855 | /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */ |
2837 | ex2->ee_block = cpu_to_le32(iblock); | 2856 | ex2->ee_block = cpu_to_le32(map->m_lblk); |
2838 | ext4_ext_store_pblock(ex2, newblock); | 2857 | ext4_ext_store_pblock(ex2, newblock); |
2839 | ex2->ee_len = cpu_to_le16(allocated); | 2858 | ex2->ee_len = cpu_to_le16(allocated); |
2840 | if (ex2 != ex) | 2859 | if (ex2 != ex) |
@@ -2877,7 +2896,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2877 | goto out; | 2896 | goto out; |
2878 | insert: | 2897 | insert: |
2879 | err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); | 2898 | err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); |
2880 | if (err == -ENOSPC) { | 2899 | if (err == -ENOSPC && may_zeroout) { |
2881 | err = ext4_ext_zeroout(inode, &orig_ex); | 2900 | err = ext4_ext_zeroout(inode, &orig_ex); |
2882 | if (err) | 2901 | if (err) |
2883 | goto fix_extent_len; | 2902 | goto fix_extent_len; |
@@ -2904,7 +2923,7 @@ fix_extent_len: | |||
2904 | } | 2923 | } |
2905 | 2924 | ||
2906 | /* | 2925 | /* |
2907 | * This function is called by ext4_ext_get_blocks() from | 2926 | * This function is called by ext4_ext_map_blocks() from |
2908 | * ext4_get_blocks_dio_write() when DIO to write | 2927 | * ext4_get_blocks_dio_write() when DIO to write |
2909 | * to an uninitialized extent. | 2928 | * to an uninitialized extent. |
2910 | * | 2929 | * |
@@ -2927,9 +2946,8 @@ fix_extent_len: | |||
2927 | */ | 2946 | */ |
2928 | static int ext4_split_unwritten_extents(handle_t *handle, | 2947 | static int ext4_split_unwritten_extents(handle_t *handle, |
2929 | struct inode *inode, | 2948 | struct inode *inode, |
2949 | struct ext4_map_blocks *map, | ||
2930 | struct ext4_ext_path *path, | 2950 | struct ext4_ext_path *path, |
2931 | ext4_lblk_t iblock, | ||
2932 | unsigned int max_blocks, | ||
2933 | int flags) | 2951 | int flags) |
2934 | { | 2952 | { |
2935 | struct ext4_extent *ex, newex, orig_ex; | 2953 | struct ext4_extent *ex, newex, orig_ex; |
@@ -2937,41 +2955,55 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
2937 | struct ext4_extent *ex2 = NULL; | 2955 | struct ext4_extent *ex2 = NULL; |
2938 | struct ext4_extent *ex3 = NULL; | 2956 | struct ext4_extent *ex3 = NULL; |
2939 | struct ext4_extent_header *eh; | 2957 | struct ext4_extent_header *eh; |
2940 | ext4_lblk_t ee_block; | 2958 | ext4_lblk_t ee_block, eof_block; |
2941 | unsigned int allocated, ee_len, depth; | 2959 | unsigned int allocated, ee_len, depth; |
2942 | ext4_fsblk_t newblock; | 2960 | ext4_fsblk_t newblock; |
2943 | int err = 0; | 2961 | int err = 0; |
2962 | int may_zeroout; | ||
2963 | |||
2964 | ext_debug("ext4_split_unwritten_extents: inode %lu, logical" | ||
2965 | "block %llu, max_blocks %u\n", inode->i_ino, | ||
2966 | (unsigned long long)map->m_lblk, map->m_len); | ||
2967 | |||
2968 | eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> | ||
2969 | inode->i_sb->s_blocksize_bits; | ||
2970 | if (eof_block < map->m_lblk + map->m_len) | ||
2971 | eof_block = map->m_lblk + map->m_len; | ||
2944 | 2972 | ||
2945 | ext_debug("ext4_split_unwritten_extents: inode %lu," | ||
2946 | "iblock %llu, max_blocks %u\n", inode->i_ino, | ||
2947 | (unsigned long long)iblock, max_blocks); | ||
2948 | depth = ext_depth(inode); | 2973 | depth = ext_depth(inode); |
2949 | eh = path[depth].p_hdr; | 2974 | eh = path[depth].p_hdr; |
2950 | ex = path[depth].p_ext; | 2975 | ex = path[depth].p_ext; |
2951 | ee_block = le32_to_cpu(ex->ee_block); | 2976 | ee_block = le32_to_cpu(ex->ee_block); |
2952 | ee_len = ext4_ext_get_actual_len(ex); | 2977 | ee_len = ext4_ext_get_actual_len(ex); |
2953 | allocated = ee_len - (iblock - ee_block); | 2978 | allocated = ee_len - (map->m_lblk - ee_block); |
2954 | newblock = iblock - ee_block + ext_pblock(ex); | 2979 | newblock = map->m_lblk - ee_block + ext_pblock(ex); |
2980 | |||
2955 | ex2 = ex; | 2981 | ex2 = ex; |
2956 | orig_ex.ee_block = ex->ee_block; | 2982 | orig_ex.ee_block = ex->ee_block; |
2957 | orig_ex.ee_len = cpu_to_le16(ee_len); | 2983 | orig_ex.ee_len = cpu_to_le16(ee_len); |
2958 | ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); | 2984 | ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); |
2959 | 2985 | ||
2960 | /* | 2986 | /* |
2987 | * It is safe to convert extent to initialized via explicit | ||
2988 | * zeroout only if extent is fully insde i_size or new_size. | ||
2989 | */ | ||
2990 | may_zeroout = ee_block + ee_len <= eof_block; | ||
2991 | |||
2992 | /* | ||
2961 | * If the uninitialized extent begins at the same logical | 2993 | * If the uninitialized extent begins at the same logical |
2962 | * block where the write begins, and the write completely | 2994 | * block where the write begins, and the write completely |
2963 | * covers the extent, then we don't need to split it. | 2995 | * covers the extent, then we don't need to split it. |
2964 | */ | 2996 | */ |
2965 | if ((iblock == ee_block) && (allocated <= max_blocks)) | 2997 | if ((map->m_lblk == ee_block) && (allocated <= map->m_len)) |
2966 | return allocated; | 2998 | return allocated; |
2967 | 2999 | ||
2968 | err = ext4_ext_get_access(handle, inode, path + depth); | 3000 | err = ext4_ext_get_access(handle, inode, path + depth); |
2969 | if (err) | 3001 | if (err) |
2970 | goto out; | 3002 | goto out; |
2971 | /* ex1: ee_block to iblock - 1 : uninitialized */ | 3003 | /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ |
2972 | if (iblock > ee_block) { | 3004 | if (map->m_lblk > ee_block) { |
2973 | ex1 = ex; | 3005 | ex1 = ex; |
2974 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 3006 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
2975 | ext4_ext_mark_uninitialized(ex1); | 3007 | ext4_ext_mark_uninitialized(ex1); |
2976 | ex2 = &newex; | 3008 | ex2 = &newex; |
2977 | } | 3009 | } |
@@ -2980,18 +3012,18 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
2980 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | 3012 | * we insert ex3, if ex1 is NULL. This is to avoid temporary |
2981 | * overlap of blocks. | 3013 | * overlap of blocks. |
2982 | */ | 3014 | */ |
2983 | if (!ex1 && allocated > max_blocks) | 3015 | if (!ex1 && allocated > map->m_len) |
2984 | ex2->ee_len = cpu_to_le16(max_blocks); | 3016 | ex2->ee_len = cpu_to_le16(map->m_len); |
2985 | /* ex3: to ee_block + ee_len : uninitialised */ | 3017 | /* ex3: to ee_block + ee_len : uninitialised */ |
2986 | if (allocated > max_blocks) { | 3018 | if (allocated > map->m_len) { |
2987 | unsigned int newdepth; | 3019 | unsigned int newdepth; |
2988 | ex3 = &newex; | 3020 | ex3 = &newex; |
2989 | ex3->ee_block = cpu_to_le32(iblock + max_blocks); | 3021 | ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); |
2990 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | 3022 | ext4_ext_store_pblock(ex3, newblock + map->m_len); |
2991 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | 3023 | ex3->ee_len = cpu_to_le16(allocated - map->m_len); |
2992 | ext4_ext_mark_uninitialized(ex3); | 3024 | ext4_ext_mark_uninitialized(ex3); |
2993 | err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); | 3025 | err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); |
2994 | if (err == -ENOSPC) { | 3026 | if (err == -ENOSPC && may_zeroout) { |
2995 | err = ext4_ext_zeroout(inode, &orig_ex); | 3027 | err = ext4_ext_zeroout(inode, &orig_ex); |
2996 | if (err) | 3028 | if (err) |
2997 | goto fix_extent_len; | 3029 | goto fix_extent_len; |
@@ -3001,7 +3033,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
3001 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | 3033 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); |
3002 | ext4_ext_dirty(handle, inode, path + depth); | 3034 | ext4_ext_dirty(handle, inode, path + depth); |
3003 | /* zeroed the full extent */ | 3035 | /* zeroed the full extent */ |
3004 | /* blocks available from iblock */ | 3036 | /* blocks available from map->m_lblk */ |
3005 | return allocated; | 3037 | return allocated; |
3006 | 3038 | ||
3007 | } else if (err) | 3039 | } else if (err) |
@@ -3015,11 +3047,13 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
3015 | * update the extent length after successful insert of the | 3047 | * update the extent length after successful insert of the |
3016 | * split extent | 3048 | * split extent |
3017 | */ | 3049 | */ |
3018 | orig_ex.ee_len = cpu_to_le16(ee_len - | 3050 | ee_len -= ext4_ext_get_actual_len(ex3); |
3019 | ext4_ext_get_actual_len(ex3)); | 3051 | orig_ex.ee_len = cpu_to_le16(ee_len); |
3052 | may_zeroout = ee_block + ee_len <= eof_block; | ||
3053 | |||
3020 | depth = newdepth; | 3054 | depth = newdepth; |
3021 | ext4_ext_drop_refs(path); | 3055 | ext4_ext_drop_refs(path); |
3022 | path = ext4_ext_find_extent(inode, iblock, path); | 3056 | path = ext4_ext_find_extent(inode, map->m_lblk, path); |
3023 | if (IS_ERR(path)) { | 3057 | if (IS_ERR(path)) { |
3024 | err = PTR_ERR(path); | 3058 | err = PTR_ERR(path); |
3025 | goto out; | 3059 | goto out; |
@@ -3033,7 +3067,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
3033 | if (err) | 3067 | if (err) |
3034 | goto out; | 3068 | goto out; |
3035 | 3069 | ||
3036 | allocated = max_blocks; | 3070 | allocated = map->m_len; |
3037 | } | 3071 | } |
3038 | /* | 3072 | /* |
3039 | * If there was a change of depth as part of the | 3073 | * If there was a change of depth as part of the |
@@ -3042,15 +3076,15 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
3042 | */ | 3076 | */ |
3043 | if (ex1 && ex1 != ex) { | 3077 | if (ex1 && ex1 != ex) { |
3044 | ex1 = ex; | 3078 | ex1 = ex; |
3045 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | 3079 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); |
3046 | ext4_ext_mark_uninitialized(ex1); | 3080 | ext4_ext_mark_uninitialized(ex1); |
3047 | ex2 = &newex; | 3081 | ex2 = &newex; |
3048 | } | 3082 | } |
3049 | /* | 3083 | /* |
3050 | * ex2: iblock to iblock + maxblocks-1 : to be direct IO written, | 3084 | * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written |
3051 | * uninitialised still. | 3085 | * using direct I/O, uninitialised still. |
3052 | */ | 3086 | */ |
3053 | ex2->ee_block = cpu_to_le32(iblock); | 3087 | ex2->ee_block = cpu_to_le32(map->m_lblk); |
3054 | ext4_ext_store_pblock(ex2, newblock); | 3088 | ext4_ext_store_pblock(ex2, newblock); |
3055 | ex2->ee_len = cpu_to_le16(allocated); | 3089 | ex2->ee_len = cpu_to_le16(allocated); |
3056 | ext4_ext_mark_uninitialized(ex2); | 3090 | ext4_ext_mark_uninitialized(ex2); |
@@ -3062,7 +3096,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
3062 | goto out; | 3096 | goto out; |
3063 | insert: | 3097 | insert: |
3064 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | 3098 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
3065 | if (err == -ENOSPC) { | 3099 | if (err == -ENOSPC && may_zeroout) { |
3066 | err = ext4_ext_zeroout(inode, &orig_ex); | 3100 | err = ext4_ext_zeroout(inode, &orig_ex); |
3067 | if (err) | 3101 | if (err) |
3068 | goto fix_extent_len; | 3102 | goto fix_extent_len; |
@@ -3152,10 +3186,9 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev, | |||
3152 | 3186 | ||
3153 | static int | 3187 | static int |
3154 | ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | 3188 | ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, |
3155 | ext4_lblk_t iblock, unsigned int max_blocks, | 3189 | struct ext4_map_blocks *map, |
3156 | struct ext4_ext_path *path, int flags, | 3190 | struct ext4_ext_path *path, int flags, |
3157 | unsigned int allocated, struct buffer_head *bh_result, | 3191 | unsigned int allocated, ext4_fsblk_t newblock) |
3158 | ext4_fsblk_t newblock) | ||
3159 | { | 3192 | { |
3160 | int ret = 0; | 3193 | int ret = 0; |
3161 | int err = 0; | 3194 | int err = 0; |
@@ -3163,15 +3196,14 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3163 | 3196 | ||
3164 | ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" | 3197 | ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" |
3165 | "block %llu, max_blocks %u, flags %d, allocated %u", | 3198 | "block %llu, max_blocks %u, flags %d, allocated %u", |
3166 | inode->i_ino, (unsigned long long)iblock, max_blocks, | 3199 | inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, |
3167 | flags, allocated); | 3200 | flags, allocated); |
3168 | ext4_ext_show_leaf(inode, path); | 3201 | ext4_ext_show_leaf(inode, path); |
3169 | 3202 | ||
3170 | /* get_block() before submit the IO, split the extent */ | 3203 | /* get_block() before submit the IO, split the extent */ |
3171 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { | 3204 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
3172 | ret = ext4_split_unwritten_extents(handle, | 3205 | ret = ext4_split_unwritten_extents(handle, inode, map, |
3173 | inode, path, iblock, | 3206 | path, flags); |
3174 | max_blocks, flags); | ||
3175 | /* | 3207 | /* |
3176 | * Flag the inode(non aio case) or end_io struct (aio case) | 3208 | * Flag the inode(non aio case) or end_io struct (aio case) |
3177 | * that this IO needs to convertion to written when IO is | 3209 | * that this IO needs to convertion to written when IO is |
@@ -3182,7 +3214,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3182 | else | 3214 | else |
3183 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); | 3215 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
3184 | if (ext4_should_dioread_nolock(inode)) | 3216 | if (ext4_should_dioread_nolock(inode)) |
3185 | set_buffer_uninit(bh_result); | 3217 | map->m_flags |= EXT4_MAP_UNINIT; |
3186 | goto out; | 3218 | goto out; |
3187 | } | 3219 | } |
3188 | /* IO end_io complete, convert the filled extent to written */ | 3220 | /* IO end_io complete, convert the filled extent to written */ |
@@ -3210,14 +3242,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3210 | * the buffer head will be unmapped so that | 3242 | * the buffer head will be unmapped so that |
3211 | * a read from the block returns 0s. | 3243 | * a read from the block returns 0s. |
3212 | */ | 3244 | */ |
3213 | set_buffer_unwritten(bh_result); | 3245 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
3214 | goto out1; | 3246 | goto out1; |
3215 | } | 3247 | } |
3216 | 3248 | ||
3217 | /* buffered write, writepage time, convert*/ | 3249 | /* buffered write, writepage time, convert*/ |
3218 | ret = ext4_ext_convert_to_initialized(handle, inode, | 3250 | ret = ext4_ext_convert_to_initialized(handle, inode, map, path); |
3219 | path, iblock, | ||
3220 | max_blocks); | ||
3221 | if (ret >= 0) | 3251 | if (ret >= 0) |
3222 | ext4_update_inode_fsync_trans(handle, inode, 1); | 3252 | ext4_update_inode_fsync_trans(handle, inode, 1); |
3223 | out: | 3253 | out: |
@@ -3226,7 +3256,7 @@ out: | |||
3226 | goto out2; | 3256 | goto out2; |
3227 | } else | 3257 | } else |
3228 | allocated = ret; | 3258 | allocated = ret; |
3229 | set_buffer_new(bh_result); | 3259 | map->m_flags |= EXT4_MAP_NEW; |
3230 | /* | 3260 | /* |
3231 | * if we allocated more blocks than requested | 3261 | * if we allocated more blocks than requested |
3232 | * we need to make sure we unmap the extra block | 3262 | * we need to make sure we unmap the extra block |
@@ -3234,11 +3264,11 @@ out: | |||
3234 | * unmapped later when we find the buffer_head marked | 3264 | * unmapped later when we find the buffer_head marked |
3235 | * new. | 3265 | * new. |
3236 | */ | 3266 | */ |
3237 | if (allocated > max_blocks) { | 3267 | if (allocated > map->m_len) { |
3238 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, | 3268 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, |
3239 | newblock + max_blocks, | 3269 | newblock + map->m_len, |
3240 | allocated - max_blocks); | 3270 | allocated - map->m_len); |
3241 | allocated = max_blocks; | 3271 | allocated = map->m_len; |
3242 | } | 3272 | } |
3243 | 3273 | ||
3244 | /* | 3274 | /* |
@@ -3252,13 +3282,13 @@ out: | |||
3252 | ext4_da_update_reserve_space(inode, allocated, 0); | 3282 | ext4_da_update_reserve_space(inode, allocated, 0); |
3253 | 3283 | ||
3254 | map_out: | 3284 | map_out: |
3255 | set_buffer_mapped(bh_result); | 3285 | map->m_flags |= EXT4_MAP_MAPPED; |
3256 | out1: | 3286 | out1: |
3257 | if (allocated > max_blocks) | 3287 | if (allocated > map->m_len) |
3258 | allocated = max_blocks; | 3288 | allocated = map->m_len; |
3259 | ext4_ext_show_leaf(inode, path); | 3289 | ext4_ext_show_leaf(inode, path); |
3260 | bh_result->b_bdev = inode->i_sb->s_bdev; | 3290 | map->m_pblk = newblock; |
3261 | bh_result->b_blocknr = newblock; | 3291 | map->m_len = allocated; |
3262 | out2: | 3292 | out2: |
3263 | if (path) { | 3293 | if (path) { |
3264 | ext4_ext_drop_refs(path); | 3294 | ext4_ext_drop_refs(path); |
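Throughout this hunk and the ones around it, per-buffer_head state bits (set_buffer_new, set_buffer_mapped, set_buffer_unwritten, set_buffer_uninit) are replaced by flag bits and result fields in struct ext4_map_blocks. A pared-down stand-in for that result structure, showing the shape of the update rather than the real ext4 definitions:

    #include <stdint.h>

    #define MAP_NEW        0x1   /* stand-ins for the EXT4_MAP_* bits in the diff */
    #define MAP_MAPPED     0x2
    #define MAP_UNWRITTEN  0x4
    #define MAP_UNINIT     0x8

    struct map_result {
        uint64_t m_pblk;    /* first physical block of the mapping */
        uint32_t m_lblk;    /* first logical block requested */
        uint32_t m_len;     /* in: blocks requested, out: blocks mapped */
        uint32_t m_flags;   /* MAP_* bits describing the mapping */
    };

    static void fill_mapping(struct map_result *map, uint64_t newblock,
                             uint32_t allocated)
    {
        map->m_flags |= MAP_MAPPED;
        map->m_pblk = newblock;
        if (allocated > map->m_len)
            allocated = map->m_len;   /* never report more than was asked for */
        map->m_len = allocated;
    }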
@@ -3284,26 +3314,23 @@ out2: | |||
3284 | * | 3314 | * |
3285 | * return < 0, error case. | 3315 | * return < 0, error case. |
3286 | */ | 3316 | */ |
3287 | int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | 3317 | int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
3288 | ext4_lblk_t iblock, | 3318 | struct ext4_map_blocks *map, int flags) |
3289 | unsigned int max_blocks, struct buffer_head *bh_result, | ||
3290 | int flags) | ||
3291 | { | 3319 | { |
3292 | struct ext4_ext_path *path = NULL; | 3320 | struct ext4_ext_path *path = NULL; |
3293 | struct ext4_extent_header *eh; | 3321 | struct ext4_extent_header *eh; |
3294 | struct ext4_extent newex, *ex, *last_ex; | 3322 | struct ext4_extent newex, *ex, *last_ex; |
3295 | ext4_fsblk_t newblock; | 3323 | ext4_fsblk_t newblock; |
3296 | int err = 0, depth, ret, cache_type; | 3324 | int i, err = 0, depth, ret, cache_type; |
3297 | unsigned int allocated = 0; | 3325 | unsigned int allocated = 0; |
3298 | struct ext4_allocation_request ar; | 3326 | struct ext4_allocation_request ar; |
3299 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; | 3327 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
3300 | 3328 | ||
3301 | __clear_bit(BH_New, &bh_result->b_state); | ||
3302 | ext_debug("blocks %u/%u requested for inode %lu\n", | 3329 | ext_debug("blocks %u/%u requested for inode %lu\n", |
3303 | iblock, max_blocks, inode->i_ino); | 3330 | map->m_lblk, map->m_len, inode->i_ino); |
3304 | 3331 | ||
3305 | /* check in cache */ | 3332 | /* check in cache */ |
3306 | cache_type = ext4_ext_in_cache(inode, iblock, &newex); | 3333 | cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex); |
3307 | if (cache_type) { | 3334 | if (cache_type) { |
3308 | if (cache_type == EXT4_EXT_CACHE_GAP) { | 3335 | if (cache_type == EXT4_EXT_CACHE_GAP) { |
3309 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | 3336 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
@@ -3316,12 +3343,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3316 | /* we should allocate requested block */ | 3343 | /* we should allocate requested block */ |
3317 | } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { | 3344 | } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { |
3318 | /* block is already allocated */ | 3345 | /* block is already allocated */ |
3319 | newblock = iblock | 3346 | newblock = map->m_lblk |
3320 | - le32_to_cpu(newex.ee_block) | 3347 | - le32_to_cpu(newex.ee_block) |
3321 | + ext_pblock(&newex); | 3348 | + ext_pblock(&newex); |
3322 | /* number of remaining blocks in the extent */ | 3349 | /* number of remaining blocks in the extent */ |
3323 | allocated = ext4_ext_get_actual_len(&newex) - | 3350 | allocated = ext4_ext_get_actual_len(&newex) - |
3324 | (iblock - le32_to_cpu(newex.ee_block)); | 3351 | (map->m_lblk - le32_to_cpu(newex.ee_block)); |
3325 | goto out; | 3352 | goto out; |
3326 | } else { | 3353 | } else { |
3327 | BUG(); | 3354 | BUG(); |
@@ -3329,7 +3356,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3329 | } | 3356 | } |
3330 | 3357 | ||
3331 | /* find extent for this block */ | 3358 | /* find extent for this block */ |
3332 | path = ext4_ext_find_extent(inode, iblock, NULL); | 3359 | path = ext4_ext_find_extent(inode, map->m_lblk, NULL); |
3333 | if (IS_ERR(path)) { | 3360 | if (IS_ERR(path)) { |
3334 | err = PTR_ERR(path); | 3361 | err = PTR_ERR(path); |
3335 | path = NULL; | 3362 | path = NULL; |
@@ -3345,8 +3372,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3345 | */ | 3372 | */ |
3346 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { | 3373 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { |
3347 | EXT4_ERROR_INODE(inode, "bad extent address " | 3374 | EXT4_ERROR_INODE(inode, "bad extent address " |
3348 | "iblock: %d, depth: %d pblock %lld", | 3375 | "lblock: %lu, depth: %d pblock %lld", |
3349 | iblock, depth, path[depth].p_block); | 3376 | (unsigned long) map->m_lblk, depth, |
3377 | path[depth].p_block); | ||
3350 | err = -EIO; | 3378 | err = -EIO; |
3351 | goto out2; | 3379 | goto out2; |
3352 | } | 3380 | } |
@@ -3364,12 +3392,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3364 | */ | 3392 | */ |
3365 | ee_len = ext4_ext_get_actual_len(ex); | 3393 | ee_len = ext4_ext_get_actual_len(ex); |
3366 | /* if found extent covers block, simply return it */ | 3394 | /* if found extent covers block, simply return it */ |
3367 | if (in_range(iblock, ee_block, ee_len)) { | 3395 | if (in_range(map->m_lblk, ee_block, ee_len)) { |
3368 | newblock = iblock - ee_block + ee_start; | 3396 | newblock = map->m_lblk - ee_block + ee_start; |
3369 | /* number of remaining blocks in the extent */ | 3397 | /* number of remaining blocks in the extent */ |
3370 | allocated = ee_len - (iblock - ee_block); | 3398 | allocated = ee_len - (map->m_lblk - ee_block); |
3371 | ext_debug("%u fit into %u:%d -> %llu\n", iblock, | 3399 | ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, |
3372 | ee_block, ee_len, newblock); | 3400 | ee_block, ee_len, newblock); |
3373 | 3401 | ||
3374 | /* Do not put uninitialized extent in the cache */ | 3402 | /* Do not put uninitialized extent in the cache */ |
3375 | if (!ext4_ext_is_uninitialized(ex)) { | 3403 | if (!ext4_ext_is_uninitialized(ex)) { |
@@ -3379,8 +3407,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3379 | goto out; | 3407 | goto out; |
3380 | } | 3408 | } |
3381 | ret = ext4_ext_handle_uninitialized_extents(handle, | 3409 | ret = ext4_ext_handle_uninitialized_extents(handle, |
3382 | inode, iblock, max_blocks, path, | 3410 | inode, map, path, flags, allocated, |
3383 | flags, allocated, bh_result, newblock); | 3411 | newblock); |
3384 | return ret; | 3412 | return ret; |
3385 | } | 3413 | } |
3386 | } | 3414 | } |
@@ -3394,7 +3422,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3394 | * put just found gap into cache to speed up | 3422 | * put just found gap into cache to speed up |
3395 | * subsequent requests | 3423 | * subsequent requests |
3396 | */ | 3424 | */ |
3397 | ext4_ext_put_gap_in_cache(inode, path, iblock); | 3425 | ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); |
3398 | goto out2; | 3426 | goto out2; |
3399 | } | 3427 | } |
3400 | /* | 3428 | /* |
@@ -3402,11 +3430,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3402 | */ | 3430 | */ |
3403 | 3431 | ||
3404 | /* find neighbour allocated blocks */ | 3432 | /* find neighbour allocated blocks */ |
3405 | ar.lleft = iblock; | 3433 | ar.lleft = map->m_lblk; |
3406 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); | 3434 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); |
3407 | if (err) | 3435 | if (err) |
3408 | goto out2; | 3436 | goto out2; |
3409 | ar.lright = iblock; | 3437 | ar.lright = map->m_lblk; |
3410 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); | 3438 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); |
3411 | if (err) | 3439 | if (err) |
3412 | goto out2; | 3440 | goto out2; |
@@ -3417,26 +3445,26 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3417 | * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is | 3445 | * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is |
3418 | * EXT_UNINIT_MAX_LEN. | 3446 | * EXT_UNINIT_MAX_LEN. |
3419 | */ | 3447 | */ |
3420 | if (max_blocks > EXT_INIT_MAX_LEN && | 3448 | if (map->m_len > EXT_INIT_MAX_LEN && |
3421 | !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) | 3449 | !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
3422 | max_blocks = EXT_INIT_MAX_LEN; | 3450 | map->m_len = EXT_INIT_MAX_LEN; |
3423 | else if (max_blocks > EXT_UNINIT_MAX_LEN && | 3451 | else if (map->m_len > EXT_UNINIT_MAX_LEN && |
3424 | (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) | 3452 | (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
3425 | max_blocks = EXT_UNINIT_MAX_LEN; | 3453 | map->m_len = EXT_UNINIT_MAX_LEN; |
3426 | 3454 | ||
3427 | /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ | 3455 | /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ |
3428 | newex.ee_block = cpu_to_le32(iblock); | 3456 | newex.ee_block = cpu_to_le32(map->m_lblk); |
3429 | newex.ee_len = cpu_to_le16(max_blocks); | 3457 | newex.ee_len = cpu_to_le16(map->m_len); |
3430 | err = ext4_ext_check_overlap(inode, &newex, path); | 3458 | err = ext4_ext_check_overlap(inode, &newex, path); |
3431 | if (err) | 3459 | if (err) |
3432 | allocated = ext4_ext_get_actual_len(&newex); | 3460 | allocated = ext4_ext_get_actual_len(&newex); |
3433 | else | 3461 | else |
3434 | allocated = max_blocks; | 3462 | allocated = map->m_len; |
3435 | 3463 | ||
3436 | /* allocate new block */ | 3464 | /* allocate new block */ |
3437 | ar.inode = inode; | 3465 | ar.inode = inode; |
3438 | ar.goal = ext4_ext_find_goal(inode, path, iblock); | 3466 | ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); |
3439 | ar.logical = iblock; | 3467 | ar.logical = map->m_lblk; |
3440 | ar.len = allocated; | 3468 | ar.len = allocated; |
3441 | if (S_ISREG(inode->i_mode)) | 3469 | if (S_ISREG(inode->i_mode)) |
3442 | ar.flags = EXT4_MB_HINT_DATA; | 3470 | ar.flags = EXT4_MB_HINT_DATA; |
@@ -3470,21 +3498,33 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3470 | EXT4_STATE_DIO_UNWRITTEN); | 3498 | EXT4_STATE_DIO_UNWRITTEN); |
3471 | } | 3499 | } |
3472 | if (ext4_should_dioread_nolock(inode)) | 3500 | if (ext4_should_dioread_nolock(inode)) |
3473 | set_buffer_uninit(bh_result); | 3501 | map->m_flags |= EXT4_MAP_UNINIT; |
3474 | } | 3502 | } |
3475 | 3503 | ||
3476 | if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { | 3504 | if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) { |
3477 | if (unlikely(!eh->eh_entries)) { | 3505 | if (unlikely(!eh->eh_entries)) { |
3478 | EXT4_ERROR_INODE(inode, | 3506 | EXT4_ERROR_INODE(inode, |
3479 | "eh->eh_entries == 0 ee_block %d", | 3507 | "eh->eh_entries == 0 and " |
3480 | ex->ee_block); | 3508 | "EOFBLOCKS_FL set"); |
3481 | err = -EIO; | 3509 | err = -EIO; |
3482 | goto out2; | 3510 | goto out2; |
3483 | } | 3511 | } |
3484 | last_ex = EXT_LAST_EXTENT(eh); | 3512 | last_ex = EXT_LAST_EXTENT(eh); |
3485 | if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) | 3513 | /* |
3486 | + ext4_ext_get_actual_len(last_ex)) | 3514 | * If the current leaf block was reached by looking at |
3487 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; | 3515 | * the last index block all the way down the tree, and |
3516 | * we are extending the inode beyond the last extent | ||
3517 | * in the current leaf block, then clear the | ||
3518 | * EOFBLOCKS_FL flag. | ||
3519 | */ | ||
3520 | for (i = depth-1; i >= 0; i--) { | ||
3521 | if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) | ||
3522 | break; | ||
3523 | } | ||
3524 | if ((i < 0) && | ||
3525 | (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) + | ||
3526 | ext4_ext_get_actual_len(last_ex))) | ||
3527 | ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); | ||
3488 | } | 3528 | } |
3489 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | 3529 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
3490 | if (err) { | 3530 | if (err) { |
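The EOFBLOCKS_FL handling above becomes stricter: the flag is cleared only when the leaf that received the new extent was reached through the last index at every interior level (that is, it is the rightmost leaf of the tree) and the allocation extends past the last extent in that leaf. A sketch of the rightmost-leaf walk with simplified stand-ins for the ext4 path structures:

    /* path[0..depth-1] describe the interior levels walked to reach the leaf. */
    struct toy_level {
        int cur_idx;    /* index followed at this level */
        int last_idx;   /* last valid index at this level */
    };

    /* Returns 1 only if every interior level was traversed via its last
     * index, meaning the leaf is the rightmost one in the extent tree. */
    static int reached_via_last_index(const struct toy_level *path, int depth)
    {
        int i;

        for (i = depth - 1; i >= 0; i--)
            if (path[i].cur_idx != path[i].last_idx)
                return 0;
        return 1;
    }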
@@ -3500,9 +3540,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3500 | /* previous routine could use block we allocated */ | 3540 | /* previous routine could use block we allocated */ |
3501 | newblock = ext_pblock(&newex); | 3541 | newblock = ext_pblock(&newex); |
3502 | allocated = ext4_ext_get_actual_len(&newex); | 3542 | allocated = ext4_ext_get_actual_len(&newex); |
3503 | if (allocated > max_blocks) | 3543 | if (allocated > map->m_len) |
3504 | allocated = max_blocks; | 3544 | allocated = map->m_len; |
3505 | set_buffer_new(bh_result); | 3545 | map->m_flags |= EXT4_MAP_NEW; |
3506 | 3546 | ||
3507 | /* | 3547 | /* |
3508 | * Update reserved blocks/metadata blocks after successful | 3548 | * Update reserved blocks/metadata blocks after successful |
@@ -3516,18 +3556,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3516 | * when it is _not_ an uninitialized extent. | 3556 | * when it is _not_ an uninitialized extent. |
3517 | */ | 3557 | */ |
3518 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { | 3558 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { |
3519 | ext4_ext_put_in_cache(inode, iblock, allocated, newblock, | 3559 | ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock, |
3520 | EXT4_EXT_CACHE_EXTENT); | 3560 | EXT4_EXT_CACHE_EXTENT); |
3521 | ext4_update_inode_fsync_trans(handle, inode, 1); | 3561 | ext4_update_inode_fsync_trans(handle, inode, 1); |
3522 | } else | 3562 | } else |
3523 | ext4_update_inode_fsync_trans(handle, inode, 0); | 3563 | ext4_update_inode_fsync_trans(handle, inode, 0); |
3524 | out: | 3564 | out: |
3525 | if (allocated > max_blocks) | 3565 | if (allocated > map->m_len) |
3526 | allocated = max_blocks; | 3566 | allocated = map->m_len; |
3527 | ext4_ext_show_leaf(inode, path); | 3567 | ext4_ext_show_leaf(inode, path); |
3528 | set_buffer_mapped(bh_result); | 3568 | map->m_flags |= EXT4_MAP_MAPPED; |
3529 | bh_result->b_bdev = inode->i_sb->s_bdev; | 3569 | map->m_pblk = newblock; |
3530 | bh_result->b_blocknr = newblock; | 3570 | map->m_len = allocated; |
3531 | out2: | 3571 | out2: |
3532 | if (path) { | 3572 | if (path) { |
3533 | ext4_ext_drop_refs(path); | 3573 | ext4_ext_drop_refs(path); |
@@ -3625,7 +3665,7 @@ static void ext4_falloc_update_inode(struct inode *inode, | |||
3625 | * can proceed even if the new size is the same as i_size. | 3665 | * can proceed even if the new size is the same as i_size. |
3626 | */ | 3666 | */ |
3627 | if (new_size > i_size_read(inode)) | 3667 | if (new_size > i_size_read(inode)) |
3628 | EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL; | 3668 | ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); |
3629 | } | 3669 | } |
3630 | 3670 | ||
3631 | } | 3671 | } |
@@ -3640,55 +3680,57 @@ static void ext4_falloc_update_inode(struct inode *inode, | |||
3640 | long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) | 3680 | long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) |
3641 | { | 3681 | { |
3642 | handle_t *handle; | 3682 | handle_t *handle; |
3643 | ext4_lblk_t block; | ||
3644 | loff_t new_size; | 3683 | loff_t new_size; |
3645 | unsigned int max_blocks; | 3684 | unsigned int max_blocks; |
3646 | int ret = 0; | 3685 | int ret = 0; |
3647 | int ret2 = 0; | 3686 | int ret2 = 0; |
3648 | int retries = 0; | 3687 | int retries = 0; |
3649 | struct buffer_head map_bh; | 3688 | struct ext4_map_blocks map; |
3650 | unsigned int credits, blkbits = inode->i_blkbits; | 3689 | unsigned int credits, blkbits = inode->i_blkbits; |
3651 | 3690 | ||
3652 | /* | 3691 | /* |
3653 | * currently supporting (pre)allocate mode for extent-based | 3692 | * currently supporting (pre)allocate mode for extent-based |
3654 | * files _only_ | 3693 | * files _only_ |
3655 | */ | 3694 | */ |
3656 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | 3695 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
3657 | return -EOPNOTSUPP; | 3696 | return -EOPNOTSUPP; |
3658 | 3697 | ||
3659 | /* preallocation to directories is currently not supported */ | 3698 | /* preallocation to directories is currently not supported */ |
3660 | if (S_ISDIR(inode->i_mode)) | 3699 | if (S_ISDIR(inode->i_mode)) |
3661 | return -ENODEV; | 3700 | return -ENODEV; |
3662 | 3701 | ||
3663 | block = offset >> blkbits; | 3702 | map.m_lblk = offset >> blkbits; |
3664 | /* | 3703 | /* |
3665 | * We can't just convert len to max_blocks because | 3704 | * We can't just convert len to max_blocks because |
3666 | * If blocksize = 4096 offset = 3072 and len = 2048 | 3705 | * If blocksize = 4096 offset = 3072 and len = 2048 |
3667 | */ | 3706 | */ |
3668 | max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) | 3707 | max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) |
3669 | - block; | 3708 | - map.m_lblk; |
3670 | /* | 3709 | /* |
3671 | * credits to insert 1 extent into extent tree | 3710 | * credits to insert 1 extent into extent tree |
3672 | */ | 3711 | */ |
3673 | credits = ext4_chunk_trans_blocks(inode, max_blocks); | 3712 | credits = ext4_chunk_trans_blocks(inode, max_blocks); |
3674 | mutex_lock(&inode->i_mutex); | 3713 | mutex_lock(&inode->i_mutex); |
3714 | ret = inode_newsize_ok(inode, (len + offset)); | ||
3715 | if (ret) { | ||
3716 | mutex_unlock(&inode->i_mutex); | ||
3717 | return ret; | ||
3718 | } | ||
3675 | retry: | 3719 | retry: |
3676 | while (ret >= 0 && ret < max_blocks) { | 3720 | while (ret >= 0 && ret < max_blocks) { |
3677 | block = block + ret; | 3721 | map.m_lblk = map.m_lblk + ret; |
3678 | max_blocks = max_blocks - ret; | 3722 | map.m_len = max_blocks = max_blocks - ret; |
3679 | handle = ext4_journal_start(inode, credits); | 3723 | handle = ext4_journal_start(inode, credits); |
3680 | if (IS_ERR(handle)) { | 3724 | if (IS_ERR(handle)) { |
3681 | ret = PTR_ERR(handle); | 3725 | ret = PTR_ERR(handle); |
3682 | break; | 3726 | break; |
3683 | } | 3727 | } |
3684 | map_bh.b_state = 0; | 3728 | ret = ext4_map_blocks(handle, inode, &map, |
3685 | ret = ext4_get_blocks(handle, inode, block, | ||
3686 | max_blocks, &map_bh, | ||
3687 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); | 3729 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); |
3688 | if (ret <= 0) { | 3730 | if (ret <= 0) { |
3689 | #ifdef EXT4FS_DEBUG | 3731 | #ifdef EXT4FS_DEBUG |
3690 | WARN_ON(ret <= 0); | 3732 | WARN_ON(ret <= 0); |
3691 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | 3733 | printk(KERN_ERR "%s: ext4_ext_map_blocks " |
3692 | "returned error inode#%lu, block=%u, " | 3734 | "returned error inode#%lu, block=%u, " |
3693 | "max_blocks=%u", __func__, | 3735 | "max_blocks=%u", __func__, |
3694 | inode->i_ino, block, max_blocks); | 3736 | inode->i_ino, block, max_blocks); |
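The block-count computation carried over into the map-based code above is the reason for the comment about not converting len directly: a byte range can straddle a block boundary at both ends. A standalone worked example of the same arithmetic, using the values from the comment:

    #include <stdio.h>

    /* Number of blocks touched by the byte range [offset, offset + len),
     * computed as in the diff: align the end up, subtract the start block. */
    static unsigned int range_blocks(unsigned long long offset,
                                     unsigned long long len,
                                     unsigned int blkbits)
    {
        unsigned long long blocksize = 1ULL << blkbits;
        unsigned long long first = offset >> blkbits;
        unsigned long long end = (offset + len + blocksize - 1) >> blkbits;

        return (unsigned int)(end - first);
    }

    int main(void)
    {
        /* blocksize 4096, offset 3072, len 2048: two blocks are touched,
         * even though len >> 12 would give zero. */
        printf("%u\n", range_blocks(3072, 2048, 12));
        return 0;
    }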
@@ -3697,14 +3739,14 @@ retry: | |||
3697 | ret2 = ext4_journal_stop(handle); | 3739 | ret2 = ext4_journal_stop(handle); |
3698 | break; | 3740 | break; |
3699 | } | 3741 | } |
3700 | if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len, | 3742 | if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, |
3701 | blkbits) >> blkbits)) | 3743 | blkbits) >> blkbits)) |
3702 | new_size = offset + len; | 3744 | new_size = offset + len; |
3703 | else | 3745 | else |
3704 | new_size = (block + ret) << blkbits; | 3746 | new_size = (map.m_lblk + ret) << blkbits; |
3705 | 3747 | ||
3706 | ext4_falloc_update_inode(inode, mode, new_size, | 3748 | ext4_falloc_update_inode(inode, mode, new_size, |
3707 | buffer_new(&map_bh)); | 3749 | (map.m_flags & EXT4_MAP_NEW)); |
3708 | ext4_mark_inode_dirty(handle, inode); | 3750 | ext4_mark_inode_dirty(handle, inode); |
3709 | ret2 = ext4_journal_stop(handle); | 3751 | ret2 = ext4_journal_stop(handle); |
3710 | if (ret2) | 3752 | if (ret2) |
@@ -3733,42 +3775,39 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | |||
3733 | ssize_t len) | 3775 | ssize_t len) |
3734 | { | 3776 | { |
3735 | handle_t *handle; | 3777 | handle_t *handle; |
3736 | ext4_lblk_t block; | ||
3737 | unsigned int max_blocks; | 3778 | unsigned int max_blocks; |
3738 | int ret = 0; | 3779 | int ret = 0; |
3739 | int ret2 = 0; | 3780 | int ret2 = 0; |
3740 | struct buffer_head map_bh; | 3781 | struct ext4_map_blocks map; |
3741 | unsigned int credits, blkbits = inode->i_blkbits; | 3782 | unsigned int credits, blkbits = inode->i_blkbits; |
3742 | 3783 | ||
3743 | block = offset >> blkbits; | 3784 | map.m_lblk = offset >> blkbits; |
3744 | /* | 3785 | /* |
3745 | * We can't just convert len to max_blocks because | 3786 | * We can't just convert len to max_blocks because |
3746 | * If blocksize = 4096 offset = 3072 and len = 2048 | 3787 | * If blocksize = 4096 offset = 3072 and len = 2048 |
3747 | */ | 3788 | */ |
3748 | max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) | 3789 | max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - |
3749 | - block; | 3790 | map.m_lblk); |
3750 | /* | 3791 | /* |
3751 | * credits to insert 1 extent into extent tree | 3792 | * credits to insert 1 extent into extent tree |
3752 | */ | 3793 | */ |
3753 | credits = ext4_chunk_trans_blocks(inode, max_blocks); | 3794 | credits = ext4_chunk_trans_blocks(inode, max_blocks); |
3754 | while (ret >= 0 && ret < max_blocks) { | 3795 | while (ret >= 0 && ret < max_blocks) { |
3755 | block = block + ret; | 3796 | map.m_lblk += ret; |
3756 | max_blocks = max_blocks - ret; | 3797 | map.m_len = (max_blocks -= ret); |
3757 | handle = ext4_journal_start(inode, credits); | 3798 | handle = ext4_journal_start(inode, credits); |
3758 | if (IS_ERR(handle)) { | 3799 | if (IS_ERR(handle)) { |
3759 | ret = PTR_ERR(handle); | 3800 | ret = PTR_ERR(handle); |
3760 | break; | 3801 | break; |
3761 | } | 3802 | } |
3762 | map_bh.b_state = 0; | 3803 | ret = ext4_map_blocks(handle, inode, &map, |
3763 | ret = ext4_get_blocks(handle, inode, block, | ||
3764 | max_blocks, &map_bh, | ||
3765 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); | 3804 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); |
3766 | if (ret <= 0) { | 3805 | if (ret <= 0) { |
3767 | WARN_ON(ret <= 0); | 3806 | WARN_ON(ret <= 0); |
3768 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | 3807 | printk(KERN_ERR "%s: ext4_ext_map_blocks " |
3769 | "returned error inode#%lu, block=%u, " | 3808 | "returned error inode#%lu, block=%u, " |
3770 | "max_blocks=%u", __func__, | 3809 | "max_blocks=%u", __func__, |
3771 | inode->i_ino, block, max_blocks); | 3810 | inode->i_ino, map.m_lblk, map.m_len); |
3772 | } | 3811 | } |
3773 | ext4_mark_inode_dirty(handle, inode); | 3812 | ext4_mark_inode_dirty(handle, inode); |
3774 | ret2 = ext4_journal_stop(handle); | 3813 | ret2 = ext4_journal_stop(handle); |
@@ -3898,7 +3937,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3898 | int error = 0; | 3937 | int error = 0; |
3899 | 3938 | ||
3900 | /* fallback to generic here if not in extents fmt */ | 3939 | /* fallback to generic here if not in extents fmt */ |
3901 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | 3940 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
3902 | return generic_block_fiemap(inode, fieinfo, start, len, | 3941 | return generic_block_fiemap(inode, fieinfo, start, len, |
3903 | ext4_get_block); | 3942 | ext4_get_block); |
3904 | 3943 | ||