author      Theodore Ts'o <tytso@mit.edu>   2011-02-26 14:07:31 -0500
committer   Theodore Ts'o <tytso@mit.edu>   2011-02-26 14:07:31 -0500
commit      8eb9e5ce211de1b98bc84e93258b7db0860a103c (patch)
tree        80c5cba007b3f4612524474f1ec1cdbc4a7b5181 /fs/ext4/inode.c
parent      6fd7a46781999c32f423025767e43b349b967d57 (diff)
ext4: fold __mpage_da_writepage() into write_cache_pages_da()
Fold the __mpage_da_writepage() function into write_cache_pages_da().
This will give us opportunities to clean up and simplify the resulting
code.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
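The transformation is the classic fold of a single-caller helper into its caller: each early return in the helper becomes a goto to a shared exit label, so per-iteration cleanup still runs on every path (in the diff below, the helper's "return MPAGE_DA_EXTENT_TAIL;" becomes "ret = MPAGE_DA_EXTENT_TAIL; goto out;", and the new "out:" label releases the pagevec before returning). A minimal compilable sketch of the pattern, with hypothetical names (helper, caller_before, caller_after, do_work, cleanup) standing in for the ext4 functions:

#include <stdio.h>

static int do_work(int x) { return 2 * x; }   /* stand-in for real work */
static void cleanup(void) { }                 /* shared per-call cleanup */

/* Before: a single-caller helper that signals early exit via return. */
static int helper(int x)
{
        if (x < 0)
                return -1;      /* early exit */
        return do_work(x);
}

static int caller_before(int x)
{
        int ret = helper(x);

        cleanup();              /* must run on every path */
        return ret;
}

/*
 * After the fold: the helper's body lives in its only caller, and the
 * early "return -1;" has become "goto out;" so the shared cleanup
 * still runs exactly once on every exit path.
 */
static int caller_after(int x)
{
        int ret;

        if (x < 0) {
                ret = -1;
                goto out;
        }
        ret = do_work(x);
out:
        cleanup();
        return ret;
}

int main(void)
{
        /* Both formulations behave identically. */
        printf("%d %d\n", caller_before(3), caller_after(3));   /* 6 6 */
        printf("%d %d\n", caller_before(-1), caller_after(-1)); /* -1 -1 */
        return 0;
}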
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--   fs/ext4/inode.c   206
1 file changed, 91 insertions(+), 115 deletions(-)
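One computation carried over verbatim in the fold is the mapping from a page index to its first logical block: logical = (sector_t) page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits), i.e. each page cache page covers 2^(PAGE_CACHE_SHIFT - i_blkbits) filesystem blocks. A small user-space sketch of that arithmetic; the PAGE_CACHE_SHIFT value and the page_to_logical helper here are illustrative, not kernel API:

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12     /* assume 4 KiB page cache pages */

/*
 * First logical (file-relative) block covered by a page: one page
 * spans 2^(PAGE_CACHE_SHIFT - blkbits) blocks, so shift the page
 * index left by the difference.
 */
static uint64_t page_to_logical(uint64_t page_index, unsigned int blkbits)
{
        return page_index << (PAGE_CACHE_SHIFT - blkbits);
}

int main(void)
{
        /* 1 KiB blocks (blkbits = 10): 4 blocks per page, page 3 -> block 12 */
        printf("%llu\n", (unsigned long long)page_to_logical(3, 10));
        /* 4 KiB blocks (blkbits = 12): 1 block per page, page 3 -> block 3 */
        printf("%llu\n", (unsigned long long)page_to_logical(3, 12));
        return 0;
}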
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e878c3a7aaf0..fcd08ca0643b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2438,102 +2438,6 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
 }
 
 /*
- * __mpage_da_writepage - finds extent of pages and blocks
- *
- * @page: page to consider
- * @wbc: not used, we just follow rules
- * @data: context
- *
- * The function finds extents of pages and scan them for all blocks.
- */
-static int __mpage_da_writepage(struct page *page,
-                                struct writeback_control *wbc,
-                                struct mpage_da_data *mpd)
-{
-        struct inode *inode = mpd->inode;
-        struct buffer_head *bh, *head;
-        sector_t logical;
-
-        /*
-         * Can we merge this page to current extent?
-         */
-        if (mpd->next_page != page->index) {
-                /*
-                 * Nope, we can't. So, we map non-allocated blocks
-                 * and start IO on them
-                 */
-                if (mpd->next_page != mpd->first_page) {
-                        mpage_da_map_and_submit(mpd);
-                        /*
-                         * skip rest of the page in the page_vec
-                         */
-                        redirty_page_for_writepage(wbc, page);
-                        unlock_page(page);
-                        return MPAGE_DA_EXTENT_TAIL;
-                }
-
-                /*
-                 * Start next extent of pages ...
-                 */
-                mpd->first_page = page->index;
-
-                /*
-                 * ... and blocks
-                 */
-                mpd->b_size = 0;
-                mpd->b_state = 0;
-                mpd->b_blocknr = 0;
-        }
-
-        mpd->next_page = page->index + 1;
-        logical = (sector_t) page->index <<
-                  (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
-        if (!page_has_buffers(page)) {
-                mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
-                                       (1 << BH_Dirty) | (1 << BH_Uptodate));
-                if (mpd->io_done)
-                        return MPAGE_DA_EXTENT_TAIL;
-        } else {
-                /*
-                 * Page with regular buffer heads, just add all dirty ones
-                 */
-                head = page_buffers(page);
-                bh = head;
-                do {
-                        BUG_ON(buffer_locked(bh));
-                        /*
-                         * We need to try to allocate
-                         * unmapped blocks in the same page.
-                         * Otherwise we won't make progress
-                         * with the page in ext4_writepage
-                         */
-                        if (ext4_bh_delay_or_unwritten(NULL, bh)) {
-                                mpage_add_bh_to_extent(mpd, logical,
-                                                       bh->b_size,
-                                                       bh->b_state);
-                                if (mpd->io_done)
-                                        return MPAGE_DA_EXTENT_TAIL;
-                        } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
-                                /*
-                                 * mapped dirty buffer. We need to update
-                                 * the b_state because we look at
-                                 * b_state in mpage_da_map_blocks. We don't
-                                 * update b_size because if we find an
-                                 * unmapped buffer_head later we need to
-                                 * use the b_state flag of that buffer_head.
-                                 */
-                                if (mpd->b_size == 0)
-                                        mpd->b_state = bh->b_state & BH_FLAGS;
-                        }
-                        logical++;
-                } while ((bh = bh->b_this_page) != head);
-        }
-
-        return 0;
-}
-
-/*
  * This is a special get_blocks_t callback which is used by
  * ext4_da_write_begin().  It will either return mapped block or
  * reserve space for a single block.
@@ -2811,18 +2715,17 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
 
 /*
  * write_cache_pages_da - walk the list of dirty pages of the given
- * address space and call the callback function (which usually writes
- * the pages).
- *
- * This is a forked version of write_cache_pages().  Differences:
- *      Range cyclic is ignored.
- *      no_nrwrite_index_update is always presumed true
+ * address space and accumulate pages that need writing, and call
+ * mpage_da_map_and_submit to map the pages and then write them.
  */
 static int write_cache_pages_da(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 struct mpage_da_data *mpd,
                                 pgoff_t *done_index)
 {
+        struct inode *inode = mpd->inode;
+        struct buffer_head *bh, *head;
+        sector_t logical;
         int ret = 0;
         int done = 0;
         struct pagevec pvec;
@@ -2899,17 +2802,90 @@ continue_unlock:
                         if (!clear_page_dirty_for_io(page))
                                 goto continue_unlock;
 
-                        ret = __mpage_da_writepage(page, wbc, mpd);
-                        if (unlikely(ret)) {
-                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
+                        /* BEGIN __mpage_da_writepage */
+
+                        /*
+                         * Can we merge this page to current extent?
+                         */
+                        if (mpd->next_page != page->index) {
+                                /*
+                                 * Nope, we can't. So, we map
+                                 * non-allocated blocks and start IO
+                                 * on them
+                                 */
+                                if (mpd->next_page != mpd->first_page) {
+                                        mpage_da_map_and_submit(mpd);
+                                        /*
+                                         * skip rest of the page in the page_vec
+                                         */
+                                        redirty_page_for_writepage(wbc, page);
                                         unlock_page(page);
-                                        ret = 0;
-                                } else {
-                                        done = 1;
-                                        break;
+                                        ret = MPAGE_DA_EXTENT_TAIL;
+                                        goto out;
                                 }
+
+                                /*
+                                 * Start next extent of pages and blocks
+                                 */
+                                mpd->first_page = page->index;
+                                mpd->b_size = 0;
+                                mpd->b_state = 0;
+                                mpd->b_blocknr = 0;
+                        }
+
+                        mpd->next_page = page->index + 1;
+                        logical = (sector_t) page->index <<
+                                (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+                        if (!page_has_buffers(page)) {
+                                mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
+                                                       (1 << BH_Dirty) | (1 << BH_Uptodate));
+                                if (mpd->io_done) {
+                                        ret = MPAGE_DA_EXTENT_TAIL;
+                                        goto out;
+                                }
+                        } else {
+                                /*
+                                 * Page with regular buffer heads, just add all dirty ones
+                                 */
+                                head = page_buffers(page);
+                                bh = head;
+                                do {
+                                        BUG_ON(buffer_locked(bh));
+                                        /*
+                                         * We need to try to allocate
+                                         * unmapped blocks in the same page.
+                                         * Otherwise we won't make progress
+                                         * with the page in ext4_writepage
+                                         */
+                                        if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+                                                mpage_add_bh_to_extent(mpd, logical,
+                                                                       bh->b_size,
+                                                                       bh->b_state);
+                                                if (mpd->io_done) {
+                                                        ret = MPAGE_DA_EXTENT_TAIL;
+                                                        goto out;
+                                                }
+                                        } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+                                                /*
+                                                 * mapped dirty buffer. We need to update
+                                                 * the b_state because we look at
+                                                 * b_state in mpage_da_map_blocks. We don't
+                                                 * update b_size because if we find an
+                                                 * unmapped buffer_head later we need to
+                                                 * use the b_state flag of that buffer_head.
+                                                 */
+                                                if (mpd->b_size == 0)
+                                                        mpd->b_state = bh->b_state & BH_FLAGS;
+                                        }
+                                        logical++;
+                                } while ((bh = bh->b_this_page) != head);
                         }
 
+                        ret = 0;
+
+                        /* END __mpage_da_writepage */
+
                         if (nr_to_write > 0) {
                                 nr_to_write--;
                                 if (nr_to_write == 0 &&
@@ -2933,6 +2909,10 @@ continue_unlock:
                 cond_resched();
         }
         return ret;
+out:
+        pagevec_release(&pvec);
+        cond_resched();
+        return ret;
 }
 
 
@@ -3059,13 +3039,9 @@ retry:
                 }
 
                 /*
-                 * Now call __mpage_da_writepage to find the next
+                 * Now call write_cache_pages_da() to find the next
                  * contiguous region of logical blocks that need
-                 * blocks to be allocated by ext4.  We don't actually
-                 * submit the blocks for I/O here, even though
-                 * write_cache_pages thinks it will, and will set the
-                 * pages as clean for write before calling
-                 * __mpage_da_writepage().
+                 * blocks to be allocated by ext4 and submit them.
                  */
                 mpd.b_size = 0;
                 mpd.b_state = 0;