diff options
author | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2014-06-04 19:07:44 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-04 19:54:02 -0400 |
commit | 90768eee4565adb28ea28b4ac5081c676a8fe1f2 (patch) | |
tree | acd43359d33ffada20aae69d254c9bb44dab6170 /fs | |
parent | 1b938c0827478df268d2336469ec48d400a2eb3e (diff) |
fs/mpage.c: factor clean_buffers() out of __mpage_writepage()
__mpage_writepage() is over 200 lines long, has 20 local variables, four
goto labels and could desperately use simplification. Splitting
clean_buffers() out into a helper function improves matters a little,
removing 20+ lines from it.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dheeraj Reddy <dheeraj.reddy@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/mpage.c | 54 |
1 file changed, 30 insertions, 24 deletions
diff --git a/fs/mpage.c b/fs/mpage.c index 4979ffa60aaa..4cc9c5d079f7 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -439,6 +439,35 @@ struct mpage_data { | |||
439 | unsigned use_writepage; | 439 | unsigned use_writepage; |
440 | }; | 440 | }; |
441 | 441 | ||
442 | /* | ||
443 | * We have our BIO, so we can now mark the buffers clean. Make | ||
444 | * sure to only clean buffers which we know we'll be writing. | ||
445 | */ | ||
446 | static void clean_buffers(struct page *page, unsigned first_unmapped) | ||
447 | { | ||
448 | unsigned buffer_counter = 0; | ||
449 | struct buffer_head *bh, *head; | ||
450 | if (!page_has_buffers(page)) | ||
451 | return; | ||
452 | head = page_buffers(page); | ||
453 | bh = head; | ||
454 | |||
455 | do { | ||
456 | if (buffer_counter++ == first_unmapped) | ||
457 | break; | ||
458 | clear_buffer_dirty(bh); | ||
459 | bh = bh->b_this_page; | ||
460 | } while (bh != head); | ||
461 | |||
462 | /* | ||
463 | * we cannot drop the bh if the page is not uptodate or a concurrent | ||
464 | * readpage would fail to serialize with the bh and it would read from | ||
465 | * disk before we reach the platter. | ||
466 | */ | ||
467 | if (buffer_heads_over_limit && PageUptodate(page)) | ||
468 | try_to_free_buffers(page); | ||
469 | } | ||
470 | |||
442 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | 471 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, |
443 | void *data) | 472 | void *data) |
444 | { | 473 | { |
@@ -591,30 +620,7 @@ alloc_new: | |||
591 | goto alloc_new; | 620 | goto alloc_new; |
592 | } | 621 | } |
593 | 622 | ||
594 | /* | 623 | clean_buffers(page, first_unmapped); |
595 | * OK, we have our BIO, so we can now mark the buffers clean. Make | ||
596 | * sure to only clean buffers which we know we'll be writing. | ||
597 | */ | ||
598 | if (page_has_buffers(page)) { | ||
599 | struct buffer_head *head = page_buffers(page); | ||
600 | struct buffer_head *bh = head; | ||
601 | unsigned buffer_counter = 0; | ||
602 | |||
603 | do { | ||
604 | if (buffer_counter++ == first_unmapped) | ||
605 | break; | ||
606 | clear_buffer_dirty(bh); | ||
607 | bh = bh->b_this_page; | ||
608 | } while (bh != head); | ||
609 | |||
610 | /* | ||
611 | * we cannot drop the bh if the page is not uptodate | ||
612 | * or a concurrent readpage would fail to serialize with the bh | ||
613 | * and it would read from disk before we reach the platter. | ||
614 | */ | ||
615 | if (buffer_heads_over_limit && PageUptodate(page)) | ||
616 | try_to_free_buffers(page); | ||
617 | } | ||
618 | 624 | ||
619 | BUG_ON(PageWriteback(page)); | 625 | BUG_ON(PageWriteback(page)); |
620 | set_page_writeback(page); | 626 | set_page_writeback(page); |