about summary refs log tree commit diff stats
path: root/fs/nilfs2/segment.c
diff options
context:
space:
mode:
authorRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2009-04-06 22:01:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-04-07 11:31:15 -0400
commitf30bf3e40f80ef50c17f55271deae3abc03e793e (patch)
tree2e6d69611d1443688c1a79c5e5aed60cf38672e7 /fs/nilfs2/segment.c
parent9ff05123e3bfbb1d2b68ba1d9bf1f7d1dffc1453 (diff)
nilfs2: fix missed-sync issue for do_sync_mapping_range()
Chris Mason pointed out that there is a missed sync issue in nilfs_writepages(): On Wed, 17 Dec 2008 21:52:55 -0500, Chris Mason wrote: > It looks like nilfs_writepage ignores WB_SYNC_NONE, which is used by > do_sync_mapping_range(). where WB_SYNC_NONE in do_sync_mapping_range() was replaced with WB_SYNC_ALL by Nick's patch (commit: ee53a891f47444c53318b98dac947ede963db400). This fixes the problem by letting nilfs_writepages() write out the log of file data within the range if sync_mode is WB_SYNC_ALL. This involves removal of nilfs_file_aio_write() which was previously needed to ensure O_SYNC sync writes. Cc: Chris Mason <chris.mason@oracle.com> Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/nilfs2/segment.c')
-rw-r--r--  fs/nilfs2/segment.c  120
1 files changed, 72 insertions, 48 deletions
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 2c4c088059fd..ad65a737aff4 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -654,29 +654,41 @@ struct nilfs_sc_operations nilfs_sc_dsync_ops = {
654 .write_node_binfo = NULL, 654 .write_node_binfo = NULL,
655}; 655};
656 656
657static int nilfs_lookup_dirty_data_buffers(struct inode *inode, 657static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
658 struct list_head *listp, 658 struct list_head *listp,
659 struct nilfs_sc_info *sci) 659 size_t nlimit,
660 loff_t start, loff_t end)
660{ 661{
661 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
662 struct address_space *mapping = inode->i_mapping; 662 struct address_space *mapping = inode->i_mapping;
663 struct pagevec pvec; 663 struct pagevec pvec;
664 unsigned i, ndirties = 0, nlimit; 664 pgoff_t index = 0, last = ULONG_MAX;
665 pgoff_t index = 0; 665 size_t ndirties = 0;
666 int err = 0; 666 int i;
667 667
668 nlimit = sci->sc_segbuf_nblocks - 668 if (unlikely(start != 0 || end != LLONG_MAX)) {
669 (sci->sc_nblk_this_inc + segbuf->sb_sum.nblocks); 669 /*
670 * A valid range is given for sync-ing data pages. The
671 * range is rounded to per-page; extra dirty buffers
672 * may be included if blocksize < pagesize.
673 */
674 index = start >> PAGE_SHIFT;
675 last = end >> PAGE_SHIFT;
676 }
670 pagevec_init(&pvec, 0); 677 pagevec_init(&pvec, 0);
671 repeat: 678 repeat:
672 if (!pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, 679 if (unlikely(index > last) ||
673 PAGEVEC_SIZE)) 680 !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
674 return 0; 681 min_t(pgoff_t, last - index,
682 PAGEVEC_SIZE - 1) + 1))
683 return ndirties;
675 684
676 for (i = 0; i < pagevec_count(&pvec); i++) { 685 for (i = 0; i < pagevec_count(&pvec); i++) {
677 struct buffer_head *bh, *head; 686 struct buffer_head *bh, *head;
678 struct page *page = pvec.pages[i]; 687 struct page *page = pvec.pages[i];
679 688
689 if (unlikely(page->index > last))
690 break;
691
680 if (mapping->host) { 692 if (mapping->host) {
681 lock_page(page); 693 lock_page(page);
682 if (!page_has_buffers(page)) 694 if (!page_has_buffers(page))
@@ -687,24 +699,21 @@ static int nilfs_lookup_dirty_data_buffers(struct inode *inode,
687 699
688 bh = head = page_buffers(page); 700 bh = head = page_buffers(page);
689 do { 701 do {
690 if (buffer_dirty(bh)) { 702 if (!buffer_dirty(bh))
691 if (ndirties > nlimit) { 703 continue;
692 err = -E2BIG; 704 get_bh(bh);
693 break; 705 list_add_tail(&bh->b_assoc_buffers, listp);
694 } 706 ndirties++;
695 get_bh(bh); 707 if (unlikely(ndirties >= nlimit)) {
696 list_add_tail(&bh->b_assoc_buffers, listp); 708 pagevec_release(&pvec);
697 ndirties++; 709 cond_resched();
710 return ndirties;
698 } 711 }
699 bh = bh->b_this_page; 712 } while (bh = bh->b_this_page, bh != head);
700 } while (bh != head);
701 } 713 }
702 pagevec_release(&pvec); 714 pagevec_release(&pvec);
703 cond_resched(); 715 cond_resched();
704 716 goto repeat;
705 if (!err)
706 goto repeat;
707 return err;
708} 717}
709 718
710static void nilfs_lookup_dirty_node_buffers(struct inode *inode, 719static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
@@ -1058,23 +1067,31 @@ static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1058 return err; 1067 return err;
1059} 1068}
1060 1069
1070static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1071{
1072 /* Remaining number of blocks within segment buffer */
1073 return sci->sc_segbuf_nblocks -
1074 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1075}
1076
1061static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci, 1077static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1062 struct inode *inode, 1078 struct inode *inode,
1063 struct nilfs_sc_operations *sc_ops) 1079 struct nilfs_sc_operations *sc_ops)
1064{ 1080{
1065 LIST_HEAD(data_buffers); 1081 LIST_HEAD(data_buffers);
1066 LIST_HEAD(node_buffers); 1082 LIST_HEAD(node_buffers);
1067 int err, err2; 1083 int err;
1068 1084
1069 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) { 1085 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1070 err = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, 1086 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1071 sci); 1087
1072 if (err) { 1088 n = nilfs_lookup_dirty_data_buffers(
1073 err2 = nilfs_segctor_apply_buffers( 1089 inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1090 if (n > rest) {
1091 err = nilfs_segctor_apply_buffers(
1074 sci, inode, &data_buffers, 1092 sci, inode, &data_buffers,
1075 err == -E2BIG ? sc_ops->collect_data : NULL); 1093 sc_ops->collect_data);
1076 if (err == -E2BIG) 1094 BUG_ON(!err); /* always receive -E2BIG or true error */
1077 err = err2;
1078 goto break_or_fail; 1095 goto break_or_fail;
1079 } 1096 }
1080 } 1097 }
@@ -1114,16 +1131,20 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1114 struct inode *inode) 1131 struct inode *inode)
1115{ 1132{
1116 LIST_HEAD(data_buffers); 1133 LIST_HEAD(data_buffers);
1117 int err, err2; 1134 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1135 int err;
1118 1136
1119 err = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, sci); 1137 n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1120 err2 = nilfs_segctor_apply_buffers(sci, inode, &data_buffers, 1138 sci->sc_dsync_start,
1121 (!err || err == -E2BIG) ? 1139 sci->sc_dsync_end);
1122 nilfs_collect_file_data : NULL); 1140
1123 if (err == -E2BIG) 1141 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1124 err = err2; 1142 nilfs_collect_file_data);
1125 if (!err) 1143 if (!err) {
1126 nilfs_segctor_end_finfo(sci, inode); 1144 nilfs_segctor_end_finfo(sci, inode);
1145 BUG_ON(n > rest);
1146 /* always receive -E2BIG or true error if n > rest */
1147 }
1127 return err; 1148 return err;
1128} 1149}
1129 1150
@@ -1276,14 +1297,13 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1276 case NILFS_ST_DSYNC: 1297 case NILFS_ST_DSYNC:
1277 dsync_mode: 1298 dsync_mode:
1278 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT; 1299 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1279 ii = sci->sc_stage.dirty_file_ptr; 1300 ii = sci->sc_dsync_inode;
1280 if (!test_bit(NILFS_I_BUSY, &ii->i_state)) 1301 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1281 break; 1302 break;
1282 1303
1283 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode); 1304 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1284 if (unlikely(err)) 1305 if (unlikely(err))
1285 break; 1306 break;
1286 sci->sc_stage.dirty_file_ptr = NULL;
1287 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND; 1307 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1288 sci->sc_stage.scnt = NILFS_ST_DONE; 1308 sci->sc_stage.scnt = NILFS_ST_DONE;
1289 return 0; 1309 return 0;
@@ -2624,7 +2644,9 @@ int nilfs_construct_segment(struct super_block *sb)
2624/** 2644/**
2625 * nilfs_construct_dsync_segment - construct a data-only logical segment 2645 * nilfs_construct_dsync_segment - construct a data-only logical segment
2626 * @sb: super block 2646 * @sb: super block
2627 * @inode: the inode whose data blocks should be written out 2647 * @inode: inode whose data blocks should be written out
2648 * @start: start byte offset
2649 * @end: end byte offset (inclusive)
2628 * 2650 *
2629 * Return Value: On success, 0 is returned. On errors, one of the following 2651
2630 * negative error code is returned. 2652 * negative error code is returned.
@@ -2639,8 +2661,8 @@ int nilfs_construct_segment(struct super_block *sb)
2639 * 2661 *
2640 * %-ENOMEM - Insufficient memory available. 2662 * %-ENOMEM - Insufficient memory available.
2641 */ 2663 */
2642int nilfs_construct_dsync_segment(struct super_block *sb, 2664int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2643 struct inode *inode) 2665 loff_t start, loff_t end)
2644{ 2666{
2645 struct nilfs_sb_info *sbi = NILFS_SB(sb); 2667 struct nilfs_sb_info *sbi = NILFS_SB(sb);
2646 struct nilfs_sc_info *sci = NILFS_SC(sbi); 2668 struct nilfs_sc_info *sci = NILFS_SC(sbi);
@@ -2671,7 +2693,9 @@ int nilfs_construct_dsync_segment(struct super_block *sb,
2671 return 0; 2693 return 0;
2672 } 2694 }
2673 spin_unlock(&sbi->s_inode_lock); 2695 spin_unlock(&sbi->s_inode_lock);
2674 sci->sc_stage.dirty_file_ptr = ii; 2696 sci->sc_dsync_inode = ii;
2697 sci->sc_dsync_start = start;
2698 sci->sc_dsync_end = end;
2675 2699
2676 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC); 2700 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2677 2701