Diffstat (limited to 'fs/nilfs2'):

 fs/nilfs2/file.c    |  27
 fs/nilfs2/inode.c   |  16
 fs/nilfs2/segment.c | 120
 fs/nilfs2/segment.h |  11
 4 files changed, 93 insertions(+), 81 deletions(-)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 8031086db8d..cd38124372f 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -44,35 +44,14 @@ int nilfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
 		return 0;
 
 	if (datasync)
-		err = nilfs_construct_dsync_segment(inode->i_sb, inode);
+		err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
+						    LLONG_MAX);
 	else
 		err = nilfs_construct_segment(inode->i_sb);
 
 	return err;
 }
 
-static ssize_t
-nilfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-		     unsigned long nr_segs, loff_t pos)
-{
-	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_dentry->d_inode;
-	ssize_t ret;
-
-	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
-	if (ret <= 0)
-		return ret;
-
-	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
-		int err;
-
-		err = nilfs_construct_dsync_segment(inode->i_sb, inode);
-		if (unlikely(err))
-			return err;
-	}
-	return ret;
-}
-
 static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
@@ -160,7 +139,7 @@ struct file_operations nilfs_file_operations = {
 	.read		= do_sync_read,
 	.write		= do_sync_write,
 	.aio_read	= generic_file_aio_read,
-	.aio_write	= nilfs_file_aio_write,
+	.aio_write	= generic_file_aio_write,
 	.ioctl		= nilfs_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= nilfs_compat_ioctl,
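
A note on the file.c change above: with nilfs_file_aio_write() removed, O_SYNC and fsync() writers rely entirely on the range-aware nilfs_construct_dsync_segment() interface introduced by this patch. The sketch below is illustrative only (example_sync_inode_data() is a hypothetical helper, not part of the patch); it shows the calling convention the new nilfs_sync_file() uses, where the byte range (0, LLONG_MAX) means "all dirty data pages of the inode".

/* Illustrative sketch only; example_sync_inode_data() is hypothetical. */
#include <linux/fs.h>
#include <linux/kernel.h>	/* LLONG_MAX */
#include "segment.h"		/* nilfs_construct_dsync_segment() */

static int example_sync_inode_data(struct inode *inode)
{
	/* Write every dirty data page of @inode into a data-only (dsync) log. */
	return nilfs_construct_dsync_segment(inode->i_sb, inode, 0, LLONG_MAX);
}
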
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b4697d9d7e5..289d1798dec 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -24,6 +24,7 @@
 #include <linux/buffer_head.h>
 #include <linux/mpage.h>
 #include <linux/writeback.h>
+#include <linux/uio.h>
 #include "nilfs.h"
 #include "segment.h"
 #include "page.h"
@@ -145,8 +146,14 @@ static int nilfs_readpages(struct file *file, struct address_space *mapping,
 static int nilfs_writepages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
-	/* This empty method is required not to call generic_writepages() */
-	return 0;
+	struct inode *inode = mapping->host;
+	int err = 0;
+
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
+						    wbc->range_start,
+						    wbc->range_end);
+	return err;
 }
 
 static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -225,11 +232,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	ssize_t size;
-	int err;
-
-	err = nilfs_construct_dsync_segment(inode->i_sb, inode);
-	if (unlikely(err))
-		return err;
 
 	if (rw == WRITE)
 		return 0;
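
A note on the inode.c change above: nilfs_writepages() is no longer an empty stub; for WB_SYNC_ALL writeback it now builds a data-only log covering the byte range carried in the writeback_control. The sketch below is illustrative only (example_sync_range() is a hypothetical caller, not part of the patch) and shows the shape of a range-limited sync that would end up in the new nilfs_writepages().

/* Illustrative sketch only; example_sync_range() is hypothetical. */
#include <linux/fs.h>
#include <linux/writeback.h>

static int example_sync_range(struct inode *inode, loff_t start, loff_t end)
{
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,
		.range_start = start,
		.range_end   = end,	/* inclusive, as in the dsync interface */
	};

	/* Reaches nilfs_writepages(), which builds a dsync log for the range. */
	return inode->i_mapping->a_ops->writepages(inode->i_mapping, &wbc);
}
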
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 2c4c088059f..ad65a737aff 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -654,29 +654,41 @@ struct nilfs_sc_operations nilfs_sc_dsync_ops = {
 	.write_node_binfo = NULL,
 };
 
-static int nilfs_lookup_dirty_data_buffers(struct inode *inode,
-					   struct list_head *listp,
-					   struct nilfs_sc_info *sci)
+static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+					      struct list_head *listp,
+					      size_t nlimit,
+					      loff_t start, loff_t end)
 {
-	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 	struct address_space *mapping = inode->i_mapping;
 	struct pagevec pvec;
-	unsigned i, ndirties = 0, nlimit;
-	pgoff_t index = 0;
-	int err = 0;
+	pgoff_t index = 0, last = ULONG_MAX;
+	size_t ndirties = 0;
+	int i;
 
-	nlimit = sci->sc_segbuf_nblocks -
-		(sci->sc_nblk_this_inc + segbuf->sb_sum.nblocks);
+	if (unlikely(start != 0 || end != LLONG_MAX)) {
+		/*
+		 * A valid range is given for sync-ing data pages. The
+		 * range is rounded to per-page; extra dirty buffers
+		 * may be included if blocksize < pagesize.
+		 */
+		index = start >> PAGE_SHIFT;
+		last = end >> PAGE_SHIFT;
+	}
 	pagevec_init(&pvec, 0);
  repeat:
-	if (!pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
-				PAGEVEC_SIZE))
-		return 0;
+	if (unlikely(index > last) ||
+	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
+				min_t(pgoff_t, last - index,
+				      PAGEVEC_SIZE - 1) + 1))
+		return ndirties;
 
 	for (i = 0; i < pagevec_count(&pvec); i++) {
 		struct buffer_head *bh, *head;
 		struct page *page = pvec.pages[i];
 
+		if (unlikely(page->index > last))
+			break;
+
 		if (mapping->host) {
 			lock_page(page);
 			if (!page_has_buffers(page))
@@ -687,24 +699,21 @@ static int nilfs_lookup_dirty_data_buffers(struct inode *inode,
 
 		bh = head = page_buffers(page);
 		do {
-			if (buffer_dirty(bh)) {
-				if (ndirties > nlimit) {
-					err = -E2BIG;
-					break;
-				}
-				get_bh(bh);
-				list_add_tail(&bh->b_assoc_buffers, listp);
-				ndirties++;
+			if (!buffer_dirty(bh))
+				continue;
+			get_bh(bh);
+			list_add_tail(&bh->b_assoc_buffers, listp);
+			ndirties++;
+			if (unlikely(ndirties >= nlimit)) {
+				pagevec_release(&pvec);
+				cond_resched();
+				return ndirties;
 			}
-			bh = bh->b_this_page;
-		} while (bh != head);
+		} while (bh = bh->b_this_page, bh != head);
 	}
 	pagevec_release(&pvec);
 	cond_resched();
-
-	if (!err)
-		goto repeat;
-	return err;
+	goto repeat;
 }
 
 static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
@@ -1058,23 +1067,31 @@ static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
 	return err;
 }
 
+static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
+{
+	/* Remaining number of blocks within segment buffer */
+	return sci->sc_segbuf_nblocks -
+		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
+}
+
 static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
 				   struct inode *inode,
 				   struct nilfs_sc_operations *sc_ops)
 {
 	LIST_HEAD(data_buffers);
 	LIST_HEAD(node_buffers);
-	int err, err2;
+	int err;
 
 	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
-		err = nilfs_lookup_dirty_data_buffers(inode, &data_buffers,
-						      sci);
-		if (err) {
-			err2 = nilfs_segctor_apply_buffers(
+		size_t n, rest = nilfs_segctor_buffer_rest(sci);
+
+		n = nilfs_lookup_dirty_data_buffers(
+			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
+		if (n > rest) {
+			err = nilfs_segctor_apply_buffers(
 				sci, inode, &data_buffers,
-				err == -E2BIG ? sc_ops->collect_data : NULL);
-			if (err == -E2BIG)
-				err = err2;
+				sc_ops->collect_data);
+			BUG_ON(!err); /* always receive -E2BIG or true error */
 			goto break_or_fail;
 		}
 	}
@@ -1114,16 +1131,20 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
 					 struct inode *inode)
 {
 	LIST_HEAD(data_buffers);
-	int err, err2;
+	size_t n, rest = nilfs_segctor_buffer_rest(sci);
+	int err;
 
-	err = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, sci);
-	err2 = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
-					   (!err || err == -E2BIG) ?
-					   nilfs_collect_file_data : NULL);
-	if (err == -E2BIG)
-		err = err2;
-	if (!err)
+	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
+					    sci->sc_dsync_start,
+					    sci->sc_dsync_end);
+
+	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
+					  nilfs_collect_file_data);
+	if (!err) {
 		nilfs_segctor_end_finfo(sci, inode);
+		BUG_ON(n > rest);
+		/* always receive -E2BIG or true error if n > rest */
+	}
 	return err;
 }
 
@@ -1276,14 +1297,13 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
 	case NILFS_ST_DSYNC:
  dsync_mode:
 		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
-		ii = sci->sc_stage.dirty_file_ptr;
+		ii = sci->sc_dsync_inode;
 		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
 			break;
 
 		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
 		if (unlikely(err))
 			break;
-		sci->sc_stage.dirty_file_ptr = NULL;
 		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
 		sci->sc_stage.scnt = NILFS_ST_DONE;
 		return 0;
@@ -2624,7 +2644,9 @@ int nilfs_construct_segment(struct super_block *sb)
 /**
  * nilfs_construct_dsync_segment - construct a data-only logical segment
  * @sb: super block
- * @inode: the inode whose data blocks should be written out
+ * @inode: inode whose data blocks should be written out
+ * @start: start byte offset
+ * @end: end byte offset (inclusive)
  *
  * Return Value: On success, 0 is retured. On errors, one of the following
  * negative error code is returned.
@@ -2639,8 +2661,8 @@ int nilfs_construct_segment(struct super_block *sb)
  *
  * %-ENOMEM - Insufficient memory available.
  */
-int nilfs_construct_dsync_segment(struct super_block *sb,
-				  struct inode *inode)
+int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
+				  loff_t start, loff_t end)
 {
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
 	struct nilfs_sc_info *sci = NILFS_SC(sbi);
@@ -2671,7 +2693,9 @@ int nilfs_construct_dsync_segment(struct super_block *sb,
 		return 0;
 	}
 	spin_unlock(&sbi->s_inode_lock);
-	sci->sc_stage.dirty_file_ptr = ii;
+	sci->sc_dsync_inode = ii;
+	sci->sc_dsync_start = start;
+	sci->sc_dsync_end = end;
 
 	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
 
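
A note on the segment.c changes above: nilfs_lookup_dirty_data_buffers() no longer returns -E2BIG itself; callers pass nlimit = rest + 1, where rest is the room left in the segment buffer, so a return value n > rest unambiguously means the buffer filled up. The sketch below is illustrative only (example_collect_data() is a hypothetical wrapper, assumed to live in fs/nilfs2/segment.c next to the static functions it calls, and is not part of the patch); it restates that convention.

/*
 * Illustrative sketch only; example_collect_data() is hypothetical and
 * assumes it sits in fs/nilfs2/segment.c beside the functions it uses.
 */
static int example_collect_data(struct nilfs_sc_info *sci, struct inode *inode,
				struct list_head *buffers)
{
	/* Room left in the current segment buffer, in blocks. */
	size_t rest = nilfs_segctor_buffer_rest(sci);
	size_t n;

	/*
	 * Passing nlimit = rest + 1 lets the lookup stop early; a return
	 * value n > rest then means "segment buffer full", the condition
	 * the old code reported as -E2BIG.
	 */
	n = nilfs_lookup_dirty_data_buffers(inode, buffers, rest + 1,
					    0, LLONG_MAX);
	return n > rest ? -E2BIG : 0;
}
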
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 615654b8c32..2dd39da9f38 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -93,6 +93,9 @@ struct nilfs_segsum_pointer {
  * @sc_active_segments: List of active segments that were already written out
  * @sc_cleaning_segments: List of segments to be freed through construction
  * @sc_copied_buffers: List of copied buffers (buffer heads) to freeze data
+ * @sc_dsync_inode: inode whose data pages are written for a sync operation
+ * @sc_dsync_start: start byte offset of data pages
+ * @sc_dsync_end: end byte offset of data pages (inclusive)
  * @sc_segbufs: List of segment buffers
  * @sc_segbuf_nblocks: Number of available blocks in segment buffers.
  * @sc_curseg: Current segment buffer
@@ -134,6 +137,10 @@ struct nilfs_sc_info {
 	struct list_head	sc_cleaning_segments;
 	struct list_head	sc_copied_buffers;
 
+	struct nilfs_inode_info *sc_dsync_inode;
+	loff_t			sc_dsync_start;
+	loff_t			sc_dsync_end;
+
 	/* Segment buffers */
 	struct list_head	sc_segbufs;
 	unsigned long		sc_segbuf_nblocks;
@@ -221,8 +228,8 @@ extern void nilfs_destroy_transaction_cache(void);
 extern void nilfs_relax_pressure_in_lock(struct super_block *);
 
 extern int nilfs_construct_segment(struct super_block *);
-extern int nilfs_construct_dsync_segment(struct super_block *,
-					 struct inode *);
+extern int nilfs_construct_dsync_segment(struct super_block *, struct inode *,
+					 loff_t, loff_t);
 extern void nilfs_flush_segment(struct super_block *, ino_t);
 extern int nilfs_clean_segments(struct super_block *, void __user *);
 