about | summary | refs | log | tree | commit | diff | stats
path: root/fs/nilfs2
diff options
context:
space:
mode:
authorRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2010-12-26 02:28:28 -0500
committerRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2011-01-10 00:05:45 -0500
commit27e6c7a3ce29ae5fa5bec4ed5917f8508bfac120 (patch)
tree958d82757bf23e426c7bc1035f76d986f09be535 /fs/nilfs2
parente828949e5b42bfd234ee537cdb7c5e3a577958a3 (diff)
nilfs2: mark buffer heads as delayed until the data is written to disk
Nilfs does not allocate new blocks on disk until they are actually written to. To implement fiemap, we need to deal with such blocks. To allow a successive fiemap patch to distinguish mapped but unallocated regions, this patch marks the buffer heads of those new blocks as delayed and clears the flag after the blocks are written to disk. Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Diffstat (limited to 'fs/nilfs2')
-rw-r--r--fs/nilfs2/inode.c1
-rw-r--r--fs/nilfs2/segment.c2
2 files changed, 3 insertions, 0 deletions
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 550b1788981e..1a546a86d7a7 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -104,6 +104,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
104 nilfs_transaction_commit(inode->i_sb); /* never fails */ 104 nilfs_transaction_commit(inode->i_sb); /* never fails */
105 /* Error handling should be detailed */ 105 /* Error handling should be detailed */
106 set_buffer_new(bh_result); 106 set_buffer_new(bh_result);
107 set_buffer_delay(bh_result);
107 map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed 108 map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
108 to proper value */ 109 to proper value */
109 } else if (ret == -ENOENT) { 110 } else if (ret == -ENOENT) {
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index d3d2f4396f72..efc7d0a1bbf7 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1763,6 +1763,7 @@ static void nilfs_clear_copied_buffers(struct list_head *list, int err)
1763 if (!err) { 1763 if (!err) {
1764 set_buffer_uptodate(bh); 1764 set_buffer_uptodate(bh);
1765 clear_buffer_dirty(bh); 1765 clear_buffer_dirty(bh);
1766 clear_buffer_delay(bh);
1766 clear_buffer_nilfs_volatile(bh); 1767 clear_buffer_nilfs_volatile(bh);
1767 } 1768 }
1768 brelse(bh); /* for b_assoc_buffers */ 1769 brelse(bh); /* for b_assoc_buffers */
@@ -1889,6 +1890,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1889 b_assoc_buffers) { 1890 b_assoc_buffers) {
1890 set_buffer_uptodate(bh); 1891 set_buffer_uptodate(bh);
1891 clear_buffer_dirty(bh); 1892 clear_buffer_dirty(bh);
1893 clear_buffer_delay(bh);
1892 clear_buffer_nilfs_volatile(bh); 1894 clear_buffer_nilfs_volatile(bh);
1893 clear_buffer_nilfs_redirected(bh); 1895 clear_buffer_nilfs_redirected(bh);
1894 if (bh == segbuf->sb_super_root) { 1896 if (bh == segbuf->sb_super_root) {