about summary refs log tree commit diff stats
path: root/fs/nilfs2
diff options
context:
space:
mode:
authorRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2010-08-30 22:40:34 -0400
committerRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2010-10-22 20:24:37 -0400
commitb1f6a4f294088b3fcf9ae67915ca550a1ded2819 (patch)
treee4eab590fddc20111138e97ee4d55f10af519c8c /fs/nilfs2
parentebdfed4dc59d177cf26013a0c9b8ee9652e9a140 (diff)
nilfs2: add routines to redirect access to buffers of DAT file
During garbage collection (GC), the DAT file, which converts virtual block numbers to real block numbers, may return a disk block number that is not yet written to the device. To avoid access to unwritten blocks, the current implementation stores changes to the caches of GCDAT during GC and atomically commits the changes into the DAT file after they are written to the device. This patch, instead, adds a function that makes a copy of a specified buffer and stores it in nilfs_shadow_map, and a function to get the backup copy as needed (nilfs_mdt_freeze_buffer and nilfs_mdt_get_frozen_buffer, respectively). Before DAT changes a block number in an entry block, it makes a copy and redirects access to the buffer so that the address conversion function (i.e. nilfs_dat_translate) refers to the old address saved in the copy. This patch provides the prerequisites for such redirection. Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Diffstat (limited to 'fs/nilfs2')
-rw-r--r--  fs/nilfs2/mdt.c     | 67
-rw-r--r--  fs/nilfs2/mdt.h     |  3
-rw-r--r--  fs/nilfs2/page.c    |  2
-rw-r--r--  fs/nilfs2/page.h    |  2
-rw-r--r--  fs/nilfs2/segment.c |  1
5 files changed, 75 insertions(+), 0 deletions(-)
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 0066468609da..532f85acf273 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -622,6 +622,72 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
622 	return ret;
623 }
624 
625int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
626{
627 struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
628 struct buffer_head *bh_frozen;
629 struct page *page;
630 int blkbits = inode->i_blkbits;
631 int ret = -ENOMEM;
632
633 page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
634 if (!page)
635 return ret;
636
637 if (!page_has_buffers(page))
638 create_empty_buffers(page, 1 << blkbits, 0);
639
640 bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);
641 if (bh_frozen) {
642 if (!buffer_uptodate(bh_frozen))
643 nilfs_copy_buffer(bh_frozen, bh);
644 if (list_empty(&bh_frozen->b_assoc_buffers)) {
645 list_add_tail(&bh_frozen->b_assoc_buffers,
646 &shadow->frozen_buffers);
647 set_buffer_nilfs_redirected(bh);
648 } else {
649 brelse(bh_frozen); /* already frozen */
650 }
651 ret = 0;
652 }
653 unlock_page(page);
654 page_cache_release(page);
655 return ret;
656}
657
658struct buffer_head *
659nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
660{
661 struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
662 struct buffer_head *bh_frozen = NULL;
663 struct page *page;
664 int n;
665
666 page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
667 if (page) {
668 if (page_has_buffers(page)) {
669 n = bh_offset(bh) >> inode->i_blkbits;
670 bh_frozen = nilfs_page_get_nth_block(page, n);
671 }
672 unlock_page(page);
673 page_cache_release(page);
674 }
675 return bh_frozen;
676}
677
678static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
679{
680 struct list_head *head = &shadow->frozen_buffers;
681 struct buffer_head *bh;
682
683 while (!list_empty(head)) {
684 bh = list_first_entry(head, struct buffer_head,
685 b_assoc_buffers);
686 list_del_init(&bh->b_assoc_buffers);
687 brelse(bh); /* drop ref-count to make it releasable */
688 }
689}
690
691 /**
692  * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
693  * @inode: inode of the metadata file
@@ -658,6 +724,7 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
724 	struct nilfs_shadow_map *shadow = mi->mi_shadow;
725 
726 	down_write(&mi->mi_sem);
727 nilfs_release_frozen_buffers(shadow);
728 	truncate_inode_pages(&shadow->frozen_data, 0);
729 	truncate_inode_pages(&shadow->frozen_btnodes, 0);
730 	up_write(&mi->mi_sem);
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index e7f0d158c527..e60bbfe899f1 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -100,6 +100,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
100 int nilfs_mdt_save_to_shadow_map(struct inode *inode);
101 void nilfs_mdt_restore_from_shadow_map(struct inode *inode);
102 void nilfs_mdt_clear_shadow_map(struct inode *inode);
103int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh);
104struct buffer_head *nilfs_mdt_get_frozen_buffer(struct inode *inode,
105 struct buffer_head *bh);
106 
107 #define nilfs_mdt_mark_buffer_dirty(bh)	nilfs_mark_buffer_dirty(bh)
108 
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 6384ac14c0c8..7083344ac881 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -131,6 +131,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
131 	lock_buffer(bh);
132 	clear_buffer_nilfs_volatile(bh);
133 	clear_buffer_nilfs_checked(bh);
134 clear_buffer_nilfs_redirected(bh);
135 	clear_buffer_dirty(bh);
136 	if (nilfs_page_buffers_clean(page))
137 		__nilfs_clear_page_dirty(page);
@@ -483,6 +484,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
484 			clear_buffer_dirty(bh);
485 			clear_buffer_nilfs_volatile(bh);
486 			clear_buffer_nilfs_checked(bh);
487 clear_buffer_nilfs_redirected(bh);
488 			clear_buffer_uptodate(bh);
489 			clear_buffer_mapped(bh);
490 			unlock_buffer(bh);
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 6ec4f498fc2b..fb9e8a8a2038 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -35,12 +35,14 @@ enum {
35 	BH_NILFS_Node,
36 	BH_NILFS_Volatile,
37 	BH_NILFS_Checked,
38 BH_NILFS_Redirected,
39 };
40 
41 BUFFER_FNS(NILFS_Allocated, nilfs_allocated)	/* nilfs private buffers */
42 BUFFER_FNS(NILFS_Node, nilfs_node)		/* nilfs node buffers */
43 BUFFER_FNS(NILFS_Volatile, nilfs_volatile)
44 BUFFER_FNS(NILFS_Checked, nilfs_checked)	/* buffer is verified */
45BUFFER_FNS(NILFS_Redirected, nilfs_redirected) /* redirected to a copy */
46 
47 
48 void nilfs_mark_buffer_dirty(struct buffer_head *bh);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index b75306d642c2..91dc0668ec83 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1908,6 +1908,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1908 		set_buffer_uptodate(bh);
1909 		clear_buffer_dirty(bh);
1910 		clear_buffer_nilfs_volatile(bh);
1911 clear_buffer_nilfs_redirected(bh);
1912 		if (bh == segbuf->sb_super_root) {
1913 			if (bh->b_page != bd_page) {
1914 				end_page_writeback(bd_page);