author		Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>	2010-09-05 23:05:43 -0400
committer	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>	2010-10-22 20:24:37 -0400
commit		ebdfed4dc59d177cf26013a0c9b8ee9652e9a140 (patch)
tree		6ef90f068ae41c55234181c93d8e30a303126c43 /fs
parent		a8070dd365dd995f6139a2fc3aeee10159bdcc45 (diff)
nilfs2: add routines to roll back state of DAT file
This adds an optional function to metadata files which makes a copy of the bmap, page cache, and b-tree node cache, and rolls back to that copy as needed. This enhancement is intended to displace the gcdat inode, which provides a similar function in a different way.

In this patch, the nilfs_shadow_map structure is added to store a copy of the foregoing states. nilfs_mdt_setup_shadow_map() relates this structure to a metadata file, and nilfs_mdt_save_to_shadow_map() and nilfs_mdt_restore_from_shadow_map() provide the save and restore functions, respectively. Finally, nilfs_mdt_clear_shadow_map() clears the states held in nilfs_shadow_map.

The copy of the b-tree node cache and page cache is made by duplicating only dirty pages into the corresponding caches in nilfs_shadow_map. Restoration is done by clearing dirty pages from the original caches and copying the dirty pages back from nilfs_shadow_map.

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
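For orientation, here is a minimal caller sketch (not part of this patch) showing how the four new routines are meant to be wired together. The owner structure, the do_risky_update() helper, and the error-handling policy are hypothetical; they only illustrate the setup/save/restore/clear sequence described above.

/* Hypothetical usage sketch -- not part of this patch. */
struct example_mdt_owner {
	struct nilfs_shadow_map shadow;	/* storage for the frozen state */
};

static int example_update_with_rollback(struct inode *mdt_inode,
					struct example_mdt_owner *owner)
{
	int err;

	/* bind the shadow map to the metadata file, typically once at setup */
	err = nilfs_mdt_setup_shadow_map(mdt_inode, &owner->shadow);
	if (err)
		return err;

	/* freeze the current bmap state and dirty pages */
	err = nilfs_mdt_save_to_shadow_map(mdt_inode);
	if (err)
		goto out;

	err = do_risky_update(mdt_inode);	/* hypothetical operation */
	if (err)
		/* roll the caches and bmap back to the saved state */
		nilfs_mdt_restore_from_shadow_map(mdt_inode);
 out:
	/* drop the frozen pages once they are no longer needed */
	nilfs_mdt_clear_shadow_map(mdt_inode);
	return err;
}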
Diffstat (limited to 'fs')
-rw-r--r--	fs/nilfs2/btnode.c	17
-rw-r--r--	fs/nilfs2/mdt.c		104
-rw-r--r--	fs/nilfs2/mdt.h		14
-rw-r--r--	fs/nilfs2/page.c	25
-rw-r--r--	fs/nilfs2/page.h	4
5 files changed, 145 insertions(+), 19 deletions(-)
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index f78ab1044d1d..5115814cb745 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -37,15 +37,7 @@
 
 void nilfs_btnode_cache_init_once(struct address_space *btnc)
 {
-	memset(btnc, 0, sizeof(*btnc));
-	INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
-	spin_lock_init(&btnc->tree_lock);
-	INIT_LIST_HEAD(&btnc->private_list);
-	spin_lock_init(&btnc->private_lock);
-
-	spin_lock_init(&btnc->i_mmap_lock);
-	INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
-	INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
+	nilfs_mapping_init_once(btnc);
 }
 
 static const struct address_space_operations def_btnode_aops = {
@@ -55,12 +47,7 @@ static const struct address_space_operations def_btnode_aops = {
 void nilfs_btnode_cache_init(struct address_space *btnc,
 			     struct backing_dev_info *bdi)
 {
-	btnc->host = NULL;  /* can safely set to host inode ? */
-	btnc->flags = 0;
-	mapping_set_gfp_mask(btnc, GFP_NOFS);
-	btnc->assoc_mapping = NULL;
-	btnc->backing_dev_info = bdi;
-	btnc->a_ops = &def_btnode_aops;
+	nilfs_mapping_init(btnc, bdi, &def_btnode_aops);
 }
 
 void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 73e5da3b097e..0066468609da 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -398,16 +398,22 @@ int nilfs_mdt_fetch_dirty(struct inode *inode)
 static int
 nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 {
-	struct inode *inode = container_of(page->mapping,
-					   struct inode, i_data);
-	struct super_block *sb = inode->i_sb;
-	struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
+	struct inode *inode;
+	struct super_block *sb;
+	struct the_nilfs *nilfs;
 	struct nilfs_sb_info *writer = NULL;
 	int err = 0;
 
 	redirty_page_for_writepage(wbc, page);
 	unlock_page(page);
 
+	inode = page->mapping->host;
+	if (!inode)
+		return 0;
+
+	sb = inode->i_sb;
+	nilfs = NILFS_MDT(inode)->mi_nilfs;
+
 	if (page->mapping->assoc_mapping)
 		return 0;	/* Do not request flush for shadow page cache */
 	if (!sb) {
@@ -567,6 +573,96 @@ void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow)
 		&NILFS_I(orig)->i_btnode_cache;
 }
 
+static const struct address_space_operations shadow_map_aops = {
+	.sync_page = block_sync_page,
+};
+
+/**
+ * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
+ * @inode: inode of the metadata file
+ * @shadow: shadow mapping
+ */
+int nilfs_mdt_setup_shadow_map(struct inode *inode,
+			       struct nilfs_shadow_map *shadow)
+{
+	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+	struct backing_dev_info *bdi = NILFS_I_NILFS(inode)->ns_bdi;
+
+	INIT_LIST_HEAD(&shadow->frozen_buffers);
+	nilfs_mapping_init_once(&shadow->frozen_data);
+	nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
+	nilfs_mapping_init_once(&shadow->frozen_btnodes);
+	nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
+	mi->mi_shadow = shadow;
+	return 0;
+}
+
+/**
+ * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
+ * @inode: inode of the metadata file
+ */
+int nilfs_mdt_save_to_shadow_map(struct inode *inode)
+{
+	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+	struct nilfs_inode_info *ii = NILFS_I(inode);
+	struct nilfs_shadow_map *shadow = mi->mi_shadow;
+	int ret;
+
+	ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+	if (ret)
+		goto out;
+
+	ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
+				     &ii->i_btnode_cache);
+	if (ret)
+		goto out;
+
+	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
+ out:
+	return ret;
+}
+
+/**
+ * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
+ * @inode: inode of the metadata file
+ */
+void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
+{
+	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+	struct nilfs_inode_info *ii = NILFS_I(inode);
+	struct nilfs_shadow_map *shadow = mi->mi_shadow;
+
+	down_write(&mi->mi_sem);
+
+	if (mi->mi_palloc_cache)
+		nilfs_palloc_clear_cache(inode);
+
+	nilfs_clear_dirty_pages(inode->i_mapping);
+	nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+
+	nilfs_clear_dirty_pages(&ii->i_btnode_cache);
+	nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+
+	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
+
+	up_write(&mi->mi_sem);
+}
+
+/**
+ * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
+ * @inode: inode of the metadata file
+ */
+void nilfs_mdt_clear_shadow_map(struct inode *inode)
+{
+	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+	struct nilfs_shadow_map *shadow = mi->mi_shadow;
+
+	down_write(&mi->mi_sem);
+	truncate_inode_pages(&shadow->frozen_data, 0);
+	truncate_inode_pages(&shadow->frozen_btnodes, 0);
+	up_write(&mi->mi_sem);
+}
+
 static void nilfs_mdt_clear(struct inode *inode)
 {
 	struct nilfs_inode_info *ii = NILFS_I(inode);
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index f44560224bd1..e7f0d158c527 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -28,6 +28,13 @@
 #include "nilfs.h"
 #include "page.h"
 
+struct nilfs_shadow_map {
+	struct nilfs_bmap_store bmap_store;
+	struct address_space frozen_data;
+	struct address_space frozen_btnodes;
+	struct list_head frozen_buffers;
+};
+
 /**
  * struct nilfs_mdt_info - on-memory private data of meta data files
  * @mi_nilfs: back pointer to the_nilfs struct
@@ -37,6 +44,7 @@
  * @mi_first_entry_offset: offset to the first entry
  * @mi_entries_per_block: number of entries in a block
  * @mi_palloc_cache: persistent object allocator cache
+ * @mi_shadow: shadow of bmap and page caches
  * @mi_blocks_per_group: number of blocks in a group
  * @mi_blocks_per_desc_block: number of blocks per descriptor block
  */
@@ -48,6 +56,7 @@ struct nilfs_mdt_info {
 	unsigned		mi_first_entry_offset;
 	unsigned long		mi_entries_per_block;
 	struct nilfs_palloc_cache *mi_palloc_cache;
+	struct nilfs_shadow_map *mi_shadow;
 	unsigned long		mi_blocks_per_group;
 	unsigned long		mi_blocks_per_desc_block;
 };
@@ -86,6 +95,11 @@ void nilfs_mdt_destroy(struct inode *);
 void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned);
 void nilfs_mdt_set_shadow(struct inode *, struct inode *);
 
+int nilfs_mdt_setup_shadow_map(struct inode *inode,
+			       struct nilfs_shadow_map *shadow);
+int nilfs_mdt_save_to_shadow_map(struct inode *inode);
+void nilfs_mdt_restore_from_shadow_map(struct inode *inode);
+void nilfs_mdt_clear_shadow_map(struct inode *inode);
 
 #define nilfs_mdt_mark_buffer_dirty(bh)	nilfs_mark_buffer_dirty(bh)
 
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index aab11db2cb08..6384ac14c0c8 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -513,6 +513,31 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
 	}
 	return nc;
 }
+
+void nilfs_mapping_init_once(struct address_space *mapping)
+{
+	memset(mapping, 0, sizeof(*mapping));
+	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+	spin_lock_init(&mapping->tree_lock);
+	INIT_LIST_HEAD(&mapping->private_list);
+	spin_lock_init(&mapping->private_lock);
+
+	spin_lock_init(&mapping->i_mmap_lock);
+	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+}
+
+void nilfs_mapping_init(struct address_space *mapping,
+			struct backing_dev_info *bdi,
+			const struct address_space_operations *aops)
+{
+	mapping->host = NULL;
+	mapping->flags = 0;
+	mapping_set_gfp_mask(mapping, GFP_NOFS);
+	mapping->assoc_mapping = NULL;
+	mapping->backing_dev_info = bdi;
+	mapping->a_ops = aops;
+}
 
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index f53d8da41ed7..6ec4f498fc2b 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -59,6 +59,10 @@ void nilfs_free_private_page(struct page *);
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
+void nilfs_mapping_init_once(struct address_space *mapping);
+void nilfs_mapping_init(struct address_space *mapping,
+			struct backing_dev_info *bdi,
+			const struct address_space_operations *aops);
 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
 
 #define NILFS_PAGE_BUG(page, m, a...)  \