Diffstat (limited to 'fs/f2fs/checkpoint.c')
-rw-r--r--  fs/f2fs/checkpoint.c | 80
1 file changed, 51 insertions, 29 deletions
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 6aeed5bada52..ec3b7a5381fa 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -160,14 +160,11 @@ static int f2fs_write_meta_page(struct page *page,
 		goto redirty_out;
 	if (wbc->for_reclaim)
 		goto redirty_out;
-
-	/* Should not write any meta pages, if any IO error was occurred */
-	if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
-		goto no_write;
+	if (unlikely(f2fs_cp_error(sbi)))
+		goto redirty_out;
 
 	f2fs_wait_on_page_writeback(page, META);
 	write_meta_page(sbi, page);
-no_write:
 	dec_page_count(sbi, F2FS_DIRTY_META);
 	unlock_page(page);
 	return 0;
@@ -348,7 +345,7 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
 	return e ? true : false;
 }
 
-static void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_dirty_inode(struct f2fs_sb_info *sbi)
 {
 	struct ino_entry *e, *tmp;
 	int i;
@@ -446,8 +443,8 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 	struct f2fs_orphan_block *orphan_blk = NULL;
 	unsigned int nentries = 0;
 	unsigned short index;
-	unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
-			(F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
+	unsigned short orphan_blocks =
+			(unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
 	struct page *page = NULL;
 	struct ino_entry *orphan = NULL;
 
@@ -737,7 +734,7 @@ retry:
 /*
  * Freeze all the FS-operations for checkpoint.
  */
-static void block_operations(struct f2fs_sb_info *sbi)
+static int block_operations(struct f2fs_sb_info *sbi)
 {
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
@@ -745,6 +742,7 @@ static void block_operations(struct f2fs_sb_info *sbi)
 		.for_reclaim = 0,
 	};
 	struct blk_plug plug;
+	int err = 0;
 
 	blk_start_plug(&plug);
 
@@ -754,11 +752,15 @@ retry_flush_dents:
 	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
 		f2fs_unlock_all(sbi);
 		sync_dirty_dir_inodes(sbi);
+		if (unlikely(f2fs_cp_error(sbi))) {
+			err = -EIO;
+			goto out;
+		}
 		goto retry_flush_dents;
 	}
 
 	/*
-	 * POR: we should ensure that there is no dirty node pages
+	 * POR: we should ensure that there are no dirty node pages
 	 * until finishing nat/sit flush.
 	 */
 retry_flush_nodes:
@@ -767,9 +769,16 @@ retry_flush_nodes:
 	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
 		up_write(&sbi->node_write);
 		sync_node_pages(sbi, 0, &wbc);
+		if (unlikely(f2fs_cp_error(sbi))) {
+			f2fs_unlock_all(sbi);
+			err = -EIO;
+			goto out;
+		}
 		goto retry_flush_nodes;
 	}
+out:
 	blk_finish_plug(&plug);
+	return err;
 }
 
 static void unblock_operations(struct f2fs_sb_info *sbi)
@@ -813,8 +822,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 	discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
 
 	/* Flush all the NAT/SIT pages */
-	while (get_pages(sbi, F2FS_DIRTY_META))
+	while (get_pages(sbi, F2FS_DIRTY_META)) {
 		sync_meta_pages(sbi, META, LONG_MAX);
+		if (unlikely(f2fs_cp_error(sbi)))
+			return;
+	}
 
 	next_free_nid(sbi, &last_nid);
 
@@ -825,7 +837,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
 	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
 	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
-	for (i = 0; i < 3; i++) {
+	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
 		ckpt->cur_node_segno[i] =
 			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
 		ckpt->cur_node_blkoff[i] =
@@ -833,7 +845,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
 				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
 	}
-	for (i = 0; i < 3; i++) {
+	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
 		ckpt->cur_data_segno[i] =
 			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
 		ckpt->cur_data_blkoff[i] =
@@ -848,24 +860,23 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 
 	/* 2 cp + n data seg summary + orphan inode blocks */
 	data_sum_blocks = npages_for_summary_flush(sbi);
-	if (data_sum_blocks < 3)
+	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
 		set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
 	else
 		clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
 
-	orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
-					/ F2FS_ORPHANS_PER_BLOCK;
+	orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
 	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
 			orphan_blocks);
 
 	if (is_umount) {
 		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+
 				cp_payload_blks + data_sum_blocks +
 				orphan_blocks + NR_CURSEG_NODE_TYPE);
 	} else {
 		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
 				cp_payload_blks + data_sum_blocks +
 				orphan_blocks);
 	}
@@ -924,6 +935,9 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 	/* wait for previous submitted node/meta pages writeback */
 	wait_on_all_pages_writeback(sbi);
 
+	if (unlikely(f2fs_cp_error(sbi)))
+		return;
+
 	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
 	filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);
 
@@ -934,15 +948,17 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 	/* Here, we only have one bio having CP pack */
 	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
 
-	if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
-		clear_prefree_segments(sbi);
-		release_dirty_inode(sbi);
-		F2FS_RESET_SB_DIRT(sbi);
-	}
+	release_dirty_inode(sbi);
+
+	if (unlikely(f2fs_cp_error(sbi)))
+		return;
+
+	clear_prefree_segments(sbi);
+	F2FS_RESET_SB_DIRT(sbi);
 }
 
 /*
- * We guarantee that this checkpoint procedure should not fail.
+ * We guarantee that this checkpoint procedure will not fail.
  */
 void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 {
@@ -952,7 +968,13 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");
 
 	mutex_lock(&sbi->cp_mutex);
-	block_operations(sbi);
+
+	if (!sbi->s_dirty)
+		goto out;
+	if (unlikely(f2fs_cp_error(sbi)))
+		goto out;
+	if (block_operations(sbi))
+		goto out;
 
 	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");
 
@@ -976,9 +998,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 	do_checkpoint(sbi, is_umount);
 
 	unblock_operations(sbi);
-	mutex_unlock(&sbi->cp_mutex);
-
 	stat_inc_cp_count(sbi->stat_info);
+out:
+	mutex_unlock(&sbi->cp_mutex);
 	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
 }
 
@@ -999,8 +1021,8 @@ void init_ino_entry_info(struct f2fs_sb_info *sbi)
 	 * for cp pack we can have max 1020*504 orphan entries
 	 */
 	sbi->n_orphans = 0;
-	sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
-				* F2FS_ORPHANS_PER_BLOCK;
+	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
+				NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
 }
 
 int __init create_checkpoint_caches(void)
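
Note on the new helpers: F2FS_CP_PACKS and GET_ORPHAN_BLOCKS() are not defined in this file, and their definitions are not part of this diff; they live in fs/f2fs/f2fs.h. Judging from the open-coded expressions they replace above, they presumably amount to something like the following sketch (the exact definitions are an assumption here):

/*
 * Assumed shape of the helpers used by the hunks above; see fs/f2fs/f2fs.h
 * for the real definitions.
 */
#define F2FS_CP_PACKS		2	/* the two cp blocks bounding a cp pack */
#define GET_ORPHAN_BLOCKS(n)	((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
					F2FS_ORPHANS_PER_BLOCK)	/* round up */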
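
Taken together, the hunks above make each stage of the checkpoint path bail out once f2fs_cp_error() reports an earlier I/O failure, with block_operations() now returning -EIO instead of void. A condensed sketch of the resulting write_checkpoint() flow, derived only from the new lines shown in this diff (trace points and the untouched steps between block_operations() and do_checkpoint() are elided; this is not the complete kernel function):

/* Condensed from the hunks above; illustrative only. */
void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
	mutex_lock(&sbi->cp_mutex);

	if (!sbi->s_dirty)			/* nothing to checkpoint */
		goto out;
	if (unlikely(f2fs_cp_error(sbi)))	/* a prior IO error was recorded */
		goto out;
	if (block_operations(sbi))		/* now returns -EIO on cp error */
		goto out;			/* its error paths drop the locks themselves */

	do_checkpoint(sbi, is_umount);		/* also returns early on cp error */

	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);
out:
	mutex_unlock(&sbi->cp_mutex);
}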
