author     Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>	2009-04-06 22:01:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2009-04-07 11:31:20 -0400
commit     e339ad31f59925b48a92ee3947692fdf9758b8c7
tree       6bb85c43bfd100b0a220c788c654f2f74ca553e4  /fs/nilfs2/super.c
parent     cece552074c591970353ad48308d65f110aeaf28
nilfs2: introduce secondary super block
Former versions of nilfs2 kept no extra super block. This change addresses that
weak point by introducing a secondary super block in the unused region at the
tail of the partition.

This does not break disk format compatibility: older versions simply ignore the
secondary super block, and newer versions restore it if it is missing. A
partition created by an old mkfs may have no unused region, in which case the
secondary super block is simply not added.

This does not add further redundant copies of the super block; that is left as
future work.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/nilfs2/super.c')
-rw-r--r--	fs/nilfs2/super.c	229
1 file changed, 95 insertions(+), 134 deletions(-)
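An illustrative aside, not part of the patch itself: the primary super block stays at its fixed 1 KiB offset, while the secondary copy is placed near the end of the device, in the last full 4 KiB-aligned block, which is the position later handed to nilfs_read_super_block() as pos. The stand-alone sketch below only shows that placement arithmetic; the macro definitions mirror the NILFS_SB_OFFSET_BYTES / NILFS_SB2_OFFSET_BYTES layout constants from include/linux/nilfs2_fs.h and should be read as an approximation, not as the authoritative header.

/* sketch: where the two nilfs2 super blocks sit on a device of a given size */
#include <stdio.h>
#include <stdint.h>

#define NILFS_SB_OFFSET_BYTES	1024ULL		/* primary super block offset (fixed) */
/* secondary super block: start of the last full 4 KiB-aligned block (assumed formula) */
#define NILFS_SB2_OFFSET_BYTES(devsize)	((((devsize) >> 12) - 1) << 12)

int main(void)
{
	uint64_t devsize = 8ULL << 30;	/* example only: an 8 GiB partition */

	printf("primary   super block at byte %llu\n",
	       (unsigned long long)NILFS_SB_OFFSET_BYTES);
	printf("secondary super block at byte %llu\n",
	       (unsigned long long)NILFS_SB2_OFFSET_BYTES(devsize));
	return 0;
}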
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index ef31e9a51c84..e2ced824c624 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -103,8 +103,9 @@ void nilfs_error(struct super_block *sb, const char *function,
 	down_write(&nilfs->ns_sem);
 	if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
 		nilfs->ns_mount_state |= NILFS_ERROR_FS;
-		nilfs->ns_sbp->s_state |= cpu_to_le16(NILFS_ERROR_FS);
-		nilfs_commit_super(sbi);
+		nilfs->ns_sbp[0]->s_state |=
+			cpu_to_le16(NILFS_ERROR_FS);
+		nilfs_commit_super(sbi, 1);
 	}
 	up_write(&nilfs->ns_sem);
 
@@ -208,90 +209,106 @@ static void nilfs_clear_inode(struct inode *inode)
 	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 }
 
-/**
- * nilfs_update_last_segment - change pointer to the latest segment
- * @sbi: nilfs_sb_info
- * @update_cno: flag whether to update checkpoint number.
- *
- * nilfs_update_last_segment() changes information in the super block
- * after a partial segment is written out successfully. The super
- * block is marked dirty. It will be written out at the next VFS sync
- * operations such as sync_supers() and generic_shutdown_super().
- */
-void nilfs_update_last_segment(struct nilfs_sb_info *sbi, int update_cno)
-{
-	struct the_nilfs *nilfs = sbi->s_nilfs;
-	struct nilfs_super_block *sbp = nilfs->ns_sbp;
-
-	/* nilfs->sem must be locked by the caller. */
-	spin_lock(&nilfs->ns_last_segment_lock);
-	if (update_cno)
-		nilfs->ns_last_cno = nilfs->ns_cno++;
-	sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
-	sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
-	sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
-	spin_unlock(&nilfs->ns_last_segment_lock);
-
-	sbi->s_super->s_dirt = 1; /* must be set if delaying the call of
-				     nilfs_commit_super() */
-}
-
-static int nilfs_sync_super(struct nilfs_sb_info *sbi)
+static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb)
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int err;
 	int barrier_done = 0;
 
 	if (nilfs_test_opt(sbi, BARRIER)) {
-		set_buffer_ordered(nilfs->ns_sbh);
+		set_buffer_ordered(nilfs->ns_sbh[0]);
 		barrier_done = 1;
 	}
  retry:
-	set_buffer_dirty(nilfs->ns_sbh);
-	err = sync_dirty_buffer(nilfs->ns_sbh);
+	set_buffer_dirty(nilfs->ns_sbh[0]);
+	err = sync_dirty_buffer(nilfs->ns_sbh[0]);
 	if (err == -EOPNOTSUPP && barrier_done) {
 		nilfs_warning(sbi->s_super, __func__,
 			      "barrier-based sync failed. "
 			      "disabling barriers\n");
 		nilfs_clear_opt(sbi, BARRIER);
 		barrier_done = 0;
-		clear_buffer_ordered(nilfs->ns_sbh);
+		clear_buffer_ordered(nilfs->ns_sbh[0]);
 		goto retry;
 	}
-	if (unlikely(err))
+	if (unlikely(err)) {
 		printk(KERN_ERR
 		       "NILFS: unable to write superblock (err=%d)\n", err);
-	else {
+		if (err == -EIO && nilfs->ns_sbh[1]) {
+			nilfs_fall_back_super_block(nilfs);
+			goto retry;
+		}
+	} else {
+		struct nilfs_super_block *sbp = nilfs->ns_sbp[0];
+
+		/*
+		 * The latest segment becomes trailable from the position
+		 * written in superblock.
+		 */
 		clear_nilfs_discontinued(nilfs);
-		spin_lock(&nilfs->ns_last_segment_lock);
-		nilfs->ns_prot_seq = le64_to_cpu(nilfs->ns_sbp->s_last_seq);
-		spin_unlock(&nilfs->ns_last_segment_lock);
+
+		/* update GC protection for recent segments */
+		if (nilfs->ns_sbh[1]) {
+			sbp = NULL;
+			if (dupsb) {
+				set_buffer_dirty(nilfs->ns_sbh[1]);
+				if (!sync_dirty_buffer(nilfs->ns_sbh[1]))
+					sbp = nilfs->ns_sbp[1];
+			}
+		}
+		if (sbp) {
+			spin_lock(&nilfs->ns_last_segment_lock);
+			nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
+			spin_unlock(&nilfs->ns_last_segment_lock);
+		}
 	}
 
 	return err;
 }
 
-int nilfs_commit_super(struct nilfs_sb_info *sbi)
+int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
-	struct nilfs_super_block *sbp = nilfs->ns_sbp;
+	struct nilfs_super_block **sbp = nilfs->ns_sbp;
 	sector_t nfreeblocks;
+	time_t t;
 	int err;
 
 	/* nilfs->sem must be locked by the caller. */
+	if (sbp[0]->s_magic != NILFS_SUPER_MAGIC) {
+		if (sbp[1] && sbp[1]->s_magic == NILFS_SUPER_MAGIC)
+			nilfs_swap_super_block(nilfs);
+		else {
+			printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
+			       sbi->s_super->s_id);
+			return -EIO;
+		}
+	}
 	err = nilfs_count_free_blocks(nilfs, &nfreeblocks);
 	if (unlikely(err)) {
 		printk(KERN_ERR "NILFS: failed to count free blocks\n");
 		return err;
 	}
-	sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks);
-	sbp->s_wtime = cpu_to_le64(get_seconds());
-	sbp->s_sum = 0;
-	sbp->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
-					  (unsigned char *)sbp,
-					  le16_to_cpu(sbp->s_bytes)));
+	spin_lock(&nilfs->ns_last_segment_lock);
+	sbp[0]->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
+	sbp[0]->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
+	sbp[0]->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
+	spin_unlock(&nilfs->ns_last_segment_lock);
+
+	t = get_seconds();
+	nilfs->ns_sbwtime[0] = t;
+	sbp[0]->s_free_blocks_count = cpu_to_le64(nfreeblocks);
+	sbp[0]->s_wtime = cpu_to_le64(t);
+	sbp[0]->s_sum = 0;
+	sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
+					     (unsigned char *)sbp[0],
+					     nilfs->ns_sbsize));
+	if (dupsb && sbp[1]) {
+		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
+		nilfs->ns_sbwtime[1] = t;
+	}
 	sbi->s_super->s_dirt = 0;
-	return nilfs_sync_super(sbi);
+	return nilfs_sync_super(sbi, dupsb);
 }
 
 static void nilfs_put_super(struct super_block *sb)
@@ -303,8 +320,8 @@ static void nilfs_put_super(struct super_block *sb)
 
 	if (!(sb->s_flags & MS_RDONLY)) {
 		down_write(&nilfs->ns_sem);
-		nilfs->ns_sbp->s_state = cpu_to_le16(nilfs->ns_mount_state);
-		nilfs_commit_super(sbi);
+		nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
+		nilfs_commit_super(sbi, 1);
 		up_write(&nilfs->ns_sem);
 	}
 
@@ -330,7 +347,7 @@ static void nilfs_put_super(struct super_block *sb)
  * 2. down_write(&nilfs->ns_sem)
  *
  * Inside NILFS, locking ns_sem is enough to protect s_dirt and the buffer
- * of the super block (nilfs->ns_sbp).
+ * of the super block (nilfs->ns_sbp[]).
  *
  * In most cases, VFS functions call lock_super() before calling these
  * methods. So we must be careful not to bring on deadlocks when using
@@ -346,8 +363,19 @@ static void nilfs_write_super(struct super_block *sb)
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 
 	down_write(&nilfs->ns_sem);
-	if (!(sb->s_flags & MS_RDONLY))
-		nilfs_commit_super(sbi);
+	if (!(sb->s_flags & MS_RDONLY)) {
+		struct nilfs_super_block **sbp = nilfs->ns_sbp;
+		u64 t = get_seconds();
+		int dupsb;
+
+		if (!nilfs_discontinued(nilfs) && t >= nilfs->ns_sbwtime[0] &&
+		    t < nilfs->ns_sbwtime[0] + NILFS_SB_FREQ) {
+			up_write(&nilfs->ns_sem);
+			return;
+		}
+		dupsb = sbp[1] && t > nilfs->ns_sbwtime[1] + NILFS_ALTSB_FREQ;
+		nilfs_commit_super(sbi, dupsb);
+	}
 	sb->s_dirt = 0;
 	up_write(&nilfs->ns_sem);
 }
@@ -436,7 +464,7 @@ static int nilfs_mark_recovery_complete(struct nilfs_sb_info *sbi)
 	down_write(&nilfs->ns_sem);
 	if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
 		nilfs->ns_mount_state |= NILFS_VALID_FS;
-		err = nilfs_commit_super(sbi);
+		err = nilfs_commit_super(sbi, 1);
 		if (likely(!err))
 			printk(KERN_INFO "NILFS: recovery complete.\n");
 	}
@@ -652,7 +680,7 @@ nilfs_set_default_options(struct nilfs_sb_info *sbi,
 static int nilfs_setup_super(struct nilfs_sb_info *sbi)
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
-	struct nilfs_super_block *sbp = nilfs->ns_sbp;
+	struct nilfs_super_block *sbp = nilfs->ns_sbp[0];
 	int max_mnt_count = le16_to_cpu(sbp->s_max_mnt_count);
 	int mnt_count = le16_to_cpu(sbp->s_mnt_count);
 
@@ -674,88 +702,29 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
 	sbp->s_mnt_count = cpu_to_le16(mnt_count + 1);
 	sbp->s_state = cpu_to_le16(le16_to_cpu(sbp->s_state) & ~NILFS_VALID_FS);
 	sbp->s_mtime = cpu_to_le64(get_seconds());
-	return nilfs_commit_super(sbi);
+	return nilfs_commit_super(sbi, 1);
 }
 
-struct nilfs_super_block *
-nilfs_load_super_block(struct super_block *sb, struct buffer_head **pbh)
+struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
+						 u64 pos, int blocksize,
+						 struct buffer_head **pbh)
 {
-	int blocksize;
-	unsigned long offset, sb_index;
-
-	/*
-	 * Adjusting block size
-	 * Blocksize will be enlarged when it is smaller than hardware
-	 * sector size.
-	 * Disk format of superblock does not change.
-	 */
-	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
-	if (!blocksize) {
-		printk(KERN_ERR
-		       "NILFS: unable to set blocksize of superblock\n");
-		return NULL;
-	}
-	sb_index = NILFS_SB_OFFSET_BYTES / blocksize;
-	offset = NILFS_SB_OFFSET_BYTES % blocksize;
+	unsigned long long sb_index = pos;
+	unsigned long offset;
 
+	offset = do_div(sb_index, blocksize);
 	*pbh = sb_bread(sb, sb_index);
-	if (!*pbh) {
-		printk(KERN_ERR "NILFS: unable to read superblock\n");
+	if (!*pbh)
 		return NULL;
-	}
 	return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
 }
 
-struct nilfs_super_block *
-nilfs_reload_super_block(struct super_block *sb, struct buffer_head **pbh,
-			 int blocksize)
-{
-	struct nilfs_super_block *sbp;
-	unsigned long offset, sb_index;
-	int hw_blocksize = bdev_hardsect_size(sb->s_bdev);
-
-	if (blocksize < hw_blocksize) {
-		printk(KERN_ERR
-		       "NILFS: blocksize %d too small for device "
-		       "(sector-size = %d).\n",
-		       blocksize, hw_blocksize);
-		goto failed_sbh;
-	}
-	brelse(*pbh);
-	sb_set_blocksize(sb, blocksize);
-
-	sb_index = NILFS_SB_OFFSET_BYTES / blocksize;
-	offset = NILFS_SB_OFFSET_BYTES % blocksize;
-
-	*pbh = sb_bread(sb, sb_index);
-	if (!*pbh) {
-		printk(KERN_ERR
-		       "NILFS: cannot read superblock on 2nd try.\n");
-		goto failed;
-	}
-
-	sbp = (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
-	if (sbp->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
-		printk(KERN_ERR
-		       "NILFS: !? Magic mismatch on 2nd try.\n");
-		goto failed_sbh;
-	}
-	return sbp;
-
- failed_sbh:
-	brelse(*pbh);
-
- failed:
-	return NULL;
-}
-
 int nilfs_store_magic_and_option(struct super_block *sb,
 				 struct nilfs_super_block *sbp,
 				 char *data)
 {
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
 
-	/* trying to fill super (1st stage) */
 	sb->s_magic = le16_to_cpu(sbp->s_magic);
 
 	/* FS independent flags */
@@ -763,11 +732,6 @@ int nilfs_store_magic_and_option(struct super_block *sb,
 	sb->s_flags |= MS_NOATIME;
 #endif
 
-	if (sb->s_magic != NILFS_SUPER_MAGIC) {
-		printk("NILFS: Can't find nilfs on dev %s.\n", sb->s_id);
-		return -EINVAL;
-	}
-
 	nilfs_set_default_options(sbi, sbp);
 
 	sbi->s_resuid = le16_to_cpu(sbp->s_def_resuid);
@@ -775,10 +739,7 @@ int nilfs_store_magic_and_option(struct super_block *sb,
 	sbi->s_interval = le32_to_cpu(sbp->s_c_interval);
 	sbi->s_watermark = le32_to_cpu(sbp->s_c_block_max);
 
-	if (!parse_options(data, sb))
-		return -EINVAL;
-
-	return 0;
+	return !parse_options(data, sb) ? -EINVAL : 0 ;
 }
 
 /**
@@ -967,12 +928,12 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
 		 * the RDONLY flag and then mark the partition as valid again.
 		 */
 		down_write(&nilfs->ns_sem);
-		sbp = nilfs->ns_sbp;
+		sbp = nilfs->ns_sbp[0];
 		if (!(sbp->s_state & le16_to_cpu(NILFS_VALID_FS)) &&
 		    (nilfs->ns_mount_state & NILFS_VALID_FS))
 			sbp->s_state = cpu_to_le16(nilfs->ns_mount_state);
 		sbp->s_mtime = cpu_to_le64(get_seconds());
-		nilfs_commit_super(sbi);
+		nilfs_commit_super(sbi, 1);
 		up_write(&nilfs->ns_sem);
 	} else {
 		/*