Diffstat (limited to 'fs/super.c')
-rw-r--r--  fs/super.c | 72
1 file changed, 23 insertions(+), 49 deletions(-)
diff --git a/fs/super.c b/fs/super.c
index 8dbe1ead9ddd..c8ce5ed04249 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -284,23 +284,23 @@ EXPORT_SYMBOL(lock_super);
 EXPORT_SYMBOL(unlock_super);
 
 /*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock. Requires a second blkdev
- * flush by the caller to complete the operation.
+ * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
+ * just dirties buffers with inodes so we have to submit IO for these buffers
+ * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
+ * case write_inode() functions do sync_dirty_buffer() and thus effectively
+ * write one block at a time.
  */
-static int __fsync_super(struct super_block *sb)
+static int __fsync_super(struct super_block *sb, int wait)
 {
-        sync_inodes_sb(sb, 0);
         vfs_dq_sync(sb);
-        sync_inodes_sb(sb, 1);
+        sync_inodes_sb(sb, wait);
         lock_super(sb);
         if (sb->s_dirt && sb->s_op->write_super)
                 sb->s_op->write_super(sb);
         unlock_super(sb);
         if (sb->s_op->sync_fs)
-                sb->s_op->sync_fs(sb, 1);
-        return sync_blockdev(sb->s_bdev);
+                sb->s_op->sync_fs(sb, wait);
+        return __sync_blockdev(sb->s_bdev, wait);
 }
 
 /*
@@ -310,7 +310,12 @@ static int __fsync_super(struct super_block *sb)
  */
 int fsync_super(struct super_block *sb)
 {
-        return __fsync_super(sb);
+        int ret;
+
+        ret = __fsync_super(sb, 0);
+        if (ret < 0)
+                return ret;
+        return __fsync_super(sb, 1);
 }
 EXPORT_SYMBOL_GPL(fsync_super);
 
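The two-pass structure of the new fsync_super() is the heart of the change: the wait == 0 pass queues all dirty data for writeback, and the wait == 1 pass waits for it, so the block device sees batched IO instead of one synchronous block at a time. As a rough userspace analogy (not the kernel code itself), the same submit-then-wait split can be expressed with sync_file_range(2); two_pass_sync() and the file handling below are made up for illustration, and note that sync_file_range() does not flush metadata:

/*
 * Userspace analogy for the wait == 0 / wait == 1 split above:
 * the first pass starts writeback without blocking, the second
 * waits for it to finish.  Linux-specific (sync_file_range(2)).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int two_pass_sync(int fd)
{
        /* Pass 1 (wait == 0): queue every dirty page for writeback. */
        if (sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE) < 0)
                return -1;
        /* Pass 2 (wait == 1): wait for that writeback to complete. */
        return sync_file_range(fd, 0, 0,
                               SYNC_FILE_RANGE_WAIT_BEFORE |
                               SYNC_FILE_RANGE_WRITE |
                               SYNC_FILE_RANGE_WAIT_AFTER);
}

int main(int argc, char **argv)
{
        int fd = open(argc > 1 ? argv[1] : "testfile", O_WRONLY);

        if (fd < 0 || two_pass_sync(fd) < 0) {
                perror("two_pass_sync");
                return 1;
        }
        close(fd);
        return 0;
}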
@@ -469,20 +474,18 @@ restart:
 }
 
 /*
- * Call the ->sync_fs super_op against all filesystems which are r/w and
- * which implement it.
+ * Sync all the data for all the filesystems (called by sys_sync() and
+ * emergency sync)
  *
  * This operation is careful to avoid the livelock which could easily happen
- * if two or more filesystems are being continuously dirtied. s_need_sync_fs
+ * if two or more filesystems are being continuously dirtied. s_need_sync
  * is used only here. We set it against all filesystems and then clear it as
  * we sync them. So redirtied filesystems are skipped.
  *
  * But if process A is currently running sync_filesystems and then process B
- * calls sync_filesystems as well, process B will set all the s_need_sync_fs
+ * calls sync_filesystems as well, process B will set all the s_need_sync
  * flags again, which will cause process A to resync everything. Fix that with
  * a local mutex.
- *
- * (Fabian) Avoid sync_fs with clean fs & wait mode 0
  */
 void sync_filesystems(int wait)
 {
@@ -492,25 +495,23 @@ void sync_filesystems(int wait)
         mutex_lock(&mutex);             /* Could be down_interruptible */
         spin_lock(&sb_lock);
         list_for_each_entry(sb, &super_blocks, s_list) {
-                if (!sb->s_op->sync_fs)
-                        continue;
                 if (sb->s_flags & MS_RDONLY)
                         continue;
-                sb->s_need_sync_fs = 1;
+                sb->s_need_sync = 1;
         }
 
 restart:
         list_for_each_entry(sb, &super_blocks, s_list) {
-                if (!sb->s_need_sync_fs)
+                if (!sb->s_need_sync)
                         continue;
-                sb->s_need_sync_fs = 0;
+                sb->s_need_sync = 0;
                 if (sb->s_flags & MS_RDONLY)
                         continue;       /* hm. Was remounted r/o meanwhile */
                 sb->s_count++;
                 spin_unlock(&sb_lock);
                 down_read(&sb->s_umount);
                 if (sb->s_root)
-                        sb->s_op->sync_fs(sb, wait);
+                        __fsync_super(sb, wait);
                 up_read(&sb->s_umount);
                 /* restart only when sb is no longer on the list */
                 spin_lock(&sb_lock);
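The rewritten loop keeps the livelock avoidance the comment describes: every superblock is marked up front, the mark is cleared as each one is synced, and a filesystem redirtied after its mark is cleared simply does not get the mark back, while the local mutex stops a concurrent caller from re-marking everything mid-walk. A minimal single-threaded sketch of that mark-and-clear pattern (hypothetical struct fs and field names, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct fs {
        const char *name;
        bool dirty;
        bool need_sync;         /* plays the role of s_need_sync */
};

static void sync_all(struct fs *fss, int n)
{
        /* Pass 1: mark everything that currently needs syncing. */
        for (int i = 0; i < n; i++)
                fss[i].need_sync = true;

        /*
         * Pass 2: sync marked entries only.  An entry redirtied after
         * its mark is cleared is skipped, so a continuously-dirtied
         * filesystem cannot make this loop run forever.
         */
        for (int i = 0; i < n; i++) {
                if (!fss[i].need_sync)
                        continue;
                fss[i].need_sync = false;
                fss[i].dirty = false;
                printf("synced %s\n", fss[i].name);
        }
}

int main(void)
{
        struct fs fss[] = {
                { "fs0", true, false },
                { "fs1", true, false },
        };

        sync_all(fss, 2);
        return 0;
}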
@@ -521,33 +522,6 @@ restart:
         mutex_unlock(&mutex);
 }
 
-#ifdef CONFIG_BLOCK
-/*
- * Sync all block devices underlying some superblock
- */
-void sync_blockdevs(void)
-{
-        struct super_block *sb;
-
-        spin_lock(&sb_lock);
-restart:
-        list_for_each_entry(sb, &super_blocks, s_list) {
-                if (!sb->s_bdev)
-                        continue;
-                sb->s_count++;
-                spin_unlock(&sb_lock);
-                down_read(&sb->s_umount);
-                if (sb->s_root)
-                        sync_blockdev(sb->s_bdev);
-                up_read(&sb->s_umount);
-                spin_lock(&sb_lock);
-                if (__put_super_and_need_restart(sb))
-                        goto restart;
-        }
-        spin_unlock(&sb_lock);
-}
-#endif
-
 /**
  * get_super - get the superblock of a device
  * @bdev: device to get the superblock for