author     Jan Kara <jack@suse.cz>              2009-04-27 10:43:52 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>    2009-06-11 21:36:04 -0400
commit     c15c54f5f056ee4819da9fde59a5f2cd45445f23 (patch)
tree       0b8d142f0b8d44b71bf7ff9db7a373f90d0556a3 /fs
parent     5cee5815d1564bbbd505fea86f4550f1efdb5cd0 (diff)
vfs: Move syncing code from super.c to sync.c (version 4)
Move sync_filesystems(), __fsync_super(), fsync_super() from
super.c to sync.c where they fit better.
[build fixes folded]
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs')
-rw-r--r--   fs/super.c   85
-rw-r--r--   fs/sync.c    85
2 files changed, 85 insertions, 85 deletions
diff --git a/fs/super.c b/fs/super.c
index c8ce5ed04249..f822c74f25be 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -283,42 +283,6 @@ void unlock_super(struct super_block * sb)
 EXPORT_SYMBOL(lock_super);
 EXPORT_SYMBOL(unlock_super);
 
-/*
- * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
- * just dirties buffers with inodes so we have to submit IO for these buffers
- * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
- * case write_inode() functions do sync_dirty_buffer() and thus effectively
- * write one block at a time.
- */
-static int __fsync_super(struct super_block *sb, int wait)
-{
-	vfs_dq_sync(sb);
-	sync_inodes_sb(sb, wait);
-	lock_super(sb);
-	if (sb->s_dirt && sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-	if (sb->s_op->sync_fs)
-		sb->s_op->sync_fs(sb, wait);
-	return __sync_blockdev(sb->s_bdev, wait);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-	int ret;
-
-	ret = __fsync_super(sb, 0);
-	if (ret < 0)
-		return ret;
-	return __fsync_super(sb, 1);
-}
-EXPORT_SYMBOL_GPL(fsync_super);
-
 /**
  * generic_shutdown_super - common helper for ->kill_sb()
  * @sb: superblock to kill
@@ -473,55 +437,6 @@ restart:
 	spin_unlock(&sb_lock);
 }
 
-/*
- * Sync all the data for all the filesystems (called by sys_sync() and
- * emergency sync)
- *
- * This operation is careful to avoid the livelock which could easily happen
- * if two or more filesystems are being continuously dirtied. s_need_sync
- * is used only here. We set it against all filesystems and then clear it as
- * we sync them. So redirtied filesystems are skipped.
- *
- * But if process A is currently running sync_filesystems and then process B
- * calls sync_filesystems as well, process B will set all the s_need_sync
- * flags again, which will cause process A to resync everything. Fix that with
- * a local mutex.
- */
-void sync_filesystems(int wait)
-{
-	struct super_block *sb;
-	static DEFINE_MUTEX(mutex);
-
-	mutex_lock(&mutex);		/* Could be down_interruptible */
-	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (sb->s_flags & MS_RDONLY)
-			continue;
-		sb->s_need_sync = 1;
-	}
-
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (!sb->s_need_sync)
-			continue;
-		sb->s_need_sync = 0;
-		if (sb->s_flags & MS_RDONLY)
-			continue;	/* hm. Was remounted r/o meanwhile */
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (sb->s_root)
-			__fsync_super(sb, wait);
-		up_read(&sb->s_umount);
-		/* restart only when sb is no longer on the list */
-		spin_lock(&sb_lock);
-		if (__put_super_and_need_restart(sb))
-			goto restart;
-	}
-	spin_unlock(&sb_lock);
-	mutex_unlock(&mutex);
-}
-
 /**
  * get_super - get the superblock of a device
  * @bdev: device to get the superblock for
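
The sync_filesystems() comment removed above describes a mark-and-sweep scheme for avoiding livelock: every filesystem's s_need_sync flag is set up front, and the flag is cleared before each filesystem is synced, so anything re-dirtied during the sweep is left for the next sync rather than revisited forever. Below is a minimal standalone userspace model of that pattern, not kernel code; the names toy_sb and toy_sync_filesystems are made up for illustration.

	/*
	 * Toy userspace model of the mark-and-sweep livelock avoidance used by
	 * sync_filesystems(): mark everything first, then sweep the list and
	 * clear the mark before handling each entry.
	 */
	#include <stdio.h>

	#define NFS 4

	struct toy_sb {
		const char *name;
		int dirty;		/* data still to be written */
		int need_sync;		/* plays the role of s_need_sync */
	};

	static void toy_sync_filesystems(struct toy_sb *sbs, int n)
	{
		int i;

		/* pass 1: mark every entry that needs syncing */
		for (i = 0; i < n; i++)
			if (sbs[i].dirty)
				sbs[i].need_sync = 1;

		/*
		 * pass 2: sweep the list; the mark is cleared before the work is
		 * done, so an entry re-dirtied during the sweep waits for the
		 * next sync instead of livelocking this one.
		 */
		for (i = 0; i < n; i++) {
			if (!sbs[i].need_sync)
				continue;
			sbs[i].need_sync = 0;
			sbs[i].dirty = 0;	/* "write it out" */
			printf("synced %s\n", sbs[i].name);
		}
	}

	int main(void)
	{
		struct toy_sb sbs[NFS] = {
			{ "sda1", 1, 0 }, { "sda2", 0, 0 },
			{ "sdb1", 1, 0 }, { "sdb2", 1, 0 },
		};

		toy_sync_filesystems(sbs, NFS);
		return 0;
	}

The kernel version additionally takes sb_lock, bumps s_count, and may restart the walk when a superblock drops off the list; the toy model only shows the flag discipline.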
diff --git a/fs/sync.c b/fs/sync.c
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -18,6 +18,91 @@
 #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
 			SYNC_FILE_RANGE_WAIT_AFTER)
 
+/*
+ * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
+ * just dirties buffers with inodes so we have to submit IO for these buffers
+ * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
+ * case write_inode() functions do sync_dirty_buffer() and thus effectively
+ * write one block at a time.
+ */
+static int __fsync_super(struct super_block *sb, int wait)
+{
+	vfs_dq_sync(sb);
+	sync_inodes_sb(sb, wait);
+	lock_super(sb);
+	if (sb->s_dirt && sb->s_op->write_super)
+		sb->s_op->write_super(sb);
+	unlock_super(sb);
+	if (sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb, wait);
+	return __sync_blockdev(sb->s_bdev, wait);
+}
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+	int ret;
+
+	ret = __fsync_super(sb, 0);
+	if (ret < 0)
+		return ret;
+	return __fsync_super(sb, 1);
+}
+EXPORT_SYMBOL_GPL(fsync_super);
+
+/*
+ * Sync all the data for all the filesystems (called by sys_sync() and
+ * emergency sync)
+ *
+ * This operation is careful to avoid the livelock which could easily happen
+ * if two or more filesystems are being continuously dirtied. s_need_sync
+ * is used only here. We set it against all filesystems and then clear it as
+ * we sync them. So redirtied filesystems are skipped.
+ *
+ * But if process A is currently running sync_filesystems and then process B
+ * calls sync_filesystems as well, process B will set all the s_need_sync
+ * flags again, which will cause process A to resync everything. Fix that with
+ * a local mutex.
+ */
+static void sync_filesystems(int wait)
+{
+	struct super_block *sb;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);		/* Could be down_interruptible */
+	spin_lock(&sb_lock);
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_flags & MS_RDONLY)
+			continue;
+		sb->s_need_sync = 1;
+	}
+
+restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (!sb->s_need_sync)
+			continue;
+		sb->s_need_sync = 0;
+		if (sb->s_flags & MS_RDONLY)
+			continue;	/* hm. Was remounted r/o meanwhile */
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (sb->s_root)
+			__fsync_super(sb, wait);
+		up_read(&sb->s_umount);
+		/* restart only when sb is no longer on the list */
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
+	}
+	spin_unlock(&sb_lock);
+	mutex_unlock(&mutex);
+}
+
 SYSCALL_DEFINE0(sync)
 {
 	sync_filesystems(0);
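
The hunk above lands next to fs/sync.c's SYNC_FILE_RANGE flag definitions, and the wait argument threaded through __fsync_super() expresses the same two-step idea that userspace can request with sync_file_range(): first start writeback without blocking, then wait for it to complete. The sketch below only illustrates that start-then-wait split from userspace; it is not code from the patch, and the file path is made up.

	/*
	 * Userspace illustration of the "start writeback, then wait" split.
	 * Pass 1 (SYNC_FILE_RANGE_WRITE) initiates I/O without blocking;
	 * pass 2 (SYNC_FILE_RANGE_WAIT_AFTER) waits for that I/O to finish.
	 */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/tmp/example-data", O_WRONLY | O_CREAT, 0644);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		if (write(fd, "hello\n", 6) != 6)
			perror("write");

		/* pass 1: start I/O on dirty pages, don't wait */
		if (sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE))
			perror("sync_file_range(WRITE)");

		/* pass 2: wait until that I/O has completed */
		if (sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WAIT_AFTER))
			perror("sync_file_range(WAIT_AFTER)");

		close(fd);
		return 0;
	}

Splitting the work this way lets all the I/O for many objects be queued before anything blocks, which is the same reason fsync_super() calls __fsync_super() once with wait == 0 and once with wait == 1.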