author    Jan Kara <jack@suse.cz>    2017-04-05 08:17:30 -0400
committer Jan Kara <jack@suse.cz>    2017-04-05 08:24:55 -0400
commit    1e0e653f1136a413a9969e5d0d548ee6499b9763 (patch)
tree      a3e413e0affd13b0906dafa495501ee70216106d /fs/reiserfs
parent    71b0576bdb862e964a82c73327cdd1a249c53e67 (diff)
reiserfs: Protect dquot_writeback_dquots() by s_umount semaphore
dquot_writeback_dquots() expects the s_umount semaphore to be held to protect it from other concurrent quota operations. reiserfs_sync_fs() can call dquot_writeback_dquots() without holding the s_umount semaphore when called from flush_old_commits().

Fix the problem by grabbing s_umount in flush_old_commits(). However, we have to be careful and use only a trylock, since reiserfs_cancel_old_flush() can be waiting for flush_old_commits() to complete while holding the s_umount semaphore. Possible postponing of the sync work is not a big deal, though, as it is only an opportunistic flush.

Fixes: 9d1ccbe70e0b14545caad12dc73adb3605447df0
Reported-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Jan Kara <jack@suse.cz>
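To make the lock inversion concrete, here is a minimal userspace sketch of the deadlock the trylock avoids. It models s_umount with a pthread rwlock and the flush work with a thread; the names and primitives are illustrative stand-ins, not the kernel API, and the scenario is assumed from the commit message:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the superblock's s_umount semaphore. */
static pthread_rwlock_t s_umount = PTHREAD_RWLOCK_INITIALIZER;

/* Models flush_old_commits(): with a plain blocking read lock this
 * thread would wait forever, because main() below already holds the
 * lock and is itself waiting for this thread to finish. */
static void *flush_old_commits(void *arg)
{
	if (pthread_rwlock_tryrdlock(&s_umount) != 0) {
		/* Lock is busy (the canceller holds it): back off and
		 * requeue later instead of blocking. */
		printf("flush: s_umount busy, requeueing\n");
		return NULL;
	}
	printf("flush: quota writeback done under s_umount\n");
	pthread_rwlock_unlock(&s_umount);
	return NULL;
}

/* Models the cancel side: takes s_umount, then waits for the flush
 * work to complete. */
int main(void)
{
	pthread_t worker;

	pthread_rwlock_wrlock(&s_umount);
	pthread_create(&worker, NULL, flush_old_commits, NULL);
	pthread_join(&worker, NULL);  /* returns only because of the trylock */
	pthread_rwlock_unlock(&s_umount);
	return 0;
}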
Diffstat (limited to 'fs/reiserfs')
-rw-r--r--  fs/reiserfs/super.c | 14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 91cf5cbd6332..f536e12c4b1d 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -89,6 +89,19 @@ static void flush_old_commits(struct work_struct *work)
 	sbi = container_of(work, struct reiserfs_sb_info, old_work.work);
 	s = sbi->s_journal->j_work_sb;
 
+	/*
+	 * We need s_umount for protecting quota writeback. We have to use
+	 * trylock as reiserfs_cancel_old_flush() may be waiting for this work
+	 * to complete with s_umount held.
+	 */
+	if (!down_read_trylock(&s->s_umount)) {
+		/* Requeue work if we are not cancelling it */
+		spin_lock(&sbi->old_work_lock);
+		if (sbi->work_queued == 1)
+			queue_delayed_work(system_long_wq, &sbi->old_work, HZ);
+		spin_unlock(&sbi->old_work_lock);
+		return;
+	}
 	spin_lock(&sbi->old_work_lock);
 	/* Avoid clobbering the cancel state... */
 	if (sbi->work_queued == 1)
@@ -96,6 +109,7 @@ static void flush_old_commits(struct work_struct *work)
 	spin_unlock(&sbi->old_work_lock);
 
 	reiserfs_sync_fs(s, 1);
+	up_read(&s->s_umount);
 }
 
 void reiserfs_schedule_old_flush(struct super_block *s)
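For reference, a sketch of the counterpart reiserfs_cancel_old_flush() that the new comment refers to. The body below is an assumption reconstructed from the comments in this patch ("cancel state", "requeue if we are not cancelling"), not the verbatim kernel source:

#include <linux/workqueue.h>
#include "reiserfs.h"

/* Assumed sketch: mark the work as cancelled so the trylock-failure
 * path in flush_old_commits() stops requeueing it, then wait for any
 * instance that is already running. The caller holds s_umount, which
 * is exactly why flush_old_commits() must not block on it. */
void reiserfs_cancel_old_flush(struct super_block *sb)
{
	struct reiserfs_sb_info *sbi = REISERFS_SB(sb);

	spin_lock(&sbi->old_work_lock);
	/* Any value other than 1 blocks requeueing; 2 marks "cancelled". */
	sbi->work_queued = 2;
	spin_unlock(&sbi->old_work_lock);
	cancel_delayed_work_sync(&sbi->old_work);
}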