author	Al Viro <viro@zeniv.linux.org.uk>	2010-03-22 08:53:19 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-05-21 18:31:14 -0400
commit	b20bd1a5e78af267dc4b6e1ffed48d5d776302c5 (patch)
tree	d50982ee42500712d9add489435d4455580b9cac /fs
parent	389b8be6ef419397e4f176652927ebad6ebb4b77 (diff)
get rid of S_BIAS
use atomic_inc_not_zero(&sb->s_active) instead of playing games with
checking ->s_count > S_BIAS

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
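The core of the change is conditional refcount acquisition: take an active
reference only if the count has not already dropped to zero, so a superblock
past the point of no return can never be revived. As a rough userspace sketch
of what atomic_inc_not_zero() does (C11 atomics standing in for the kernel's
atomic_t; an illustration, not the kernel implementation):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Sketch of the atomic_inc_not_zero() idiom: bump *v unless it has
	 * already reached 0, and report whether we succeeded. */
	static bool inc_not_zero(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old != 0) {
			/* on failure, compare_exchange reloads old and we retry */
			if (atomic_compare_exchange_weak(v, &old, old + 1))
				return true;	/* got a new active reference */
		}
		return false;	/* already dead; do not resurrect */
	}

Because a zero count can never be incremented again, a caller that loses this
race knows teardown has begun, and the fast path needs no sb_lock at all.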
Diffstat (limited to 'fs')
 fs/notify/inotify/inotify.c | 32 +++++++++++++-------------------
 fs/super.c                  | 28 +++++++++++-----------------
 2 files changed, 24 insertions(+), 36 deletions(-)
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 40b1cf914ccb..441ef136af22 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -110,14 +110,10 @@ EXPORT_SYMBOL_GPL(get_inotify_watch);
 int pin_inotify_watch(struct inotify_watch *watch)
 {
 	struct super_block *sb = watch->inode->i_sb;
-	spin_lock(&sb_lock);
-	if (sb->s_count >= S_BIAS) {
-		atomic_inc(&sb->s_active);
-		spin_unlock(&sb_lock);
+	if (atomic_inc_not_zero(&sb->s_active)) {
 		atomic_inc(&watch->count);
 		return 1;
 	}
-	spin_unlock(&sb_lock);
 	return 0;
 }
 
@@ -518,16 +514,16 @@ EXPORT_SYMBOL_GPL(inotify_init_watch);
  * ->s_umount, which will almost certainly wait until the superblock is shut
  * down and the watch in question is pining for fjords. That's fine, but
  * there is a problem - we might have hit the window between ->s_active
- * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
- * is past the point of no return and is heading for shutdown) and the
- * moment when deactivate_super() acquires ->s_umount. We could just do
- * drop_super() yield() and retry, but that's rather antisocial and this
- * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having
- * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
- * that we won't race with inotify_umount_inodes(). So we could grab a
- * reference to watch and do the rest as above, just with drop_super() instead
- * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we
- * could grab ->s_umount. So the watch could've been gone already.
+ * getting to 0 (i.e. the moment when superblock is past the point of no return
+ * and is heading for shutdown) and the moment when deactivate_super() acquires
+ * ->s_umount. We could just do drop_super() yield() and retry, but that's
+ * rather antisocial and this stuff is luser-triggerable. OTOH, having grabbed
+ * ->s_umount and having found that we'd got there first (i.e. that ->s_root is
+ * non-NULL) we know that we won't race with inotify_umount_inodes(). So we
+ * could grab a reference to watch and do the rest as above, just with
+ * drop_super() instead of deactivate_super(), right? Wrong. We had to drop
+ * ih->mutex before we could grab ->s_umount. So the watch could've been gone
+ * already.
  *
  * That still can be dealt with - we need to save watch->wd, do idr_find()
  * and compare its result with our pointer. If they match, we either have
@@ -565,14 +561,12 @@ static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
 	struct super_block *sb = watch->inode->i_sb;
 	s32 wd = watch->wd;
 
-	spin_lock(&sb_lock);
-	if (sb->s_count >= S_BIAS) {
-		atomic_inc(&sb->s_active);
-		spin_unlock(&sb_lock);
+	if (atomic_inc_not_zero(&sb->s_active)) {
 		get_inotify_watch(watch);
 		mutex_unlock(&ih->mutex);
 		return 1;	/* the best outcome */
 	}
+	spin_lock(&sb_lock);
 	sb->s_count++;
 	spin_unlock(&sb_lock);
 	mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
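The comment block rewritten above hinges on one window: atomic_inc_not_zero()
can fail while deactivate_super() has not yet taken ->s_umount, so pin_to_kill()'s
slow path must keep the structure alive with a passive ->s_count reference and
wait out teardown itself. A compressed userspace sketch of that two-outcome
shape, reusing the hypothetical inc_not_zero() above (a pthread rwlock stands
in for ->s_umount; none of these names are the kernel API):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int active;		/* like sb->s_active: liveness */
		atomic_int count;		/* like sb->s_count: keeps struct around */
		pthread_rwlock_t teardown;	/* like sb->s_umount */
	};

	/* Returns true if we pinned a live object; false if it was dying
	 * and we merely waited for its shutdown to complete. */
	static bool pin_sketch(struct obj *o)
	{
		if (inc_not_zero(&o->active))		/* fast path: still live */
			return true;
		atomic_fetch_add(&o->count, 1);		/* passive ref: struct stays */
		pthread_rwlock_wrlock(&o->teardown);	/* blocks until teardown is done */
		pthread_rwlock_unlock(&o->teardown);
		atomic_fetch_sub(&o->count, 1);
		return false;
	}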
diff --git a/fs/super.c b/fs/super.c
index d8c8b1d2d010..bc734f8b3e18 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -93,7 +93,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 	 * subclass.
 	 */
 	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
-	s->s_count = S_BIAS;
+	s->s_count = 1;
 	atomic_set(&s->s_active, 1);
 	mutex_init(&s->s_vfs_rename_mutex);
 	mutex_init(&s->s_dquot.dqio_mutex);
@@ -189,9 +189,7 @@ void put_super(struct super_block *sb)
 void deactivate_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
-	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
-		s->s_count -= S_BIAS-1;
-		spin_unlock(&sb_lock);
+	if (atomic_dec_and_test(&s->s_active)) {
 		vfs_dq_off(s, 0);
 		down_write(&s->s_umount);
 		fs->kill_sb(s);
@@ -216,9 +214,7 @@ EXPORT_SYMBOL(deactivate_super);
 void deactivate_locked_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
-	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
-		s->s_count -= S_BIAS-1;
-		spin_unlock(&sb_lock);
+	if (atomic_dec_and_test(&s->s_active)) {
 		vfs_dq_off(s, 0);
 		fs->kill_sb(s);
 		put_filesystem(fs);
@@ -243,21 +239,19 @@ EXPORT_SYMBOL(deactivate_locked_super);
  */
 static int grab_super(struct super_block *s) __releases(sb_lock)
 {
+	if (atomic_inc_not_zero(&s->s_active)) {
+		spin_unlock(&sb_lock);
+		down_write(&s->s_umount);
+		return 1;
+	}
+	/* it's going away */
 	s->s_count++;
 	spin_unlock(&sb_lock);
+	/* usually that'll be enough for it to die... */
 	down_write(&s->s_umount);
-	if (s->s_root) {
-		spin_lock(&sb_lock);
-		if (s->s_count > S_BIAS) {
-			atomic_inc(&s->s_active);
-			s->s_count--;
-			spin_unlock(&sb_lock);
-			return 1;
-		}
-		spin_unlock(&sb_lock);
-	}
 	up_write(&s->s_umount);
 	put_super(s);
+	/* ... but in case it wasn't, let's at least yield() */
 	yield();
 	return 0;
 }
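For completeness, the release half that the new grab_super()/deactivate_super()
pairing relies on is the classic dec-and-test: exactly one caller observes the
count hit zero and runs shutdown. A minimal sketch in the same hypothetical
userspace terms as above:

	/* Counterpart to inc_not_zero(): drop an active reference and report
	 * whether it was the last one. Since inc_not_zero() refuses once the
	 * count is 0, at most one thread ever sees true here, so the shutdown
	 * path (kill_sb() in the real code) runs exactly once, without taking
	 * sb_lock. */
	static bool dec_and_test(atomic_int *v)
	{
		return atomic_fetch_sub(v, 1) == 1;
	}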