author    Ingo Molnar <mingo@kernel.org>  2014-11-16 04:50:25 -0500
committer Ingo Molnar <mingo@kernel.org>  2014-11-16 04:50:25 -0500
commit    e9ac5f0fa8549dffe2a15870217a9c2e7cd557ec (patch)
tree      863e0e108f7b7ba2dffc7575bbdfc2d454fc2017 /fs/notify
parent    44dba3d5d6a10685fb15bd1954e62016334825e0 (diff)
parent    6e998916dfe327e785e7c2447959b2c1a3ea4930 (diff)
Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying more changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'fs/notify')
 fs/notify/inode_mark.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 9ce062218de9..e8497144b323 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
                 spin_unlock(&inode->i_lock);
 
                 /* In case the dropping of a reference would nuke next_i. */
-                if ((&next_i->i_sb_list != list) &&
-                    atomic_read(&next_i->i_count)) {
+                while (&next_i->i_sb_list != list) {
                         spin_lock(&next_i->i_lock);
-                        if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+                        if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
+                            atomic_read(&next_i->i_count)) {
                                 __iget(next_i);
                                 need_iput = next_i;
+                                spin_unlock(&next_i->i_lock);
+                                break;
                         }
                         spin_unlock(&next_i->i_lock);
+                        next_i = list_entry(next_i->i_sb_list.next,
+                                            struct inode, i_sb_list);
                 }
 
                 /*
-                 * We can safely drop inode_sb_list_lock here because we hold
-                 * references on both inode and next_i. Also no new inodes
-                 * will be added since the umount has begun.
+                 * We can safely drop inode_sb_list_lock here because either
+                 * we actually hold references on both inode and next_i or
+                 * end of list. Also no new inodes will be added since the
+                 * umount has begun.
                  */
                 spin_unlock(&inode_sb_list_lock);
 
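The hunk above is purely a control-flow fix: instead of pinning the immediate next inode and giving up when it cannot be pinned, the loop now walks forward along i_sb_list, skipping inodes that are being freed or hold no references, until it either pins a usable next_i or reaches the end of the list. Below is a minimal userspace sketch of that pattern. All names in it (struct node, pin_next, the refcount/freeing fields) are hypothetical stand-ins for the inode machinery, and the per-node locking (next_i->i_lock) is omitted for brevity; this illustrates the technique, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *next;
        int refcount;   /* 0: nobody holds it, it may be freed under us */
        bool freeing;   /* analogous to I_FREEING | I_WILL_FREE */
        int id;
};

/* Pin the first pinnable node at or after n; NULL means end of list. */
static struct node *pin_next(struct node *n)
{
        while (n) {                     /* like: while (&next_i->i_sb_list != list) */
                if (!n->freeing && n->refcount > 0) {
                        n->refcount++;  /* like __iget(next_i) */
                        return n;       /* like: need_iput = next_i; break; */
                }
                n = n->next;            /* skip a dying or unreferenced neighbour */
        }
        return NULL;                    /* no pinnable neighbour left */
}

int main(void)
{
        struct node c = { NULL, 1, false, 3 };
        struct node b = { &c,   0, true,  2 };  /* dying: must be skipped, not pinned */
        struct node a = { &b,   1, false, 1 };

        struct node *pinned = pin_next(a.next);
        printf("pinned node id: %d\n", pinned ? pinned->id : -1);  /* prints 3 */
        return 0;
}

Walking forward is what makes the updated comment accurate: after the loop we either hold a reference on next_i or we are at the end of the list, so dropping inode_sb_list_lock can no longer leave next_i pointing at freed memory.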