Diffstat (limited to 'fs/notify/inotify/inotify_user.c')
-rw-r--r--  fs/notify/inotify/inotify_user.c | 32 +++++---------------------------
1 file changed, 5 insertions(+), 27 deletions(-)
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 982a412ac5bc..ff231ad23895 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -363,39 +363,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
 }
 
 /*
- * When, for whatever reason, inotify is done with a mark (or what used to be a
- * watch) we need to remove that watch from the idr and we need to send IN_IGNORED
- * for the given wd.
- *
- * There is a bit of recursion here. The loop looks like:
- *	inotify_destroy_mark_entry -> fsnotify_destroy_mark_by_entry ->
- *	inotify_freeing_mark -> inotify_destroy_mark_entry -> restart
- * But the loop is broken in 2 places. fsnotify_destroy_mark_by_entry sets
- * entry->group = NULL before the call to inotify_freeing_mark, so the if (egroup)
- * test below will not call back to fsnotify again. But even if that test wasn't
- * there this would still be safe since fsnotify_destroy_mark_by_entry() is
- * safe from recursion.
+ * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
+ * internal reference held on the mark because it is in the idr.
  */
-void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
+				    struct fsnotify_group *group)
 {
 	struct inotify_inode_mark_entry *ientry;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
-	struct fsnotify_group *egroup;
 	struct idr *idr;
 
-	spin_lock(&entry->lock);
-	egroup = entry->group;
-
-	/* if egroup we aren't really done and something might still send events
-	 * for this inode, on the callback we'll send the IN_IGNORED */
-	if (egroup) {
-		spin_unlock(&entry->lock);
-		fsnotify_destroy_mark_by_entry(entry);
-		return;
-	}
-	spin_unlock(&entry->lock);
-
 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
 	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
@@ -699,7 +677,7 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 	fsnotify_get_mark(entry);
 	spin_unlock(&group->inotify_data.idr_lock);
 
-	inotify_destroy_mark_entry(entry, group);
+	fsnotify_destroy_mark_by_entry(entry);
 	fsnotify_put_mark(entry);
 
 out:
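
For context on the behavior this patch touches: when a watch is removed, the kernel queues an IN_IGNORED event for the wd and drops it from the idr, which is what the renamed inotify_ignored_and_remove_idr() does. The userspace sketch below is not part of the patch; the watched path and the error handling are illustrative only. It shows a reader observing IN_IGNORED after inotify_rm_watch().

/* Minimal sketch, assuming "/tmp" exists and is watchable. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	const struct inotify_event *ev;
	ssize_t len;
	int fd, wd;

	fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return EXIT_FAILURE;
	}

	/* "/tmp" is just an example path; any watchable directory works. */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}

	/* Removing the watch makes the kernel queue IN_IGNORED for this wd. */
	if (inotify_rm_watch(fd, wd) < 0) {
		perror("inotify_rm_watch");
		return EXIT_FAILURE;
	}

	len = read(fd, buf, sizeof(buf));
	if (len >= (ssize_t)sizeof(*ev)) {
		ev = (const struct inotify_event *)buf;
		if (ev->mask & IN_IGNORED)
			printf("got IN_IGNORED for wd %d\n", ev->wd);
	}

	close(fd);
	return EXIT_SUCCESS;
}

Because the IN_IGNORED event is queued during the inotify_rm_watch() call itself (via the fsnotify destroy path shown in the hunk above), the read() here should find it already pending rather than blocking.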