author	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-21 19:06:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-21 19:06:53 -0400
commit	d725e66c06ab440032f49ef17e960896d0ec6d49 (patch)
tree	7d071492f1ab656913be0c892497d3204928ce0a
parent	71ebd1af094db1c72d69505a27dfecea99c2cb0b (diff)
Revert "fsnotify: fix oops in fsnotify_clear_marks_by_group_flags()"
This reverts commit a2673b6e040663bf16a552f8619e6bde9f4b9acf.

Kinglong Mee reports a memory leak with that patch, and Jan Kara confirms:

 "Thanks for report! You are right that my patch introduces a race between
  fsnotify kthread and fsnotify_destroy_group() which can result in leaking
  inotify event on group destruction.

  I haven't yet decided whether the right fix is not to queue events for
  dying notification group (as that is pointless anyway) or whether we
  should just fix the original problem differently... Whenever I look at
  fsnotify code mark handling I get lost in the maze of locks, lists, and
  subtle differences between how different notification systems handle
  notification marks :( I'll think about it over night"

and after thinking about it, Jan says:

 "OK, I have looked into the code some more and I found another relatively
  simple way of fixing the original oops. It will be IMHO better than
  trying to fixup this issue which has more potential for breakage. I'll
  ask Linus to revert the fsnotify fix he already merged and send a new
  fix"

Reported-by: Kinglong Mee <kinglongmee@gmail.com>
Requested-by: Jan Kara <jack@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
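For context on the leak Jan describes, here is a stand-alone user-space sketch (my own illustration, not kernel code; struct group, queue_event() and destroy_worker() are hypothetical stand-ins): a deferred worker queues one final event after the group's queue has already been flushed during destruction, so that allocation is never freed. The real race is between the fsnotify kthread and fsnotify_destroy_group(); this only shows the general shape.

/*
 * Simplified sketch of the reported leak, NOT the kernel code.
 * Compile with: cc -pthread race-sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
};

struct group {
	pthread_mutex_t lock;
	struct event *queue;		/* pending events, singly linked */
};

/* Queue an event; stands in for the deferred freeing_mark() path. */
static void queue_event(struct group *g)
{
	struct event *ev = malloc(sizeof(*ev));

	pthread_mutex_lock(&g->lock);
	ev->next = g->queue;
	g->queue = ev;
	pthread_mutex_unlock(&g->lock);
}

/* Flush and free everything queued; stands in for group teardown. */
static void flush_queue(struct group *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->queue) {
		struct event *ev = g->queue;

		g->queue = ev->next;
		free(ev);
	}
	pthread_mutex_unlock(&g->lock);
}

/* Asynchronous worker: may run before or after the flush in main(). */
static void *destroy_worker(void *arg)
{
	queue_event(arg);
	return NULL;
}

int main(void)
{
	struct group g = { .lock = PTHREAD_MUTEX_INITIALIZER, .queue = NULL };
	pthread_t worker;
	int leaked = 0;

	pthread_create(&worker, NULL, destroy_worker, &g);
	flush_queue(&g);		/* group destruction flushes the queue... */
	pthread_join(worker, NULL);

	/* ...but anything queued after the flush is simply dropped (leaked). */
	for (struct event *ev = g.queue; ev; ev = ev->next)
		leaked++;
	printf("events leaked on group destruction: %d\n", leaked);
	return 0;
}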
-rw-r--r--	fs/notify/mark.c	34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 3e594ce41010..92e48c70f0f0 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -152,15 +152,31 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
 		BUG();
 
 	list_del_init(&mark->g_list);
+
 	spin_unlock(&mark->lock);
 
 	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
 		iput(inode);
+	/* release lock temporarily */
+	mutex_unlock(&group->mark_mutex);
 
 	spin_lock(&destroy_lock);
 	list_add(&mark->g_list, &destroy_list);
 	spin_unlock(&destroy_lock);
 	wake_up(&destroy_waitq);
+	/*
+	 * We don't necessarily have a ref on mark from caller so the above destroy
+	 * may have actually freed it, unless this group provides a 'freeing_mark'
+	 * function which must be holding a reference.
+	 */
+
+	/*
+	 * Some groups like to know that marks are being freed.  This is a
+	 * callback to the group function to let it know that this mark
+	 * is being freed.
+	 */
+	if (group->ops->freeing_mark)
+		group->ops->freeing_mark(mark, group);
 
 	/*
 	 * __fsnotify_update_child_dentry_flags(inode);
@@ -175,6 +191,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
 	 */
 
 	atomic_dec(&group->num_marks);
+
+	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 }
 
 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -187,10 +205,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 
 /*
  * Destroy all marks in the given list. The marks must be already detached from
- * the original inode / vfsmount. Note that we can race with
- * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each
- * mark so they won't get freed from under us and nobody else touches our
- * free_list list_head.
+ * the original inode / vfsmount.
  */
 void fsnotify_destroy_marks(struct list_head *to_free)
 {
@@ -391,7 +406,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
 }
 
 /*
- * Clear any marks in a group in which mark->flags & flags is true.
+ * clear any marks in a group in which mark->flags & flags is true
  */
 void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
@@ -445,7 +460,6 @@ static int fsnotify_mark_destroy(void *ignored)
 {
 	struct fsnotify_mark *mark, *next;
 	struct list_head private_destroy_list;
-	struct fsnotify_group *group;
 
 	for (;;) {
 		spin_lock(&destroy_lock);
@@ -457,14 +471,6 @@ static int fsnotify_mark_destroy(void *ignored)
 
 		list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
 			list_del_init(&mark->g_list);
-			group = mark->group;
-			/*
-			 * Some groups like to know that marks are being freed.
-			 * This is a callback to the group function to let it
-			 * know that this mark is being freed.
-			 */
-			if (group && group->ops->freeing_mark)
-				group->ops->freeing_mark(mark, group);
 			fsnotify_put_mark(mark);
 		}
 
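A side note on the locking this revert restores: fsnotify_destroy_mark_locked() is entered with group->mark_mutex held, drops it around queueing the mark and calling ->freeing_mark(), then re-takes it (via mutex_lock_nested() with SINGLE_DEPTH_NESTING) before returning. A minimal user-space sketch of that drop-and-reacquire shape, with hypothetical simplified names rather than the kernel API:

#include <pthread.h>
#include <stdio.h>

struct group {
	pthread_mutex_t mark_mutex;
	void (*freeing_mark)(void *mark);	/* optional group callback */
};

/*
 * Called with g->mark_mutex held; the mutex is released around the
 * callback and re-acquired before returning, so the caller still owns
 * the lock after the call.
 */
static void destroy_mark_locked(struct group *g, void *mark)
{
	/* release lock temporarily */
	pthread_mutex_unlock(&g->mark_mutex);

	if (g->freeing_mark)
		g->freeing_mark(mark);		/* may block or take other locks */

	/* re-acquire before returning to the caller */
	pthread_mutex_lock(&g->mark_mutex);
}

static void report_freeing(void *mark)
{
	printf("freeing mark at %p\n", mark);
}

int main(void)
{
	struct group g = {
		.mark_mutex = PTHREAD_MUTEX_INITIALIZER,
		.freeing_mark = report_freeing,
	};
	int mark;

	pthread_mutex_lock(&g.mark_mutex);	/* caller takes the lock... */
	destroy_mark_locked(&g, &mark);
	pthread_mutex_unlock(&g.mark_mutex);	/* ...and still holds it here */
	return 0;
}

The practical consequence of this pattern is that anything protected by mark_mutex may change while the callback runs, so callers cannot assume the lock was held continuously across the call.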