author     Jan Kara <jack@suse.com>  2015-08-06 18:46:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-08-06 21:39:41 -0400
commit     8f2f3eb59dff4ec538de55f2e0592fec85966aab (patch)
tree       cbaf734af49bd7c59006ee329c06c759a112c5b1 /fs/notify/mark.c
parent     447f6a95a9c80da7faaec3e66e656eab8f262640 (diff)
fsnotify: fix oops in fsnotify_clear_marks_by_group_flags()
fsnotify_clear_marks_by_group_flags() can race with fsnotify_destroy_marks(): when fsnotify_destroy_mark_locked() drops mark_mutex, a mark from the list being iterated by fsnotify_clear_marks_by_group_flags() can be freed, so the cached next-entry pointer may become stale and we dereference freed memory.

Fix the problem by first moving the marks to be freed onto a private list in one go, and then always freeing only the first entry of that list. This is safe even when entries can disappear from the list once we drop the lock.

Signed-off-by: Jan Kara <jack@suse.com>
Reported-by: Ashish Sangwan <a.sangwan@samsung.com>
Reviewed-by: Ashish Sangwan <a.sangwan@samsung.com>
Cc: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
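For readers outside the kernel tree, the two-pass pattern the fix relies on can be sketched in plain userspace C. The sketch below is illustrative only: it uses a toy singly linked list and a pthread mutex in place of the kernel's list helpers and mark_mutex, and every name in it (mark, marks_list, to_free, clear_marks_by_flags) is invented for the example, not kernel API. One difference to keep in mind: in the kernel, marks on the private to_free list remain reachable through their g_list linkage, so racing paths can still remove them while mark_mutex is dropped; that is why only the first entry is trusted after each re-lock. The sketch mirrors that locking discipline even though its private list is genuinely private.

/*
 * Userspace sketch (illustrative, NOT kernel code) of the two-pass drain
 * pattern from the patch: under the lock, move every matching entry to a
 * private list in one go; then repeatedly re-take the lock and detach only
 * the first entry, doing the actual free with the lock dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mark {                           /* toy stand-in for struct fsnotify_mark */
        unsigned int flags;
        struct mark *next;
};

static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct mark *marks_list;         /* stand-in for group->marks_list */

static void clear_marks_by_flags(unsigned int flags)
{
        struct mark *to_free = NULL;    /* private list of entries to free */
        struct mark **pp, *m;

        /* Pass 1: unlink all matching marks onto to_free in one pass. */
        pthread_mutex_lock(&mark_mutex);
        for (pp = &marks_list; (m = *pp) != NULL; ) {
                if (m->flags & flags) {
                        *pp = m->next;          /* unlink from the shared list */
                        m->next = to_free;      /* push onto the private list  */
                        to_free = m;
                } else {
                        pp = &m->next;
                }
        }
        pthread_mutex_unlock(&mark_mutex);

        /*
         * Pass 2: free entries one by one, trusting only the list head after
         * each re-lock. (Here to_free is a local, so the locking is strictly
         * ritual; in the kernel other paths can still shrink the list while
         * mark_mutex is dropped, which is what makes this discipline needed.)
         */
        for (;;) {
                pthread_mutex_lock(&mark_mutex);
                if (!to_free) {
                        pthread_mutex_unlock(&mark_mutex);
                        break;
                }
                m = to_free;
                to_free = m->next;
                pthread_mutex_unlock(&mark_mutex);
                free(m);                /* the free itself runs unlocked */
        }
}

int main(void)
{
        /* Build six marks with flags alternating between 2 and 1. */
        for (int i = 0; i < 6; i++) {
                struct mark *m = malloc(sizeof(*m));
                if (!m)
                        return 1;
                m->flags = (i % 2) ? 1u : 2u;
                m->next = marks_list;
                marks_list = m;
        }

        clear_marks_by_flags(1);        /* frees the three flags==1 marks */

        for (struct mark *m = marks_list; m; m = m->next)
                printf("survivor flags=%u\n", m->flags);

        while (marks_list) {            /* clean up the survivors */
                struct mark *m = marks_list;
                marks_list = m->next;
                free(m);
        }
        return 0;
}

The design point carried over from the patch is that the only pointer read after re-acquiring the lock is the list head itself, so no cached next pointer is ever held across an unlocked free.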
Diffstat (limited to 'fs/notify/mark.c')
-rw-r--r--  fs/notify/mark.c  30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
 {
 	struct fsnotify_mark *lmark, *mark;
+	LIST_HEAD(to_free);
 
+	/*
+	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
+	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
+	 * to_free list so we have to use mark_mutex even when accessing that
+	 * list. And freeing mark requires us to drop mark_mutex. So we can
+	 * reliably free only the first mark in the list. That's why we first
+	 * move marks to free to to_free list in one go and then free marks in
+	 * to_free list one by one.
+	 */
 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-		if (mark->flags & flags) {
-			fsnotify_get_mark(mark);
-			fsnotify_destroy_mark_locked(mark, group);
-			fsnotify_put_mark(mark);
-		}
+		if (mark->flags & flags)
+			list_move(&mark->g_list, &to_free);
 	}
 	mutex_unlock(&group->mark_mutex);
+
+	while (1) {
+		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+		if (list_empty(&to_free)) {
+			mutex_unlock(&group->mark_mutex);
+			break;
+		}
+		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+		fsnotify_get_mark(mark);
+		fsnotify_destroy_mark_locked(mark, group);
+		mutex_unlock(&group->mark_mutex);
+		fsnotify_put_mark(mark);
+	}
 }
 
 /*