Diffstat (limited to 'fs/notify/mark.c')
-rw-r--r--  fs/notify/mark.c  78
1 file changed, 61 insertions(+), 17 deletions(-)
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 7115c5d7d373..d3fea0bd89e2 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -97,8 +97,8 @@ struct srcu_struct fsnotify_mark_srcu;
 static DEFINE_SPINLOCK(destroy_lock);
 static LIST_HEAD(destroy_list);
 
-static void fsnotify_mark_destroy(struct work_struct *work);
-static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);
+static void fsnotify_mark_destroy_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
@@ -173,11 +173,15 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 }
 
 /*
- * Free fsnotify mark. The freeing is actually happening from a kthread which
- * first waits for srcu period end. Caller must have a reference to the mark
- * or be protected by fsnotify_mark_srcu.
+ * Prepare mark for freeing and add it to the list of marks prepared for
+ * freeing. The actual freeing must happen after SRCU period ends and the
+ * caller is responsible for this.
+ *
+ * The function returns true if the mark was added to the list of marks for
+ * freeing. The function returns false if someone else has already called
+ * __fsnotify_free_mark() for the mark.
  */
-void fsnotify_free_mark(struct fsnotify_mark *mark)
+static bool __fsnotify_free_mark(struct fsnotify_mark *mark)
 {
 	struct fsnotify_group *group = mark->group;
 
@@ -185,17 +189,11 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
 	/* something else already called this function on this mark */
 	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
 		spin_unlock(&mark->lock);
-		return;
+		return false;
 	}
 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
 	spin_unlock(&mark->lock);
 
-	spin_lock(&destroy_lock);
-	list_add(&mark->g_list, &destroy_list);
-	spin_unlock(&destroy_lock);
-	queue_delayed_work(system_unbound_wq, &reaper_work,
-			   FSNOTIFY_REAPER_DELAY);
-
 	/*
 	 * Some groups like to know that marks are being freed. This is a
 	 * callback to the group function to let it know that this mark
@@ -203,6 +201,25 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
 	 */
 	if (group->ops->freeing_mark)
 		group->ops->freeing_mark(mark, group);
+
+	spin_lock(&destroy_lock);
+	list_add(&mark->g_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+
+	return true;
+}
+
+/*
+ * Free fsnotify mark. The freeing is actually happening from a workqueue which
+ * first waits for srcu period end. Caller must have a reference to the mark
+ * or be protected by fsnotify_mark_srcu.
+ */
+void fsnotify_free_mark(struct fsnotify_mark *mark)
+{
+	if (__fsnotify_free_mark(mark)) {
+		queue_delayed_work(system_unbound_wq, &reaper_work,
+				   FSNOTIFY_REAPER_DELAY);
+	}
 }
 
 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
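
Note on the two hunks above: fsnotify_free_mark() becomes a thin wrapper. __fsnotify_free_mark() clears FSNOTIFY_MARK_FLAG_ALIVE, runs the group's freeing_mark callback and queues the mark on destroy_list, while only the wrapper decides whether to kick reaper_work. For orientation, the sketch below approximates the drain side that the "workqueue which first waits for srcu period end" comment refers to. The helper name example_reap_destroy_list() is hypothetical and the body is only an illustration of the pattern, not the exact upstream fsnotify_mark_destroy_list(), whose full body is not shown in this diff.

/*
 * Illustrative sketch only: marks queued by __fsnotify_free_mark() may
 * still be reachable by fsnotify_mark_srcu readers, so the grace period
 * must end before the list's references are dropped.
 */
static void example_reap_destroy_list(void)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	/* Grab the current batch so new marks can keep being queued. */
	spin_lock(&destroy_lock);
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	/* After this returns, no SRCU reader can still see these marks. */
	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_put_mark(mark);
	}
}
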
@@ -468,11 +485,29 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 }
 
 /*
- * Given a group, destroy all of the marks associated with that group.
+ * Given a group, prepare for freeing all the marks associated with that group.
+ * The marks are attached to the list of marks prepared for destruction, the
+ * caller is responsible for freeing marks in that list after SRCU period has
+ * ended.
  */
-void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
+void fsnotify_detach_group_marks(struct fsnotify_group *group)
 {
-	fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
+	struct fsnotify_mark *mark;
+
+	while (1) {
+		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+		if (list_empty(&group->marks_list)) {
+			mutex_unlock(&group->mark_mutex);
+			break;
+		}
+		mark = list_first_entry(&group->marks_list,
+					struct fsnotify_mark, g_list);
+		fsnotify_get_mark(mark);
+		fsnotify_detach_mark(mark);
+		mutex_unlock(&group->mark_mutex);
+		__fsnotify_free_mark(mark);
+		fsnotify_put_mark(mark);
+	}
 }
 
 void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
@@ -499,7 +534,11 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 	mark->free_mark = free_mark;
 }
 
-static void fsnotify_mark_destroy(struct work_struct *work)
+/*
+ * Destroy all marks in destroy_list, waits for SRCU period to finish before
+ * actually freeing marks.
+ */
+void fsnotify_mark_destroy_list(void)
 {
 	struct fsnotify_mark *mark, *next;
 	struct list_head private_destroy_list;
@@ -516,3 +555,8 @@ static void fsnotify_mark_destroy(struct work_struct *work)
 		fsnotify_put_mark(mark);
 	}
 }
+
+static void fsnotify_mark_destroy_workfn(struct work_struct *work)
+{
+	fsnotify_mark_destroy_list();
+}
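
The two new entry points are meant to be driven from group teardown: detach all of a group's marks first, then free the whole batch once the SRCU grace period has passed inside fsnotify_mark_destroy_list() (its comment above states it does the waiting itself). The sketch below shows how a caller would be expected to combine them; the function name example_destroy_group_marks() and the surrounding teardown context are assumptions, not part of this diff.

/*
 * Hypothetical caller-side sketch (not part of this diff).
 */
static void example_destroy_group_marks(struct fsnotify_group *group)
{
	/* Move all of the group's marks onto destroy_list. */
	fsnotify_detach_group_marks(group);

	/*
	 * Waits for fsnotify_mark_srcu readers itself, so every detached
	 * mark has been freed by the time this returns.
	 */
	fsnotify_mark_destroy_list();
}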