 fs/notify/notification.c | 41 +++++++++++++----------------------------
 1 file changed, 13 insertions(+), 28 deletions(-)
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index b493c378445f..dafd0b7687b8 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -289,43 +289,28 @@ static void initialize_event(struct fsnotify_event *event)
 
 /*
  * Caller damn well better be holding whatever mutex is protecting the
- * old_holder->event_list.
+ * old_holder->event_list and the new_event must be a clean event which
+ * cannot be found anywhere else in the kernel.
  */
 int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
 			   struct fsnotify_event *new_event)
 {
 	struct fsnotify_event *old_event = old_holder->event;
-	struct fsnotify_event_holder *new_holder = NULL;
+	struct fsnotify_event_holder *new_holder = &new_event->holder;
+
+	enum event_spinlock_class {
+		SPINLOCK_OLD,
+		SPINLOCK_NEW,
+	};
 
 	/*
-	 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
-	 * Check if we expect to be able to use that holder.  If not alloc a new
-	 * holder.
-	 * For the overflow event it's possible that something will use the in
-	 * event holder before we get the lock so we may need to jump back and
-	 * alloc a new holder, this can't happen for most events...
+	 * if the new_event's embedded holder is in use someone
+	 * screwed up and didn't give us a clean new event.
 	 */
-	if (!list_empty(&new_event->holder.event_list)) {
-alloc_holder:
-		new_holder = fsnotify_alloc_event_holder();
-		if (!new_holder)
-			return -ENOMEM;
-	}
+	BUG_ON(!list_empty(&new_holder->event_list));
 
-	spin_lock(&old_event->lock);
-	spin_lock(&new_event->lock);
-
-	if (list_empty(&new_event->holder.event_list)) {
-		if (unlikely(new_holder))
-			fsnotify_destroy_event_holder(new_holder);
-		new_holder = &new_event->holder;
-	} else if (unlikely(!new_holder)) {
-		/* between the time we checked above and got the lock the in
-		 * event holder was used, go back and get a new one */
-		spin_unlock(&new_event->lock);
-		spin_unlock(&old_event->lock);
-		goto alloc_holder;
-	}
+	spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
+	spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
 
 	new_holder->event = new_event;
 	list_replace_init(&old_holder->event_list, &new_holder->event_list);
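The two spin_lock_nested() calls exist because old_event->lock and new_event->lock belong to the same lock class, so taking them back-to-back with plain spin_lock() would look like recursive locking to lockdep. Below is a minimal sketch of that annotation pattern, not part of the patch: the example_* names are made up for illustration; only spin_lock_nested(), spin_unlock() and spinlock_t are real kernel APIs.

#include <linux/spinlock.h>

/* Hypothetical stand-in for struct fsnotify_event; illustration only. */
struct example_event {
	spinlock_t lock;	/* every instance shares one lock class */
};

/* Mirrors the SPINLOCK_OLD / SPINLOCK_NEW subclasses added by the patch. */
enum example_spinlock_class {
	EXAMPLE_SPINLOCK_OLD,
	EXAMPLE_SPINLOCK_NEW,
};

/*
 * Take both locks with distinct lockdep subclasses so the second
 * acquisition of the same lock class is not reported as a deadlock.
 */
static void example_lock_pair(struct example_event *old_ev,
			      struct example_event *new_ev)
{
	spin_lock_nested(&old_ev->lock, EXAMPLE_SPINLOCK_OLD);
	spin_lock_nested(&new_ev->lock, EXAMPLE_SPINLOCK_NEW);

	/* ... transfer state from old_ev to new_ev under both locks ... */

	spin_unlock(&new_ev->lock);
	spin_unlock(&old_ev->lock);
}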