Diffstat (limited to 'fs/notify/inotify/inotify_user.c')
-rw-r--r--  fs/notify/inotify/inotify_user.c | 109
1 file changed, 71 insertions(+), 38 deletions(-)
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ff27a2965844..f30d9bbc2e1b 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -57,7 +57,6 @@ int inotify_max_user_watches __read_mostly;
 
 static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 struct kmem_cache *event_priv_cachep __read_mostly;
-static struct fsnotify_event *inotify_ignored_event;
 
 /*
  * When inotify registers a new group it increments this and uses that
@@ -365,6 +364,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
 	return error;
 }
 
+static void inotify_remove_from_idr(struct fsnotify_group *group,
+				    struct inotify_inode_mark_entry *ientry)
+{
+	struct idr *idr;
+
+	spin_lock(&group->inotify_data.idr_lock);
+	idr = &group->inotify_data.idr;
+	idr_remove(idr, ientry->wd);
+	spin_unlock(&group->inotify_data.idr_lock);
+	ientry->wd = -1;
+}
 /*
  * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
  * internal reference help on the mark because it is in the idr.
@@ -373,13 +383,19 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 				    struct fsnotify_group *group)
 {
 	struct inotify_inode_mark_entry *ientry;
+	struct fsnotify_event *ignored_event;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
-	struct idr *idr;
+
+	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
+					      FSNOTIFY_EVENT_NONE, NULL, 0,
+					      GFP_NOFS);
+	if (!ignored_event)
+		return;
 
 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
-	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
+	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
 	if (unlikely(!event_priv))
 		goto skip_send_ignore;
 
@@ -388,7 +404,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 	fsn_event_priv->group = group;
 	event_priv->wd = ientry->wd;
 
-	fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
+	fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
 
 	/* did the private data get added? */
 	if (list_empty(&fsn_event_priv->event_list))
@@ -396,14 +412,16 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 
 skip_send_ignore:
 
+	/* matches the reference taken when the event was created */
+	fsnotify_put_event(ignored_event);
+
 	/* remove this entry from the idr */
-	spin_lock(&group->inotify_data.idr_lock);
-	idr = &group->inotify_data.idr;
-	idr_remove(idr, ientry->wd);
-	spin_unlock(&group->inotify_data.idr_lock);
+	inotify_remove_from_idr(group, ientry);
 
 	/* removed from idr, drop that reference */
 	fsnotify_put_mark(entry);
+
+	atomic_dec(&group->inotify_data.user->inotify_watches);
 }
 
 /* ding dong the mark is dead */
@@ -418,6 +436,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 {
 	struct fsnotify_mark_entry *entry = NULL;
 	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark_entry *tmp_ientry;
 	int ret = 0;
 	int add = (arg & IN_MASK_ADD);
 	__u32 mask;
@@ -428,54 +447,66 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 	if (unlikely(!mask))
 		return -EINVAL;
 
-	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!ientry))
+	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_ientry))
 		return -ENOMEM;
 	/* we set the mask at the end after attaching it */
-	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
-	ientry->wd = 0;
+	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+	tmp_ientry->wd = -1;
 
 find_entry:
 	spin_lock(&inode->i_lock);
 	entry = fsnotify_find_mark_entry(group, inode);
 	spin_unlock(&inode->i_lock);
 	if (entry) {
-		kmem_cache_free(inotify_inode_mark_cachep, ientry);
 		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 	} else {
-		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
-			ret = -ENOSPC;
-			goto out_err;
-		}
-
-		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
-		if (ret == -EEXIST)
-			goto find_entry;
-		else if (ret)
+		ret = -ENOSPC;
+		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
 			goto out_err;
-
-		entry = &ientry->fsn_entry;
 retry:
 		ret = -ENOMEM;
 		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
 			goto out_err;
 
 		spin_lock(&group->inotify_data.idr_lock);
-		/* if entry is added to the idr we keep the reference obtained
-		 * through fsnotify_mark_add. remember to drop this reference
-		 * when entry is removed from idr */
-		ret = idr_get_new_above(&group->inotify_data.idr, entry,
-					++group->inotify_data.last_wd,
-					&ientry->wd);
+		ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+					group->inotify_data.last_wd,
+					&tmp_ientry->wd);
 		spin_unlock(&group->inotify_data.idr_lock);
 		if (ret) {
 			if (ret == -EAGAIN)
 				goto retry;
 			goto out_err;
 		}
+
+		ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+		if (ret) {
+			inotify_remove_from_idr(group, tmp_ientry);
+			if (ret == -EEXIST)
+				goto find_entry;
+			goto out_err;
+		}
+
+		/* tmp_ientry has been added to the inode, so we are all set up.
+		 * now we just need to make sure tmp_ientry doesn't get freed and
+		 * we need to set up entry and ientry so the generic code can
+		 * do its thing. */
+		ientry = tmp_ientry;
+		entry = &ientry->fsn_entry;
+		tmp_ientry = NULL;
+
 		atomic_inc(&group->inotify_data.user->inotify_watches);
+
+		/* update the idr hint */
+		group->inotify_data.last_wd = ientry->wd;
+
+		/* we put the mark on the idr, take a reference */
+		fsnotify_get_mark(entry);
 	}
 
+	ret = ientry->wd;
+
 	spin_lock(&entry->lock);
 
 	old_mask = entry->mask;
@@ -506,14 +537,19 @@ retry:
 		fsnotify_recalc_group_mask(group);
 	}
 
-	return ientry->wd;
+	/* this either matches fsnotify_find_mark_entry, or init_mark_entry
+	 * depending on which path we took... */
+	fsnotify_put_mark(entry);
 
 out_err:
-	/* see this isn't supposed to happen, just kill the watch */
-	if (entry) {
-		fsnotify_destroy_mark_by_entry(entry);
-		fsnotify_put_mark(entry);
+	/* could be an error, could be that we found an existing mark */
+	if (tmp_ientry) {
+		/* on the idr but didn't make it on the inode */
+		if (tmp_ientry->wd != -1)
+			inotify_remove_from_idr(group, tmp_ientry);
+		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
 	}
+
 	return ret;
 }
 
519 555
@@ -721,9 +757,6 @@ static int __init inotify_user_setup(void)
 
 	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
-	inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
-	if (!inotify_ignored_event)
-		panic("unable to allocate the inotify ignored event\n");
 
 	inotify_max_queued_events = 16384;
 	inotify_max_user_instances = 128;