aboutsummaryrefslogtreecommitdiffstats
path: root/fs/notify
diff options
context:
space:
mode:
authorEric Paris <eparis@redhat.com>2009-08-24 16:03:35 -0400
committerEric Paris <eparis@redhat.com>2009-08-27 08:02:04 -0400
commit52cef7555adf5ca09b3b7283097466759120d901 (patch)
tree641ddd087f6effe88fad2e3c0b5d744e9d71fdda /fs/notify
parent1e23502cc57cef33455ac7cb9111e3c6d991a894 (diff)
inotify: separate new watch creation from updating existing watches
There is nothing known wrong with the inotify watch addition/modification but this patch separates the two code paths to make them each easy to verify as correct. Signed-off-by: Eric Paris <eparis@redhat.com>
Diffstat (limited to 'fs/notify')
-rw-r--r--fs/notify/inotify/inotify_user.c172
1 file changed, 103 insertions, 69 deletions
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index dc32ed8323ba..d8f73c253073 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -431,80 +431,29 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
431 kmem_cache_free(inotify_inode_mark_cachep, ientry); 431 kmem_cache_free(inotify_inode_mark_cachep, ientry);
432} 432}
433 433
434static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) 434static int inotify_update_existing_watch(struct fsnotify_group *group,
435 struct inode *inode,
436 u32 arg)
435{ 437{
436 struct fsnotify_mark_entry *entry = NULL; 438 struct fsnotify_mark_entry *entry;
437 struct inotify_inode_mark_entry *ientry; 439 struct inotify_inode_mark_entry *ientry;
438 struct inotify_inode_mark_entry *tmp_ientry;
439 int ret = 0;
440 int add = (arg & IN_MASK_ADD);
441 __u32 mask;
442 __u32 old_mask, new_mask; 440 __u32 old_mask, new_mask;
441 __u32 mask;
442 int add = (arg & IN_MASK_ADD);
443 int ret;
443 444
444 /* don't allow invalid bits: we don't want flags set */ 445 /* don't allow invalid bits: we don't want flags set */
445 mask = inotify_arg_to_mask(arg); 446 mask = inotify_arg_to_mask(arg);
446 if (unlikely(!mask)) 447 if (unlikely(!mask))
447 return -EINVAL; 448 return -EINVAL;
448 449
449 tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
450 if (unlikely(!tmp_ientry))
451 return -ENOMEM;
452 /* we set the mask at the end after attaching it */
453 fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
454 tmp_ientry->wd = -1;
455
456find_entry:
457 spin_lock(&inode->i_lock); 450 spin_lock(&inode->i_lock);
458 entry = fsnotify_find_mark_entry(group, inode); 451 entry = fsnotify_find_mark_entry(group, inode);
459 spin_unlock(&inode->i_lock); 452 spin_unlock(&inode->i_lock);
460 if (entry) { 453 if (!entry)
461 ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); 454 return -ENOENT;
462 } else {
463 ret = -ENOSPC;
464 if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
465 goto out_err;
466retry:
467 ret = -ENOMEM;
468 if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
469 goto out_err;
470
471 spin_lock(&group->inotify_data.idr_lock);
472 ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
473 group->inotify_data.last_wd,
474 &tmp_ientry->wd);
475 spin_unlock(&group->inotify_data.idr_lock);
476 if (ret) {
477 if (ret == -EAGAIN)
478 goto retry;
479 goto out_err;
480 }
481 455
482 ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); 456 ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
483 if (ret) {
484 inotify_remove_from_idr(group, tmp_ientry);
485 if (ret == -EEXIST)
486 goto find_entry;
487 goto out_err;
488 }
489
490 /* tmp_ientry has been added to the inode, so we are all set up.
491 * now we just need to make sure tmp_ientry doesn't get freed and
492 * we need to set up entry and ientry so the generic code can
493 * do its thing. */
494 ientry = tmp_ientry;
495 entry = &ientry->fsn_entry;
496 tmp_ientry = NULL;
497
498 atomic_inc(&group->inotify_data.user->inotify_watches);
499
500 /* update the idr hint */
501 group->inotify_data.last_wd = ientry->wd;
502
503 /* we put the mark on the idr, take a reference */
504 fsnotify_get_mark(entry);
505 }
506
507 ret = ientry->wd;
508 457
509 spin_lock(&entry->lock); 458 spin_lock(&entry->lock);
510 459
@@ -536,18 +485,103 @@ retry:
536 fsnotify_recalc_group_mask(group); 485 fsnotify_recalc_group_mask(group);
537 } 486 }
538 487
539 /* this either matches fsnotify_find_mark_entry, or init_mark_entry 488 /* return the wd */
540 * depending on which path we took... */ 489 ret = ientry->wd;
490
491 /* match the get from fsnotify_find_mark_entry() */
541 fsnotify_put_mark(entry); 492 fsnotify_put_mark(entry);
542 493
494 return ret;
495}
496
497static int inotify_new_watch(struct fsnotify_group *group,
498 struct inode *inode,
499 u32 arg)
500{
501 struct inotify_inode_mark_entry *tmp_ientry;
502 __u32 mask;
503 int ret;
504
505 /* don't allow invalid bits: we don't want flags set */
506 mask = inotify_arg_to_mask(arg);
507 if (unlikely(!mask))
508 return -EINVAL;
509
510 tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
511 if (unlikely(!tmp_ientry))
512 return -ENOMEM;
513
514 fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
515 tmp_ientry->fsn_entry.mask = mask;
516 tmp_ientry->wd = -1;
517
518 ret = -ENOSPC;
519 if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
520 goto out_err;
521retry:
522 ret = -ENOMEM;
523 if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
524 goto out_err;
525
526 spin_lock(&group->inotify_data.idr_lock);
527 ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
528 group->inotify_data.last_wd,
529 &tmp_ientry->wd);
530 spin_unlock(&group->inotify_data.idr_lock);
531 if (ret) {
532 /* idr was out of memory allocate and try again */
533 if (ret == -EAGAIN)
534 goto retry;
535 goto out_err;
536 }
537
538 /* we are on the idr, now get on the inode */
539 ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
540 if (ret) {
541 /* we failed to get on the inode, get off the idr */
542 inotify_remove_from_idr(group, tmp_ientry);
543 goto out_err;
544 }
545
546 /* we put the mark on the idr, take a reference */
547 fsnotify_get_mark(&tmp_ientry->fsn_entry);
548
549 /* update the idr hint, who cares about races, it's just a hint */
550 group->inotify_data.last_wd = tmp_ientry->wd;
551
552 /* increment the number of watches the user has */
553 atomic_inc(&group->inotify_data.user->inotify_watches);
554
555 /* return the watch descriptor for this new entry */
556 ret = tmp_ientry->wd;
557
558 /* match the ref from fsnotify_init_markentry() */
559 fsnotify_put_mark(&tmp_ientry->fsn_entry);
560
543out_err: 561out_err:
544 /* could be an error, could be that we found an existing mark */ 562 if (ret < 0)
545 if (tmp_ientry) {
546 /* on the idr but didn't make it on the inode */
547 if (tmp_ientry->wd != -1)
548 inotify_remove_from_idr(group, tmp_ientry);
549 kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry); 563 kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
550 } 564
565 return ret;
566}
567
568static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
569{
570 int ret = 0;
571
572retry:
573 /* try to update and existing watch with the new arg */
574 ret = inotify_update_existing_watch(group, inode, arg);
575 /* no mark present, try to add a new one */
576 if (ret == -ENOENT)
577 ret = inotify_new_watch(group, inode, arg);
578 /*
579 * inotify_new_watch could race with another thread which did an
580 * inotify_new_watch between the update_existing and the add watch
581 * here, go back and try to update an existing mark again.
582 */
583 if (ret == -EEXIST)
584 goto retry;
551 585
552 return ret; 586 return ret;
553} 587}