author	Eric Paris <eparis@redhat.com>	2009-07-07 10:28:24 -0400
committer	Eric Paris <eparis@redhat.com>	2009-07-21 15:26:26 -0400
commit	7e790dd5fc937bc8d2400c30a05e32a9e9eef276 (patch)
tree	a593a3120eb8dfbf3febdf18b6d530376b593859 /fs
parent	75fe2b26394c59c8e16bd7b76f4be5d048103ad1 (diff)
inotify: fix error paths in inotify_update_watch
inotify_update_watch could leave things in a horrid state on a number of error paths. We could try to remove idr entries that didn't exist, we could send an IN_IGNORED to userspace for watches that don't exist, and a bit of other stupidity. Clean these up by doing the idr addition before we put the mark on the inode, since we can clean that up on error and getting off the inode's mark list is hard.

Signed-off-by: Eric Paris <eparis@redhat.com>
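For context, the user-visible contract at stake is small: inotify_add_watch(2) either hands back a watch descriptor or fails with an errno such as ENOSPC or ENOMEM, and IN_IGNORED may only ever be reported for a descriptor the caller actually received. Below is a minimal userspace sketch of that contract; it is illustrative only, not part of this patch, and the watched path is arbitrary.

/* Sketch: a failed inotify_add_watch() returns -1 with errno set and must
 * never later surface as an IN_IGNORED event for a wd the caller never got. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t len;
	int fd, wd;

	fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}

	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0) {
		/* e.g. ENOSPC when max_user_watches is hit: no watch exists,
		 * so no IN_IGNORED may ever follow for it */
		fprintf(stderr, "inotify_add_watch: %s\n", strerror(errno));
		close(fd);
		return 1;
	}

	/* read one batch of events; IN_IGNORED is only valid for a wd we own */
	len = read(fd, buf, sizeof(buf));
	for (ssize_t off = 0; off < len;) {
		struct inotify_event *ev = (struct inotify_event *)(buf + off);

		if (ev->mask & IN_IGNORED)
			printf("wd %d ignored (watch removed)\n", ev->wd);
		off += sizeof(*ev) + ev->len;
	}

	inotify_rm_watch(fd, wd);
	close(fd);
	return 0;
}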
Diffstat (limited to 'fs')
-rw-r--r--	fs/notify/inotify/inotify_user.c	79
1 file changed, 49 insertions(+), 30 deletions(-)
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index aff4214f16c3..726118a5845b 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -365,6 +365,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
 	return error;
 }
 
+static void inotify_remove_from_idr(struct fsnotify_group *group,
+				    struct inotify_inode_mark_entry *ientry)
+{
+	struct idr *idr;
+
+	spin_lock(&group->inotify_data.idr_lock);
+	idr = &group->inotify_data.idr;
+	idr_remove(idr, ientry->wd);
+	spin_unlock(&group->inotify_data.idr_lock);
+	ientry->wd = -1;
+}
 /*
  * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
  * internal reference help on the mark because it is in the idr.
@@ -375,7 +386,6 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 	struct inotify_inode_mark_entry *ientry;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
-	struct idr *idr;
 
 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
@@ -397,10 +407,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 skip_send_ignore:
 
 	/* remove this entry from the idr */
-	spin_lock(&group->inotify_data.idr_lock);
-	idr = &group->inotify_data.idr;
-	idr_remove(idr, ientry->wd);
-	spin_unlock(&group->inotify_data.idr_lock);
+	inotify_remove_from_idr(group, ientry);
 
 	/* removed from idr, drop that reference */
 	fsnotify_put_mark(entry);
@@ -420,6 +427,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 {
 	struct fsnotify_mark_entry *entry = NULL;
 	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark_entry *tmp_ientry;
 	int ret = 0;
 	int add = (arg & IN_MASK_ADD);
 	__u32 mask;
@@ -430,50 +438,60 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 	if (unlikely(!mask))
 		return -EINVAL;
 
-	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!ientry))
+	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_ientry))
 		return -ENOMEM;
 	/* we set the mask at the end after attaching it */
-	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
-	ientry->wd = 0;
+	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+	tmp_ientry->wd = -1;
 
 find_entry:
 	spin_lock(&inode->i_lock);
 	entry = fsnotify_find_mark_entry(group, inode);
 	spin_unlock(&inode->i_lock);
 	if (entry) {
-		kmem_cache_free(inotify_inode_mark_cachep, ientry);
 		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 	} else {
-		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
-			ret = -ENOSPC;
-			goto out_err;
-		}
-
-		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
-		if (ret == -EEXIST)
-			goto find_entry;
-		else if (ret)
+		ret = -ENOSPC;
+		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
 			goto out_err;
-
-		entry = &ientry->fsn_entry;
 retry:
 		ret = -ENOMEM;
 		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
 			goto out_err;
 
 		spin_lock(&group->inotify_data.idr_lock);
-		ret = idr_get_new_above(&group->inotify_data.idr, entry,
-					++group->inotify_data.last_wd,
-					&ientry->wd);
+		ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+					group->inotify_data.last_wd,
+					&tmp_ientry->wd);
 		spin_unlock(&group->inotify_data.idr_lock);
 		if (ret) {
 			if (ret == -EAGAIN)
 				goto retry;
 			goto out_err;
 		}
+
+		ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+		if (ret) {
+			inotify_remove_from_idr(group, tmp_ientry);
+			if (ret == -EEXIST)
+				goto find_entry;
+			goto out_err;
+		}
+
+		/* tmp_ientry has been added to the inode, so we are all set up.
+		 * now we just need to make sure tmp_ientry doesn't get freed and
+		 * we need to set up entry and ientry so the generic code can
+		 * do its thing. */
+		ientry = tmp_ientry;
+		entry = &ientry->fsn_entry;
+		tmp_ientry = NULL;
+
 		atomic_inc(&group->inotify_data.user->inotify_watches);
 
+		/* update the idr hint */
+		group->inotify_data.last_wd = ientry->wd;
+
 		/* we put the mark on the idr, take a reference */
 		fsnotify_get_mark(entry);
 	}
@@ -514,14 +532,15 @@ retry:
 	 * depending on which path we took... */
 	fsnotify_put_mark(entry);
 
-	return ret;
-
 out_err:
-	/* see this isn't supposed to happen, just kill the watch */
-	if (entry) {
-		fsnotify_destroy_mark_by_entry(entry);
-		fsnotify_put_mark(entry);
+	/* could be an error, could be that we found an existing mark */
+	if (tmp_ientry) {
+		/* on the idr but didn't make it on the inode */
+		if (tmp_ientry->wd != -1)
+			inotify_remove_from_idr(group, tmp_ientry);
+		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
 	}
+
 	return ret;
 }
 