author     Nick Piggin <nickpiggin@yahoo.com.au>     2006-03-25 06:07:09 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-03-25 11:22:53 -0500
commit     c32ccd87bfd1414b0aabfcd8dbc7539ad23bcbaa (patch)
tree       612dc637976cbe36e8b72924a1f7bd76e75463fd /fs/inotify.c
parent     bf36b9011e3c5b2739f9da2f6de8a6fa3edded32 (diff)
[PATCH] inotify: lock avoidance with parent watch status in dentry
Previous inotify work avoidance is good when inotify is completely unused,
but it breaks down if even a single watch is in place anywhere in the
system. Robin Holt noticed that udev is one such culprit - it slows down a
512-thread application on a 512-CPU system from 6 seconds to 22 minutes.
Solve this by adding a flag in the dentry that tells inotify whether or not
its parent inode has a watch on it. Event queueing to the parent will skip
taking locks if this flag is cleared. Setting and clearing of this flag on
all child dentries races against event delivery, but this is no worse in
terms of race cases than before, and was shown to be equivalent to always
performing the check. The essential behaviour is that activity occurring
_after_ a watch has been added and _before_ it has been removed will
generate events.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Robert Love <rml@novell.com>
Cc: John McCutchan <ttb@tentacle.dhs.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
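
For readers skimming the patch, here is a minimal, self-contained user-space
sketch of the lock-avoidance idea described above. It is not the kernel code
(that follows in the diff); the names inode_sketch, dentry_sketch,
PARENT_WATCHED and queue_parent_event are hypothetical stand-ins for
inode->inotify_watches, dentry->d_flags, DCACHE_INOTIFY_PARENT_WATCHED and
inotify_dentry_parent_queue_event().

/*
 * Sketch only: each child caches a "parent is watched" flag so the hot
 * event path can bail out with one flag test instead of taking locks.
 */
#include <stdio.h>
#include <stdbool.h>

#define PARENT_WATCHED 0x1  /* stands in for DCACHE_INOTIFY_PARENT_WATCHED */

struct inode_sketch {
	int nr_watches;                 /* stands in for inotify_watches list */
};

struct dentry_sketch {
	unsigned int flags;
	struct inode_sketch *parent_inode;
};

static bool inode_watched(const struct inode_sketch *inode)
{
	return inode->nr_watches != 0;
}

/* Called when a watch is added to or removed from the parent. */
static void set_child_flags(struct dentry_sketch *child, bool watched)
{
	if (watched)
		child->flags |= PARENT_WATCHED;
	else
		child->flags &= ~PARENT_WATCHED;
}

/* Hot path: skip all work (and, in the kernel, all locking) if flag is clear. */
static void queue_parent_event(const struct dentry_sketch *child, const char *name)
{
	if (!(child->flags & PARENT_WATCHED))
		return;                 /* common case: single flag test, no locks */
	printf("event on %s delivered to watched parent\n", name);
}

int main(void)
{
	struct inode_sketch dir = { .nr_watches = 0 };
	struct dentry_sketch file = { .flags = 0, .parent_inode = &dir };

	queue_parent_event(&file, "file");      /* no watch: fast return */

	dir.nr_watches = 1;                     /* "inotify_add_watch" on dir */
	if (inode_watched(file.parent_inode))
		set_child_flags(&file, true);

	queue_parent_event(&file, "file");      /* now an event is delivered */
	return 0;
}

The design point is that the common case - no watch on the parent directory -
costs a single flag test on the dentry itself, while the flag only needs
updating when watches are added or removed, which is rare.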
Diffstat (limited to 'fs/inotify.c')
-rw-r--r--   fs/inotify.c   87
1 file changed, 77 insertions(+), 10 deletions(-)
diff --git a/fs/inotify.c b/fs/inotify.c
index 0ee39ef591c6..a61e93e17853 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -38,7 +38,6 @@
 #include <asm/ioctls.h>
 
 static atomic_t inotify_cookie;
-static atomic_t inotify_watches;
 
 static kmem_cache_t *watch_cachep;
 static kmem_cache_t *event_cachep;
@@ -381,6 +380,48 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
 }
 
 /*
+ * inotify_inode_watched - returns nonzero if there are watches on this inode
+ * and zero otherwise. We call this lockless, we do not care if we race.
+ */
+static inline int inotify_inode_watched(struct inode *inode)
+{
+	return !list_empty(&inode->inotify_watches);
+}
+
+/*
+ * Get child dentry flag into synch with parent inode.
+ * Flag should always be clear for negative dentrys.
+ */
+static void set_dentry_child_flags(struct inode *inode, int watched)
+{
+	struct dentry *alias;
+
+	spin_lock(&dcache_lock);
+	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+		struct dentry *child;
+
+		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
+			if (!child->d_inode) {
+				WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
+				continue;
+			}
+			spin_lock(&child->d_lock);
+			if (watched) {
+				WARN_ON(child->d_flags &
+						DCACHE_INOTIFY_PARENT_WATCHED);
+				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
+			} else {
+				WARN_ON(!(child->d_flags &
+						DCACHE_INOTIFY_PARENT_WATCHED));
+				child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
+			}
+			spin_unlock(&child->d_lock);
+		}
+	}
+	spin_unlock(&dcache_lock);
+}
+
+/*
  * create_watch - creates a watch on the given device.
  *
  * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
@@ -426,7 +467,6 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
 	get_inotify_watch(watch);
 
 	atomic_inc(&dev->user->inotify_watches);
-	atomic_inc(&inotify_watches);
 
 	return watch;
 }
@@ -458,8 +498,10 @@ static void remove_watch_no_event(struct inotify_watch *watch,
 	list_del(&watch->i_list);
 	list_del(&watch->d_list);
 
+	if (!inotify_inode_watched(watch->inode))
+		set_dentry_child_flags(watch->inode, 0);
+
 	atomic_dec(&dev->user->inotify_watches);
-	atomic_dec(&inotify_watches);
 	idr_remove(&dev->idr, watch->wd);
 	put_inotify_watch(watch);
 }
@@ -481,16 +523,39 @@ static void remove_watch(struct inotify_watch *watch,struct inotify_device *dev)
 	remove_watch_no_event(watch, dev);
 }
 
+/* Kernel API */
+
 /*
- * inotify_inode_watched - returns nonzero if there are watches on this inode
- * and zero otherwise. We call this lockless, we do not care if we race.
+ * inotify_d_instantiate - instantiate dcache entry for inode
  */
-static inline int inotify_inode_watched(struct inode *inode)
+void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
 {
-	return !list_empty(&inode->inotify_watches);
+	struct dentry *parent;
+
+	if (!inode)
+		return;
+
+	WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
+	spin_lock(&entry->d_lock);
+	parent = entry->d_parent;
+	if (inotify_inode_watched(parent->d_inode))
+		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
+	spin_unlock(&entry->d_lock);
 }
 
-/* Kernel API */
+/*
+ * inotify_d_move - dcache entry has been moved
+ */
+void inotify_d_move(struct dentry *entry)
+{
+	struct dentry *parent;
+
+	parent = entry->d_parent;
+	if (inotify_inode_watched(parent->d_inode))
+		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
+	else
+		entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
+}
 
 /**
  * inotify_inode_queue_event - queue an event to all watches on this inode
@@ -538,7 +603,7 @@ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
 	struct dentry *parent;
 	struct inode *inode;
 
-	if (!atomic_read (&inotify_watches))
+	if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
 		return;
 
 	spin_lock(&dentry->d_lock);
@@ -993,6 +1058,9 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
 		goto out;
 	}
 
+	if (!inotify_inode_watched(inode))
+		set_dentry_child_flags(inode, 1);
+
 	/* Add the watch to the device's and the inode's list */
 	list_add(&watch->d_list, &dev->watches);
 	list_add(&watch->i_list, &inode->inotify_watches);
@@ -1065,7 +1133,6 @@ static int __init inotify_setup(void)
 	inotify_max_user_watches = 8192;
 
 	atomic_set(&inotify_cookie, 0);
-	atomic_set(&inotify_watches, 0);
 
 	watch_cachep = kmem_cache_create("inotify_watch_cache",
 					 sizeof(struct inotify_watch),