Diffstat (limited to 'fs/inotify.c')

 -rw-r--r--  fs/inotify.c | 116 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 58 insertions(+), 58 deletions(-)

diff --git a/fs/inotify.c b/fs/inotify.c
index 3041503bde02..0ee39ef591c6 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -54,10 +54,10 @@ int inotify_max_queued_events;
  * Lock ordering:
  *
  * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_sem (synchronize shrink_icache_memory())
+ * iprune_mutex (synchronize shrink_icache_memory())
  * inode_lock (protects the super_block->s_inodes list)
- * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
- * inotify_dev->sem (protects inotify_device and watches->d_list)
+ * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
+ * inotify_dev->mutex (protects inode->inotify_watches and watches->d_list)
 */

/*
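The ordering documented above governs every lock pair taken in this file. As a minimal userspace analogue (a sketch using pthreads; the fake_inode/fake_device types and the queue_event_ordered helper are illustrative, not kernel API), "inode lock before device lock" looks like this:

#include <pthread.h>

struct fake_inode  { pthread_mutex_t inotify_mutex; };
struct fake_device { pthread_mutex_t mutex; };

/* Honor the documented order: per-inode lock first, per-device lock
 * second. As long as every thread that needs both locks takes them in
 * this order, no two threads can deadlock on the pair. */
static void queue_event_ordered(struct fake_inode *inode,
                                struct fake_device *dev)
{
        pthread_mutex_lock(&inode->inotify_mutex);
        pthread_mutex_lock(&dev->mutex);
        /* ... touch inode->inotify_watches and dev->events here ... */
        pthread_mutex_unlock(&dev->mutex);
        pthread_mutex_unlock(&inode->inotify_mutex);
}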
@@ -79,12 +79,12 @@ int inotify_max_queued_events; | |||
79 | /* | 79 | /* |
80 | * struct inotify_device - represents an inotify instance | 80 | * struct inotify_device - represents an inotify instance |
81 | * | 81 | * |
82 | * This structure is protected by the semaphore 'sem'. | 82 | * This structure is protected by the mutex 'mutex'. |
83 | */ | 83 | */ |
84 | struct inotify_device { | 84 | struct inotify_device { |
85 | wait_queue_head_t wq; /* wait queue for i/o */ | 85 | wait_queue_head_t wq; /* wait queue for i/o */ |
86 | struct idr idr; /* idr mapping wd -> watch */ | 86 | struct idr idr; /* idr mapping wd -> watch */ |
87 | struct semaphore sem; /* protects this bad boy */ | 87 | struct mutex mutex; /* protects this bad boy */ |
88 | struct list_head events; /* list of queued events */ | 88 | struct list_head events; /* list of queued events */ |
89 | struct list_head watches; /* list of watches */ | 89 | struct list_head watches; /* list of watches */ |
90 | atomic_t count; /* reference count */ | 90 | atomic_t count; /* reference count */ |
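Every remaining hunk in this patch is the same mechanical substitution applied at each use site. The mapping between the two kernel locking APIs is one-to-one for a binary (count == 1) semaphore; a sketch of the pattern (example_mutex is an illustrative name, not from the patch):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);     /* was: a semaphore initialized to 1 */

static void example_critical_section(void)
{
        mutex_lock(&example_mutex);     /* was: down(&sem) */
        /* critical section */
        mutex_unlock(&example_mutex);   /* was: up(&sem)   */
}

Unlike a counting semaphore, a mutex must be unlocked by the task that locked it, which is exactly how every acquisition in this file already behaves; in exchange, the stricter semantics let the kernel enforce the rule with debug checks.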
@@ -101,7 +101,7 @@ struct inotify_device {
  * device. In read(), this list is walked and all events that can fit in the
  * buffer are returned.
  *
- * Protected by dev->sem of the device in which we are queued.
+ * Protected by dev->mutex of the device in which we are queued.
  */
 struct inotify_kernel_event {
        struct inotify_event    event;  /* the user-space event */
@@ -112,8 +112,8 @@ struct inotify_kernel_event {
 /*
  * struct inotify_watch - represents a watch request on a specific inode
  *
- * d_list is protected by dev->sem of the associated watch->dev.
- * i_list and mask are protected by inode->inotify_sem of the associated inode.
+ * d_list is protected by dev->mutex of the associated watch->dev.
+ * i_list and mask are protected by inode->inotify_mutex of the associated inode.
  * dev, inode, and wd are never written to once the watch is created.
  */
 struct inotify_watch {
@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
 /*
  * inotify_dev_get_event - return the next event in the given dev's queue
  *
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
  */
 static inline struct inotify_kernel_event *
 inotify_dev_get_event(struct inotify_device *dev)
@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev)
 /*
  * inotify_dev_queue_event - add a new event to the given device
  *
- * Caller must hold dev->sem. Can sleep (calls kernel_event()).
+ * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
  */
 static void inotify_dev_queue_event(struct inotify_device *dev,
                                     struct inotify_watch *watch, u32 mask,
@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev,
 /*
  * remove_kevent - cleans up and ultimately frees the given kevent
  *
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
  */
 static void remove_kevent(struct inotify_device *dev,
                           struct inotify_kernel_event *kevent)
@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev,
 /*
  * inotify_dev_event_dequeue - destroy an event on the given device
  *
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
  */
 static void inotify_dev_event_dequeue(struct inotify_device *dev)
 {
@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev)
 /*
  * inotify_dev_get_wd - returns the next WD for use by the given dev
  *
- * Callers must hold dev->sem. This function can sleep.
+ * Callers must hold dev->mutex. This function can sleep.
  */
 static int inotify_dev_get_wd(struct inotify_device *dev,
                               struct inotify_watch *watch)
@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
 /*
  * create_watch - creates a watch on the given device.
  *
- * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep.
+ * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
  * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
  */
 static struct inotify_watch *create_watch(struct inotify_device *dev,
@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
 /*
  * inotify_find_dev - find the watch associated with the given inode and dev
  *
- * Callers must hold inode->inotify_sem.
+ * Callers must hold inode->inotify_mutex.
  */
 static struct inotify_watch *inode_find_dev(struct inode *inode,
                                             struct inotify_device *dev)
@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
  * the IN_IGNORED event to the given device signifying that the inode is no
  * longer watched.
  *
- * Callers must hold both inode->inotify_sem and dev->sem. We drop a
+ * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
  * reference to the inode before returning.
  *
  * The inode is not iput() so as to remain atomic. If the inode needs to be
@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
        if (!inotify_inode_watched(inode))
                return;

-       down(&inode->inotify_sem);
+       mutex_lock(&inode->inotify_mutex);
        list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
                u32 watch_mask = watch->mask;
                if (watch_mask & mask) {
                        struct inotify_device *dev = watch->dev;
                        get_inotify_watch(watch);
-                       down(&dev->sem);
+                       mutex_lock(&dev->mutex);
                        inotify_dev_queue_event(dev, watch, mask, cookie, name);
                        if (watch_mask & IN_ONESHOT)
                                remove_watch_no_event(watch, dev);
-                       up(&dev->sem);
+                       mutex_unlock(&dev->mutex);
                        put_inotify_watch(watch);
                }
        }
-       up(&inode->inotify_sem);
+       mutex_unlock(&inode->inotify_mutex);
 }
 EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

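The function patched above is the entry point that VFS-side event hooks funnel into. A hedged sketch of a caller, modeled on the fsnotify-style inline helpers of this kernel era (the name example_fsnotify_modify is illustrative, not from the patch):

static inline void example_fsnotify_modify(struct inode *inode)
{
        /* Locking is internal to inotify_inode_queue_event(): it takes
         * inode->inotify_mutex, then each watch's dev->mutex, in the
         * documented order. Callers pass only the inode and event mask. */
        inotify_inode_queue_event(inode, IN_MODIFY, 0, NULL);
}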
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie);
  * @list: list of inodes being unmounted (sb->s_inodes)
  *
  * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
+ * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
  * We temporarily drop inode_lock, however, and CAN block.
  */
 void inotify_unmount_inodes(struct list_head *list)
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list)
                 * We can safely drop inode_lock here because we hold
                 * references on both inode and next_i. Also no new inodes
                 * will be added since the umount has begun. Finally,
-                * iprune_sem keeps shrink_icache_memory() away.
+                * iprune_mutex keeps shrink_icache_memory() away.
                 */
                spin_unlock(&inode_lock);

@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list)
                        iput(need_iput_tmp);

                /* for each watch, send IN_UNMOUNT and then remove it */
-               down(&inode->inotify_sem);
+               mutex_lock(&inode->inotify_mutex);
                watches = &inode->inotify_watches;
                list_for_each_entry_safe(watch, next_w, watches, i_list) {
                        struct inotify_device *dev = watch->dev;
-                       down(&dev->sem);
+                       mutex_lock(&dev->mutex);
                        inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL);
                        remove_watch(watch, dev);
-                       up(&dev->sem);
+                       mutex_unlock(&dev->mutex);
                }
-               up(&inode->inotify_sem);
+               mutex_unlock(&inode->inotify_mutex);
                iput(inode);

                spin_lock(&inode_lock);
@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode)
 {
        struct inotify_watch *watch, *next;

-       down(&inode->inotify_sem);
+       mutex_lock(&inode->inotify_mutex);
        list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
                struct inotify_device *dev = watch->dev;
-               down(&dev->sem);
+               mutex_lock(&dev->mutex);
                remove_watch(watch, dev);
-               up(&dev->sem);
+               mutex_unlock(&dev->mutex);
        }
-       up(&inode->inotify_sem);
+       mutex_unlock(&inode->inotify_mutex);
 }
 EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
        int ret = 0;

        poll_wait(file, &dev->wq, wait);
-       down(&dev->sem);
+       mutex_lock(&dev->mutex);
        if (!list_empty(&dev->events))
                ret = POLLIN | POLLRDNORM;
-       up(&dev->sem);
+       mutex_unlock(&dev->mutex);

        return ret;
 }
@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,

                prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

-               down(&dev->sem);
+               mutex_lock(&dev->mutex);
                events = !list_empty(&dev->events);
-               up(&dev->sem);
+               mutex_unlock(&dev->mutex);
                if (events) {
                        ret = 0;
                        break;
@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
        if (ret)
                return ret;

-       down(&dev->sem);
+       mutex_lock(&dev->mutex);
        while (1) {
                struct inotify_kernel_event *kevent;

@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,

                remove_kevent(dev, kevent);
        }
-       up(&dev->sem);
+       mutex_unlock(&dev->mutex);

        return ret;
 }
@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file) | |||
763 | * Destroy all of the watches on this device. Unfortunately, not very | 763 | * Destroy all of the watches on this device. Unfortunately, not very |
764 | * pretty. We cannot do a simple iteration over the list, because we | 764 | * pretty. We cannot do a simple iteration over the list, because we |
765 | * do not know the inode until we iterate to the watch. But we need to | 765 | * do not know the inode until we iterate to the watch. But we need to |
766 | * hold inode->inotify_sem before dev->sem. The following works. | 766 | * hold inode->inotify_mutex before dev->mutex. The following works. |
767 | */ | 767 | */ |
768 | while (1) { | 768 | while (1) { |
769 | struct inotify_watch *watch; | 769 | struct inotify_watch *watch; |
770 | struct list_head *watches; | 770 | struct list_head *watches; |
771 | struct inode *inode; | 771 | struct inode *inode; |
772 | 772 | ||
773 | down(&dev->sem); | 773 | mutex_lock(&dev->mutex); |
774 | watches = &dev->watches; | 774 | watches = &dev->watches; |
775 | if (list_empty(watches)) { | 775 | if (list_empty(watches)) { |
776 | up(&dev->sem); | 776 | mutex_unlock(&dev->mutex); |
777 | break; | 777 | break; |
778 | } | 778 | } |
779 | watch = list_entry(watches->next, struct inotify_watch, d_list); | 779 | watch = list_entry(watches->next, struct inotify_watch, d_list); |
780 | get_inotify_watch(watch); | 780 | get_inotify_watch(watch); |
781 | up(&dev->sem); | 781 | mutex_unlock(&dev->mutex); |
782 | 782 | ||
783 | inode = watch->inode; | 783 | inode = watch->inode; |
784 | down(&inode->inotify_sem); | 784 | mutex_lock(&inode->inotify_mutex); |
785 | down(&dev->sem); | 785 | mutex_lock(&dev->mutex); |
786 | remove_watch_no_event(watch, dev); | 786 | remove_watch_no_event(watch, dev); |
787 | up(&dev->sem); | 787 | mutex_unlock(&dev->mutex); |
788 | up(&inode->inotify_sem); | 788 | mutex_unlock(&inode->inotify_mutex); |
789 | put_inotify_watch(watch); | 789 | put_inotify_watch(watch); |
790 | } | 790 | } |
791 | 791 | ||
792 | /* destroy all of the events on this device */ | 792 | /* destroy all of the events on this device */ |
793 | down(&dev->sem); | 793 | mutex_lock(&dev->mutex); |
794 | while (!list_empty(&dev->events)) | 794 | while (!list_empty(&dev->events)) |
795 | inotify_dev_event_dequeue(dev); | 795 | inotify_dev_event_dequeue(dev); |
796 | up(&dev->sem); | 796 | mutex_unlock(&dev->mutex); |
797 | 797 | ||
798 | /* free this device: the put matching the get in inotify_init() */ | 798 | /* free this device: the put matching the get in inotify_init() */ |
799 | put_inotify_dev(dev); | 799 | put_inotify_dev(dev); |
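The loop above exists because release must take inode->inotify_mutex before dev->mutex, yet only dev->mutex protects the list being walked. The general shape of the workaround, continuing the pthread analogue from earlier (first_item_or_null, grab_reference, remove_item, and drop_reference are hypothetical helpers standing in for the list and refcount operations):

/* When the iteration lock is the *inner* lock of the documented order:
 * pin the item, drop the inner lock, then reacquire both locks in the
 * correct outer-then-inner order before removing it. */
for (;;) {
        pthread_mutex_lock(&dev->mutex);             /* inner lock only */
        item = first_item_or_null(dev);
        if (!item) {
                pthread_mutex_unlock(&dev->mutex);
                break;
        }
        grab_reference(item);                        /* pin across the unlock */
        pthread_mutex_unlock(&dev->mutex);

        pthread_mutex_lock(&item->inode->inotify_mutex);  /* outer first */
        pthread_mutex_lock(&dev->mutex);                  /* inner second */
        remove_item(item);                           /* reference keeps it valid */
        pthread_mutex_unlock(&dev->mutex);
        pthread_mutex_unlock(&item->inode->inotify_mutex);
        drop_reference(item);
}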
@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd)
        struct inotify_watch *watch;
        struct inode *inode;

-       down(&dev->sem);
+       mutex_lock(&dev->mutex);
        watch = idr_find(&dev->idr, wd);
        if (unlikely(!watch)) {
-               up(&dev->sem);
+               mutex_unlock(&dev->mutex);
                return -EINVAL;
        }
        get_inotify_watch(watch);
        inode = watch->inode;
-       up(&dev->sem);
+       mutex_unlock(&dev->mutex);

-       down(&inode->inotify_sem);
-       down(&dev->sem);
+       mutex_lock(&inode->inotify_mutex);
+       mutex_lock(&dev->mutex);

        /* make sure that we did not race */
        watch = idr_find(&dev->idr, wd);
        if (likely(watch))
                remove_watch(watch, dev);

-       up(&dev->sem);
-       up(&inode->inotify_sem);
+       mutex_unlock(&dev->mutex);
+       mutex_unlock(&inode->inotify_mutex);
        put_inotify_watch(watch);

        return 0;
@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void)
        INIT_LIST_HEAD(&dev->events);
        INIT_LIST_HEAD(&dev->watches);
        init_waitqueue_head(&dev->wq);
-       sema_init(&dev->sem, 1);
+       mutex_init(&dev->mutex);
        dev->event_count = 0;
        dev->queue_size = 0;
        dev->max_events = inotify_max_queued_events;
@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
        inode = nd.dentry->d_inode;
        dev = filp->private_data;

-       down(&inode->inotify_sem);
-       down(&dev->sem);
+       mutex_lock(&inode->inotify_mutex);
+       mutex_lock(&dev->mutex);

        if (mask & IN_MASK_ADD)
                mask_add = 1;
@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
        list_add(&watch->i_list, &inode->inotify_watches);
        ret = watch->wd;
 out:
-       up(&dev->sem);
-       up(&inode->inotify_sem);
+       mutex_unlock(&dev->mutex);
+       mutex_unlock(&inode->inotify_mutex);
        path_release(&nd);
 fput_and_out:
        fput_light(filp, fput_needed);
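For reference, the userspace view of the interface patched here (a sketch assuming a libc that ships <sys/inotify.h> wrappers; error handling trimmed): inotify_init() lands in sys_inotify_init(), inotify_add_watch() in sys_inotify_add_watch(), inotify_rm_watch() ends up in inotify_ignore(), and read() in inotify_read().

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
        char buf[4096];
        int fd = inotify_init();
        int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
        ssize_t len = read(fd, buf, sizeof(buf));  /* blocks until an event */

        if (len > 0) {
                struct inotify_event *ev = (struct inotify_event *)buf;
                printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
                       ev->len ? ev->name : "");
        }
        inotify_rm_watch(fd, wd);
        close(fd);
        return 0;
}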