Diffstat (limited to 'fs/notify/inotify')
 -rw-r--r--  fs/notify/inotify/Kconfig            |   1
 -rw-r--r--  fs/notify/inotify/inotify_fsnotify.c |   4
 -rw-r--r--  fs/notify/inotify/inotify_user.c     | 100
 3 files changed, 25 insertions(+), 80 deletions(-)
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 3e56dbffe729..b3a159b21cfd 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -15,6 +15,7 @@ config INOTIFY
 
 config INOTIFY_USER
 	bool "Inotify support for userspace"
+	select ANON_INODES
 	select FSNOTIFY
 	default y
 	---help---
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index c9ee67b442e1..e27960cd76ab 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -28,6 +28,7 @@
 #include <linux/path.h> /* struct path */
 #include <linux/slab.h> /* kmem_* */
 #include <linux/types.h>
+#include <linux/sched.h>
 
 #include "inotify.h"
 
@@ -121,7 +122,7 @@ static int idr_callback(int id, void *p, void *data)
 	if (warned)
 		return 0;
 
-	warned = false;
+	warned = true;
 	entry = p;
 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
@@ -146,6 +147,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
 	idr_for_each(&group->inotify_data.idr, idr_callback, group);
 	idr_remove_all(&group->inotify_data.idr);
 	idr_destroy(&group->inotify_data.idr);
+	free_uid(group->inotify_data.user);
 }
 
 void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
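Two notes on the inotify_fsnotify.c hunks. The idr_callback() change is a warn-once fix: the flag has to be latched to true after the first report, otherwise the guard never trips and the diagnostic fires for every leaked entry. The last hunk additionally drops the user_struct reference stored in group->inotify_data.user when the group's private data is torn down. A minimal, self-contained sketch of the warn-once pattern (illustrative names, not the tree's code):

#include <stdbool.h>

static bool warned;	/* file-scope latch: report at most once */

static int report_leak_once(int id, void *p, void *data)
{
	if (warned)		/* a previous call already reported */
		return 0;
	warned = true;		/* the fix: latch the flag before reporting,
				 * so later callbacks return early */
	/* ... emit the one-time diagnostic about entry 'id' here ... */
	return 0;
}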
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index dcd2040d330c..e46ca685b9be 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -29,14 +29,12 @@
 #include <linux/init.h> /* module_init */
 #include <linux/inotify.h>
 #include <linux/kernel.h> /* roundup() */
-#include <linux/magic.h> /* superblock magic number */
-#include <linux/mount.h> /* mntget */
 #include <linux/namei.h> /* LOOKUP_FOLLOW */
-#include <linux/path.h> /* struct path */
 #include <linux/sched.h> /* struct user */
 #include <linux/slab.h> /* struct kmem_cache */
 #include <linux/syscalls.h>
 #include <linux/types.h>
+#include <linux/anon_inodes.h>
 #include <linux/uaccess.h>
 #include <linux/poll.h>
 #include <linux/wait.h>
@@ -45,8 +43,6 @@
 
 #include <asm/ioctls.h>
 
-static struct vfsmount *inotify_mnt __read_mostly;
-
 /* these are configurable via /proc/sys/fs/inotify/ */
 static int inotify_max_user_instances __read_mostly;
 static int inotify_max_queued_events __read_mostly;
@@ -69,36 +65,30 @@ static int zero;
 
 ctl_table inotify_table[] = {
 	{
-		.ctl_name = INOTIFY_MAX_USER_INSTANCES,
 		.procname = "max_user_instances",
 		.data = &inotify_max_user_instances,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
+		.proc_handler = proc_dointvec_minmax,
 		.extra1 = &zero,
 	},
 	{
-		.ctl_name = INOTIFY_MAX_USER_WATCHES,
 		.procname = "max_user_watches",
 		.data = &inotify_max_user_watches,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
+		.proc_handler = proc_dointvec_minmax,
 		.extra1 = &zero,
 	},
 	{
-		.ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
 		.procname = "max_queued_events",
 		.data = &inotify_max_queued_events,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
+		.proc_handler = proc_dointvec_minmax,
 		.extra1 = &zero
 	},
-	{ .ctl_name = 0 }
+	{ }
 };
 #endif /* CONFIG_SYSCTL */
 
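The sysctl hunk follows the sysctl API cleanup of that era: the binary .ctl_name and .strategy fields go away, the proc handler is assigned without the redundant address-of operator, and the table is closed by an empty terminator. An entry in the new style looks roughly like this sketch (hypothetical tunable, not part of the patch):

static int example_max __read_mostly = 128;	/* hypothetical /proc/sys tunable */
static int zero;

ctl_table example_table[] = {
	{
		.procname	= "example_max",
		.data		= &example_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,	/* no '&' needed */
		.extra1		= &zero,		/* clamp writes to >= 0 */
	},
	{ }	/* empty terminator instead of { .ctl_name = 0 } */
};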
@@ -556,21 +546,24 @@ retry:
 	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
 		goto out_err;
 
+	/* we are putting the mark on the idr, take a reference */
+	fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
 	spin_lock(&group->inotify_data.idr_lock);
 	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
-				group->inotify_data.last_wd,
+				group->inotify_data.last_wd+1,
 				&tmp_ientry->wd);
 	spin_unlock(&group->inotify_data.idr_lock);
 	if (ret) {
+		/* we didn't get on the idr, drop the idr reference */
+		fsnotify_put_mark(&tmp_ientry->fsn_entry);
+
 		/* idr was out of memory allocate and try again */
 		if (ret == -EAGAIN)
 			goto retry;
 		goto out_err;
 	}
 
-	/* we put the mark on the idr, take a reference */
-	fsnotify_get_mark(&tmp_ientry->fsn_entry);
-
 	/* we are on the idr, now get on the inode */
 	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
 	if (ret) {
@@ -588,16 +581,13 @@ retry:
 	/* return the watch descriptor for this new entry */
 	ret = tmp_ientry->wd;
 
-	/* match the ref from fsnotify_init_markentry() */
-	fsnotify_put_mark(&tmp_ientry->fsn_entry);
-
 	/* if this mark added a new event update the group mask */
 	if (mask & ~group->mask)
 		fsnotify_recalc_group_mask(group);
 
 out_err:
-	if (ret < 0)
-		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
+	/* match the ref from fsnotify_init_markentry() */
+	fsnotify_put_mark(&tmp_ientry->fsn_entry);
 
 	return ret;
 }
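The two hunks above reorder the reference counting around the idr: the reference owned by the idr is taken before idr_get_new_above() and dropped immediately if the insert fails, and the reference left over from mark initialization is released unconditionally at out_err instead of freeing the cache object by hand on the error path. Condensed into a sketch assembled from the new side of the diff (the attachment to the inode and its unwinding are elided; this is not the literal function):

static int example_add_mark(struct fsnotify_group *group,
			    struct inotify_inode_mark_entry *ientry)
{
	int ret;

retry:
	ret = -ENOMEM;
	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
		goto out_err;

	/* reference owned by the idr, taken before the entry is visible there */
	fsnotify_get_mark(&ientry->fsn_entry);

	spin_lock(&group->inotify_data.idr_lock);
	ret = idr_get_new_above(&group->inotify_data.idr, &ientry->fsn_entry,
				group->inotify_data.last_wd + 1, &ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);
	if (ret) {
		fsnotify_put_mark(&ientry->fsn_entry); /* never made it onto the idr */
		if (ret == -EAGAIN)
			goto retry;
		goto out_err;
	}

	ret = ientry->wd;
out_err:
	/* balances the reference taken when the mark was initialized */
	fsnotify_put_mark(&ientry->fsn_entry);
	return ret;
}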
@@ -638,7 +628,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
 
 	spin_lock_init(&group->inotify_data.idr_lock);
 	idr_init(&group->inotify_data.idr);
-	group->inotify_data.last_wd = 1;
+	group->inotify_data.last_wd = 0;
 	group->inotify_data.user = user;
 	group->inotify_data.fa = NULL;
 
@@ -651,8 +641,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
 {
 	struct fsnotify_group *group;
 	struct user_struct *user;
-	struct file *filp;
-	int fd, ret;
+	int ret;
 
 	/* Check the IN_* constants for consistency. */
 	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
@@ -661,16 +650,6 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
 	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
 		return -EINVAL;
 
-	fd = get_unused_fd_flags(flags & O_CLOEXEC);
-	if (fd < 0)
-		return fd;
-
-	filp = get_empty_filp();
-	if (!filp) {
-		ret = -ENFILE;
-		goto out_put_fd;
-	}
-
 	user = get_current_user();
 	if (unlikely(atomic_read(&user->inotify_devs) >=
 			inotify_max_user_instances)) {
@@ -685,25 +664,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
 		goto out_free_uid;
 	}
 
-	filp->f_op = &inotify_fops;
-	filp->f_path.mnt = mntget(inotify_mnt);
-	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
-	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
-	filp->f_mode = FMODE_READ;
-	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
-	filp->private_data = group;
-
 	atomic_inc(&user->inotify_devs);
 
-	fd_install(fd, filp);
-
-	return fd;
+	ret = anon_inode_getfd("inotify", &inotify_fops, group,
+			       O_RDONLY | flags);
+	if (ret >= 0)
+		return ret;
 
+	atomic_dec(&user->inotify_devs);
 out_free_uid:
 	free_uid(user);
-	put_filp(filp);
-out_put_fd:
-	put_unused_fd(fd);
 	return ret;
 }
 
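With the private "inotifyfs" mount gone, inotify_init1() now gets its descriptor from the anon_inodes layer: anon_inode_getfd() allocates the fd and the struct file (with private_data pointing at the group) and installs it in one call, which is why the open-coded get_unused_fd_flags()/get_empty_filp()/fd_install() sequence and its unwind labels disappear. The general shape of the pattern, as a sketch with placeholder names rather than this file's code:

#include <linux/anon_inodes.h>
#include <linux/fs.h>

/* 'example_fops' and 'state' stand in for the caller's file_operations
 * and private data. */
static int example_open_anon_fd(const struct file_operations *example_fops,
				void *state, int flags)
{
	int fd;

	fd = anon_inode_getfd("example", example_fops, state,
			      O_RDONLY | (flags & (O_CLOEXEC | O_NONBLOCK)));
	/* on failure there is nothing to unwind: no fd or file was left
	 * half-installed; on success the new file's private_data is 'state' */
	return fd;
}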
@@ -747,10 +717,6 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
 
 	/* create/update an inode mark */
 	ret = inotify_update_watch(group, inode, mask);
-	if (unlikely(ret))
-		goto path_put_and_out;
-
-path_put_and_out:
 	path_put(&path);
 fput_and_out:
 	fput_light(filp, fput_needed);
@@ -794,20 +760,6 @@ out:
 	return ret;
 }
 
-static int
-inotify_get_sb(struct file_system_type *fs_type, int flags,
-	       const char *dev_name, void *data, struct vfsmount *mnt)
-{
-	return get_sb_pseudo(fs_type, "inotify", NULL,
-			     INOTIFYFS_SUPER_MAGIC, mnt);
-}
-
-static struct file_system_type inotify_fs_type = {
-	.name		= "inotifyfs",
-	.get_sb		= inotify_get_sb,
-	.kill_sb	= kill_anon_super,
-};
-
 /*
  * inotify_user_setup - Our initialization function. Note that we cannnot return
  * error because we have compiled-in VFS hooks. So an (unlikely) failure here
@@ -815,16 +767,6 @@ static struct file_system_type inotify_fs_type = {
  */
 static int __init inotify_user_setup(void)
 {
-	int ret;
-
-	ret = register_filesystem(&inotify_fs_type);
-	if (unlikely(ret))
-		panic("inotify: register_filesystem returned %d!\n", ret);
-
-	inotify_mnt = kern_mount(&inotify_fs_type);
-	if (IS_ERR(inotify_mnt))
-		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
-
 	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
 