author		Jeff Garzik <jgarzik@pobox.com>		2005-08-17 00:51:31 -0400
committer	Jeff Garzik <jgarzik@pobox.com>		2005-08-17 00:51:31 -0400
commit		a2e30e529a48ef4e106e405f91cf4ae525bb01c4 (patch)
tree		2def96ef17c0672c30f1a10287552978bf1d0b1c /fs/inotify.c
parent		edb3366703224d5d8df573ae698ccd6b488dc743 (diff)
parent		2ad56496627630ebc99f06af5f81ca23e17e014e (diff)

Merge /spare/repo/linux-2.6/

Diffstat (limited to 'fs/inotify.c')
-rw-r--r--	fs/inotify.c	130
1 files changed, 75 insertions, 55 deletions
diff --git a/fs/inotify.c b/fs/inotify.c
index 54757be888b6..868901b1e779 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -62,8 +62,8 @@ int inotify_max_queued_events;
  * Lifetimes of the three main data structures--inotify_device, inode, and
  * inotify_watch--are managed by reference count.
  *
- * inotify_device: Lifetime is from open until release. Additional references
- * can bump the count via get_inotify_dev() and drop the count via
+ * inotify_device: Lifetime is from inotify_init() until release. Additional
+ * references can bump the count via get_inotify_dev() and drop the count via
  * put_inotify_dev().
  *
  * inotify_watch: Lifetime is from create_watch() to destory_watch().
@@ -75,7 +75,7 @@ int inotify_max_queued_events;
  */
 
 /*
- * struct inotify_device - represents an open instance of an inotify device
+ * struct inotify_device - represents an inotify instance
  *
  * This structure is protected by the semaphore 'sem'.
  */
@@ -90,6 +90,7 @@ struct inotify_device {
 	unsigned int		queue_size;	/* size of the queue (bytes) */
 	unsigned int		event_count;	/* number of pending events */
 	unsigned int		max_events;	/* maximum number of events */
+	u32			last_wd;	/* the last wd allocated */
 };
 
 /*
@@ -352,7 +353,7 @@ static int inotify_dev_get_wd(struct inotify_device *dev,
 	do {
 		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
 			return -ENOSPC;
-		ret = idr_get_new(&dev->idr, watch, &watch->wd);
+		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd, &watch->wd);
 	} while (ret == -EAGAIN);
 
 	return ret;
@@ -371,7 +372,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd)
 	/* you can only watch an inode if you have read permissions on it */
 	error = permission(nd->dentry->d_inode, MAY_READ, NULL);
 	if (error)
-		path_release (nd);
+		path_release(nd);
 	return error;
 }
 
@@ -387,7 +388,8 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
 	struct inotify_watch *watch;
 	int ret;
 
-	if (atomic_read(&dev->user->inotify_watches) >= inotify_max_user_watches)
+	if (atomic_read(&dev->user->inotify_watches) >=
+			inotify_max_user_watches)
 		return ERR_PTR(-ENOSPC);
 
 	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
@@ -400,6 +402,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
 		return ERR_PTR(ret);
 	}
 
+	dev->last_wd = watch->wd;
 	watch->mask = mask;
 	atomic_set(&watch->count, 0);
 	INIT_LIST_HEAD(&watch->d_list);
@@ -783,15 +786,14 @@ static int inotify_release(struct inode *ignored, struct file *file)
 		inotify_dev_event_dequeue(dev);
 	up(&dev->sem);
 
-	/* free this device: the put matching the get in inotify_open() */
+	/* free this device: the put matching the get in inotify_init() */
 	put_inotify_dev(dev);
 
 	return 0;
 }
 
 /*
- * inotify_ignore - handle the INOTIFY_IGNORE ioctl, asking that a given wd be
- * removed from the device.
+ * inotify_ignore - remove a given wd from this inotify instance.
  *
  * Can sleep.
  */
@@ -856,42 +858,40 @@ asmlinkage long sys_inotify_init(void)
 {
 	struct inotify_device *dev;
 	struct user_struct *user;
-	int ret = -ENOTTY;
-	int fd;
-	struct file *filp;
+	struct file *filp;
+	int fd, ret;
 
 	fd = get_unused_fd();
-	if (fd < 0) {
-		ret = fd;
-		goto out;
-	}
+	if (fd < 0)
+		return fd;
 
 	filp = get_empty_filp();
 	if (!filp) {
-		put_unused_fd(fd);
 		ret = -ENFILE;
-		goto out;
+		goto out_put_fd;
 	}
-	filp->f_op = &inotify_fops;
-	filp->f_vfsmnt = mntget(inotify_mnt);
-	filp->f_dentry = dget(inotify_mnt->mnt_root);
-	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
-	filp->f_mode = FMODE_READ;
-	filp->f_flags = O_RDONLY;
 
 	user = get_uid(current->user);
-
-	if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_instances)) {
+	if (unlikely(atomic_read(&user->inotify_devs) >=
+			inotify_max_user_instances)) {
 		ret = -EMFILE;
-		goto out_err;
+		goto out_free_uid;
 	}
 
 	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
 	if (unlikely(!dev)) {
 		ret = -ENOMEM;
-		goto out_err;
+		goto out_free_uid;
 	}
 
+	filp->f_op = &inotify_fops;
+	filp->f_vfsmnt = mntget(inotify_mnt);
+	filp->f_dentry = dget(inotify_mnt->mnt_root);
+	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
+	filp->f_mode = FMODE_READ;
+	filp->f_flags = O_RDONLY;
+	filp->private_data = dev;
+
 	idr_init(&dev->idr);
 	INIT_LIST_HEAD(&dev->events);
 	INIT_LIST_HEAD(&dev->watches);
@@ -901,50 +901,55 @@ asmlinkage long sys_inotify_init(void)
 	dev->queue_size = 0;
 	dev->max_events = inotify_max_queued_events;
 	dev->user = user;
+	dev->last_wd = 0;
 	atomic_set(&dev->count, 0);
 
 	get_inotify_dev(dev);
 	atomic_inc(&user->inotify_devs);
+	fd_install(fd, filp);
 
-	filp->private_data = dev;
-	fd_install (fd, filp);
 	return fd;
-out_err:
-	put_unused_fd (fd);
-	put_filp (filp);
+out_free_uid:
 	free_uid(user);
-out:
+	put_filp(filp);
+out_put_fd:
+	put_unused_fd(fd);
 	return ret;
 }
 
-asmlinkage long sys_inotify_add_watch(int fd, const char *path, u32 mask)
+asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
 {
 	struct inotify_watch *watch, *old;
 	struct inode *inode;
 	struct inotify_device *dev;
 	struct nameidata nd;
 	struct file *filp;
-	int ret;
+	int ret, fput_needed;
 
-	filp = fget(fd);
-	if (!filp)
+	filp = fget_light(fd, &fput_needed);
+	if (unlikely(!filp))
 		return -EBADF;
 
-	dev = filp->private_data;
+	/* verify that this is indeed an inotify instance */
+	if (unlikely(filp->f_op != &inotify_fops)) {
+		ret = -EINVAL;
+		goto fput_and_out;
+	}
 
-	ret = find_inode((const char __user*) path, &nd);
-	if (ret)
+	ret = find_inode(path, &nd);
+	if (unlikely(ret))
 		goto fput_and_out;
 
-	/* Held in place by reference in nd */
+	/* inode held in place by reference to nd; dev by fget on fd */
 	inode = nd.dentry->d_inode;
+	dev = filp->private_data;
 
 	down(&inode->inotify_sem);
 	down(&dev->sem);
 
 	/* don't let user-space set invalid bits: we don't want flags set */
 	mask &= IN_ALL_EVENTS;
-	if (!mask) {
+	if (unlikely(!mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -971,11 +976,11 @@ asmlinkage long sys_inotify_add_watch(int fd, const char *path, u32 mask)
 	list_add(&watch->i_list, &inode->inotify_watches);
 	ret = watch->wd;
 out:
-	path_release (&nd);
 	up(&dev->sem);
 	up(&inode->inotify_sem);
+	path_release(&nd);
 fput_and_out:
-	fput(filp);
+	fput_light(filp, fput_needed);
 	return ret;
 }
 
@@ -983,15 +988,23 @@ asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
 {
 	struct file *filp;
 	struct inotify_device *dev;
-	int ret;
+	int ret, fput_needed;
 
-	filp = fget(fd);
-	if (!filp)
+	filp = fget_light(fd, &fput_needed);
+	if (unlikely(!filp))
 		return -EBADF;
+
+	/* verify that this is indeed an inotify instance */
+	if (unlikely(filp->f_op != &inotify_fops)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	dev = filp->private_data;
 	ret = inotify_ignore(dev, wd);
-	fput(filp);
 
+out:
+	fput_light(filp, fput_needed);
 	return ret;
 }
 
@@ -1009,17 +1022,24 @@ static struct file_system_type inotify_fs_type = {
 };
 
 /*
- * inotify_init - Our initialization function. Note that we cannnot return
+ * inotify_setup - Our initialization function. Note that we cannnot return
  * error because we have compiled-in VFS hooks. So an (unlikely) failure here
  * must result in panic().
  */
-static int __init inotify_init(void)
+static int __init inotify_setup(void)
 {
-	register_filesystem(&inotify_fs_type);
+	int ret;
+
+	ret = register_filesystem(&inotify_fs_type);
+	if (unlikely(ret))
+		panic("inotify: register_filesystem returned %d!\n", ret);
+
 	inotify_mnt = kern_mount(&inotify_fs_type);
+	if (IS_ERR(inotify_mnt))
+		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
 
-	inotify_max_queued_events = 8192;
-	inotify_max_user_instances = 8;
+	inotify_max_queued_events = 16384;
+	inotify_max_user_instances = 128;
 	inotify_max_user_watches = 8192;
 
 	atomic_set(&inotify_cookie, 0);
@@ -1034,4 +1054,4 @@ static int __init inotify_init(void)
 	return 0;
 }
 
-module_init(inotify_init);
+module_init(inotify_setup);
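
The hunks above rework the kernel side of the three inotify syscalls: sys_inotify_init() now installs the fd only after the device is fully set up, sys_inotify_add_watch() and sys_inotify_rm_watch() switch to fget_light() and reject non-inotify fds, and watch descriptors are allocated above dev->last_wd via idr_get_new_above(). For orientation, here is a minimal userspace sketch of the call sequence those kernel paths service. It is illustrative only and not part of the patch; it assumes the glibc wrappers from <sys/inotify.h> (which postdate this merge — at the time callers issued the raw syscalls) and a hypothetical watch path of /tmp.

/* Illustrative userspace sketch: exercises the syscalls patched above.
 * Assumes glibc's <sys/inotify.h> wrappers; the watched path is hypothetical. */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	/* align the buffer so casting to struct inotify_event is safe */
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;
	int fd, wd;

	fd = inotify_init();			/* backed by sys_inotify_init() */
	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}

	/* backed by sys_inotify_add_watch(); returns a watch descriptor (wd).
	 * With this patch, new wds are allocated above dev->last_wd, so a
	 * just-removed wd is not immediately reused. */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	/* read() dequeues struct inotify_event records from the event queue */
	len = read(fd, buf, sizeof(buf));
	if (len > 0) {
		struct inotify_event *ev = (struct inotify_event *) buf;
		printf("wd=%d mask=0x%x name=%s\n", ev->wd,
		       (unsigned) ev->mask, ev->len ? ev->name : "");
	}

	inotify_rm_watch(fd, wd);		/* backed by sys_inotify_rm_watch() */
	close(fd);
	return 0;
}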