aboutsummaryrefslogtreecommitdiffstats
path: root/fs/notify/dnotify/dnotify.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/notify/dnotify/dnotify.c')
-rw-r--r--fs/notify/dnotify/dnotify.c191
1 files changed, 191 insertions, 0 deletions
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
new file mode 100644
index 000000000000..b0aa2cde80bd
--- /dev/null
+++ b/fs/notify/dnotify/dnotify.c
@@ -0,0 +1,191 @@
1/*
2 * Directory notifications for Linux.
3 *
4 * Copyright (C) 2000,2001,2002 Stephen Rothwell
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
9 * later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16#include <linux/fs.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/dnotify.h>
20#include <linux/init.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/fdtable.h>
24
/* Master switch for dnotify; presumably exposed via sysctl
 * (fs.dir-notify-enable) — confirm against kernel/sysctl.c.
 * fcntl_dirnotify() refuses new watches when this is zero. */
int dir_notify_enable __read_mostly = 1;

/* Slab cache for struct dnotify_struct; created in dnotify_init(). */
static struct kmem_cache *dn_cache __read_mostly;
28
29static void redo_inode_mask(struct inode *inode)
30{
31 unsigned long new_mask;
32 struct dnotify_struct *dn;
33
34 new_mask = 0;
35 for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next)
36 new_mask |= dn->dn_mask & ~DN_MULTISHOT;
37 inode->i_dnotify_mask = new_mask;
38}
39
40void dnotify_flush(struct file *filp, fl_owner_t id)
41{
42 struct dnotify_struct *dn;
43 struct dnotify_struct **prev;
44 struct inode *inode;
45
46 inode = filp->f_path.dentry->d_inode;
47 if (!S_ISDIR(inode->i_mode))
48 return;
49 spin_lock(&inode->i_lock);
50 prev = &inode->i_dnotify;
51 while ((dn = *prev) != NULL) {
52 if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
53 *prev = dn->dn_next;
54 redo_inode_mask(inode);
55 kmem_cache_free(dn_cache, dn);
56 break;
57 }
58 prev = &dn->dn_next;
59 }
60 spin_unlock(&inode->i_lock);
61}
62
/*
 * fcntl_dirnotify - implement the F_NOTIFY fcntl on a directory.
 * @fd:   descriptor number the caller used (stored so the SIGIO can report it)
 * @filp: open file for @fd; must refer to a directory
 * @arg:  DN_* event mask; DN_MULTISHOT keeps the watch armed after delivery
 *
 * An @arg of zero (ignoring DN_MULTISHOT) removes the caller's existing
 * watch.  Otherwise a new watch is installed, or an existing one for the
 * same owner/file is extended with the additional event bits.
 *
 * Returns 0 on success, -EINVAL when dnotify is disabled, -ENOTDIR when
 * @filp is not a directory, -ENOMEM on allocation failure, or the error
 * from __f_setown().  Losing the race with close() returns 0 silently.
 */
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
	struct dnotify_struct *dn;
	struct dnotify_struct *odn;
	struct dnotify_struct **prev;
	struct inode *inode;
	fl_owner_t id = current->files;
	struct file *f;
	int error = 0;

	/* Empty event mask means "remove my registration". */
	if ((arg & ~DN_MULTISHOT) == 0) {
		dnotify_flush(filp, id);
		return 0;
	}
	if (!dir_notify_enable)
		return -EINVAL;
	inode = filp->f_path.dentry->d_inode;
	if (!S_ISDIR(inode->i_mode))
		return -ENOTDIR;
	/* Allocate before taking the spinlock; GFP_KERNEL may sleep. */
	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
	if (dn == NULL)
		return -ENOMEM;
	spin_lock(&inode->i_lock);
	prev = &inode->i_dnotify;
	/* Existing entry for this owner/file: OR in the new bits and
	 * discard the fresh allocation (error is still 0 here). */
	while ((odn = *prev) != NULL) {
		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
			odn->dn_fd = fd;
			odn->dn_mask |= arg;
			inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
			goto out_free;
		}
		prev = &odn->dn_next;
	}

	rcu_read_lock();
	f = fcheck(fd);
	rcu_read_unlock();
	/* we'd lost the race with close(), sod off silently */
	/* note that inode->i_lock prevents reordering problems
	 * between accesses to descriptor table and ->i_dnotify */
	if (f != filp)
		goto out_free;

	/* Direct the SIGIO for this fd at the current task. */
	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	if (error)
		goto out_free;

	dn->dn_mask = arg;
	dn->dn_fd = fd;
	dn->dn_filp = filp;
	dn->dn_owner = id;
	inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
	/* Push the new watch onto the head of the inode's list. */
	dn->dn_next = inode->i_dnotify;
	inode->i_dnotify = dn;
	spin_unlock(&inode->i_lock);
	return 0;

out_free:
	spin_unlock(&inode->i_lock);
	kmem_cache_free(dn_cache, dn);
	return error;
}
125
/*
 * __inode_dir_notify - deliver a directory event to all watchers of @inode.
 * @inode: directory inode whose dnotify list is walked
 * @event: DN_* event bit(s) that occurred
 *
 * For each registered dnotify_struct whose mask includes @event, send
 * SIGIO (POLL_MSG, carrying the registered fd) to the file's owner.
 * One-shot entries (no DN_MULTISHOT) are unlinked and freed after they
 * fire; if any were removed, i_dnotify_mask is recomputed.
 */
void __inode_dir_notify(struct inode *inode, unsigned long event)
{
	struct dnotify_struct * dn;
	struct dnotify_struct **prev;
	struct fown_struct * fown;
	int changed = 0;

	spin_lock(&inode->i_lock);
	prev = &inode->i_dnotify;
	while ((dn = *prev) != NULL) {
		/* Skip watchers not interested in this event. */
		if ((dn->dn_mask & event) == 0) {
			prev = &dn->dn_next;
			continue;
		}
		fown = &dn->dn_filp->f_owner;
		send_sigio(fown, dn->dn_fd, POLL_MSG);
		if (dn->dn_mask & DN_MULTISHOT)
			prev = &dn->dn_next;
		else {
			/* One-shot watch: unlink and free after firing;
			 * *prev advances implicitly to the next entry. */
			*prev = dn->dn_next;
			changed = 1;
			kmem_cache_free(dn_cache, dn);
		}
	}
	if (changed)
		redo_inode_mask(inode);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(__inode_dir_notify);
156
157/*
158 * This is hopelessly wrong, but unfixable without API changes. At
159 * least it doesn't oops the kernel...
160 *
161 * To safely access ->d_parent we need to keep d_move away from it. Use the
162 * dentry's d_lock for this.
163 */
164void dnotify_parent(struct dentry *dentry, unsigned long event)
165{
166 struct dentry *parent;
167
168 if (!dir_notify_enable)
169 return;
170
171 spin_lock(&dentry->d_lock);
172 parent = dentry->d_parent;
173 if (parent->d_inode->i_dnotify_mask & event) {
174 dget(parent);
175 spin_unlock(&dentry->d_lock);
176 __inode_dir_notify(parent->d_inode, event);
177 dput(parent);
178 } else {
179 spin_unlock(&dentry->d_lock);
180 }
181}
182EXPORT_SYMBOL_GPL(dnotify_parent);
183
184static int __init dnotify_init(void)
185{
186 dn_cache = kmem_cache_create("dnotify_cache",
187 sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
188 return 0;
189}
190
191module_init(dnotify_init)