diffstat:

 MAINTAINERS                          |   2
 fs/notify/inotify/Kconfig            |  20
 fs/notify/inotify/Makefile           |   2
 fs/notify/inotify/inotify.h          |  21
 fs/notify/inotify/inotify_fsnotify.c | 137
 fs/notify/inotify/inotify_user.c     | 837
 include/linux/fsnotify_backend.h     |  11
 init/Kconfig                         |   3
 8 files changed, 585 insertions(+), 448 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 96e0c8c60796..e697b67031a2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2858,6 +2858,8 @@ P: John McCutchan
 M:	john@johnmccutchan.com
 P:	Robert Love
 M:	rlove@rlove.org
+P:	Eric Paris
+M:	eparis@parisplace.org
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/filesystems/inotify.txt
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 446792841023..5356884289a1 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -1,26 +1,30 @@
 config INOTIFY
 	bool "Inotify file change notification support"
-	default y
+	default n
 	---help---
-	  Say Y here to enable inotify support. Inotify is a file change
-	  notification system and a replacement for dnotify. Inotify fixes
-	  numerous shortcomings in dnotify and introduces several new features
-	  including multiple file events, one-shot support, and unmount
-	  notification.
+	  Say Y here to enable legacy in kernel inotify support. Inotify is a
+	  file change notification system. It is a replacement for dnotify.
+	  This option only provides the legacy inotify in kernel API. There
+	  are no in tree kernel users of this interface since it is deprecated.
+	  You only need this if you are loading an out of tree kernel module
+	  that uses inotify.
 
 	  For more information, see <file:Documentation/filesystems/inotify.txt>
 
-	  If unsure, say Y.
+	  If unsure, say N.
 
 config INOTIFY_USER
 	bool "Inotify support for userspace"
-	depends on INOTIFY
+	depends on FSNOTIFY
 	default y
 	---help---
 	  Say Y here to enable inotify support for userspace, including the
 	  associated system calls. Inotify allows monitoring of both files and
 	  directories via a single open fd. Events are read from the file
 	  descriptor, which is also select()- and poll()-able.
+	  Inotify fixes numerous shortcomings in dnotify and introduces several
+	  new features including multiple file events, one-shot support, and
+	  unmount notification.
 
 	  For more information, see <file:Documentation/filesystems/inotify.txt>
 
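For context on the userspace API this help text describes, here is a minimal sketch of a consumer; it is illustrative only and not part of this patch (the syscall interface is unchanged by the fsnotify conversion):

/* Minimal sketch of the inotify userspace API: watch a directory and
 * print one batch of events.  Error handling is trimmed for brevity. */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	ssize_t len;

	int fd = inotify_init1(0);	/* one fd per notification group */
	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
		perror("inotify");
		return 1;
	}

	/* read() blocks until events are queued; the fd is also poll()-able */
	len = read(fd, buf, sizeof(buf));
	for (char *p = buf; p < buf + len; ) {
		struct inotify_event *ev = (struct inotify_event *)p;
		printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
		       ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;	/* ev->len includes NUL padding */
	}
	close(fd);
	return 0;
}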
diff --git a/fs/notify/inotify/Makefile b/fs/notify/inotify/Makefile
index e290f3bb9d8d..943828171362 100644
--- a/fs/notify/inotify/Makefile
+++ b/fs/notify/inotify/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_INOTIFY)		+= inotify.o
-obj-$(CONFIG_INOTIFY_USER)	+= inotify_user.o
+obj-$(CONFIG_INOTIFY_USER)	+= inotify_fsnotify.o inotify_user.o
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
new file mode 100644
index 000000000000..ea2605a58b8a
--- /dev/null
+++ b/fs/notify/inotify/inotify.h
@@ -0,0 +1,21 @@
+#include <linux/fsnotify_backend.h>
+#include <linux/inotify.h>
+#include <linux/slab.h> /* struct kmem_cache */
+
+extern struct kmem_cache *event_priv_cachep;
+
+struct inotify_event_private_data {
+	struct fsnotify_event_private_data fsnotify_event_priv_data;
+	int wd;
+};
+
+struct inotify_inode_mark_entry {
+	/* fsnotify_mark_entry MUST be the first thing */
+	struct fsnotify_mark_entry fsn_entry;
+	int wd;
+};
+
+extern void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
+extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
+
+extern const struct fsnotify_ops inotify_fsnotify_ops;
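Aside: the "MUST be the first thing" requirement exists because code later in this patch casts a struct fsnotify_mark_entry pointer directly instead of using container_of(). A hypothetical helper (example_to_ientry, not in the patch; compiles only against the structs above) showing why both forms agree:

/* Illustrative only: with fsn_entry as the first member, a direct cast
 * and container_of() yield the same pointer. */
static struct inotify_inode_mark_entry *
example_to_ientry(struct fsnotify_mark_entry *entry)
{
	/* container_of() works regardless of member position... */
	struct inotify_inode_mark_entry *a =
		container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	/* ...while the cast is only valid because fsn_entry is at offset 0 */
	struct inotify_inode_mark_entry *b =
		(struct inotify_inode_mark_entry *)entry;

	BUG_ON(a != b);
	return a;
}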
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
new file mode 100644
index 000000000000..160da5486839
--- /dev/null
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -0,0 +1,137 @@
+/*
+ * fs/notify/inotify/inotify_fsnotify.c - inotify support for userspace
+ *
+ * Authors:
+ *	John McCutchan	<ttb@tentacle.dhs.org>
+ *	Robert Love	<rml@novell.com>
+ *
+ * Copyright (C) 2005 John McCutchan
+ * Copyright 2006 Hewlett-Packard Development Company, L.P.
+ *
+ * Copyright (C) 2009 Eric Paris <Red Hat Inc>
+ * inotify was largely rewritten to make use of the fsnotify infrastructure
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/fs.h> /* struct inode */
+#include <linux/fsnotify_backend.h>
+#include <linux/inotify.h>
+#include <linux/path.h> /* struct path */
+#include <linux/slab.h> /* kmem_* */
+#include <linux/types.h>
+
+#include "inotify.h"
+
+static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark_entry *ientry;
+	struct inode *to_tell;
+	struct inotify_event_private_data *event_priv;
+	struct fsnotify_event_private_data *fsn_event_priv;
+	int wd, ret;
+
+	to_tell = event->to_tell;
+
+	spin_lock(&to_tell->i_lock);
+	entry = fsnotify_find_mark_entry(group, to_tell);
+	spin_unlock(&to_tell->i_lock);
+	/* race with watch removal?  We already passed should_send */
+	if (unlikely(!entry))
+		return 0;
+	ientry = container_of(entry, struct inotify_inode_mark_entry,
+			      fsn_entry);
+	wd = ientry->wd;
+
+	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
+	if (unlikely(!event_priv))
+		return -ENOMEM;
+
+	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
+
+	fsn_event_priv->group = group;
+	event_priv->wd = wd;
+
+	ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
+	/* EEXIST is not an error */
+	if (ret == -EEXIST)
+		ret = 0;
+
+	/* did event_priv get attached? */
+	if (list_empty(&fsn_event_priv->event_list))
+		inotify_free_event_priv(fsn_event_priv);
+
+	/*
+	 * If we hold the entry until after the event is on the queue,
+	 * IN_IGNORED won't be able to pass this event in the queue.
+	 */
+	fsnotify_put_mark(entry);
+
+	return ret;
+}
+
+static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+{
+	inotify_destroy_mark_entry(entry, group);
+}
+
+static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
+{
+	struct fsnotify_mark_entry *entry;
+	bool send;
+
+	spin_lock(&inode->i_lock);
+	entry = fsnotify_find_mark_entry(group, inode);
+	spin_unlock(&inode->i_lock);
+	if (!entry)
+		return false;
+
+	send = (entry->mask & mask);
+
+	/* find took a reference */
+	fsnotify_put_mark(entry);
+
+	return send;
+}
+
+static int idr_callback(int id, void *p, void *data)
+{
+	BUG();
+	return 0;
+}
+
+static void inotify_free_group_priv(struct fsnotify_group *group)
+{
+	/* ideally the idr is empty and we won't hit the BUG in the callback */
+	idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
+	idr_remove_all(&group->inotify_data.idr);
+	idr_destroy(&group->inotify_data.idr);
+}
+
+void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
+{
+	struct inotify_event_private_data *event_priv;
+
+	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
+				  fsnotify_event_priv_data);
+
+	kmem_cache_free(event_priv_cachep, event_priv);
+}
+
+const struct fsnotify_ops inotify_fsnotify_ops = {
+	.handle_event = inotify_handle_event,
+	.should_send_event = inotify_should_send_event,
+	.free_group_priv = inotify_free_group_priv,
+	.free_event_priv = inotify_free_event_priv,
+	.freeing_mark = inotify_freeing_mark,
+};
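Aside: the idr torn down in inotify_free_group_priv() above is populated with the legacy pre-allocation idr API (idr_pre_get + idr_get_new_above). A hedged sketch of that allocation pattern; example_idr_add is hypothetical, the real call site is inotify_update_watch() in inotify_user.c below:

/* Sketch of the legacy idr allocation pattern this file tears down. */
#include <linux/idr.h>
#include <linux/spinlock.h>

static int example_idr_add(struct idr *idr, spinlock_t *lock, void *ptr, int *id)
{
	int ret;
retry:
	/* preallocate outside the lock; returns 0 on allocation failure */
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(lock);
	ret = idr_get_new_above(idr, ptr, 1, id);
	spin_unlock(lock);
	if (ret == -EAGAIN)	/* preallocation consumed by a racing caller */
		goto retry;
	return ret;
}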
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 1634319e2404..982a412ac5bc 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -8,6 +8,9 @@
  * Copyright (C) 2005 John McCutchan
  * Copyright 2006 Hewlett-Packard Development Company, L.P.
  *
+ * Copyright (C) 2009 Eric Paris <Red Hat Inc>
+ * inotify was largely rewritten to make use of the fsnotify infrastructure
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
  * Free Software Foundation; either version 2, or (at your option) any
@@ -19,94 +22,48 @@
  * General Public License for more details.
  */
 
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
 #include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/list.h>
+#include <linux/fs.h> /* struct inode */
+#include <linux/fsnotify_backend.h>
+#include <linux/idr.h>
+#include <linux/init.h> /* module_init */
 #include <linux/inotify.h>
+#include <linux/kernel.h> /* roundup() */
+#include <linux/magic.h> /* superblock magic number */
+#include <linux/mount.h> /* mntget */
+#include <linux/namei.h> /* LOOKUP_FOLLOW */
+#include <linux/path.h> /* struct path */
+#include <linux/sched.h> /* struct user */
+#include <linux/slab.h> /* struct kmem_cache */
 #include <linux/syscalls.h>
-#include <linux/magic.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
 
-#include <asm/ioctls.h>
+#include "inotify.h"
 
-static struct kmem_cache *watch_cachep __read_mostly;
-static struct kmem_cache *event_cachep __read_mostly;
+#include <asm/ioctls.h>
 
 static struct vfsmount *inotify_mnt __read_mostly;
 
+/* this just sits here and wastes global memory.  used to just pad userspace messages with zeros */
+static struct inotify_event nul_inotify_event;
+
 /* these are configurable via /proc/sys/fs/inotify/ */
 static int inotify_max_user_instances __read_mostly;
-static int inotify_max_user_watches __read_mostly;
 static int inotify_max_queued_events __read_mostly;
+int inotify_max_user_watches __read_mostly;
 
-/*
- * Lock ordering:
- *
- * inotify_dev->up_mutex (ensures we don't re-add the same watch)
- * 	inode->inotify_mutex (protects inode's watch list)
- * 		inotify_handle->mutex (protects inotify_handle's watch list)
- * 			inotify_dev->ev_mutex (protects device's event queue)
- */
+static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
+struct kmem_cache *event_priv_cachep __read_mostly;
+static struct fsnotify_event *inotify_ignored_event;
 
 /*
- * Lifetimes of the main data structures:
- *
- * inotify_device: Lifetime is managed by reference count, from
- * sys_inotify_init() until release.  Additional references can bump the count
- * via get_inotify_dev() and drop the count via put_inotify_dev().
- *
- * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
- * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
- * first event, or to inotify_destroy().
+ * When inotify registers a new group it increments this and uses that
+ * value as an offset to set the fsnotify group "name" and priority.
  */
-
-/*
- * struct inotify_device - represents an inotify instance
- *
- * This structure is protected by the mutex 'mutex'.
- */
-struct inotify_device {
-	wait_queue_head_t	wq;		/* wait queue for i/o */
-	struct mutex		ev_mutex;	/* protects event queue */
-	struct mutex		up_mutex;	/* synchronizes watch updates */
-	struct list_head	events;		/* list of queued events */
-	struct user_struct	*user;		/* user who opened this dev */
-	struct inotify_handle	*ih;		/* inotify handle */
-	struct fasync_struct	*fa;		/* async notification */
-	atomic_t		count;		/* reference count */
-	unsigned int		queue_size;	/* size of the queue (bytes) */
-	unsigned int		event_count;	/* number of pending events */
-	unsigned int		max_events;	/* maximum number of events */
-};
-
-/*
- * struct inotify_kernel_event - An inotify event, originating from a watch and
- * queued for user-space.  A list of these is attached to each instance of the
- * device.  In read(), this list is walked and all events that can fit in the
- * buffer are returned.
- *
- * Protected by dev->ev_mutex of the device in which we are queued.
- */
-struct inotify_kernel_event {
-	struct inotify_event	event;	/* the user-space event */
-	struct list_head	list;	/* entry in inotify_device's list */
-	char			*name;	/* filename, if any */
-};
-
-/*
- * struct inotify_user_watch - our version of an inotify_watch, we add
- * a reference to the associated inotify_device.
- */
-struct inotify_user_watch {
-	struct inotify_device	*dev;	/* associated device */
-	struct inotify_watch	wdata;	/* inotify watch data */
-};
+static atomic_t inotify_grp_num;
 
 #ifdef CONFIG_SYSCTL
 
@@ -149,280 +106,36 @@ ctl_table inotify_table[] = {
 };
 #endif /* CONFIG_SYSCTL */
 
-static inline void get_inotify_dev(struct inotify_device *dev)
-{
-	atomic_inc(&dev->count);
-}
-
-static inline void put_inotify_dev(struct inotify_device *dev)
-{
-	if (atomic_dec_and_test(&dev->count)) {
-		atomic_dec(&dev->user->inotify_devs);
-		free_uid(dev->user);
-		kfree(dev);
-	}
-}
-
-/*
- * free_inotify_user_watch - cleans up the watch and its references
- */
-static void free_inotify_user_watch(struct inotify_watch *w)
-{
-	struct inotify_user_watch *watch;
-	struct inotify_device *dev;
-
-	watch = container_of(w, struct inotify_user_watch, wdata);
-	dev = watch->dev;
-
-	atomic_dec(&dev->user->inotify_watches);
-	put_inotify_dev(dev);
-	kmem_cache_free(watch_cachep, watch);
-}
-
-/*
- * kernel_event - create a new kernel event with the given parameters
- *
- * This function can sleep.
- */
-static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
-						  const char *name)
-{
-	struct inotify_kernel_event *kevent;
-
-	kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
-	if (unlikely(!kevent))
-		return NULL;
-
-	/* we hand this out to user-space, so zero it just in case */
-	memset(&kevent->event, 0, sizeof(struct inotify_event));
-
-	kevent->event.wd = wd;
-	kevent->event.mask = mask;
-	kevent->event.cookie = cookie;
-
-	INIT_LIST_HEAD(&kevent->list);
-
-	if (name) {
-		size_t len, rem, event_size = sizeof(struct inotify_event);
-
-		/*
-		 * We need to pad the filename so as to properly align an
-		 * array of inotify_event structures.  Because the structure is
-		 * small and the common case is a small filename, we just round
-		 * up to the next multiple of the structure's sizeof.  This is
-		 * simple and safe for all architectures.
-		 */
-		len = strlen(name) + 1;
-		rem = event_size - len;
-		if (len > event_size) {
-			rem = event_size - (len % event_size);
-			if (len % event_size == 0)
-				rem = 0;
-		}
-
-		kevent->name = kmalloc(len + rem, GFP_NOFS);
-		if (unlikely(!kevent->name)) {
-			kmem_cache_free(event_cachep, kevent);
-			return NULL;
-		}
-		memcpy(kevent->name, name, len);
-		if (rem)
-			memset(kevent->name + len, 0, rem);
-		kevent->event.len = len + rem;
-	} else {
-		kevent->event.len = 0;
-		kevent->name = NULL;
-	}
-
-	return kevent;
-}
-
-/*
- * inotify_dev_get_event - return the next event in the given dev's queue
- *
- * Caller must hold dev->ev_mutex.
- */
-static inline struct inotify_kernel_event *
-inotify_dev_get_event(struct inotify_device *dev)
-{
-	return list_entry(dev->events.next, struct inotify_kernel_event, list);
-}
-
-/*
- * inotify_dev_get_last_event - return the last event in the given dev's queue
- *
- * Caller must hold dev->ev_mutex.
- */
-static inline struct inotify_kernel_event *
-inotify_dev_get_last_event(struct inotify_device *dev)
-{
-	if (list_empty(&dev->events))
-		return NULL;
-	return list_entry(dev->events.prev, struct inotify_kernel_event, list);
-}
-
-/*
- * inotify_dev_queue_event - event handler registered with core inotify, adds
- * a new event to the given device
- *
- * Can sleep (calls kernel_event()).
- */
-static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
-				    u32 cookie, const char *name,
-				    struct inode *ignored)
-{
-	struct inotify_user_watch *watch;
-	struct inotify_device *dev;
-	struct inotify_kernel_event *kevent, *last;
-
-	watch = container_of(w, struct inotify_user_watch, wdata);
-	dev = watch->dev;
-
-	mutex_lock(&dev->ev_mutex);
-
-	/* we can safely put the watch as we don't reference it while
-	 * generating the event
-	 */
-	if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
-		put_inotify_watch(w); /* final put */
-
-	/* coalescing: drop this event if it is a dupe of the previous */
-	last = inotify_dev_get_last_event(dev);
-	if (last && last->event.mask == mask && last->event.wd == wd &&
-			last->event.cookie == cookie) {
-		const char *lastname = last->name;
-
-		if (!name && !lastname)
-			goto out;
-		if (name && lastname && !strcmp(lastname, name))
-			goto out;
-	}
-
-	/* the queue overflowed and we already sent the Q_OVERFLOW event */
-	if (unlikely(dev->event_count > dev->max_events))
-		goto out;
-
-	/* if the queue overflows, we need to notify user space */
-	if (unlikely(dev->event_count == dev->max_events))
-		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
-	else
-		kevent = kernel_event(wd, mask, cookie, name);
-
-	if (unlikely(!kevent))
-		goto out;
-
-	/* queue the event and wake up anyone waiting */
-	dev->event_count++;
-	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
-	list_add_tail(&kevent->list, &dev->events);
-	wake_up_interruptible(&dev->wq);
-	kill_fasync(&dev->fa, SIGIO, POLL_IN);
-
-out:
-	mutex_unlock(&dev->ev_mutex);
-}
-
-/*
- * remove_kevent - cleans up the given kevent
- *
- * Caller must hold dev->ev_mutex.
- */
-static void remove_kevent(struct inotify_device *dev,
-			  struct inotify_kernel_event *kevent)
-{
-	list_del(&kevent->list);
-
-	dev->event_count--;
-	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
-}
-
-/*
- * free_kevent - frees the given kevent.
- */
-static void free_kevent(struct inotify_kernel_event *kevent)
-{
-	kfree(kevent->name);
-	kmem_cache_free(event_cachep, kevent);
-}
-
-/*
- * inotify_dev_event_dequeue - destroy an event on the given device
- *
- * Caller must hold dev->ev_mutex.
- */
-static void inotify_dev_event_dequeue(struct inotify_device *dev)
-{
-	if (!list_empty(&dev->events)) {
-		struct inotify_kernel_event *kevent;
-		kevent = inotify_dev_get_event(dev);
-		remove_kevent(dev, kevent);
-		free_kevent(kevent);
-	}
-}
-
-/*
- * find_inode - resolve a user-given path to a specific inode
- */
-static int find_inode(const char __user *dirname, struct path *path,
-		      unsigned flags)
-{
-	int error;
-
-	error = user_path_at(AT_FDCWD, dirname, flags, path);
-	if (error)
-		return error;
-	/* you can only watch an inode if you have read permissions on it */
-	error = inode_permission(path->dentry->d_inode, MAY_READ);
-	if (error)
-		path_put(path);
-	return error;
-}
-
-/*
- * create_watch - creates a watch on the given device.
- *
- * Callers must hold dev->up_mutex.
- */
-static int create_watch(struct inotify_device *dev, struct inode *inode,
-			u32 mask)
-{
-	struct inotify_user_watch *watch;
-	int ret;
-
-	if (atomic_read(&dev->user->inotify_watches) >=
-			inotify_max_user_watches)
-		return -ENOSPC;
-
-	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
-	if (unlikely(!watch))
-		return -ENOMEM;
-
-	/* save a reference to device and bump the count to make it official */
-	get_inotify_dev(dev);
-	watch->dev = dev;
-
-	atomic_inc(&dev->user->inotify_watches);
-
-	inotify_init_watch(&watch->wdata);
-	ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
-	if (ret < 0)
-		free_inotify_user_watch(&watch->wdata);
-
-	return ret;
-}
-
-/* Device Interface */
-
+static inline __u32 inotify_arg_to_mask(u32 arg)
+{
+	__u32 mask;
+
+	/* everything should accept their own ignored and cares about children */
+	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
+
+	/* mask off the flags used to open the fd */
+	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
+
+	return mask;
+}
+
+static inline u32 inotify_mask_to_arg(__u32 mask)
+{
+	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
+		       IN_Q_OVERFLOW);
+}
+
+/* inotify userspace file descriptor functions */
 static unsigned int inotify_poll(struct file *file, poll_table *wait)
 {
-	struct inotify_device *dev = file->private_data;
+	struct fsnotify_group *group = file->private_data;
 	int ret = 0;
 
-	poll_wait(file, &dev->wq, wait);
-	mutex_lock(&dev->ev_mutex);
-	if (!list_empty(&dev->events))
+	poll_wait(file, &group->notification_waitq, wait);
+	mutex_lock(&group->notification_mutex);
+	if (!fsnotify_notify_queue_is_empty(group))
 		ret = POLLIN | POLLRDNORM;
-	mutex_unlock(&dev->ev_mutex);
+	mutex_unlock(&group->notification_mutex);
 
 	return ret;
 }
@@ -432,26 +145,29 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
  * enough to fit in "count". Return an error pointer if
  * not large enough.
  *
- * Called with the device ev_mutex held.
+ * Called with the group->notification_mutex held.
  */
-static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
-						  size_t count)
+static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
+					    size_t count)
 {
 	size_t event_size = sizeof(struct inotify_event);
-	struct inotify_kernel_event *kevent;
+	struct fsnotify_event *event;
 
-	if (list_empty(&dev->events))
+	if (fsnotify_notify_queue_is_empty(group))
 		return NULL;
 
-	kevent = inotify_dev_get_event(dev);
-	if (kevent->name)
-		event_size += kevent->event.len;
+	event = fsnotify_peek_notify_event(group);
+
+	event_size += roundup(event->name_len, event_size);
 
 	if (event_size > count)
 		return ERR_PTR(-EINVAL);
 
-	remove_kevent(dev, kevent);
-	return kevent;
+	/* held the notification_mutex the whole time, so this is the
+	 * same event we peeked above */
+	fsnotify_remove_notify_event(group);
+
+	return event;
 }
 
 /*
@@ -460,51 +176,90 @@ static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
  * We already checked that the event size is smaller than the
  * buffer we had in "get_one_event()" above.
  */
-static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent,
+static ssize_t copy_event_to_user(struct fsnotify_group *group,
+				  struct fsnotify_event *event,
 				  char __user *buf)
 {
+	struct inotify_event inotify_event;
+	struct fsnotify_event_private_data *fsn_priv;
+	struct inotify_event_private_data *priv;
 	size_t event_size = sizeof(struct inotify_event);
+	size_t name_len;
+
+	/* we get the inotify watch descriptor from the event private data */
+	spin_lock(&event->lock);
+	fsn_priv = fsnotify_remove_priv_from_event(group, event);
+	spin_unlock(&event->lock);
+
+	if (!fsn_priv)
+		inotify_event.wd = -1;
+	else {
+		priv = container_of(fsn_priv, struct inotify_event_private_data,
+				    fsnotify_event_priv_data);
+		inotify_event.wd = priv->wd;
+		inotify_free_event_priv(fsn_priv);
+	}
+
+	/* round up event->name_len so it is a multiple of event_size */
+	name_len = roundup(event->name_len, event_size);
+	inotify_event.len = name_len;
+
+	inotify_event.mask = inotify_mask_to_arg(event->mask);
+	inotify_event.cookie = event->sync_cookie;
 
-	if (copy_to_user(buf, &kevent->event, event_size))
+	/* send the main event */
+	if (copy_to_user(buf, &inotify_event, event_size))
 		return -EFAULT;
 
-	if (kevent->name) {
-		buf += event_size;
+	buf += event_size;
 
-		if (copy_to_user(buf, kevent->name, kevent->event.len))
+	/*
+	 * fsnotify only stores the pathname, so here we have to send the pathname
+	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
+	 * with zeros.  I get my zeros from the nul_inotify_event.
+	 */
+	if (name_len) {
+		unsigned int len_to_zero = name_len - event->name_len;
+		/* copy the path name */
+		if (copy_to_user(buf, event->file_name, event->name_len))
 			return -EFAULT;
+		buf += event->name_len;
 
-		event_size += kevent->event.len;
+		/* fill userspace with 0's from nul_inotify_event */
+		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
+			return -EFAULT;
+		buf += len_to_zero;
+		event_size += name_len;
 	}
+
 	return event_size;
 }
 
 static ssize_t inotify_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *pos)
 {
-	struct inotify_device *dev;
+	struct fsnotify_group *group;
+	struct fsnotify_event *kevent;
 	char __user *start;
 	int ret;
 	DEFINE_WAIT(wait);
 
 	start = buf;
-	dev = file->private_data;
+	group = file->private_data;
 
 	while (1) {
-		struct inotify_kernel_event *kevent;
+		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
 
-		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
-
-		mutex_lock(&dev->ev_mutex);
-		kevent = get_one_event(dev, count);
-		mutex_unlock(&dev->ev_mutex);
+		mutex_lock(&group->notification_mutex);
+		kevent = get_one_event(group, count);
+		mutex_unlock(&group->notification_mutex);
 
 		if (kevent) {
 			ret = PTR_ERR(kevent);
 			if (IS_ERR(kevent))
 				break;
-			ret = copy_event_to_user(kevent, buf);
-			free_kevent(kevent);
+			ret = copy_event_to_user(group, kevent, buf);
+			fsnotify_put_event(kevent);
 			if (ret < 0)
 				break;
 			buf += ret;
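Aside: a small worked example of the roundup() padding used in copy_event_to_user() above, assuming sizeof(struct inotify_event) == 16 on the target ABI (illustrative, compiles standalone):

/* Worked example of the name padding arithmetic. */
#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int event_size = 16;	/* assumed sizeof(struct inotify_event) */
	unsigned int name_len = 3;	/* e.g. the name "foo" */
	unsigned int len = roundup(name_len, event_size);

	/* userspace sees len = 16: "foo" followed by 13 NUL bytes, keeping
	 * the next struct inotify_event in the read() buffer aligned */
	printf("padded len = %u, bytes on the wire = %u\n", len, event_size + len);
	return 0;
}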
@@ -525,7 +280,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
 		schedule();
 	}
 
-	finish_wait(&dev->wq, &wait);
+	finish_wait(&group->notification_waitq, &wait);
 	if (start != buf && ret != -EFAULT)
 		ret = buf - start;
 	return ret;
@@ -533,25 +288,19 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
 
 static int inotify_fasync(int fd, struct file *file, int on)
 {
-	struct inotify_device *dev = file->private_data;
+	struct fsnotify_group *group = file->private_data;
 
-	return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
+	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
 }
 
 static int inotify_release(struct inode *ignored, struct file *file)
 {
-	struct inotify_device *dev = file->private_data;
-
-	inotify_destroy(dev->ih);
+	struct fsnotify_group *group = file->private_data;
 
-	/* destroy all of the events on this device */
-	mutex_lock(&dev->ev_mutex);
-	while (!list_empty(&dev->events))
-		inotify_dev_event_dequeue(dev);
-	mutex_unlock(&dev->ev_mutex);
+	fsnotify_clear_marks_by_group(group);
 
-	/* free this device: the put matching the get in inotify_init() */
-	put_inotify_dev(dev);
+	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
+	fsnotify_put_group(group);
 
 	return 0;
 }
@@ -559,16 +308,27 @@ static int inotify_release(struct inode *ignored, struct file *file)
 static long inotify_ioctl(struct file *file, unsigned int cmd,
 			  unsigned long arg)
 {
-	struct inotify_device *dev;
+	struct fsnotify_group *group;
+	struct fsnotify_event_holder *holder;
+	struct fsnotify_event *event;
 	void __user *p;
 	int ret = -ENOTTY;
+	size_t send_len = 0;
 
-	dev = file->private_data;
+	group = file->private_data;
 	p = (void __user *) arg;
 
 	switch (cmd) {
 	case FIONREAD:
-		ret = put_user(dev->queue_size, (int __user *) p);
+		mutex_lock(&group->notification_mutex);
+		list_for_each_entry(holder, &group->notification_list, event_list) {
+			event = holder->event;
+			send_len += sizeof(struct inotify_event);
+			send_len += roundup(event->name_len,
+					    sizeof(struct inotify_event));
+		}
+		mutex_unlock(&group->notification_mutex);
+		ret = put_user(send_len, (int __user *) p);
 		break;
 	}
 
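Aside: the FIONREAD accounting above is what lets userspace size a single read() to drain the whole queue. A hedged sketch of such a consumer; read_all_events is hypothetical, not part of the patch:

/* Size a buffer from FIONREAD, then drain the inotify queue in one read(). */
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>

static char *read_all_events(int inotify_fd, ssize_t *out_len)
{
	int avail = 0;
	char *buf;

	if (ioctl(inotify_fd, FIONREAD, &avail) < 0 || avail <= 0)
		return NULL;
	buf = malloc(avail);
	if (!buf)
		return NULL;
	/* events queued after the ioctl simply wait for the next read() */
	*out_len = read(inotify_fd, buf, avail);
	return buf;
}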
@@ -576,23 +336,233 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
 }
 
 static const struct file_operations inotify_fops = {
 	.poll		= inotify_poll,
 	.read		= inotify_read,
 	.fasync		= inotify_fasync,
 	.release	= inotify_release,
 	.unlocked_ioctl	= inotify_ioctl,
 	.compat_ioctl	= inotify_ioctl,
 };
 
-static const struct inotify_operations inotify_user_ops = {
-	.handle_event	= inotify_dev_queue_event,
-	.destroy_watch	= free_inotify_user_watch,
-};
 
+/*
+ * find_inode - resolve a user-given path to a specific inode
+ */
+static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
+{
+	int error;
+
+	error = user_path_at(AT_FDCWD, dirname, flags, path);
+	if (error)
+		return error;
+	/* you can only watch an inode if you have read permissions on it */
+	error = inode_permission(path->dentry->d_inode, MAY_READ);
+	if (error)
+		path_put(path);
+	return error;
+}
+
+/*
+ * When, for whatever reason, inotify is done with a mark (or what used to be a
+ * watch) we need to remove that watch from the idr and we need to send IN_IGNORED
+ * for the given wd.
+ *
+ * There is a bit of recursion here.  The loop looks like:
+ *	inotify_destroy_mark_entry -> fsnotify_destroy_mark_by_entry ->
+ *	inotify_freeing_mark -> inotify_destroy_mark_entry -> restart
+ * But the loop is broken in 2 places.  fsnotify_destroy_mark_by_entry sets
+ * entry->group = NULL before the call to inotify_freeing_mark, so the if (egroup)
+ * test below will not call back to fsnotify again.  But even if that test wasn't
+ * there this would still be safe since fsnotify_destroy_mark_by_entry() is
+ * safe from recursion.
+ */
+void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+{
+	struct inotify_inode_mark_entry *ientry;
+	struct inotify_event_private_data *event_priv;
+	struct fsnotify_event_private_data *fsn_event_priv;
+	struct fsnotify_group *egroup;
+	struct idr *idr;
+
+	spin_lock(&entry->lock);
+	egroup = entry->group;
+
+	/* if egroup we aren't really done and something might still send events
+	 * for this inode, on the callback we'll send the IN_IGNORED */
+	if (egroup) {
+		spin_unlock(&entry->lock);
+		fsnotify_destroy_mark_by_entry(entry);
+		return;
+	}
+	spin_unlock(&entry->lock);
+
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+
+	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
+	if (unlikely(!event_priv))
+		goto skip_send_ignore;
+
+	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
+
+	fsn_event_priv->group = group;
+	event_priv->wd = ientry->wd;
+
+	fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
+
+	/* did the private data get added? */
+	if (list_empty(&fsn_event_priv->event_list))
+		inotify_free_event_priv(fsn_event_priv);
+
+skip_send_ignore:
+
+	/* remove this entry from the idr */
+	spin_lock(&group->inotify_data.idr_lock);
+	idr = &group->inotify_data.idr;
+	idr_remove(idr, ientry->wd);
+	spin_unlock(&group->inotify_data.idr_lock);
+
+	/* removed from idr, drop that reference */
+	fsnotify_put_mark(entry);
+}
+
+/* ding dong the mark is dead */
+static void inotify_free_mark(struct fsnotify_mark_entry *entry)
+{
+	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;
+
+	kmem_cache_free(inotify_inode_mark_cachep, ientry);
+}
+
+static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+{
+	struct fsnotify_mark_entry *entry = NULL;
+	struct inotify_inode_mark_entry *ientry;
+	int ret = 0;
+	int add = (arg & IN_MASK_ADD);
+	__u32 mask;
+	__u32 old_mask, new_mask;
+
+	/* don't allow invalid bits: we don't want flags set */
+	mask = inotify_arg_to_mask(arg);
+	if (unlikely(!mask))
+		return -EINVAL;
+
+	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!ientry))
+		return -ENOMEM;
+	/* we set the mask at the end after attaching it */
+	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
+	ientry->wd = 0;
+
+find_entry:
+	spin_lock(&inode->i_lock);
+	entry = fsnotify_find_mark_entry(group, inode);
+	spin_unlock(&inode->i_lock);
+	if (entry) {
+		kmem_cache_free(inotify_inode_mark_cachep, ientry);
+		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	} else {
+		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
+			ret = -ENOSPC;
+			goto out_err;
+		}
+
+		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
+		if (ret == -EEXIST)
+			goto find_entry;
+		else if (ret)
+			goto out_err;
+
+		entry = &ientry->fsn_entry;
+retry:
+		ret = -ENOMEM;
+		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
+			goto out_err;
+
+		spin_lock(&group->inotify_data.idr_lock);
+		/* if entry is added to the idr we keep the reference obtained
+		 * through fsnotify_add_mark().  remember to drop this reference
+		 * when entry is removed from idr */
+		ret = idr_get_new_above(&group->inotify_data.idr, entry,
+					++group->inotify_data.last_wd,
+					&ientry->wd);
+		spin_unlock(&group->inotify_data.idr_lock);
+		if (ret) {
+			if (ret == -EAGAIN)
+				goto retry;
+			goto out_err;
+		}
+		atomic_inc(&group->inotify_data.user->inotify_watches);
+	}
+
+	spin_lock(&entry->lock);
+
+	old_mask = entry->mask;
+	if (add) {
+		entry->mask |= mask;
+		new_mask = entry->mask;
+	} else {
+		entry->mask = mask;
+		new_mask = entry->mask;
+	}
+
+	spin_unlock(&entry->lock);
+
+	if (old_mask != new_mask) {
+		/* more bits in old than in new? */
+		int dropped = (old_mask & ~new_mask);
+		/* more bits in this entry than the inode's mask? */
+		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
+		/* more bits in this entry than the group? */
+		int do_group = (new_mask & ~group->mask);
+
+		/* update the inode with this new entry */
+		if (dropped || do_inode)
+			fsnotify_recalc_inode_mask(inode);
+
+		/* update the group mask with the new mask */
+		if (dropped || do_group)
+			fsnotify_recalc_group_mask(group);
+	}
+
+	return ientry->wd;
+
+out_err:
+	/* this isn't supposed to happen, just kill the watch */
+	if (entry) {
+		fsnotify_destroy_mark_by_entry(entry);
+		fsnotify_put_mark(entry);
+	}
+	return ret;
+}
+
+static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
+{
+	struct fsnotify_group *group;
+	unsigned int grp_num;
+
+	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
+	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
+	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
+	if (IS_ERR(group))
+		return group;
+
+	group->max_events = max_events;
+
+	spin_lock_init(&group->inotify_data.idr_lock);
+	idr_init(&group->inotify_data.idr);
+	group->inotify_data.last_wd = 0;
+	group->inotify_data.user = user;
+	group->inotify_data.fa = NULL;
+
+	return group;
+}
+
+
+/* inotify syscalls */
 SYSCALL_DEFINE1(inotify_init1, int, flags)
 {
-	struct inotify_device *dev;
-	struct inotify_handle *ih;
+	struct fsnotify_group *group;
 	struct user_struct *user;
 	struct file *filp;
 	int fd, ret;
@@ -621,45 +591,27 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
 		goto out_free_uid;
 	}
 
-	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
-	if (unlikely(!dev)) {
-		ret = -ENOMEM;
+	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
+	group = inotify_new_group(user, inotify_max_queued_events);
+	if (IS_ERR(group)) {
+		ret = PTR_ERR(group);
 		goto out_free_uid;
 	}
 
-	ih = inotify_init(&inotify_user_ops);
-	if (IS_ERR(ih)) {
-		ret = PTR_ERR(ih);
-		goto out_free_dev;
-	}
-	dev->ih = ih;
-	dev->fa = NULL;
-
 	filp->f_op = &inotify_fops;
 	filp->f_path.mnt = mntget(inotify_mnt);
 	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
 	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
 	filp->f_mode = FMODE_READ;
 	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
-	filp->private_data = dev;
+	filp->private_data = group;
 
-	INIT_LIST_HEAD(&dev->events);
-	init_waitqueue_head(&dev->wq);
-	mutex_init(&dev->ev_mutex);
-	mutex_init(&dev->up_mutex);
-	dev->event_count = 0;
-	dev->queue_size = 0;
-	dev->max_events = inotify_max_queued_events;
-	dev->user = user;
-	atomic_set(&dev->count, 0);
-
-	get_inotify_dev(dev);
 	atomic_inc(&user->inotify_devs);
+
 	fd_install(fd, filp);
 
 	return fd;
-out_free_dev:
-	kfree(dev);
+
 out_free_uid:
 	free_uid(user);
 	put_filp(filp);
@@ -676,8 +628,8 @@ SYSCALL_DEFINE0(inotify_init)
 SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
 		u32, mask)
 {
+	struct fsnotify_group *group;
 	struct inode *inode;
-	struct inotify_device *dev;
 	struct path path;
 	struct file *filp;
 	int ret, fput_needed;
@@ -698,20 +650,20 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
 	if (mask & IN_ONLYDIR)
 		flags |= LOOKUP_DIRECTORY;
 
-	ret = find_inode(pathname, &path, flags);
-	if (unlikely(ret))
+	ret = inotify_find_inode(pathname, &path, flags);
+	if (ret)
 		goto fput_and_out;
 
-	/* inode held in place by reference to path; dev by fget on fd */
+	/* inode held in place by reference to path; group by fget on fd */
 	inode = path.dentry->d_inode;
-	dev = filp->private_data;
+	group = filp->private_data;
 
-	mutex_lock(&dev->up_mutex);
-	ret = inotify_find_update_watch(dev->ih, inode, mask);
-	if (ret == -ENOENT)
-		ret = create_watch(dev, inode, mask);
-	mutex_unlock(&dev->up_mutex);
+	/* create/update an inode mark */
+	ret = inotify_update_watch(group, inode, mask);
+	if (unlikely(ret))
+		goto path_put_and_out;
 
+path_put_and_out:
 	path_put(&path);
 fput_and_out:
 	fput_light(filp, fput_needed);
@@ -720,9 +672,10 @@ fput_and_out:
 
 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 {
+	struct fsnotify_group *group;
+	struct fsnotify_mark_entry *entry;
 	struct file *filp;
-	struct inotify_device *dev;
-	int ret, fput_needed;
+	int ret = 0, fput_needed;
 
 	filp = fget_light(fd, &fput_needed);
 	if (unlikely(!filp))
@@ -734,10 +687,20 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 		goto out;
 	}
 
-	dev = filp->private_data;
+	group = filp->private_data;
 
-	/* we free our watch data when we get IN_IGNORED */
-	ret = inotify_rm_wd(dev->ih, wd);
+	spin_lock(&group->inotify_data.idr_lock);
+	entry = idr_find(&group->inotify_data.idr, wd);
+	if (unlikely(!entry)) {
+		spin_unlock(&group->inotify_data.idr_lock);
+		ret = -EINVAL;
+		goto out;
+	}
+	fsnotify_get_mark(entry);
+	spin_unlock(&group->inotify_data.idr_lock);
+
+	inotify_destroy_mark_entry(entry, group);
+	fsnotify_put_mark(entry);
 
 out:
 	fput_light(filp, fput_needed);
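Aside: the userspace-visible contract implemented above is that an unknown wd yields EINVAL, while a valid wd has its mark destroyed and an IN_IGNORED event queued. A sketch of that contract from the consumer side (example_rm_watch is hypothetical, not part of the patch):

/* Remove a watch and scan the event stream for its IN_IGNORED. */
#include <unistd.h>
#include <sys/inotify.h>

static void example_rm_watch(int fd, int wd)
{
	char buf[4096];
	ssize_t len;
	char *p;

	inotify_rm_watch(fd, wd);	/* fails with EINVAL if wd is not in the idr */

	/* an IN_IGNORED event for this wd is queued so readers learn the
	 * watch is dead; earlier pending events may precede it */
	len = read(fd, buf, sizeof(buf));
	if (len <= 0)
		return;
	for (p = buf; p < buf + len; ) {
		struct inotify_event *ev = (struct inotify_event *)p;
		if (ev->wd == wd && (ev->mask & IN_IGNORED))
			break;		/* found the removal notification */
		p += sizeof(*ev) + ev->len;
	}
}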
@@ -753,9 +716,9 @@ inotify_get_sb(struct file_system_type *fs_type, int flags,
 }
 
 static struct file_system_type inotify_fs_type = {
-    .name	= "inotifyfs",
-    .get_sb	= inotify_get_sb,
-    .kill_sb	= kill_anon_super,
+	.name		= "inotifyfs",
+	.get_sb		= inotify_get_sb,
+	.kill_sb	= kill_anon_super,
 };
 
 /*
@@ -775,18 +738,16 @@ static int __init inotify_user_setup(void)
 	if (IS_ERR(inotify_mnt))
 		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
 
+	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
+	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
+	inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
+	if (!inotify_ignored_event)
+		panic("unable to allocate the inotify ignored event\n");
+
 	inotify_max_queued_events = 16384;
 	inotify_max_user_instances = 128;
 	inotify_max_user_watches = 8192;
 
-	watch_cachep = kmem_cache_create("inotify_watch_cache",
-					 sizeof(struct inotify_user_watch),
-					 0, SLAB_PANIC, NULL);
-	event_cachep = kmem_cache_create("inotify_event_cache",
-					 sizeof(struct inotify_kernel_event),
-					 0, SLAB_PANIC, NULL);
-
 	return 0;
 }
 
 module_init(inotify_user_setup);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index d2c0ee30e618..44848aa830dc 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -9,6 +9,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/idr.h> /* inotify uses this */
 #include <linux/fs.h> /* struct inode */
 #include <linux/list.h>
 #include <linux/path.h> /* struct path */
@@ -59,6 +60,7 @@
 
 /* listeners that hard code group numbers near the top */
 #define DNOTIFY_GROUP_NUM	UINT_MAX
+#define INOTIFY_GROUP_NUM	(DNOTIFY_GROUP_NUM-1)
 
 struct fsnotify_group;
 struct fsnotify_event;
@@ -141,6 +143,15 @@ struct fsnotify_group {
 	/* groups can define private fields here or use the void *private */
 	union {
 		void *private;
+#ifdef CONFIG_INOTIFY_USER
+		struct inotify_group_private_data {
+			spinlock_t	idr_lock;
+			struct idr	idr;
+			u32		last_wd;
+			struct fasync_struct	*fa;	/* async notification */
+			struct user_struct	*user;
+		} inotify_data;
+#endif
 	};
 };
 
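Aside: because the new member lives in an anonymous union, an inotify group's idr shares storage with the generic private pointer, so a group may use one or the other but never both. A hypothetical in-kernel snippet (example_find_wd, not in the patch) showing the intended access pattern, mirroring inotify_rm_watch() above:

/* Look up a watch descriptor in an inotify group's idr. */
static int example_find_wd(struct fsnotify_group *group, int wd)
{
	struct fsnotify_mark_entry *entry;

	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	spin_unlock(&group->inotify_data.idr_lock);

	return entry ? 0 : -EINVAL;
}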
diff --git a/init/Kconfig b/init/Kconfig
index d4e9671347ee..5de1c17c51ed 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -302,7 +302,8 @@ config AUDITSYSCALL
 
 config AUDIT_TREE
 	def_bool y
-	depends on AUDITSYSCALL && INOTIFY
+	depends on AUDITSYSCALL
+	select INOTIFY
 
 menu "RCU Subsystem"
 