Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig           24
-rw-r--r--  fs/Makefile           1
-rw-r--r--  fs/exec.c             6
-rw-r--r--  fs/inotify.c        991
-rw-r--r--  fs/inotify_user.c   719
-rw-r--r--  fs/namei.c            2
-rw-r--r--  fs/open.c             4
-rw-r--r--  fs/proc/base.c        5
-rw-r--r--  fs/xattr.c            4
9 files changed, 1037 insertions(+), 719 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index 572cc435a1bb..20f9b557732d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -393,18 +393,30 @@ config INOTIFY
 	bool "Inotify file change notification support"
 	default y
 	---help---
-	  Say Y here to enable inotify support and the associated system
-	  calls.  Inotify is a file change notification system and a
-	  replacement for dnotify.  Inotify fixes numerous shortcomings in
-	  dnotify and introduces several new features.  It allows monitoring
-	  of both files and directories via a single open fd.  Other features
-	  include multiple file events, one-shot support, and unmount
+	  Say Y here to enable inotify support.  Inotify is a file change
+	  notification system and a replacement for dnotify.  Inotify fixes
+	  numerous shortcomings in dnotify and introduces several new features
+	  including multiple file events, one-shot support, and unmount
 	  notification.
 
 	  For more information, see Documentation/filesystems/inotify.txt
 
 	  If unsure, say Y.
 
+config INOTIFY_USER
+	bool "Inotify support for userspace"
+	depends on INOTIFY
+	default y
+	---help---
+	  Say Y here to enable inotify support for userspace, including the
+	  associated system calls.  Inotify allows monitoring of both files and
+	  directories via a single open fd.  Events are read from the file
+	  descriptor, which is also select()- and poll()-able.
+
+	  For more information, see Documentation/filesystems/inotify.txt
+
+	  If unsure, say Y.
+
 config QUOTA
 	bool "Quota support"
 	help
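
The INOTIFY_USER help text above describes the userspace interface: watches are registered against an inotify file descriptor, events are then read() from it, and the descriptor can also be driven by select()/poll(). A minimal userspace sketch of that flow, assuming the <sys/inotify.h> syscall wrappers are available (they are not part of this patch):

/* Sketch only: consume events from an inotify fd as described above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	ssize_t len, i;

	int fd = inotify_init();		/* sys_inotify_init() */
	if (fd < 0)
		return 1;

	/* watch /tmp for creations and deletions; returns a wd on success */
	if (inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0)
		return 1;

	/* the fd is select()/poll()-able; a blocking read() also works */
	len = read(fd, buf, sizeof(buf));
	for (i = 0; i + (ssize_t)sizeof(struct inotify_event) <= len; ) {
		struct inotify_event *ev = (struct inotify_event *)&buf[i];

		printf("wd=%d mask=0x%x name=%s\n", ev->wd, ev->mask,
		       ev->len ? ev->name : "");
		i += sizeof(*ev) + ev->len;	/* len already includes padding */
	}

	close(fd);
	return 0;
}
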
diff --git a/fs/Makefile b/fs/Makefile
index 078d3d1191a5..d0ea6bfccf29 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -13,6 +13,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
 		ioprio.o pnode.o drop_caches.o splice.o sync.o
 
 obj-$(CONFIG_INOTIFY)		+= inotify.o
+obj-$(CONFIG_INOTIFY_USER)	+= inotify_user.o
 obj-$(CONFIG_EPOLL)		+= eventpoll.o
 obj-$(CONFIG_COMPAT)		+= compat.o compat_ioctl.o
 
diff --git a/fs/exec.c b/fs/exec.c
index 3a79d97ac234..d07858c0b7c4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -49,6 +49,7 @@
 #include <linux/rmap.h>
 #include <linux/acct.h>
 #include <linux/cn_proc.h>
+#include <linux/audit.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -1085,6 +1086,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 	/* kernel module loader fixup */
 	/* so we don't try to load run modprobe in kernel space. */
 	set_fs(USER_DS);
+
+	retval = audit_bprm(bprm);
+	if (retval)
+		return retval;
+
 	retval = -ENOENT;
 	for (try=0; try<2; try++) {
 		read_lock(&binfmt_lock);
diff --git a/fs/inotify.c b/fs/inotify.c
index 732ec4bd5774..723836a1f718 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -5,7 +5,10 @@
  *	John McCutchan	<ttb@tentacle.dhs.org>
  *	Robert Love	<rml@novell.com>
  *
+ * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
+ *
  * Copyright (C) 2005 John McCutchan
+ * Copyright 2006 Hewlett-Packard Development Company, L.P.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -20,35 +23,17 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/poll.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/writeback.h>
 #include <linux/inotify.h>
-#include <linux/syscalls.h>
-
-#include <asm/ioctls.h>
 
 static atomic_t inotify_cookie;
 
-static kmem_cache_t *watch_cachep __read_mostly;
-static kmem_cache_t *event_cachep __read_mostly;
-
-static struct vfsmount *inotify_mnt __read_mostly;
-
-/* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances __read_mostly;
-int inotify_max_user_watches __read_mostly;
-int inotify_max_queued_events __read_mostly;
-
 /*
  * Lock ordering:
  *
@@ -56,327 +41,108 @@ int inotify_max_queued_events __read_mostly;
  * iprune_mutex (synchronize shrink_icache_memory())
  * inode_lock (protects the super_block->s_inodes list)
  * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
- * inotify_dev->mutex (protects inotify_device and watches->d_list)
+ * inotify_handle->mutex (protects inotify_handle and watches->h_list)
+ *
+ * The inode->inotify_mutex and inotify_handle->mutex are held during execution
+ * of a caller's event handler.  Thus, the caller must not hold any locks
+ * taken in their event handler while calling any of the published inotify
+ * interfaces.
  */
 
 /*
- * Lifetimes of the three main data structures--inotify_device, inode, and
+ * Lifetimes of the three main data structures--inotify_handle, inode, and
  * inotify_watch--are managed by reference count.
  *
- * inotify_device: Lifetime is from inotify_init() until release.  Additional
- * references can bump the count via get_inotify_dev() and drop the count via
- * put_inotify_dev().
+ * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
+ * Additional references can bump the count via get_inotify_handle() and drop
+ * the count via put_inotify_handle().
  *
- * inotify_watch: Lifetime is from create_watch() to destroy_watch().
- * Additional references can bump the count via get_inotify_watch() and drop
- * the count via put_inotify_watch().
+ * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
+ * to remove_watch_no_event().  Additional references can bump the count via
+ * get_inotify_watch() and drop the count via put_inotify_watch().  The caller
+ * is responsible for the final put after receiving IN_IGNORED, or when using
+ * IN_ONESHOT after receiving the first event.  Inotify does the final put if
+ * inotify_destroy() is called.
  *
  * inode: Pinned so long as the inode is associated with a watch, from
- * create_watch() to put_inotify_watch().
+ * inotify_add_watch() to the final put_inotify_watch().
  */
 
 /*
- * struct inotify_device - represents an inotify instance
+ * struct inotify_handle - represents an inotify instance
  *
  * This structure is protected by the mutex 'mutex'.
  */
-struct inotify_device {
-	wait_queue_head_t	wq;		/* wait queue for i/o */
+struct inotify_handle {
 	struct idr		idr;		/* idr mapping wd -> watch */
 	struct mutex		mutex;		/* protects this bad boy */
-	struct list_head	events;		/* list of queued events */
 	struct list_head	watches;	/* list of watches */
 	atomic_t		count;		/* reference count */
-	struct user_struct	*user;		/* user who opened this dev */
-	unsigned int		queue_size;	/* size of the queue (bytes) */
-	unsigned int		event_count;	/* number of pending events */
-	unsigned int		max_events;	/* maximum number of events */
 	u32			last_wd;	/* the last wd allocated */
+	const struct inotify_operations *in_ops; /* inotify caller operations */
 };
96 84
97/* 85static inline void get_inotify_handle(struct inotify_handle *ih)
98 * struct inotify_kernel_event - An inotify event, originating from a watch and
99 * queued for user-space. A list of these is attached to each instance of the
100 * device. In read(), this list is walked and all events that can fit in the
101 * buffer are returned.
102 *
103 * Protected by dev->mutex of the device in which we are queued.
104 */
105struct inotify_kernel_event {
106 struct inotify_event event; /* the user-space event */
107 struct list_head list; /* entry in inotify_device's list */
108 char *name; /* filename, if any */
109};
110
111/*
112 * struct inotify_watch - represents a watch request on a specific inode
113 *
114 * d_list is protected by dev->mutex of the associated watch->dev.
115 * i_list and mask are protected by inode->inotify_mutex of the associated inode.
116 * dev, inode, and wd are never written to once the watch is created.
117 */
118struct inotify_watch {
119 struct list_head d_list; /* entry in inotify_device's list */
120 struct list_head i_list; /* entry in inode's list */
121 atomic_t count; /* reference count */
122 struct inotify_device *dev; /* associated device */
123 struct inode *inode; /* associated inode */
124 s32 wd; /* watch descriptor */
125 u32 mask; /* event mask for this watch */
126};
127
128#ifdef CONFIG_SYSCTL
129
130#include <linux/sysctl.h>
131
132static int zero;
133
134ctl_table inotify_table[] = {
135 {
136 .ctl_name = INOTIFY_MAX_USER_INSTANCES,
137 .procname = "max_user_instances",
138 .data = &inotify_max_user_instances,
139 .maxlen = sizeof(int),
140 .mode = 0644,
141 .proc_handler = &proc_dointvec_minmax,
142 .strategy = &sysctl_intvec,
143 .extra1 = &zero,
144 },
145 {
146 .ctl_name = INOTIFY_MAX_USER_WATCHES,
147 .procname = "max_user_watches",
148 .data = &inotify_max_user_watches,
149 .maxlen = sizeof(int),
150 .mode = 0644,
151 .proc_handler = &proc_dointvec_minmax,
152 .strategy = &sysctl_intvec,
153 .extra1 = &zero,
154 },
155 {
156 .ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
157 .procname = "max_queued_events",
158 .data = &inotify_max_queued_events,
159 .maxlen = sizeof(int),
160 .mode = 0644,
161 .proc_handler = &proc_dointvec_minmax,
162 .strategy = &sysctl_intvec,
163 .extra1 = &zero
164 },
165 { .ctl_name = 0 }
166};
167#endif /* CONFIG_SYSCTL */
168
169static inline void get_inotify_dev(struct inotify_device *dev)
170{ 86{
171 atomic_inc(&dev->count); 87 atomic_inc(&ih->count);
172} 88}
173 89
174static inline void put_inotify_dev(struct inotify_device *dev) 90static inline void put_inotify_handle(struct inotify_handle *ih)
175{ 91{
176 if (atomic_dec_and_test(&dev->count)) { 92 if (atomic_dec_and_test(&ih->count)) {
177 atomic_dec(&dev->user->inotify_devs); 93 idr_destroy(&ih->idr);
178 free_uid(dev->user); 94 kfree(ih);
179 idr_destroy(&dev->idr);
180 kfree(dev);
181 } 95 }
182} 96}
183 97
184static inline void get_inotify_watch(struct inotify_watch *watch) 98/**
99 * get_inotify_watch - grab a reference to an inotify_watch
100 * @watch: watch to grab
101 */
102void get_inotify_watch(struct inotify_watch *watch)
185{ 103{
186 atomic_inc(&watch->count); 104 atomic_inc(&watch->count);
187} 105}
106EXPORT_SYMBOL_GPL(get_inotify_watch);
188 107
189/* 108/**
190 * put_inotify_watch - decrements the ref count on a given watch. cleans up 109 * put_inotify_watch - decrements the ref count on a given watch. cleans up
191 * the watch and its references if the count reaches zero. 110 * watch references if the count reaches zero. inotify_watch is freed by
111 * inotify callers via the destroy_watch() op.
112 * @watch: watch to release
192 */ 113 */
193static inline void put_inotify_watch(struct inotify_watch *watch) 114void put_inotify_watch(struct inotify_watch *watch)
194{ 115{
195 if (atomic_dec_and_test(&watch->count)) { 116 if (atomic_dec_and_test(&watch->count)) {
196 put_inotify_dev(watch->dev); 117 struct inotify_handle *ih = watch->ih;
197 iput(watch->inode);
198 kmem_cache_free(watch_cachep, watch);
199 }
200}
201
202/*
203 * kernel_event - create a new kernel event with the given parameters
204 *
205 * This function can sleep.
206 */
207static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
208 const char *name)
209{
210 struct inotify_kernel_event *kevent;
211
212 kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
213 if (unlikely(!kevent))
214 return NULL;
215
216 /* we hand this out to user-space, so zero it just in case */
217 memset(&kevent->event, 0, sizeof(struct inotify_event));
218
219 kevent->event.wd = wd;
220 kevent->event.mask = mask;
221 kevent->event.cookie = cookie;
222
223 INIT_LIST_HEAD(&kevent->list);
224
225 if (name) {
226 size_t len, rem, event_size = sizeof(struct inotify_event);
227
228 /*
229 * We need to pad the filename so as to properly align an
230 * array of inotify_event structures. Because the structure is
231 * small and the common case is a small filename, we just round
232 * up to the next multiple of the structure's sizeof. This is
233 * simple and safe for all architectures.
234 */
235 len = strlen(name) + 1;
236 rem = event_size - len;
237 if (len > event_size) {
238 rem = event_size - (len % event_size);
239 if (len % event_size == 0)
240 rem = 0;
241 }
242
243 kevent->name = kmalloc(len + rem, GFP_KERNEL);
244 if (unlikely(!kevent->name)) {
245 kmem_cache_free(event_cachep, kevent);
246 return NULL;
247 }
248 memcpy(kevent->name, name, len);
249 if (rem)
250 memset(kevent->name + len, 0, rem);
251 kevent->event.len = len + rem;
252 } else {
253 kevent->event.len = 0;
254 kevent->name = NULL;
255 }
256
257 return kevent;
258}
259
260/*
261 * inotify_dev_get_event - return the next event in the given dev's queue
262 *
263 * Caller must hold dev->mutex.
264 */
265static inline struct inotify_kernel_event *
266inotify_dev_get_event(struct inotify_device *dev)
267{
268 return list_entry(dev->events.next, struct inotify_kernel_event, list);
269}
270
271/*
272 * inotify_dev_queue_event - add a new event to the given device
273 *
274 * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
275 */
276static void inotify_dev_queue_event(struct inotify_device *dev,
277 struct inotify_watch *watch, u32 mask,
278 u32 cookie, const char *name)
279{
280 struct inotify_kernel_event *kevent, *last;
281
282 /* coalescing: drop this event if it is a dupe of the previous */
283 last = inotify_dev_get_event(dev);
284 if (last && last->event.mask == mask && last->event.wd == watch->wd &&
285 last->event.cookie == cookie) {
286 const char *lastname = last->name;
287
288 if (!name && !lastname)
289 return;
290 if (name && lastname && !strcmp(lastname, name))
291 return;
292 }
293
294 /* the queue overflowed and we already sent the Q_OVERFLOW event */
295 if (unlikely(dev->event_count > dev->max_events))
296 return;
297
298 /* if the queue overflows, we need to notify user space */
299 if (unlikely(dev->event_count == dev->max_events))
300 kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
301 else
302 kevent = kernel_event(watch->wd, mask, cookie, name);
303
304 if (unlikely(!kevent))
305 return;
306
307 /* queue the event and wake up anyone waiting */
308 dev->event_count++;
309 dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
310 list_add_tail(&kevent->list, &dev->events);
311 wake_up_interruptible(&dev->wq);
312}
313
314/*
315 * remove_kevent - cleans up and ultimately frees the given kevent
316 *
317 * Caller must hold dev->mutex.
318 */
319static void remove_kevent(struct inotify_device *dev,
320 struct inotify_kernel_event *kevent)
321{
322 list_del(&kevent->list);
323
324 dev->event_count--;
325 dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
326
327 kfree(kevent->name);
328 kmem_cache_free(event_cachep, kevent);
329}
330 118
331/* 119 iput(watch->inode);
332 * inotify_dev_event_dequeue - destroy an event on the given device 120 ih->in_ops->destroy_watch(watch);
333 * 121 put_inotify_handle(ih);
334 * Caller must hold dev->mutex.
335 */
336static void inotify_dev_event_dequeue(struct inotify_device *dev)
337{
338 if (!list_empty(&dev->events)) {
339 struct inotify_kernel_event *kevent;
340 kevent = inotify_dev_get_event(dev);
341 remove_kevent(dev, kevent);
342 } 122 }
343} 123}
124EXPORT_SYMBOL_GPL(put_inotify_watch);
344 125
345/* 126/*
346 * inotify_dev_get_wd - returns the next WD for use by the given dev 127 * inotify_handle_get_wd - returns the next WD for use by the given handle
347 * 128 *
348 * Callers must hold dev->mutex. This function can sleep. 129 * Callers must hold ih->mutex. This function can sleep.
349 */ 130 */
350static int inotify_dev_get_wd(struct inotify_device *dev, 131static int inotify_handle_get_wd(struct inotify_handle *ih,
351 struct inotify_watch *watch) 132 struct inotify_watch *watch)
352{ 133{
353 int ret; 134 int ret;
354 135
355 do { 136 do {
356 if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL))) 137 if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
357 return -ENOSPC; 138 return -ENOSPC;
358 ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd); 139 ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
359 } while (ret == -EAGAIN); 140 } while (ret == -EAGAIN);
360 141
361 return ret; 142 if (likely(!ret))
362} 143 ih->last_wd = watch->wd;
363 144
364/* 145 return ret;
365 * find_inode - resolve a user-given path to a specific inode and return a nd
366 */
367static int find_inode(const char __user *dirname, struct nameidata *nd,
368 unsigned flags)
369{
370 int error;
371
372 error = __user_walk(dirname, flags, nd);
373 if (error)
374 return error;
375 /* you can only watch an inode if you have read permissions on it */
376 error = vfs_permission(nd, MAY_READ);
377 if (error)
378 path_release(nd);
379 return error;
380} 146}
381 147
382/* 148/*
@@ -422,67 +188,18 @@ static void set_dentry_child_flags(struct inode *inode, int watched)
422} 188}
423 189
424/* 190/*
425 * create_watch - creates a watch on the given device. 191 * inotify_find_handle - find the watch associated with the given inode and
426 * 192 * handle
427 * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
428 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
429 */
430static struct inotify_watch *create_watch(struct inotify_device *dev,
431 u32 mask, struct inode *inode)
432{
433 struct inotify_watch *watch;
434 int ret;
435
436 if (atomic_read(&dev->user->inotify_watches) >=
437 inotify_max_user_watches)
438 return ERR_PTR(-ENOSPC);
439
440 watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
441 if (unlikely(!watch))
442 return ERR_PTR(-ENOMEM);
443
444 ret = inotify_dev_get_wd(dev, watch);
445 if (unlikely(ret)) {
446 kmem_cache_free(watch_cachep, watch);
447 return ERR_PTR(ret);
448 }
449
450 dev->last_wd = watch->wd;
451 watch->mask = mask;
452 atomic_set(&watch->count, 0);
453 INIT_LIST_HEAD(&watch->d_list);
454 INIT_LIST_HEAD(&watch->i_list);
455
456 /* save a reference to device and bump the count to make it official */
457 get_inotify_dev(dev);
458 watch->dev = dev;
459
460 /*
461 * Save a reference to the inode and bump the ref count to make it
462 * official. We hold a reference to nameidata, which makes this safe.
463 */
464 watch->inode = igrab(inode);
465
466 /* bump our own count, corresponding to our entry in dev->watches */
467 get_inotify_watch(watch);
468
469 atomic_inc(&dev->user->inotify_watches);
470
471 return watch;
472}
473
474/*
475 * inotify_find_dev - find the watch associated with the given inode and dev
476 * 193 *
477 * Callers must hold inode->inotify_mutex. 194 * Callers must hold inode->inotify_mutex.
478 */ 195 */
479static struct inotify_watch *inode_find_dev(struct inode *inode, 196static struct inotify_watch *inode_find_handle(struct inode *inode,
480 struct inotify_device *dev) 197 struct inotify_handle *ih)
481{ 198{
482 struct inotify_watch *watch; 199 struct inotify_watch *watch;
483 200
484 list_for_each_entry(watch, &inode->inotify_watches, i_list) { 201 list_for_each_entry(watch, &inode->inotify_watches, i_list) {
485 if (watch->dev == dev) 202 if (watch->ih == ih)
486 return watch; 203 return watch;
487 } 204 }
488 205
@@ -490,40 +207,40 @@ static struct inotify_watch *inode_find_dev(struct inode *inode,
490} 207}
491 208
492/* 209/*
493 * remove_watch_no_event - remove_watch() without the IN_IGNORED event. 210 * remove_watch_no_event - remove watch without the IN_IGNORED event.
211 *
212 * Callers must hold both inode->inotify_mutex and ih->mutex.
494 */ 213 */
495static void remove_watch_no_event(struct inotify_watch *watch, 214static void remove_watch_no_event(struct inotify_watch *watch,
496 struct inotify_device *dev) 215 struct inotify_handle *ih)
497{ 216{
498 list_del(&watch->i_list); 217 list_del(&watch->i_list);
499 list_del(&watch->d_list); 218 list_del(&watch->h_list);
500 219
501 if (!inotify_inode_watched(watch->inode)) 220 if (!inotify_inode_watched(watch->inode))
502 set_dentry_child_flags(watch->inode, 0); 221 set_dentry_child_flags(watch->inode, 0);
503 222
504 atomic_dec(&dev->user->inotify_watches); 223 idr_remove(&ih->idr, watch->wd);
505 idr_remove(&dev->idr, watch->wd);
506 put_inotify_watch(watch);
507} 224}
508 225
509/* 226/**
510 * remove_watch - Remove a watch from both the device and the inode. Sends 227 * inotify_remove_watch_locked - Remove a watch from both the handle and the
511 * the IN_IGNORED event to the given device signifying that the inode is no 228 * inode. Sends the IN_IGNORED event signifying that the inode is no longer
512 * longer watched. 229 * watched. May be invoked from a caller's event handler.
513 * 230 * @ih: inotify handle associated with watch
514 * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a 231 * @watch: watch to remove
515 * reference to the inode before returning.
516 * 232 *
517 * The inode is not iput() so as to remain atomic. If the inode needs to be 233 * Callers must hold both inode->inotify_mutex and ih->mutex.
518 * iput(), the call returns one. Otherwise, it returns zero.
519 */ 234 */
520static void remove_watch(struct inotify_watch *watch,struct inotify_device *dev) 235void inotify_remove_watch_locked(struct inotify_handle *ih,
236 struct inotify_watch *watch)
521{ 237{
522 inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL); 238 remove_watch_no_event(watch, ih);
523 remove_watch_no_event(watch, dev); 239 ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
524} 240}
241EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
525 242
526/* Kernel API */ 243/* Kernel API for producing events */
527 244
528/* 245/*
529 * inotify_d_instantiate - instantiate dcache entry for inode 246 * inotify_d_instantiate - instantiate dcache entry for inode
@@ -563,9 +280,10 @@ void inotify_d_move(struct dentry *entry)
563 * @mask: event mask describing this event 280 * @mask: event mask describing this event
564 * @cookie: cookie for synchronization, or zero 281 * @cookie: cookie for synchronization, or zero
565 * @name: filename, if any 282 * @name: filename, if any
283 * @n_inode: inode associated with name
566 */ 284 */
567void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, 285void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
568 const char *name) 286 const char *name, struct inode *n_inode)
569{ 287{
570 struct inotify_watch *watch, *next; 288 struct inotify_watch *watch, *next;
571 289
@@ -576,14 +294,13 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
576 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { 294 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
577 u32 watch_mask = watch->mask; 295 u32 watch_mask = watch->mask;
578 if (watch_mask & mask) { 296 if (watch_mask & mask) {
 579 struct inotify_device *dev = watch->dev; 297 struct inotify_handle *ih = watch->ih;
580 get_inotify_watch(watch); 298 mutex_lock(&ih->mutex);
581 mutex_lock(&dev->mutex);
582 inotify_dev_queue_event(dev, watch, mask, cookie, name);
583 if (watch_mask & IN_ONESHOT) 299 if (watch_mask & IN_ONESHOT)
584 remove_watch_no_event(watch, dev); 300 remove_watch_no_event(watch, ih);
585 mutex_unlock(&dev->mutex); 301 ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
586 put_inotify_watch(watch); 302 name, n_inode);
303 mutex_unlock(&ih->mutex);
587 } 304 }
588 } 305 }
589 mutex_unlock(&inode->inotify_mutex); 306 mutex_unlock(&inode->inotify_mutex);
@@ -613,7 +330,8 @@ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
613 if (inotify_inode_watched(inode)) { 330 if (inotify_inode_watched(inode)) {
614 dget(parent); 331 dget(parent);
615 spin_unlock(&dentry->d_lock); 332 spin_unlock(&dentry->d_lock);
616 inotify_inode_queue_event(inode, mask, cookie, name); 333 inotify_inode_queue_event(inode, mask, cookie, name,
334 dentry->d_inode);
617 dput(parent); 335 dput(parent);
618 } else 336 } else
619 spin_unlock(&dentry->d_lock); 337 spin_unlock(&dentry->d_lock);
@@ -665,7 +383,7 @@ void inotify_unmount_inodes(struct list_head *list)
665 383
666 need_iput_tmp = need_iput; 384 need_iput_tmp = need_iput;
667 need_iput = NULL; 385 need_iput = NULL;
668 /* In case the remove_watch() drops a reference. */ 386 /* In case inotify_remove_watch_locked() drops a reference. */
669 if (inode != need_iput_tmp) 387 if (inode != need_iput_tmp)
670 __iget(inode); 388 __iget(inode);
671 else 389 else
@@ -694,11 +412,12 @@ void inotify_unmount_inodes(struct list_head *list)
694 mutex_lock(&inode->inotify_mutex); 412 mutex_lock(&inode->inotify_mutex);
695 watches = &inode->inotify_watches; 413 watches = &inode->inotify_watches;
696 list_for_each_entry_safe(watch, next_w, watches, i_list) { 414 list_for_each_entry_safe(watch, next_w, watches, i_list) {
 697 struct inotify_device *dev = watch->dev; 415 struct inotify_handle *ih = watch->ih;
698 mutex_lock(&dev->mutex); 416 mutex_lock(&ih->mutex);
699 inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); 417 ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
700 remove_watch(watch, dev); 418 NULL, NULL);
701 mutex_unlock(&dev->mutex); 419 inotify_remove_watch_locked(ih, watch);
420 mutex_unlock(&ih->mutex);
702 } 421 }
703 mutex_unlock(&inode->inotify_mutex); 422 mutex_unlock(&inode->inotify_mutex);
704 iput(inode); 423 iput(inode);
@@ -718,432 +437,292 @@ void inotify_inode_is_dead(struct inode *inode)
718 437
719 mutex_lock(&inode->inotify_mutex); 438 mutex_lock(&inode->inotify_mutex);
720 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { 439 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
721 struct inotify_device *dev = watch->dev; 440 struct inotify_handle *ih = watch->ih;
722 mutex_lock(&dev->mutex); 441 mutex_lock(&ih->mutex);
723 remove_watch(watch, dev); 442 inotify_remove_watch_locked(ih, watch);
724 mutex_unlock(&dev->mutex); 443 mutex_unlock(&ih->mutex);
725 } 444 }
726 mutex_unlock(&inode->inotify_mutex); 445 mutex_unlock(&inode->inotify_mutex);
727} 446}
728EXPORT_SYMBOL_GPL(inotify_inode_is_dead); 447EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
729 448
730/* Device Interface */ 449/* Kernel Consumer API */
731 450
732static unsigned int inotify_poll(struct file *file, poll_table *wait) 451/**
452 * inotify_init - allocate and initialize an inotify instance
453 * @ops: caller's inotify operations
454 */
455struct inotify_handle *inotify_init(const struct inotify_operations *ops)
733{ 456{
734 struct inotify_device *dev = file->private_data; 457 struct inotify_handle *ih;
735 int ret = 0;
736 458
737 poll_wait(file, &dev->wq, wait); 459 ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
738 mutex_lock(&dev->mutex); 460 if (unlikely(!ih))
739 if (!list_empty(&dev->events)) 461 return ERR_PTR(-ENOMEM);
740 ret = POLLIN | POLLRDNORM;
741 mutex_unlock(&dev->mutex);
742 462
743 return ret; 463 idr_init(&ih->idr);
464 INIT_LIST_HEAD(&ih->watches);
465 mutex_init(&ih->mutex);
466 ih->last_wd = 0;
467 ih->in_ops = ops;
468 atomic_set(&ih->count, 0);
469 get_inotify_handle(ih);
470
471 return ih;
744} 472}
473EXPORT_SYMBOL_GPL(inotify_init);
745 474
746static ssize_t inotify_read(struct file *file, char __user *buf, 475/**
747 size_t count, loff_t *pos) 476 * inotify_init_watch - initialize an inotify watch
477 * @watch: watch to initialize
478 */
479void inotify_init_watch(struct inotify_watch *watch)
748{ 480{
749 size_t event_size = sizeof (struct inotify_event); 481 INIT_LIST_HEAD(&watch->h_list);
750 struct inotify_device *dev; 482 INIT_LIST_HEAD(&watch->i_list);
751 char __user *start; 483 atomic_set(&watch->count, 0);
752 int ret; 484 get_inotify_watch(watch); /* initial get */
753 DEFINE_WAIT(wait);
754
755 start = buf;
756 dev = file->private_data;
757
758 while (1) {
759 int events;
760
761 prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
762
763 mutex_lock(&dev->mutex);
764 events = !list_empty(&dev->events);
765 mutex_unlock(&dev->mutex);
766 if (events) {
767 ret = 0;
768 break;
769 }
770
771 if (file->f_flags & O_NONBLOCK) {
772 ret = -EAGAIN;
773 break;
774 }
775
776 if (signal_pending(current)) {
777 ret = -EINTR;
778 break;
779 }
780
781 schedule();
782 }
783
784 finish_wait(&dev->wq, &wait);
785 if (ret)
786 return ret;
787
788 mutex_lock(&dev->mutex);
789 while (1) {
790 struct inotify_kernel_event *kevent;
791
792 ret = buf - start;
793 if (list_empty(&dev->events))
794 break;
795
796 kevent = inotify_dev_get_event(dev);
797 if (event_size + kevent->event.len > count)
798 break;
799
800 if (copy_to_user(buf, &kevent->event, event_size)) {
801 ret = -EFAULT;
802 break;
803 }
804 buf += event_size;
805 count -= event_size;
806
807 if (kevent->name) {
808 if (copy_to_user(buf, kevent->name, kevent->event.len)){
809 ret = -EFAULT;
810 break;
811 }
812 buf += kevent->event.len;
813 count -= kevent->event.len;
814 }
815
816 remove_kevent(dev, kevent);
817 }
818 mutex_unlock(&dev->mutex);
819
820 return ret;
821} 485}
486EXPORT_SYMBOL_GPL(inotify_init_watch);
822 487
823static int inotify_release(struct inode *ignored, struct file *file) 488/**
489 * inotify_destroy - clean up and destroy an inotify instance
490 * @ih: inotify handle
491 */
492void inotify_destroy(struct inotify_handle *ih)
824{ 493{
825 struct inotify_device *dev = file->private_data;
826
827 /* 494 /*
828 * Destroy all of the watches on this device. Unfortunately, not very 495 * Destroy all of the watches for this handle. Unfortunately, not very
829 * pretty. We cannot do a simple iteration over the list, because we 496 * pretty. We cannot do a simple iteration over the list, because we
830 * do not know the inode until we iterate to the watch. But we need to 497 * do not know the inode until we iterate to the watch. But we need to
831 * hold inode->inotify_mutex before dev->mutex. The following works. 498 * hold inode->inotify_mutex before ih->mutex. The following works.
832 */ 499 */
833 while (1) { 500 while (1) {
834 struct inotify_watch *watch; 501 struct inotify_watch *watch;
835 struct list_head *watches; 502 struct list_head *watches;
836 struct inode *inode; 503 struct inode *inode;
837 504
838 mutex_lock(&dev->mutex); 505 mutex_lock(&ih->mutex);
839 watches = &dev->watches; 506 watches = &ih->watches;
840 if (list_empty(watches)) { 507 if (list_empty(watches)) {
841 mutex_unlock(&dev->mutex); 508 mutex_unlock(&ih->mutex);
842 break; 509 break;
843 } 510 }
844 watch = list_entry(watches->next, struct inotify_watch, d_list); 511 watch = list_entry(watches->next, struct inotify_watch, h_list);
845 get_inotify_watch(watch); 512 get_inotify_watch(watch);
846 mutex_unlock(&dev->mutex); 513 mutex_unlock(&ih->mutex);
847 514
848 inode = watch->inode; 515 inode = watch->inode;
849 mutex_lock(&inode->inotify_mutex); 516 mutex_lock(&inode->inotify_mutex);
850 mutex_lock(&dev->mutex); 517 mutex_lock(&ih->mutex);
851 518
852 /* make sure we didn't race with another list removal */ 519 /* make sure we didn't race with another list removal */
853 if (likely(idr_find(&dev->idr, watch->wd))) 520 if (likely(idr_find(&ih->idr, watch->wd))) {
854 remove_watch_no_event(watch, dev); 521 remove_watch_no_event(watch, ih);
522 put_inotify_watch(watch);
523 }
855 524
856 mutex_unlock(&dev->mutex); 525 mutex_unlock(&ih->mutex);
857 mutex_unlock(&inode->inotify_mutex); 526 mutex_unlock(&inode->inotify_mutex);
858 put_inotify_watch(watch); 527 put_inotify_watch(watch);
859 } 528 }
860 529
861 /* destroy all of the events on this device */ 530 /* free this handle: the put matching the get in inotify_init() */
862 mutex_lock(&dev->mutex); 531 put_inotify_handle(ih);
863 while (!list_empty(&dev->events))
864 inotify_dev_event_dequeue(dev);
865 mutex_unlock(&dev->mutex);
866
867 /* free this device: the put matching the get in inotify_init() */
868 put_inotify_dev(dev);
869
870 return 0;
871} 532}
533EXPORT_SYMBOL_GPL(inotify_destroy);
872 534
873/* 535/**
874 * inotify_ignore - remove a given wd from this inotify instance. 536 * inotify_find_watch - find an existing watch for an (ih,inode) pair
537 * @ih: inotify handle
538 * @inode: inode to watch
539 * @watchp: pointer to existing inotify_watch
875 * 540 *
876 * Can sleep. 541 * Caller must pin given inode (via nameidata).
877 */ 542 */
878static int inotify_ignore(struct inotify_device *dev, s32 wd) 543s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
544 struct inotify_watch **watchp)
879{ 545{
880 struct inotify_watch *watch; 546 struct inotify_watch *old;
881 struct inode *inode; 547 int ret = -ENOENT;
882
883 mutex_lock(&dev->mutex);
884 watch = idr_find(&dev->idr, wd);
885 if (unlikely(!watch)) {
886 mutex_unlock(&dev->mutex);
887 return -EINVAL;
888 }
889 get_inotify_watch(watch);
890 inode = watch->inode;
891 mutex_unlock(&dev->mutex);
892 548
893 mutex_lock(&inode->inotify_mutex); 549 mutex_lock(&inode->inotify_mutex);
894 mutex_lock(&dev->mutex); 550 mutex_lock(&ih->mutex);
895 551
896 /* make sure that we did not race */ 552 old = inode_find_handle(inode, ih);
897 if (likely(idr_find(&dev->idr, wd) == watch)) 553 if (unlikely(old)) {
898 remove_watch(watch, dev); 554 get_inotify_watch(old); /* caller must put watch */
555 *watchp = old;
556 ret = old->wd;
557 }
899 558
900 mutex_unlock(&dev->mutex); 559 mutex_unlock(&ih->mutex);
901 mutex_unlock(&inode->inotify_mutex); 560 mutex_unlock(&inode->inotify_mutex);
902 put_inotify_watch(watch);
903 561
904 return 0; 562 return ret;
905} 563}
564EXPORT_SYMBOL_GPL(inotify_find_watch);
906 565
907static long inotify_ioctl(struct file *file, unsigned int cmd, 566/**
908 unsigned long arg) 567 * inotify_find_update_watch - find and update the mask of an existing watch
568 * @ih: inotify handle
569 * @inode: inode's watch to update
570 * @mask: mask of events to watch
571 *
572 * Caller must pin given inode (via nameidata).
573 */
574s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
575 u32 mask)
909{ 576{
910 struct inotify_device *dev; 577 struct inotify_watch *old;
911 void __user *p; 578 int mask_add = 0;
912 int ret = -ENOTTY; 579 int ret;
913
914 dev = file->private_data;
915 p = (void __user *) arg;
916
917 switch (cmd) {
918 case FIONREAD:
919 ret = put_user(dev->queue_size, (int __user *) p);
920 break;
921 }
922
923 return ret;
924}
925 580
926static const struct file_operations inotify_fops = { 581 if (mask & IN_MASK_ADD)
927 .poll = inotify_poll, 582 mask_add = 1;
928 .read = inotify_read,
929 .release = inotify_release,
930 .unlocked_ioctl = inotify_ioctl,
931 .compat_ioctl = inotify_ioctl,
932};
933 583
934asmlinkage long sys_inotify_init(void) 584 /* don't allow invalid bits: we don't want flags set */
935{ 585 mask &= IN_ALL_EVENTS | IN_ONESHOT;
936 struct inotify_device *dev; 586 if (unlikely(!mask))
937 struct user_struct *user; 587 return -EINVAL;
938 struct file *filp;
939 int fd, ret;
940
941 fd = get_unused_fd();
942 if (fd < 0)
943 return fd;
944
945 filp = get_empty_filp();
946 if (!filp) {
947 ret = -ENFILE;
948 goto out_put_fd;
949 }
950 588
951 user = get_uid(current->user); 589 mutex_lock(&inode->inotify_mutex);
952 if (unlikely(atomic_read(&user->inotify_devs) >= 590 mutex_lock(&ih->mutex);
953 inotify_max_user_instances)) {
954 ret = -EMFILE;
955 goto out_free_uid;
956 }
957 591
958 dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); 592 /*
959 if (unlikely(!dev)) { 593 * Handle the case of re-adding a watch on an (inode,ih) pair that we
960 ret = -ENOMEM; 594 * are already watching. We just update the mask and return its wd.
961 goto out_free_uid; 595 */
596 old = inode_find_handle(inode, ih);
597 if (unlikely(!old)) {
598 ret = -ENOENT;
599 goto out;
962 } 600 }
963 601
964 filp->f_op = &inotify_fops; 602 if (mask_add)
965 filp->f_vfsmnt = mntget(inotify_mnt); 603 old->mask |= mask;
966 filp->f_dentry = dget(inotify_mnt->mnt_root); 604 else
967 filp->f_mapping = filp->f_dentry->d_inode->i_mapping; 605 old->mask = mask;
968 filp->f_mode = FMODE_READ; 606 ret = old->wd;
969 filp->f_flags = O_RDONLY; 607out:
970 filp->private_data = dev; 608 mutex_unlock(&ih->mutex);
971 609 mutex_unlock(&inode->inotify_mutex);
972 idr_init(&dev->idr);
973 INIT_LIST_HEAD(&dev->events);
974 INIT_LIST_HEAD(&dev->watches);
975 init_waitqueue_head(&dev->wq);
976 mutex_init(&dev->mutex);
977 dev->event_count = 0;
978 dev->queue_size = 0;
979 dev->max_events = inotify_max_queued_events;
980 dev->user = user;
981 dev->last_wd = 0;
982 atomic_set(&dev->count, 0);
983
984 get_inotify_dev(dev);
985 atomic_inc(&user->inotify_devs);
986 fd_install(fd, filp);
987
988 return fd;
989out_free_uid:
990 free_uid(user);
991 put_filp(filp);
992out_put_fd:
993 put_unused_fd(fd);
994 return ret; 610 return ret;
995} 611}
612EXPORT_SYMBOL_GPL(inotify_find_update_watch);
996 613
997asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) 614/**
615 * inotify_add_watch - add a watch to an inotify instance
616 * @ih: inotify handle
617 * @watch: caller allocated watch structure
618 * @inode: inode to watch
619 * @mask: mask of events to watch
620 *
621 * Caller must pin given inode (via nameidata).
622 * Caller must ensure it only calls inotify_add_watch() once per watch.
623 * Calls inotify_handle_get_wd() so may sleep.
624 */
625s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
626 struct inode *inode, u32 mask)
998{ 627{
999 struct inotify_watch *watch, *old; 628 int ret = 0;
1000 struct inode *inode;
1001 struct inotify_device *dev;
1002 struct nameidata nd;
1003 struct file *filp;
1004 int ret, fput_needed;
1005 int mask_add = 0;
1006 unsigned flags = 0;
1007
1008 filp = fget_light(fd, &fput_needed);
1009 if (unlikely(!filp))
1010 return -EBADF;
1011
1012 /* verify that this is indeed an inotify instance */
1013 if (unlikely(filp->f_op != &inotify_fops)) {
1014 ret = -EINVAL;
1015 goto fput_and_out;
1016 }
1017
1018 if (!(mask & IN_DONT_FOLLOW))
1019 flags |= LOOKUP_FOLLOW;
1020 if (mask & IN_ONLYDIR)
1021 flags |= LOOKUP_DIRECTORY;
1022
1023 ret = find_inode(path, &nd, flags);
1024 if (unlikely(ret))
1025 goto fput_and_out;
1026 629
1027 /* inode held in place by reference to nd; dev by fget on fd */ 630 /* don't allow invalid bits: we don't want flags set */
1028 inode = nd.dentry->d_inode; 631 mask &= IN_ALL_EVENTS | IN_ONESHOT;
1029 dev = filp->private_data; 632 if (unlikely(!mask))
633 return -EINVAL;
634 watch->mask = mask;
1030 635
1031 mutex_lock(&inode->inotify_mutex); 636 mutex_lock(&inode->inotify_mutex);
1032 mutex_lock(&dev->mutex); 637 mutex_lock(&ih->mutex);
1033
1034 if (mask & IN_MASK_ADD)
1035 mask_add = 1;
1036 638
1037 /* don't let user-space set invalid bits: we don't want flags set */ 639 /* Initialize a new watch */
1038 mask &= IN_ALL_EVENTS | IN_ONESHOT; 640 ret = inotify_handle_get_wd(ih, watch);
1039 if (unlikely(!mask)) { 641 if (unlikely(ret))
1040 ret = -EINVAL;
1041 goto out; 642 goto out;
1042 } 643 ret = watch->wd;
644
645 /* save a reference to handle and bump the count to make it official */
646 get_inotify_handle(ih);
647 watch->ih = ih;
1043 648
1044 /* 649 /*
1045 * Handle the case of re-adding a watch on an (inode,dev) pair that we 650 * Save a reference to the inode and bump the ref count to make it
1046 * are already watching. We just update the mask and return its wd. 651 * official. We hold a reference to nameidata, which makes this safe.
1047 */ 652 */
1048 old = inode_find_dev(inode, dev); 653 watch->inode = igrab(inode);
1049 if (unlikely(old)) {
1050 if (mask_add)
1051 old->mask |= mask;
1052 else
1053 old->mask = mask;
1054 ret = old->wd;
1055 goto out;
1056 }
1057
1058 watch = create_watch(dev, mask, inode);
1059 if (unlikely(IS_ERR(watch))) {
1060 ret = PTR_ERR(watch);
1061 goto out;
1062 }
1063 654
1064 if (!inotify_inode_watched(inode)) 655 if (!inotify_inode_watched(inode))
1065 set_dentry_child_flags(inode, 1); 656 set_dentry_child_flags(inode, 1);
1066 657
1067 /* Add the watch to the device's and the inode's list */ 658 /* Add the watch to the handle's and the inode's list */
1068 list_add(&watch->d_list, &dev->watches); 659 list_add(&watch->h_list, &ih->watches);
1069 list_add(&watch->i_list, &inode->inotify_watches); 660 list_add(&watch->i_list, &inode->inotify_watches);
1070 ret = watch->wd;
1071out: 661out:
1072 mutex_unlock(&dev->mutex); 662 mutex_unlock(&ih->mutex);
1073 mutex_unlock(&inode->inotify_mutex); 663 mutex_unlock(&inode->inotify_mutex);
1074 path_release(&nd);
1075fput_and_out:
1076 fput_light(filp, fput_needed);
1077 return ret; 664 return ret;
1078} 665}
666EXPORT_SYMBOL_GPL(inotify_add_watch);
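
Taken together, the consumer-facing half of the new API is: embed a struct inotify_watch in a private structure, register a struct inotify_operations with inotify_init(), then attach watches with inotify_init_watch() + inotify_add_watch(). The layout of struct inotify_operations lives in include/linux/inotify.h and is not shown in this diff, so the callback signatures in the sketch below are inferred from how ih->in_ops is invoked above; treat it as an illustration, not the canonical consumer.

/* Sketch of an in-kernel consumer of the new API (callback signatures
 * inferred from the ih->in_ops calls in fs/inotify.c above). */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/inotify.h>

struct my_watch {
	struct inotify_watch wdata;	/* embedded inotify watch */
	/* ... consumer-private state ... */
};

static void my_handle_event(struct inotify_watch *w, u32 wd, u32 mask,
			    u32 cookie, const char *name, struct inode *inode)
{
	/* called with inode->inotify_mutex and ih->mutex held */
	if (mask & IN_IGNORED)
		put_inotify_watch(w);	/* drop the final reference */
}

static void my_destroy_watch(struct inotify_watch *w)
{
	kfree(container_of(w, struct my_watch, wdata));
}

static const struct inotify_operations my_ops = {
	.handle_event	= my_handle_event,
	.destroy_watch	= my_destroy_watch,
};

/* inode must be pinned by the caller, e.g. via nameidata */
static s32 my_add_watch(struct inotify_handle *ih, struct inode *inode)
{
	struct my_watch *w = kmalloc(sizeof(*w), GFP_KERNEL);
	s32 ret;

	if (!w)
		return -ENOMEM;
	inotify_init_watch(&w->wdata);		/* takes the initial reference */
	ret = inotify_add_watch(ih, &w->wdata, inode, IN_MODIFY | IN_ATTRIB);
	if (ret < 0)
		my_destroy_watch(&w->wdata);	/* add failed, free it ourselves */
	return ret;
}

/* once, at init time: struct inotify_handle *ih = inotify_init(&my_ops); */
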
1079 667
1080asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) 668/**
669 * inotify_rm_wd - remove a watch from an inotify instance
670 * @ih: inotify handle
671 * @wd: watch descriptor to remove
672 *
673 * Can sleep.
674 */
675int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
1081{ 676{
1082 struct file *filp; 677 struct inotify_watch *watch;
1083 struct inotify_device *dev; 678 struct inode *inode;
1084 int ret, fput_needed;
1085
1086 filp = fget_light(fd, &fput_needed);
1087 if (unlikely(!filp))
1088 return -EBADF;
1089 679
1090 /* verify that this is indeed an inotify instance */ 680 mutex_lock(&ih->mutex);
1091 if (unlikely(filp->f_op != &inotify_fops)) { 681 watch = idr_find(&ih->idr, wd);
1092 ret = -EINVAL; 682 if (unlikely(!watch)) {
1093 goto out; 683 mutex_unlock(&ih->mutex);
684 return -EINVAL;
1094 } 685 }
686 get_inotify_watch(watch);
687 inode = watch->inode;
688 mutex_unlock(&ih->mutex);
1095 689
1096 dev = filp->private_data; 690 mutex_lock(&inode->inotify_mutex);
1097 ret = inotify_ignore(dev, wd); 691 mutex_lock(&ih->mutex);
1098 692
1099out: 693 /* make sure that we did not race */
1100 fput_light(filp, fput_needed); 694 if (likely(idr_find(&ih->idr, wd) == watch))
1101 return ret; 695 inotify_remove_watch_locked(ih, watch);
696
697 mutex_unlock(&ih->mutex);
698 mutex_unlock(&inode->inotify_mutex);
699 put_inotify_watch(watch);
700
701 return 0;
1102} 702}
703EXPORT_SYMBOL_GPL(inotify_rm_wd);
1103 704
1104static struct super_block * 705/**
1105inotify_get_sb(struct file_system_type *fs_type, int flags, 706 * inotify_rm_watch - remove a watch from an inotify instance
1106 const char *dev_name, void *data) 707 * @ih: inotify handle
708 * @watch: watch to remove
709 *
710 * Can sleep.
711 */
712int inotify_rm_watch(struct inotify_handle *ih,
713 struct inotify_watch *watch)
1107{ 714{
1108 return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA); 715 return inotify_rm_wd(ih, watch->wd);
1109} 716}
1110 717EXPORT_SYMBOL_GPL(inotify_rm_watch);
1111static struct file_system_type inotify_fs_type = {
1112 .name = "inotifyfs",
1113 .get_sb = inotify_get_sb,
1114 .kill_sb = kill_anon_super,
1115};
1116 718
1117/* 719/*
1118 * inotify_setup - Our initialization function. Note that we cannnot return 720 * inotify_setup - core initialization function
1119 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
1120 * must result in panic().
1121 */ 721 */
1122static int __init inotify_setup(void) 722static int __init inotify_setup(void)
1123{ 723{
1124 int ret;
1125
1126 ret = register_filesystem(&inotify_fs_type);
1127 if (unlikely(ret))
1128 panic("inotify: register_filesystem returned %d!\n", ret);
1129
1130 inotify_mnt = kern_mount(&inotify_fs_type);
1131 if (IS_ERR(inotify_mnt))
1132 panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
1133
1134 inotify_max_queued_events = 16384;
1135 inotify_max_user_instances = 128;
1136 inotify_max_user_watches = 8192;
1137
1138 atomic_set(&inotify_cookie, 0); 724 atomic_set(&inotify_cookie, 0);
1139 725
1140 watch_cachep = kmem_cache_create("inotify_watch_cache",
1141 sizeof(struct inotify_watch),
1142 0, SLAB_PANIC, NULL, NULL);
1143 event_cachep = kmem_cache_create("inotify_event_cache",
1144 sizeof(struct inotify_kernel_event),
1145 0, SLAB_PANIC, NULL, NULL);
1146
1147 return 0; 726 return 0;
1148} 727}
1149 728
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
new file mode 100644
index 000000000000..9e9931e2badd
--- /dev/null
+++ b/fs/inotify_user.c
@@ -0,0 +1,719 @@
1/*
2 * fs/inotify_user.c - inotify support for userspace
3 *
4 * Authors:
5 * John McCutchan <ttb@tentacle.dhs.org>
6 * Robert Love <rml@novell.com>
7 *
8 * Copyright (C) 2005 John McCutchan
9 * Copyright 2006 Hewlett-Packard Development Company, L.P.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2, or (at your option) any
14 * later version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 */
21
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/slab.h>
25#include <linux/fs.h>
26#include <linux/file.h>
27#include <linux/mount.h>
28#include <linux/namei.h>
29#include <linux/poll.h>
30#include <linux/init.h>
31#include <linux/list.h>
32#include <linux/inotify.h>
33#include <linux/syscalls.h>
34
35#include <asm/ioctls.h>
36
37static kmem_cache_t *watch_cachep __read_mostly;
38static kmem_cache_t *event_cachep __read_mostly;
39
40static struct vfsmount *inotify_mnt __read_mostly;
41
42/* these are configurable via /proc/sys/fs/inotify/ */
43int inotify_max_user_instances __read_mostly;
44int inotify_max_user_watches __read_mostly;
45int inotify_max_queued_events __read_mostly;
46
47/*
48 * Lock ordering:
49 *
50 * inotify_dev->up_mutex (ensures we don't re-add the same watch)
51 * inode->inotify_mutex (protects inode's watch list)
52 * inotify_handle->mutex (protects inotify_handle's watch list)
53 * inotify_dev->ev_mutex (protects device's event queue)
54 */
55
56/*
57 * Lifetimes of the main data structures:
58 *
59 * inotify_device: Lifetime is managed by reference count, from
60 * sys_inotify_init() until release. Additional references can bump the count
61 * via get_inotify_dev() and drop the count via put_inotify_dev().
62 *
63 * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
64 * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
65 * first event, or to inotify_destroy().
66 */
67
68/*
69 * struct inotify_device - represents an inotify instance
70 *
71 * This structure is protected by the mutex 'mutex'.
72 */
73struct inotify_device {
74 wait_queue_head_t wq; /* wait queue for i/o */
75 struct mutex ev_mutex; /* protects event queue */
76 struct mutex up_mutex; /* synchronizes watch updates */
77 struct list_head events; /* list of queued events */
78 atomic_t count; /* reference count */
79 struct user_struct *user; /* user who opened this dev */
80 struct inotify_handle *ih; /* inotify handle */
81 unsigned int queue_size; /* size of the queue (bytes) */
82 unsigned int event_count; /* number of pending events */
83 unsigned int max_events; /* maximum number of events */
84};
85
86/*
87 * struct inotify_kernel_event - An inotify event, originating from a watch and
88 * queued for user-space. A list of these is attached to each instance of the
89 * device. In read(), this list is walked and all events that can fit in the
90 * buffer are returned.
91 *
92 * Protected by dev->ev_mutex of the device in which we are queued.
93 */
94struct inotify_kernel_event {
95 struct inotify_event event; /* the user-space event */
96 struct list_head list; /* entry in inotify_device's list */
97 char *name; /* filename, if any */
98};
99
100/*
101 * struct inotify_user_watch - our version of an inotify_watch, we add
102 * a reference to the associated inotify_device.
103 */
104struct inotify_user_watch {
105 struct inotify_device *dev; /* associated device */
106 struct inotify_watch wdata; /* inotify watch data */
107};
108
109#ifdef CONFIG_SYSCTL
110
111#include <linux/sysctl.h>
112
113static int zero;
114
115ctl_table inotify_table[] = {
116 {
117 .ctl_name = INOTIFY_MAX_USER_INSTANCES,
118 .procname = "max_user_instances",
119 .data = &inotify_max_user_instances,
120 .maxlen = sizeof(int),
121 .mode = 0644,
122 .proc_handler = &proc_dointvec_minmax,
123 .strategy = &sysctl_intvec,
124 .extra1 = &zero,
125 },
126 {
127 .ctl_name = INOTIFY_MAX_USER_WATCHES,
128 .procname = "max_user_watches",
129 .data = &inotify_max_user_watches,
130 .maxlen = sizeof(int),
131 .mode = 0644,
132 .proc_handler = &proc_dointvec_minmax,
133 .strategy = &sysctl_intvec,
134 .extra1 = &zero,
135 },
136 {
137 .ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
138 .procname = "max_queued_events",
139 .data = &inotify_max_queued_events,
140 .maxlen = sizeof(int),
141 .mode = 0644,
142 .proc_handler = &proc_dointvec_minmax,
143 .strategy = &sysctl_intvec,
144 .extra1 = &zero
145 },
146 { .ctl_name = 0 }
147};
148#endif /* CONFIG_SYSCTL */
149
150static inline void get_inotify_dev(struct inotify_device *dev)
151{
152 atomic_inc(&dev->count);
153}
154
155static inline void put_inotify_dev(struct inotify_device *dev)
156{
157 if (atomic_dec_and_test(&dev->count)) {
158 atomic_dec(&dev->user->inotify_devs);
159 free_uid(dev->user);
160 kfree(dev);
161 }
162}
163
164/*
165 * free_inotify_user_watch - cleans up the watch and its references
166 */
167static void free_inotify_user_watch(struct inotify_watch *w)
168{
169 struct inotify_user_watch *watch;
170 struct inotify_device *dev;
171
172 watch = container_of(w, struct inotify_user_watch, wdata);
173 dev = watch->dev;
174
175 atomic_dec(&dev->user->inotify_watches);
176 put_inotify_dev(dev);
177 kmem_cache_free(watch_cachep, watch);
178}
179
180/*
181 * kernel_event - create a new kernel event with the given parameters
182 *
183 * This function can sleep.
184 */
185static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
186 const char *name)
187{
188 struct inotify_kernel_event *kevent;
189
190 kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
191 if (unlikely(!kevent))
192 return NULL;
193
194 /* we hand this out to user-space, so zero it just in case */
195 memset(&kevent->event, 0, sizeof(struct inotify_event));
196
197 kevent->event.wd = wd;
198 kevent->event.mask = mask;
199 kevent->event.cookie = cookie;
200
201 INIT_LIST_HEAD(&kevent->list);
202
203 if (name) {
204 size_t len, rem, event_size = sizeof(struct inotify_event);
205
206 /*
207 * We need to pad the filename so as to properly align an
208 * array of inotify_event structures. Because the structure is
209 * small and the common case is a small filename, we just round
210 * up to the next multiple of the structure's sizeof. This is
211 * simple and safe for all architectures.
212 */
213 len = strlen(name) + 1;
214 rem = event_size - len;
215 if (len > event_size) {
216 rem = event_size - (len % event_size);
217 if (len % event_size == 0)
218 rem = 0;
219 }
220
221 kevent->name = kmalloc(len + rem, GFP_KERNEL);
222 if (unlikely(!kevent->name)) {
223 kmem_cache_free(event_cachep, kevent);
224 return NULL;
225 }
226 memcpy(kevent->name, name, len);
227 if (rem)
228 memset(kevent->name + len, 0, rem);
229 kevent->event.len = len + rem;
230 } else {
231 kevent->event.len = 0;
232 kevent->name = NULL;
233 }
234
235 return kevent;
236}
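
The rounding above pads event.len to a multiple of sizeof(struct inotify_event) so that an array of events stays aligned in the read() buffer. A small worked example of the same arithmetic, assuming the common 16-byte event header (four 32-bit fields); this is an illustration, not code from the patch:

/* Same rounding as kernel_event() above, in userspace form. */
#include <stdio.h>
#include <string.h>

static size_t padded_len(const char *name, size_t event_size)
{
	size_t len = strlen(name) + 1;		/* count the trailing NUL */
	size_t rem = event_size - len;

	if (len > event_size) {
		rem = event_size - (len % event_size);
		if (len % event_size == 0)
			rem = 0;
	}
	return len + rem;			/* becomes event.len */
}

int main(void)
{
	/* "a": len 2 -> 16;  "a_rather_long_name": len 19 -> 32 */
	printf("%zu %zu\n", padded_len("a", 16),
	       padded_len("a_rather_long_name", 16));
	return 0;
}
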
237
238/*
239 * inotify_dev_get_event - return the next event in the given dev's queue
240 *
241 * Caller must hold dev->ev_mutex.
242 */
243static inline struct inotify_kernel_event *
244inotify_dev_get_event(struct inotify_device *dev)
245{
246 return list_entry(dev->events.next, struct inotify_kernel_event, list);
247}
248
249/*
250 * inotify_dev_queue_event - event handler registered with core inotify, adds
251 * a new event to the given device
252 *
253 * Can sleep (calls kernel_event()).
254 */
255static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
256 u32 cookie, const char *name,
257 struct inode *ignored)
258{
259 struct inotify_user_watch *watch;
260 struct inotify_device *dev;
261 struct inotify_kernel_event *kevent, *last;
262
263 watch = container_of(w, struct inotify_user_watch, wdata);
264 dev = watch->dev;
265
266 mutex_lock(&dev->ev_mutex);
267
268 /* we can safely put the watch as we don't reference it while
269 * generating the event
270 */
271 if (mask & IN_IGNORED || mask & IN_ONESHOT)
272 put_inotify_watch(w); /* final put */
273
274 /* coalescing: drop this event if it is a dupe of the previous */
275 last = inotify_dev_get_event(dev);
276 if (last && last->event.mask == mask && last->event.wd == wd &&
277 last->event.cookie == cookie) {
278 const char *lastname = last->name;
279
280 if (!name && !lastname)
281 goto out;
282 if (name && lastname && !strcmp(lastname, name))
283 goto out;
284 }
285
286 /* the queue overflowed and we already sent the Q_OVERFLOW event */
287 if (unlikely(dev->event_count > dev->max_events))
288 goto out;
289
290 /* if the queue overflows, we need to notify user space */
291 if (unlikely(dev->event_count == dev->max_events))
292 kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
293 else
294 kevent = kernel_event(wd, mask, cookie, name);
295
296 if (unlikely(!kevent))
297 goto out;
298
299 /* queue the event and wake up anyone waiting */
300 dev->event_count++;
301 dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
302 list_add_tail(&kevent->list, &dev->events);
303 wake_up_interruptible(&dev->wq);
304
305out:
306 mutex_unlock(&dev->ev_mutex);
307}
308
309/*
310 * remove_kevent - cleans up and ultimately frees the given kevent
311 *
312 * Caller must hold dev->ev_mutex.
313 */
314static void remove_kevent(struct inotify_device *dev,
315 struct inotify_kernel_event *kevent)
316{
317 list_del(&kevent->list);
318
319 dev->event_count--;
320 dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
321
322 kfree(kevent->name);
323 kmem_cache_free(event_cachep, kevent);
324}
325
326/*
327 * inotify_dev_event_dequeue - destroy an event on the given device
328 *
329 * Caller must hold dev->ev_mutex.
330 */
331static void inotify_dev_event_dequeue(struct inotify_device *dev)
332{
333 if (!list_empty(&dev->events)) {
334 struct inotify_kernel_event *kevent;
335 kevent = inotify_dev_get_event(dev);
336 remove_kevent(dev, kevent);
337 }
338}
339
340/*
341 * find_inode - resolve a user-given path to a specific inode and return an nd
342 */
343static int find_inode(const char __user *dirname, struct nameidata *nd,
344 unsigned flags)
345{
346 int error;
347
348 error = __user_walk(dirname, flags, nd);
349 if (error)
350 return error;
351 /* you can only watch an inode if you have read permissions on it */
352 error = vfs_permission(nd, MAY_READ);
353 if (error)
354 path_release(nd);
355 return error;
356}
357
358/*
359 * create_watch - creates a watch on the given device.
360 *
361 * Callers must hold dev->up_mutex.
362 */
363static int create_watch(struct inotify_device *dev, struct inode *inode,
364 u32 mask)
365{
366 struct inotify_user_watch *watch;
367 int ret;
368
369 if (atomic_read(&dev->user->inotify_watches) >=
370 inotify_max_user_watches)
371 return -ENOSPC;
372
373 watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
374 if (unlikely(!watch))
375 return -ENOMEM;
376
377 /* save a reference to device and bump the count to make it official */
378 get_inotify_dev(dev);
379 watch->dev = dev;
380
381 atomic_inc(&dev->user->inotify_watches);
382
383 inotify_init_watch(&watch->wdata);
384 ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
385 if (ret < 0)
386 free_inotify_user_watch(&watch->wdata);
387
388 return ret;
389}
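/*
 * On success inotify_add_watch() hands back the newly allocated watch
 * descriptor, which create_watch() returns and sys_inotify_add_watch()
 * passes on to userspace.  On failure the watch is torn down through
 * free_inotify_user_watch(), which is expected to drop the device
 * reference and per-user watch count taken above.
 */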
390
391/* Device Interface */
392
393static unsigned int inotify_poll(struct file *file, poll_table *wait)
394{
395 struct inotify_device *dev = file->private_data;
396 int ret = 0;
397
398 poll_wait(file, &dev->wq, wait);
399 mutex_lock(&dev->ev_mutex);
400 if (!list_empty(&dev->events))
401 ret = POLLIN | POLLRDNORM;
402 mutex_unlock(&dev->ev_mutex);
403
404 return ret;
405}
406
407static ssize_t inotify_read(struct file *file, char __user *buf,
408 size_t count, loff_t *pos)
409{
410 size_t event_size = sizeof (struct inotify_event);
411 struct inotify_device *dev;
412 char __user *start;
413 int ret;
414 DEFINE_WAIT(wait);
415
416 start = buf;
417 dev = file->private_data;
418
419 while (1) {
420 int events;
421
422 prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
423
424 mutex_lock(&dev->ev_mutex);
425 events = !list_empty(&dev->events);
426 mutex_unlock(&dev->ev_mutex);
427 if (events) {
428 ret = 0;
429 break;
430 }
431
432 if (file->f_flags & O_NONBLOCK) {
433 ret = -EAGAIN;
434 break;
435 }
436
437 if (signal_pending(current)) {
438 ret = -EINTR;
439 break;
440 }
441
442 schedule();
443 }
444
445 finish_wait(&dev->wq, &wait);
446 if (ret)
447 return ret;
448
449 mutex_lock(&dev->ev_mutex);
450 while (1) {
451 struct inotify_kernel_event *kevent;
452
453 ret = buf - start;
454 if (list_empty(&dev->events))
455 break;
456
457 kevent = inotify_dev_get_event(dev);
458 if (event_size + kevent->event.len > count)
459 break;
460
461 if (copy_to_user(buf, &kevent->event, event_size)) {
462 ret = -EFAULT;
463 break;
464 }
465 buf += event_size;
466 count -= event_size;
467
468 if (kevent->name) {
469 if (copy_to_user(buf, kevent->name, kevent->event.len)){
470 ret = -EFAULT;
471 break;
472 }
473 buf += kevent->event.len;
474 count -= kevent->event.len;
475 }
476
477 remove_kevent(dev, kevent);
478 }
479 mutex_unlock(&dev->ev_mutex);
480
481 return ret;
482}
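/*
 * Illustrative userspace sketch (assumes the <sys/inotify.h> event layout;
 * error handling omitted): each successful read() returns one or more
 * records, each a struct inotify_event followed by event.len bytes of
 * NUL-padded name, so a consumer can walk the buffer like this:
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (n > 0 && p < buf + n) {
 *		struct inotify_event *ev = (struct inotify_event *) p;
 *		printf("wd=%d mask=0x%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(*ev) + ev->len;
 *	}
 */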
483
484static int inotify_release(struct inode *ignored, struct file *file)
485{
486 struct inotify_device *dev = file->private_data;
487
488 inotify_destroy(dev->ih);
489
490 /* destroy all of the events on this device */
491 mutex_lock(&dev->ev_mutex);
492 while (!list_empty(&dev->events))
493 inotify_dev_event_dequeue(dev);
494 mutex_unlock(&dev->ev_mutex);
495
496	/* free this device: the put matching the get in sys_inotify_init() */
497 put_inotify_dev(dev);
498
499 return 0;
500}
501
502static long inotify_ioctl(struct file *file, unsigned int cmd,
503 unsigned long arg)
504{
505 struct inotify_device *dev;
506 void __user *p;
507 int ret = -ENOTTY;
508
509 dev = file->private_data;
510 p = (void __user *) arg;
511
512 switch (cmd) {
513 case FIONREAD:
514 ret = put_user(dev->queue_size, (int __user *) p);
515 break;
516 }
517
518 return ret;
519}
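/*
 * FIONREAD reports dev->queue_size, i.e. the number of bytes currently
 * readable from the queue; userspace can use it to size a read buffer,
 * e.g. (illustrative):
 *
 *	unsigned int avail;
 *	ioctl(fd, FIONREAD, &avail);
 */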
520
521static const struct file_operations inotify_fops = {
522 .poll = inotify_poll,
523 .read = inotify_read,
524 .release = inotify_release,
525 .unlocked_ioctl = inotify_ioctl,
526 .compat_ioctl = inotify_ioctl,
527};
528
529static const struct inotify_operations inotify_user_ops = {
530 .handle_event = inotify_dev_queue_event,
531 .destroy_watch = free_inotify_user_watch,
532};
533
534asmlinkage long sys_inotify_init(void)
535{
536 struct inotify_device *dev;
537 struct inotify_handle *ih;
538 struct user_struct *user;
539 struct file *filp;
540 int fd, ret;
541
542 fd = get_unused_fd();
543 if (fd < 0)
544 return fd;
545
546 filp = get_empty_filp();
547 if (!filp) {
548 ret = -ENFILE;
549 goto out_put_fd;
550 }
551
552 user = get_uid(current->user);
553 if (unlikely(atomic_read(&user->inotify_devs) >=
554 inotify_max_user_instances)) {
555 ret = -EMFILE;
556 goto out_free_uid;
557 }
558
559 dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
560 if (unlikely(!dev)) {
561 ret = -ENOMEM;
562 goto out_free_uid;
563 }
564
565 ih = inotify_init(&inotify_user_ops);
566 if (unlikely(IS_ERR(ih))) {
567 ret = PTR_ERR(ih);
568 goto out_free_dev;
569 }
570 dev->ih = ih;
571
572 filp->f_op = &inotify_fops;
573 filp->f_vfsmnt = mntget(inotify_mnt);
574 filp->f_dentry = dget(inotify_mnt->mnt_root);
575 filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
576 filp->f_mode = FMODE_READ;
577 filp->f_flags = O_RDONLY;
578 filp->private_data = dev;
579
580 INIT_LIST_HEAD(&dev->events);
581 init_waitqueue_head(&dev->wq);
582 mutex_init(&dev->ev_mutex);
583 mutex_init(&dev->up_mutex);
584 dev->event_count = 0;
585 dev->queue_size = 0;
586 dev->max_events = inotify_max_queued_events;
587 dev->user = user;
588 atomic_set(&dev->count, 0);
589
590 get_inotify_dev(dev);
591 atomic_inc(&user->inotify_devs);
592 fd_install(fd, filp);
593
594 return fd;
595out_free_dev:
596 kfree(dev);
597out_free_uid:
598 free_uid(user);
599 put_filp(filp);
600out_put_fd:
601 put_unused_fd(fd);
602 return ret;
603}
604
605asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
606{
607 struct inode *inode;
608 struct inotify_device *dev;
609 struct nameidata nd;
610 struct file *filp;
611 int ret, fput_needed;
612 unsigned flags = 0;
613
614 filp = fget_light(fd, &fput_needed);
615 if (unlikely(!filp))
616 return -EBADF;
617
618 /* verify that this is indeed an inotify instance */
619 if (unlikely(filp->f_op != &inotify_fops)) {
620 ret = -EINVAL;
621 goto fput_and_out;
622 }
623
624 if (!(mask & IN_DONT_FOLLOW))
625 flags |= LOOKUP_FOLLOW;
626 if (mask & IN_ONLYDIR)
627 flags |= LOOKUP_DIRECTORY;
628
629 ret = find_inode(path, &nd, flags);
630 if (unlikely(ret))
631 goto fput_and_out;
632
633 /* inode held in place by reference to nd; dev by fget on fd */
634 inode = nd.dentry->d_inode;
635 dev = filp->private_data;
636
637 mutex_lock(&dev->up_mutex);
638 ret = inotify_find_update_watch(dev->ih, inode, mask);
639 if (ret == -ENOENT)
640 ret = create_watch(dev, inode, mask);
641 mutex_unlock(&dev->up_mutex);
642
643 path_release(&nd);
644fput_and_out:
645 fput_light(filp, fput_needed);
646 return ret;
647}
648
649asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
650{
651 struct file *filp;
652 struct inotify_device *dev;
653 int ret, fput_needed;
654
655 filp = fget_light(fd, &fput_needed);
656 if (unlikely(!filp))
657 return -EBADF;
658
659 /* verify that this is indeed an inotify instance */
660 if (unlikely(filp->f_op != &inotify_fops)) {
661 ret = -EINVAL;
662 goto out;
663 }
664
665 dev = filp->private_data;
666
667 /* we free our watch data when we get IN_IGNORED */
668 ret = inotify_rm_wd(dev->ih, wd);
669
670out:
671 fput_light(filp, fput_needed);
672 return ret;
673}
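/*
 * Illustrative end-to-end use of the three system calls above from
 * userspace (assuming libc wrappers; raw syscall() works equally well):
 *
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/etc", IN_MODIFY | IN_CREATE);
 *	... read() and parse events from fd as sketched earlier ...
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */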
674
675static struct super_block *
676inotify_get_sb(struct file_system_type *fs_type, int flags,
677 const char *dev_name, void *data)
678{
679 return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
680}
681
682static struct file_system_type inotify_fs_type = {
683 .name = "inotifyfs",
684 .get_sb = inotify_get_sb,
685 .kill_sb = kill_anon_super,
686};
687
688/*
689 * inotify_user_setup - Our initialization function. Note that we cannot return
690 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
691 * must result in panic().
692 */
693static int __init inotify_user_setup(void)
694{
695 int ret;
696
697 ret = register_filesystem(&inotify_fs_type);
698 if (unlikely(ret))
699 panic("inotify: register_filesystem returned %d!\n", ret);
700
701 inotify_mnt = kern_mount(&inotify_fs_type);
702 if (IS_ERR(inotify_mnt))
703 panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
704
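	/*
	 * These defaults correspond to the fs.inotify.* sysctls
	 * (max_queued_events, max_user_instances, max_user_watches) and can
	 * be tuned at runtime, e.g. via
	 * /proc/sys/fs/inotify/max_user_watches.
	 */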
705 inotify_max_queued_events = 16384;
706 inotify_max_user_instances = 128;
707 inotify_max_user_watches = 8192;
708
709 watch_cachep = kmem_cache_create("inotify_watch_cache",
710 sizeof(struct inotify_user_watch),
711 0, SLAB_PANIC, NULL, NULL);
712 event_cachep = kmem_cache_create("inotify_event_cache",
713 sizeof(struct inotify_kernel_event),
714 0, SLAB_PANIC, NULL, NULL);
715
716 return 0;
717}
718
719module_init(inotify_user_setup);
diff --git a/fs/namei.c b/fs/namei.c
index d6e2ee251736..184fe4acf824 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1127,7 +1127,7 @@ out:
1127 if (likely(retval == 0)) { 1127 if (likely(retval == 0)) {
1128 if (unlikely(current->audit_context && nd && nd->dentry && 1128 if (unlikely(current->audit_context && nd && nd->dentry &&
1129 nd->dentry->d_inode)) 1129 nd->dentry->d_inode))
1130 audit_inode(name, nd->dentry->d_inode, flags); 1130 audit_inode(name, nd->dentry->d_inode);
1131 } 1131 }
1132out_fail: 1132out_fail:
1133 return retval; 1133 return retval;
diff --git a/fs/open.c b/fs/open.c
index 317b7c7f38a7..4f178acd4c09 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -633,7 +633,7 @@ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
633 dentry = file->f_dentry; 633 dentry = file->f_dentry;
634 inode = dentry->d_inode; 634 inode = dentry->d_inode;
635 635
636 audit_inode(NULL, inode, 0); 636 audit_inode(NULL, inode);
637 637
638 err = -EROFS; 638 err = -EROFS;
639 if (IS_RDONLY(inode)) 639 if (IS_RDONLY(inode))
@@ -786,7 +786,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
786 if (file) { 786 if (file) {
787 struct dentry * dentry; 787 struct dentry * dentry;
788 dentry = file->f_dentry; 788 dentry = file->f_dentry;
789 audit_inode(NULL, dentry->d_inode, 0); 789 audit_inode(NULL, dentry->d_inode);
790 error = chown_common(dentry, user, group); 790 error = chown_common(dentry, user, group);
791 fput(file); 791 fput(file);
792 } 792 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6cc77dc3f3ff..6afff725a8c9 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1019,8 +1019,8 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1019 if (current != task) 1019 if (current != task)
1020 return -EPERM; 1020 return -EPERM;
1021 1021
1022 if (count > PAGE_SIZE) 1022 if (count >= PAGE_SIZE)
1023 count = PAGE_SIZE; 1023 count = PAGE_SIZE - 1;
1024 1024
1025 if (*ppos != 0) { 1025 if (*ppos != 0) {
1026 /* No partial writes. */ 1026 /* No partial writes. */
@@ -1033,6 +1033,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1033 if (copy_from_user(page, buf, count)) 1033 if (copy_from_user(page, buf, count))
1034 goto out_free_page; 1034 goto out_free_page;
1035 1035
1036 page[count] = '\0';
1036 loginuid = simple_strtoul(page, &tmp, 10); 1037 loginuid = simple_strtoul(page, &tmp, 10);
1037 if (tmp == page) { 1038 if (tmp == page) {
1038 length = -EINVAL; 1039 length = -EINVAL;
diff --git a/fs/xattr.c b/fs/xattr.c
index e416190f5e9c..c32f15b5f60f 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -242,7 +242,7 @@ sys_fsetxattr(int fd, char __user *name, void __user *value,
242 if (!f) 242 if (!f)
243 return error; 243 return error;
244 dentry = f->f_dentry; 244 dentry = f->f_dentry;
245 audit_inode(NULL, dentry->d_inode, 0); 245 audit_inode(NULL, dentry->d_inode);
246 error = setxattr(dentry, name, value, size, flags); 246 error = setxattr(dentry, name, value, size, flags);
247 fput(f); 247 fput(f);
248 return error; 248 return error;
@@ -469,7 +469,7 @@ sys_fremovexattr(int fd, char __user *name)
469 if (!f) 469 if (!f)
470 return error; 470 return error;
471 dentry = f->f_dentry; 471 dentry = f->f_dentry;
472 audit_inode(NULL, dentry->d_inode, 0); 472 audit_inode(NULL, dentry->d_inode);
473 error = removexattr(dentry, name); 473 error = removexattr(dentry, name);
474 fput(f); 474 fput(f);
475 return error; 475 return error;