Diffstat (limited to 'fs/notify/notification.c')
-rw-r--r--	fs/notify/notification.c	334
1 file changed, 28 insertions(+), 306 deletions(-)
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 7b51b05f160c..952237b8e2d2 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -48,15 +48,6 @@
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"

-static struct kmem_cache *fsnotify_event_cachep;
-static struct kmem_cache *fsnotify_event_holder_cachep;
-/*
- * This is a magic event we send when the q is too full. Since it doesn't
- * hold real event information we just keep one system wide and use it any time
- * it is needed. It's refcnt is set 1 at kernel init time and will never
- * get set to 0 so it will never get 'freed'
- */
-static struct fsnotify_event *q_overflow_event;
 static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

 /**
@@ -76,60 +67,14 @@ bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
 	return list_empty(&group->notification_list) ? true : false;
 }

-void fsnotify_get_event(struct fsnotify_event *event)
+void fsnotify_destroy_event(struct fsnotify_group *group,
+			    struct fsnotify_event *event)
 {
-	atomic_inc(&event->refcnt);
-}
-
-void fsnotify_put_event(struct fsnotify_event *event)
-{
-	if (!event)
+	/* Overflow events are per-group and we don't want to free them */
+	if (!event || event->mask == FS_Q_OVERFLOW)
 		return;

-	if (atomic_dec_and_test(&event->refcnt)) {
-		pr_debug("%s: event=%p\n", __func__, event);
-
-		if (event->data_type == FSNOTIFY_EVENT_PATH)
-			path_put(&event->path);
-
-		BUG_ON(!list_empty(&event->private_data_list));
-
-		kfree(event->file_name);
-		put_pid(event->tgid);
-		kmem_cache_free(fsnotify_event_cachep, event);
-	}
-}
-
-struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
-{
-	return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL);
-}
-
-void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
-{
-	if (holder)
-		kmem_cache_free(fsnotify_event_holder_cachep, holder);
-}
-
-/*
- * Find the private data that the group previously attached to this event when
- * the group added the event to the notification queue (fsnotify_add_notify_event)
- */
-struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, struct fsnotify_event *event)
-{
-	struct fsnotify_event_private_data *lpriv;
-	struct fsnotify_event_private_data *priv = NULL;
-
-	assert_spin_locked(&event->lock);
-
-	list_for_each_entry(lpriv, &event->private_data_list, event_list) {
-		if (lpriv->group == group) {
-			priv = lpriv;
-			list_del(&priv->event_list);
-			break;
-		}
-	}
-	return priv;
+	group->ops->free_event(event);
 }

 /*
@@ -137,91 +82,35 @@ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnot
  * event off the queue to deal with. If the event is successfully added to the
  * group's notification queue, a reference is taken on event.
  */
-struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
-						 struct fsnotify_event_private_data *priv,
+struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
+						 struct fsnotify_event *event,
 						 struct fsnotify_event *(*merge)(struct list_head *,
										 struct fsnotify_event *))
 {
 	struct fsnotify_event *return_event = NULL;
-	struct fsnotify_event_holder *holder = NULL;
 	struct list_head *list = &group->notification_list;

-	pr_debug("%s: group=%p event=%p priv=%p\n", __func__, group, event, priv);
-
-	/*
-	 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
-	 * Check if we expect to be able to use that holder. If not alloc a new
-	 * holder.
-	 * For the overflow event it's possible that something will use the in
-	 * event holder before we get the lock so we may need to jump back and
-	 * alloc a new holder, this can't happen for most events...
-	 */
-	if (!list_empty(&event->holder.event_list)) {
-alloc_holder:
-		holder = fsnotify_alloc_event_holder();
-		if (!holder)
-			return ERR_PTR(-ENOMEM);
-	}
+	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

 	mutex_lock(&group->notification_mutex);

 	if (group->q_len >= group->max_events) {
-		event = q_overflow_event;
-
-		/*
-		 * we need to return the overflow event
-		 * which means we need a ref
-		 */
-		fsnotify_get_event(event);
+		/* Queue overflow event only if it isn't already queued */
+		if (list_empty(&group->overflow_event.list))
+			event = &group->overflow_event;
 		return_event = event;
-
-		/* sorry, no private data on the overflow event */
-		priv = NULL;
 	}

 	if (!list_empty(list) && merge) {
-		struct fsnotify_event *tmp;
-
-		tmp = merge(list, event);
-		if (tmp) {
-			mutex_unlock(&group->notification_mutex);
-
-			if (return_event)
-				fsnotify_put_event(return_event);
-			if (holder != &event->holder)
-				fsnotify_destroy_event_holder(holder);
-			return tmp;
-		}
-	}
-
-	spin_lock(&event->lock);
-
-	if (list_empty(&event->holder.event_list)) {
-		if (unlikely(holder))
-			fsnotify_destroy_event_holder(holder);
-		holder = &event->holder;
-	} else if (unlikely(!holder)) {
-		/* between the time we checked above and got the lock the in
-		 * event holder was used, go back and get a new one */
-		spin_unlock(&event->lock);
-		mutex_unlock(&group->notification_mutex);
-
+		return_event = merge(list, event);
 		if (return_event) {
-			fsnotify_put_event(return_event);
-			return_event = NULL;
+			mutex_unlock(&group->notification_mutex);
+			return return_event;
 		}
-
-		goto alloc_holder;
 	}

 	group->q_len++;
-	holder->event = event;
-
-	fsnotify_get_event(event);
-	list_add_tail(&holder->event_list, list);
-	if (priv)
-		list_add_tail(&priv->event_list, &event->private_data_list);
-	spin_unlock(&event->lock);
+	list_add_tail(&event->list, list);
 	mutex_unlock(&group->notification_mutex);

 	wake_up(&group->notification_waitq);
@@ -230,32 +119,20 @@ alloc_holder:
 }

 /*
- * Remove and return the first event from the notification list. There is a
- * reference held on this event since it was on the list. It is the responsibility
- * of the caller to drop this reference.
+ * Remove and return the first event from the notification list. It is the
+ * responsibility of the caller to destroy the obtained event
  */
 struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
-	struct fsnotify_event_holder *holder;

 	BUG_ON(!mutex_is_locked(&group->notification_mutex));

 	pr_debug("%s: group=%p\n", __func__, group);

-	holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
-
-	event = holder->event;
-
-	spin_lock(&event->lock);
-	holder->event = NULL;
-	list_del_init(&holder->event_list);
-	spin_unlock(&event->lock);
-
-	/* event == holder means we are referenced through the in event holder */
-	if (holder != &event->holder)
-		fsnotify_destroy_event_holder(holder);
-
+	event = list_first_entry(&group->notification_list,
+				 struct fsnotify_event, list);
+	list_del(&event->list);
 	group->q_len--;

 	return event;
@@ -266,15 +143,10 @@ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group
  */
 struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
 {
-	struct fsnotify_event *event;
-	struct fsnotify_event_holder *holder;
-
 	BUG_ON(!mutex_is_locked(&group->notification_mutex));

-	holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
-	event = holder->event;
-
-	return event;
+	return list_first_entry(&group->notification_list,
+				struct fsnotify_event, list);
 }

 /*
@@ -284,181 +156,31 @@ struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
 void fsnotify_flush_notify(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
-	struct fsnotify_event_private_data *priv;

 	mutex_lock(&group->notification_mutex);
 	while (!fsnotify_notify_queue_is_empty(group)) {
 		event = fsnotify_remove_notify_event(group);
-		/* if they don't implement free_event_priv they better not have attached any */
-		if (group->ops->free_event_priv) {
-			spin_lock(&event->lock);
-			priv = fsnotify_remove_priv_from_event(group, event);
-			spin_unlock(&event->lock);
-			if (priv)
-				group->ops->free_event_priv(priv);
-		}
-		fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
+		fsnotify_destroy_event(group, event);
 	}
 	mutex_unlock(&group->notification_mutex);
 }

-static void initialize_event(struct fsnotify_event *event)
-{
-	INIT_LIST_HEAD(&event->holder.event_list);
-	atomic_set(&event->refcnt, 1);
-
-	spin_lock_init(&event->lock);
-
-	INIT_LIST_HEAD(&event->private_data_list);
-}
-
-/*
- * Caller damn well better be holding whatever mutex is protecting the
- * old_holder->event_list and the new_event must be a clean event which
- * cannot be found anywhere else in the kernel.
- */
-int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
-			   struct fsnotify_event *new_event)
-{
-	struct fsnotify_event *old_event = old_holder->event;
-	struct fsnotify_event_holder *new_holder = &new_event->holder;
-
-	enum event_spinlock_class {
-		SPINLOCK_OLD,
-		SPINLOCK_NEW,
-	};
-
-	pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, new_event);
-
-	/*
-	 * if the new_event's embedded holder is in use someone
-	 * screwed up and didn't give us a clean new event.
-	 */
-	BUG_ON(!list_empty(&new_holder->event_list));
-
-	spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
-	spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
-
-	new_holder->event = new_event;
-	list_replace_init(&old_holder->event_list, &new_holder->event_list);
-
-	spin_unlock(&new_event->lock);
-	spin_unlock(&old_event->lock);
-
-	/* event == holder means we are referenced through the in event holder */
-	if (old_holder != &old_event->holder)
-		fsnotify_destroy_event_holder(old_holder);
-
-	fsnotify_get_event(new_event); /* on the list take reference */
-	fsnotify_put_event(old_event); /* off the list, drop reference */
-
-	return 0;
-}
-
-struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
-{
-	struct fsnotify_event *event;
-
-	event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
-	if (!event)
-		return NULL;
-
-	pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, event);
-
-	memcpy(event, old_event, sizeof(*event));
-	initialize_event(event);
-
-	if (event->name_len) {
-		event->file_name = kstrdup(old_event->file_name, GFP_KERNEL);
-		if (!event->file_name) {
-			kmem_cache_free(fsnotify_event_cachep, event);
-			return NULL;
-		}
-	}
-	event->tgid = get_pid(old_event->tgid);
-	if (event->data_type == FSNOTIFY_EVENT_PATH)
-		path_get(&event->path);
-
-	return event;
-}
-
 /*
  * fsnotify_create_event - Allocate a new event which will be sent to each
  * group's handle_event function if the group was interested in this
  * particular event.
  *
- * @to_tell the inode which is supposed to receive the event (sometimes a
+ * @inode the inode which is supposed to receive the event (sometimes a
  *	  parent of the inode to which the event happened.
  * @mask what actually happened.
  * @data pointer to the object which was actually affected
  * @data_type flag indication if the data is a file, path, inode, nothing...
  * @name the filename, if available
  */
-struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
-					     int data_type, const unsigned char *name,
-					     u32 cookie, gfp_t gfp)
+void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode,
+			 u32 mask)
 {
-	struct fsnotify_event *event;
-
-	event = kmem_cache_zalloc(fsnotify_event_cachep, gfp);
-	if (!event)
-		return NULL;
-
-	pr_debug("%s: event=%p to_tell=%p mask=%x data=%p data_type=%d\n",
-		 __func__, event, to_tell, mask, data, data_type);
-
-	initialize_event(event);
-
-	if (name) {
-		event->file_name = kstrdup(name, gfp);
-		if (!event->file_name) {
-			kmem_cache_free(fsnotify_event_cachep, event);
-			return NULL;
-		}
-		event->name_len = strlen(event->file_name);
-	}
-
-	event->tgid = get_pid(task_tgid(current));
-	event->sync_cookie = cookie;
-	event->to_tell = to_tell;
-	event->data_type = data_type;
-
-	switch (data_type) {
-	case FSNOTIFY_EVENT_PATH: {
-		struct path *path = data;
-		event->path.dentry = path->dentry;
-		event->path.mnt = path->mnt;
-		path_get(&event->path);
-		break;
-	}
-	case FSNOTIFY_EVENT_INODE:
-		event->inode = data;
-		break;
-	case FSNOTIFY_EVENT_NONE:
-		event->inode = NULL;
-		event->path.dentry = NULL;
-		event->path.mnt = NULL;
-		break;
-	default:
-		BUG();
-	}
-
+	INIT_LIST_HEAD(&event->list);
+	event->inode = inode;
 	event->mask = mask;
-
-	return event;
-}
-
-static __init int fsnotify_notification_init(void)
-{
-	fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
-	fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
-
-	q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL,
-						 FSNOTIFY_EVENT_NONE, NULL, 0,
-						 GFP_KERNEL);
-	if (!q_overflow_event)
-		panic("unable to allocate fsnotify q_overflow_event\n");
-
-	return 0;
 }
-subsys_initcall(fsnotify_notification_init);
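
Net effect of the hunks above: events are no longer shared, refcounted objects tracked through separate fsnotify_event_holder allocations; each event is embedded in a backend-specific structure, linked straight into the group's notification list via event->list, and released through the group's ->free_event() callback, with a per-group overflow_event replacing the old global q_overflow_event. The following is a minimal, illustrative sketch (not part of this patch) of how a backend could drive the new interface; the names my_event, my_free_event and my_queue_event are hypothetical, and only the fsnotify_* calls and fields shown in the diff come from the patch itself.

#include <linux/fsnotify_backend.h>
#include <linux/slab.h>

/* Hypothetical backend event: the generic event is embedded, not shared. */
struct my_event {
	struct fsnotify_event fse;	/* provides ->list, ->inode, ->mask */
	int my_data;			/* backend-private payload lives next to it */
};

/* Would be wired into the group's fsnotify_ops as the ->free_event() callback. */
static void my_free_event(struct fsnotify_event *fse)
{
	kfree(container_of(fse, struct my_event, fse));
}

static int my_queue_event(struct fsnotify_group *group, struct inode *inode,
			  u32 mask)
{
	struct my_event *ev;
	struct fsnotify_event *queued;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	/* Initialize the embedded generic part (list head, inode, mask). */
	fsnotify_init_event(&ev->fse, inode, mask);

	/*
	 * Queue it; no merge callback in this sketch. If a different event is
	 * returned (an existing event it was merged into, or the group's
	 * overflow event), our copy never made it onto the list, so hand it
	 * back to fsnotify_destroy_event(), which calls ->free_event().
	 */
	queued = fsnotify_add_notify_event(group, &ev->fse, NULL);
	if (queued && queued != &ev->fse)
		fsnotify_destroy_event(group, &ev->fse);

	return 0;
}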