Diffstat (limited to 'fs/notify/notification.c')
-rw-r--r--	fs/notify/notification.c	230
1 file changed, 223 insertions(+), 7 deletions(-)
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index b8e9a87f8f58..dddecc74e63d 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -16,6 +16,21 @@
  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+/*
+ * Basic idea behind the notification queue: An fsnotify group (like inotify)
+ * sends the userspace notification about events asynchronously some time after
+ * the event happened.  When inotify gets an event it will need to add that
+ * event to the group notify queue.  Since a single event might need to be on
+ * multiple groups' notification queues we can't add the event directly to each
+ * queue and instead add a small "event_holder" to each queue.  This event_holder
+ * has a pointer back to the original event.  Since the majority of events are
+ * going to end up on one, and only one, notification queue we embed one
+ * event_holder into each event.  This means we have a single allocation instead
+ * of always needing two.  If the embedded event_holder is already in use by
+ * another group a new event_holder (from fsnotify_event_holder_cachep) will be
+ * allocated and used.
+ */
+
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
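
For reference, the event/holder relationship the new header comment describes looks roughly like the sketch below. This is a trimmed illustration, not the declarations from this patch; the real definitions live in include/linux/fsnotify_backend.h and carry more fields:

	struct fsnotify_event_holder {
		struct fsnotify_event *event;	/* back-pointer to the queued event */
		struct list_head event_list;	/* links this holder into one group's queue */
	};

	struct fsnotify_event {
		/* ... lock, refcnt, mask, to_tell, data ... */
		struct fsnotify_event_holder holder;	/* embedded holder for the common single-queue case */
	};

An event sitting on a single queue uses its embedded holder; only if that holder is already in use does the code below fall back to allocating one from fsnotify_event_holder_cachep.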
@@ -33,6 +48,21 @@
 #include "fsnotify.h"
 
 static struct kmem_cache *fsnotify_event_cachep;
+static struct kmem_cache *fsnotify_event_holder_cachep;
+/*
+ * This is a magic event we send when the q is too full.  Since it doesn't
+ * hold real event information we just keep one system wide and use it any
+ * time it is needed.  Its refcnt is set to 1 at kernel init time and will
+ * never get set to 0, so it will never get 'freed'.
+ */
+static struct fsnotify_event q_overflow_event;
+
+/* return true if the notify queue is empty, false otherwise */
+bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
+{
+	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	return list_empty(&group->notification_list);
+}
 
 void fsnotify_get_event(struct fsnotify_event *event)
 {
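
Why the single system-wide overflow event can never be freed follows from the refcounting: fsnotify_put_event() frees an event only when its refcnt drops to zero, and q_overflow_event starts at 1 with no final put to match. A hypothetical illustration of the invariant (not code from this patch):

	/* init time: refcnt pinned at 1, nothing ever drops this reference */
	atomic_set(&q_overflow_event.refcnt, 1);

	/* every queue/dequeue pairs a get with a put, so refcnt never reaches 0 */
	fsnotify_get_event(&q_overflow_event);	/* taken when queued on a group */
	fsnotify_put_event(&q_overflow_event);	/* dropped when dequeued; 1 remains */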
@@ -52,19 +82,176 @@ void fsnotify_put_event(struct fsnotify_event *event)
 	}
 }
 
+struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
+{
+	return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL);
+}
+
+void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
+{
+	kmem_cache_free(fsnotify_event_holder_cachep, holder);
+}
+
+/*
+ * Check if 2 events contain the same information.
+ */
+static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
+{
+	if ((old->mask == new->mask) &&
+	    (old->to_tell == new->to_tell) &&
+	    (old->data_type == new->data_type)) {
+		switch (old->data_type) {
+		case (FSNOTIFY_EVENT_INODE):
+			if (old->inode == new->inode)
+				return true;
+			break;
+		case (FSNOTIFY_EVENT_PATH):
+			if ((old->path.mnt == new->path.mnt) &&
+			    (old->path.dentry == new->path.dentry))
+				return true;
+			break;
+		case (FSNOTIFY_EVENT_NONE):
+			return true;
+		}
+	}
+	return false;
+}
+
 /*
- * Allocate a new event which will be sent to each group's handle_event function
- * if the group was interested in this particular event.
+ * Add an event to the group notification queue.  The group can later pull this
+ * event off the queue to deal with.  If the event is successfully added to the
+ * group's notification queue, a reference is taken on event.
  */
-struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
-					     void *data, int data_type)
+int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+	struct fsnotify_event_holder *holder = NULL;
+	struct list_head *list = &group->notification_list;
+	struct fsnotify_event_holder *last_holder;
+	struct fsnotify_event *last_event;
+
+	/*
+	 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
+	 * Check if we expect to be able to use that holder.  If not, alloc a new
+	 * holder.
+	 * For the overflow event it's possible that something will use the
+	 * embedded event holder before we get the lock, so we may need to jump
+	 * back and alloc a new holder; this can't happen for most events...
+	 */
+	if (!list_empty(&event->holder.event_list)) {
+alloc_holder:
+		holder = fsnotify_alloc_event_holder();
+		if (!holder)
+			return -ENOMEM;
+	}
+
+	mutex_lock(&group->notification_mutex);
+
+	if (group->q_len >= group->max_events)
+		event = &q_overflow_event;
+
+	spin_lock(&event->lock);
+
+	if (list_empty(&event->holder.event_list)) {
+		if (unlikely(holder))
+			fsnotify_destroy_event_holder(holder);
+		holder = &event->holder;
+	} else if (unlikely(!holder)) {
+		/* between the time we checked above and got the lock the
+		 * embedded event holder was used, go back and get a new one */
+		spin_unlock(&event->lock);
+		mutex_unlock(&group->notification_mutex);
+		goto alloc_holder;
+	}
+
+	if (!list_empty(list)) {
+		last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
+		last_event = last_holder->event;
+		if (event_compare(last_event, event)) {
+			spin_unlock(&event->lock);
+			mutex_unlock(&group->notification_mutex);
+			if (holder != &event->holder)
+				fsnotify_destroy_event_holder(holder);
+			return 0;
+		}
+	}
+
+	group->q_len++;
+	holder->event = event;
+
+	fsnotify_get_event(event);
+	list_add_tail(&holder->event_list, list);
+	spin_unlock(&event->lock);
+	mutex_unlock(&group->notification_mutex);
+
+	wake_up(&group->notification_waitq);
+	return 0;
+}
+
+/*
+ * Remove and return the first event from the notification list.  There is a
+ * reference held on this event since it was on the list.  It is the
+ * responsibility of the caller to drop this reference.
+ */
+struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
+	struct fsnotify_event_holder *holder;
 
-	event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
-	if (!event)
-		return NULL;
+	BUG_ON(!mutex_is_locked(&group->notification_mutex));
 
+	holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
+
+	event = holder->event;
+
+	spin_lock(&event->lock);
+	holder->event = NULL;
+	list_del_init(&holder->event_list);
+	spin_unlock(&event->lock);
+
+	/* holder == &event->holder means we are referenced through the
+	 * embedded event holder */
+	if (holder != &event->holder)
+		fsnotify_destroy_event_holder(holder);
+
+	group->q_len--;
+
+	return event;
+}
+
+/*
+ * This will not remove the event; that must be done with
+ * fsnotify_remove_notify_event().
+ */
+struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
+{
+	struct fsnotify_event *event;
+	struct fsnotify_event_holder *holder;
+
+	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+
+	holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
+	event = holder->event;
+
+	return event;
+}
+
+/*
+ * Called when a group is being torn down to clean up any outstanding
+ * event notifications.
+ */
+void fsnotify_flush_notify(struct fsnotify_group *group)
+{
+	struct fsnotify_event *event;
+
+	mutex_lock(&group->notification_mutex);
+	while (!fsnotify_notify_queue_is_empty(group)) {
+		event = fsnotify_remove_notify_event(group);
+		fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
+	}
+	mutex_unlock(&group->notification_mutex);
+}
+
+static void initialize_event(struct fsnotify_event *event)
+{
+	event->holder.event = NULL;
+	INIT_LIST_HEAD(&event->holder.event_list);
 	atomic_set(&event->refcnt, 1);
 
 	spin_lock_init(&event->lock);
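
Taken together, the new helpers give consumers a simple pattern: take group->notification_mutex, test for emptiness, then peek at or remove the head. A hypothetical reader loop modeled on fsnotify_flush_notify() above (a real backend such as inotify would copy the event to userspace in the middle):

	mutex_lock(&group->notification_mutex);
	while (!fsnotify_notify_queue_is_empty(group)) {
		struct fsnotify_event *event;

		event = fsnotify_remove_notify_event(group);
		/* ... hand the event to userspace here ... */
		fsnotify_put_event(event);	/* drop the reference the queue held */
	}
	mutex_unlock(&group->notification_mutex);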
@@ -72,7 +259,32 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
 	event->path.dentry = NULL;
 	event->path.mnt = NULL;
 	event->inode = NULL;
+	event->data_type = FSNOTIFY_EVENT_NONE;
 
+	event->to_tell = NULL;
+}
+
+/*
+ * fsnotify_create_event - Allocate a new event which will be sent to each
+ * group's handle_event function if the group was interested in this
+ * particular event.
+ *
+ * @to_tell the inode which is supposed to receive the event (sometimes a
+ *	parent of the inode to which the event happened)
+ * @mask what actually happened
+ * @data pointer to the object which was actually affected
+ * @data_type flag indicating whether the data is a file, path, inode, nothing...
+ */
+struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
+					     void *data, int data_type)
+{
+	struct fsnotify_event *event;
+
+	event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+	if (!event)
+		return NULL;
+
+	initialize_event(event);
 	event->to_tell = to_tell;
 
 	switch (data_type) {
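
On the producer side, fsnotify_create_event() pairs with fsnotify_add_notify_event(). A hypothetical caller (inode, group and the FS_MODIFY mask are placeholders for whatever the backend is reporting):

	struct fsnotify_event *event;

	event = fsnotify_create_event(inode, FS_MODIFY, NULL, FSNOTIFY_EVENT_NONE);
	if (!event)
		return -ENOMEM;

	fsnotify_add_notify_event(group, event);	/* the queue takes its own reference */
	fsnotify_add_notify_event(group, event);	/* identical to the tail: coalesced, no new entry */
	fsnotify_put_event(event);			/* drop the creation reference */

The second add illustrates the event_compare() merging above: an event identical to the current tail of the queue is dropped and the call returns 0 without queueing a duplicate.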
@@ -114,6 +326,10 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
 __init int fsnotify_notification_init(void)
 {
 	fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
+	fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
+
+	initialize_event(&q_overflow_event);
+	q_overflow_event.mask = FS_Q_OVERFLOW;
 
 	return 0;
 }
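
Because fsnotify_notification_init() gives q_overflow_event the FS_Q_OVERFLOW mask, a consumer can detect that it lost events by checking the mask of whatever it dequeues instead of comparing addresses. A hypothetical reader-side check:

	if (event->mask & FS_Q_OVERFLOW) {
		/* report the loss to userspace, e.g. as inotify's IN_Q_OVERFLOW */
	}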