author     Eric Paris <eparis@redhat.com>   2009-05-21 17:01:26 -0400
committer  Eric Paris <eparis@redhat.com>   2009-06-11 14:57:53 -0400
commit     3be25f49b9d6a97eae9bcb96d3292072b7658bd8 (patch)
tree       36f7d96481a47a6bde3c2f961346e940698111e0 /fs/notify
parent     90586523eb4b349806887c62ee70685a49415124 (diff)
fsnotify: add marks to inodes so groups can interpret how to handle those inodes
This patch creates a way for fsnotify groups to attach marks to inodes. These marks have little meaning to the generic fsnotify infrastructure, so their meaning should be interpreted by the group that attached them to the inode's list.

dnotify and inotify will make use of these markings to indicate which inodes are of interest to their respective groups. But this implementation has the useful property that, in the future, other listeners could use the marks for the exact opposite reason, i.e. to indicate which inodes they have no interest in.

Signed-off-by: Eric Paris <eparis@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
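For orientation, here is a minimal sketch of how a backend could attach one of these marks using the API this patch adds (fsnotify_init_mark(), fsnotify_add_mark(), fsnotify_put_mark()). The my_free_mark() and my_backend_watch_inode() helpers are hypothetical, shown only to illustrate the intended calling convention; they are not part of the patch:

/* Hypothetical backend code, for illustration of the API added below. */
static void my_free_mark(struct fsnotify_mark_entry *entry)
{
        kfree(entry);   /* called when the mark's refcnt drops to zero */
}

static int my_backend_watch_inode(struct fsnotify_group *group, struct inode *inode)
{
        struct fsnotify_mark_entry *entry;
        int ret;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        fsnotify_init_mark(entry, my_free_mark);
        entry->mask = FS_CREATE | FS_DELETE;    /* events this group cares about */

        ret = fsnotify_add_mark(entry, group, inode);
        if (ret == -EEXIST)
                fsnotify_put_mark(entry);       /* group already had a mark here */
        /* on success the backend keeps its initial reference so it can find
         * the mark again later */
        return ret;
}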
Diffstat (limited to 'fs/notify')
-rw-r--r--  fs/notify/Makefile     |   2
-rw-r--r--  fs/notify/fsnotify.c   |  13
-rw-r--r--  fs/notify/fsnotify.h   |   5
-rw-r--r--  fs/notify/group.c      |  49
-rw-r--r--  fs/notify/inode_mark.c | 329
5 files changed, 396 insertions, 2 deletions
diff --git a/fs/notify/Makefile b/fs/notify/Makefile
index db5467b5b58d..0922cc826c46 100644
--- a/fs/notify/Makefile
+++ b/fs/notify/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_FSNOTIFY)        += fsnotify.o notification.o group.o
+obj-$(CONFIG_FSNOTIFY)        += fsnotify.o notification.o group.o inode_mark.o
 
 obj-y                  += dnotify/
 obj-y                  += inotify/
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 56bee0f10c38..d5654629c659 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -26,6 +26,15 @@
 #include "fsnotify.h"
 
 /*
+ * Clear all of the marks on an inode when it is being evicted from core
+ */
+void __fsnotify_inode_delete(struct inode *inode)
+{
+        fsnotify_clear_marks_by_inode(inode);
+}
+EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
+
+/*
  * This is the main call to fsnotify.  The VFS calls into hook specific functions
  * in linux/fsnotify.h.  Those functions then in turn call here.  Here will call
  * out to all of the registered fsnotify_group.  Those groups can then use the
@@ -43,6 +52,8 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is)
         if (!(mask & fsnotify_mask))
                 return;
 
+        if (!(mask & to_tell->i_fsnotify_mask))
+                return;
         /*
          * SRCU!! the groups list is very very much read only and the path is
          * very hot.  The VAST majority of events are not going to need to do
@@ -51,6 +62,8 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is)
         idx = srcu_read_lock(&fsnotify_grp_srcu);
         list_for_each_entry_rcu(group, &fsnotify_groups, group_list) {
                 if (mask & group->mask) {
+                        if (!group->ops->should_send_event(group, to_tell, mask))
+                                continue;
                         if (!event) {
                                 event = fsnotify_create_event(to_tell, mask, data, data_is);
                                 /* shit, we OOM'd and now we can't tell, maybe
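The should_send_event() hook consulted above is where a group gets to look at the marks this series attaches to inodes. A rough sketch of such a callback, built only on helpers introduced later in this patch; the name my_should_send_event() and the exact callback signature are assumptions for illustration:

/* Hypothetical group callback: report interest only if this group has a
 * mark on the inode and that mark's mask includes the event. */
static bool my_should_send_event(struct fsnotify_group *group,
                                 struct inode *inode, __u32 mask)
{
        struct fsnotify_mark_entry *entry;
        bool send;

        spin_lock(&inode->i_lock);
        entry = fsnotify_find_mark_entry(group, inode);
        spin_unlock(&inode->i_lock);
        if (!entry)
                return false;

        send = (entry->mask & mask);
        fsnotify_put_mark(entry);       /* drop the ref find_mark_entry took */
        return send;
}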
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index c6a8bd476572..8ebcbe893c91 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -12,4 +12,9 @@ extern struct srcu_struct fsnotify_grp_srcu;
 extern struct list_head fsnotify_groups;
 /* all bitwise OR of all event types (FS_*) for all fsnotify_groups */
 extern __u32 fsnotify_mask;
+
+/* final kfree of a group */
+extern void fsnotify_final_destroy_group(struct fsnotify_group *group);
+/* run the list of all marks associated with inode and flag them to be freed */
+extern void fsnotify_clear_marks_by_inode(struct inode *inode);
 #endif  /* __FS_NOTIFY_FSNOTIFY_H_ */
diff --git a/fs/notify/group.c b/fs/notify/group.c
index c6812953b968..a29d2fa67927 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -55,6 +55,29 @@ void fsnotify_recalc_global_mask(void)
 }
 
 /*
+ * Update the group->mask by running all of the marks associated with this
+ * group and finding the bitwise | of all of the mark->mask.  If we change
+ * the group->mask we need to update the global mask of events interesting
+ * to the system.
+ */
+void fsnotify_recalc_group_mask(struct fsnotify_group *group)
+{
+        __u32 mask = 0;
+        __u32 old_mask = group->mask;
+        struct fsnotify_mark_entry *entry;
+
+        spin_lock(&group->mark_lock);
+        list_for_each_entry(entry, &group->mark_entries, g_list)
+                mask |= entry->mask;
+        spin_unlock(&group->mark_lock);
+
+        group->mask = mask;
+
+        if (old_mask != mask)
+                fsnotify_recalc_global_mask();
+}
+
+/*
  * Take a reference to a group so things found under the fsnotify_grp_mutex
  * can't get freed under us
  */
@@ -66,7 +89,7 @@ static void fsnotify_get_group(struct fsnotify_group *group)
 /*
  * Final freeing of a group
  */
-static void fsnotify_destroy_group(struct fsnotify_group *group)
+void fsnotify_final_destroy_group(struct fsnotify_group *group)
 {
         if (group->ops->free_group_priv)
                 group->ops->free_group_priv(group);
@@ -75,6 +98,24 @@ static void fsnotify_destroy_group(struct fsnotify_group *group)
 }
 
 /*
+ * Trying to get rid of a group.  We need to first get rid of any outstanding
+ * allocations and then free the group.  Remember that fsnotify_clear_marks_by_group
+ * could miss marks that are being freed by inode and those marks could still
+ * hold a reference to this group (via group->num_marks).  If we get into that
+ * situation, fsnotify_final_destroy_group will get called when that final
+ * mark is freed.
+ */
+static void fsnotify_destroy_group(struct fsnotify_group *group)
+{
+        /* clear all inode mark entries for this group */
+        fsnotify_clear_marks_by_group(group);
+
+        /* past the point of no return, matches the initial value of 1 */
+        if (atomic_dec_and_test(&group->num_marks))
+                fsnotify_final_destroy_group(group);
+}
+
+/*
  * Remove this group from the global list of groups that will get events
  * this can be done even if there are still references and things still using
  * this group.  This just stops the group from getting new events.
@@ -173,6 +214,10 @@ struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
         group->group_num = group_num;
         group->mask = mask;
 
+        spin_lock_init(&group->mark_lock);
+        atomic_set(&group->num_marks, 0);
+        INIT_LIST_HEAD(&group->mark_entries);
+
         group->ops = ops;
 
         mutex_lock(&fsnotify_grp_mutex);
@@ -188,6 +233,8 @@ struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
         /* group not found, add a new one */
         list_add_rcu(&group->group_list, &fsnotify_groups);
         group->on_group_list = 1;
+        /* being on the fsnotify_groups list holds one num_marks */
+        atomic_inc(&group->num_marks);
 
         mutex_unlock(&fsnotify_grp_mutex);
 
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
new file mode 100644
index 000000000000..cdc154146974
--- /dev/null
+++ b/fs/notify/inode_mark.c
@@ -0,0 +1,329 @@
+/*
+ *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * fsnotify inode mark locking/lifetime/and refcnting
+ *
+ * REFCNT:
+ * The mark->refcnt tells how many "things" in the kernel currently are
+ * referencing this object.  The object typically will live inside the kernel
+ * with a refcnt of 2, one for each list it is on (i_list, g_list).  Any task
+ * which can find this object holding the appropriate locks can take a reference
+ * and the object itself is guaranteed to survive until the reference is dropped.
+ *
+ * LOCKING:
+ * There are 3 spinlocks involved with fsnotify inode marks and they MUST
+ * be taken in order as follows:
+ *
+ * entry->lock
+ * group->mark_lock
+ * inode->i_lock
+ *
+ * entry->lock protects 2 things, entry->group and entry->inode.  You must hold
+ * that lock to dereference either of these things (they could be NULL even with
+ * the lock)
+ *
+ * group->mark_lock protects the mark_entries list anchored inside a given group
+ * and each entry is hooked via the g_list.  It also sorta protects the
+ * free_g_list, which when used is anchored by a private list on the stack of the
+ * task which held the group->mark_lock.
+ *
+ * inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
+ * given inode and each entry is hooked via the i_list. (and sorta the
+ * free_i_list)
+ *
+ *
+ * LIFETIME:
+ * Inode marks survive between when they are added to an inode and when their
+ * refcnt==0.
+ *
+ * The inode mark can be cleared for a number of different reasons including:
+ * - The inode is unlinked for the last time.  (fsnotify_inode_remove)
+ * - The inode is being evicted from cache.  (fsnotify_inode_delete)
+ * - The fs the inode is on is unmounted.  (fsnotify_inode_delete/fsnotify_unmount_inodes)
+ * - Something explicitly requests that it be removed.  (fsnotify_destroy_mark_by_entry)
+ * - The fsnotify_group associated with the mark is going away and all such marks
+ *   need to be cleaned up.  (fsnotify_clear_marks_by_group)
+ *
+ * Worst case we are given an inode and need to clean up all the marks on that
+ * inode.  We take i_lock and walk the i_fsnotify_mark_entries safely.  For each
+ * mark on the list we take a reference (so the mark can't disappear under us).
+ * We remove that mark from the inode's list of marks and we add this mark to a
+ * private list anchored on the stack using i_free_list.  At this point we no
+ * longer fear anything finding the mark using the inode's list of marks.
+ *
+ * We can safely and locklessly run the private list on the stack of everything
+ * we just unattached from the original inode.  For each mark on the private list
+ * we grab the mark->lock and can thus dereference mark->group and mark->inode.  If
+ * we see the group and inode are not NULL we take those locks.  Now holding all
+ * 3 locks we can completely remove the mark from other tasks finding it in the
+ * future.  Remember, 10 things might already be referencing this mark, but they
+ * better be holding a ref.  We drop the reference we took before we unhooked it
+ * from the inode.  When the ref hits 0 we can free the mark.
+ *
+ * Very similarly for freeing by group, except we use free_g_list.
+ *
+ * This has the very interesting property of being able to run concurrently with
+ * any (or all) other directions.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/atomic.h>
+
+#include <linux/fsnotify_backend.h>
+#include "fsnotify.h"
+
+void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
+{
+        atomic_inc(&entry->refcnt);
+}
+
+void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
+{
+        if (atomic_dec_and_test(&entry->refcnt))
+                entry->free_mark(entry);
+}
+
+/*
+ * Recalculate the mask of events relevant to a given inode, with i_lock held.
+ */
+static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
+{
+        struct fsnotify_mark_entry *entry;
+        struct hlist_node *pos;
+        __u32 new_mask = 0;
+
+        assert_spin_locked(&inode->i_lock);
+
+        hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
+                new_mask |= entry->mask;
+        inode->i_fsnotify_mask = new_mask;
+}
+
+/*
+ * Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types
+ * any notifier is interested in hearing for this inode.
+ */
+void fsnotify_recalc_inode_mask(struct inode *inode)
+{
+        spin_lock(&inode->i_lock);
+        fsnotify_recalc_inode_mask_locked(inode);
+        spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Any time a mark is getting freed we end up here.
+ * The caller had better be holding a reference to this mark so we don't actually
+ * do the final put under the entry->lock
+ */
+void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
+{
+        struct fsnotify_group *group;
+        struct inode *inode;
+
+        spin_lock(&entry->lock);
+
+        group = entry->group;
+        inode = entry->inode;
+
+        BUG_ON(group && !inode);
+        BUG_ON(!group && inode);
+
+        /* if !group something else already marked this to die */
+        if (!group) {
+                spin_unlock(&entry->lock);
+                return;
+        }
+
+        /* 1 from caller and 1 for being on i_list/g_list */
+        BUG_ON(atomic_read(&entry->refcnt) < 2);
+
+        spin_lock(&group->mark_lock);
+        spin_lock(&inode->i_lock);
+
+        hlist_del_init(&entry->i_list);
+        entry->inode = NULL;
+
+        list_del_init(&entry->g_list);
+        entry->group = NULL;
+
+        fsnotify_put_mark(entry); /* for i_list and g_list */
+
+        /*
+         * this mark is now off the inode->i_fsnotify_mark_entries list and we
+         * hold the inode->i_lock, so this is the perfect time to update the
+         * inode->i_fsnotify_mask
+         */
+        fsnotify_recalc_inode_mask_locked(inode);
+
+        spin_unlock(&inode->i_lock);
+        spin_unlock(&group->mark_lock);
+        spin_unlock(&entry->lock);
+
+        /*
+         * Some groups like to know that marks are being freed.  This is a
+         * callback to the group function to let it know that this entry
+         * is being freed.
+         */
+        group->ops->freeing_mark(entry, group);
+
+        /*
+         * it's possible that this group tried to destroy itself, but this
+         * mark was simultaneously being freed by inode.  If that's the
+         * case, we finish freeing the group here.
+         */
+        if (unlikely(atomic_dec_and_test(&group->num_marks)))
+                fsnotify_final_destroy_group(group);
+}
+
+/*
+ * Given a group, destroy all of the marks associated with that group.
+ */
+void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
+{
+        struct fsnotify_mark_entry *lentry, *entry;
+        LIST_HEAD(free_list);
+
+        spin_lock(&group->mark_lock);
+        list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
+                list_add(&entry->free_g_list, &free_list);
+                list_del_init(&entry->g_list);
+                fsnotify_get_mark(entry);
+        }
+        spin_unlock(&group->mark_lock);
+
+        list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
+                fsnotify_destroy_mark_by_entry(entry);
+                fsnotify_put_mark(entry);
+        }
+}
+
+/*
+ * Given an inode, destroy all of the marks associated with that inode.
+ */
+void fsnotify_clear_marks_by_inode(struct inode *inode)
+{
+        struct fsnotify_mark_entry *entry, *lentry;
+        struct hlist_node *pos, *n;
+        LIST_HEAD(free_list);
+
+        spin_lock(&inode->i_lock);
+        hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
+                list_add(&entry->free_i_list, &free_list);
+                hlist_del_init(&entry->i_list);
+                fsnotify_get_mark(entry);
+        }
+        spin_unlock(&inode->i_lock);
+
+        list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
+                fsnotify_destroy_mark_by_entry(entry);
+                fsnotify_put_mark(entry);
+        }
+}
+
+/*
+ * given a group and inode, find the mark associated with that combination.
+ * if found take a reference to that mark and return it, else return NULL
+ */
+struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
+                                                     struct inode *inode)
+{
+        struct fsnotify_mark_entry *entry;
+        struct hlist_node *pos;
+
+        assert_spin_locked(&inode->i_lock);
+
+        hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
+                if (entry->group == group) {
+                        fsnotify_get_mark(entry);
+                        return entry;
+                }
+        }
+        return NULL;
+}
+
+/*
+ * Nothing fancy, just initialize lists and locks and counters.
+ */
+void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
+                        void (*free_mark)(struct fsnotify_mark_entry *entry))
+
+{
+        spin_lock_init(&entry->lock);
+        atomic_set(&entry->refcnt, 1);
+        INIT_HLIST_NODE(&entry->i_list);
+        entry->group = NULL;
+        entry->mask = 0;
+        entry->inode = NULL;
+        entry->free_mark = free_mark;
+}
+
+/*
+ * Attach an initialized mark entry to a given group and inode.
+ * These marks may be used for the fsnotify backend to determine which
+ * event types should be delivered to which group and for which inodes.
+ */
+int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
+                      struct fsnotify_group *group, struct inode *inode)
+{
+        struct fsnotify_mark_entry *lentry;
+        int ret = 0;
+
+        /*
+         * LOCKING ORDER!!!!
+         * entry->lock
+         * group->mark_lock
+         * inode->i_lock
+         */
+        spin_lock(&entry->lock);
+        spin_lock(&group->mark_lock);
+        spin_lock(&inode->i_lock);
+
+        entry->group = group;
+        entry->inode = inode;
+
+        lentry = fsnotify_find_mark_entry(group, inode);
+        if (!lentry) {
+                hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
+                list_add(&entry->g_list, &group->mark_entries);
+
+                fsnotify_get_mark(entry); /* for i_list and g_list */
+
+                atomic_inc(&group->num_marks);
+
+                fsnotify_recalc_inode_mask_locked(inode);
+        }
+
+        spin_unlock(&inode->i_lock);
+        spin_unlock(&group->mark_lock);
+        spin_unlock(&entry->lock);
+
+        if (lentry) {
+                ret = -EEXIST;
+                fsnotify_put_mark(lentry);
+        }
+
+        return ret;
+}
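For completeness, a hypothetical teardown path showing how a backend could later drop a mark it added with the helpers above. fsnotify_find_mark_entry() must be called with inode->i_lock held and returns with a reference, which is dropped after fsnotify_destroy_mark_by_entry() unhooks the mark; this mirrors what fsnotify_clear_marks_by_inode() does internally. my_backend_unwatch_inode() is illustrative only, not part of the patch:

/* Hypothetical backend teardown path, for illustration only. */
static void my_backend_unwatch_inode(struct fsnotify_group *group, struct inode *inode)
{
        struct fsnotify_mark_entry *entry;

        spin_lock(&inode->i_lock);
        entry = fsnotify_find_mark_entry(group, inode); /* takes a reference */
        spin_unlock(&inode->i_lock);
        if (!entry)
                return;

        fsnotify_destroy_mark_by_entry(entry); /* unhook from inode and group lists */
        fsnotify_put_mark(entry);              /* drop the reference from the lookup */
}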