aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric Paris <eparis@redhat.com>2009-12-17 21:24:24 -0500
committerEric Paris <eparis@redhat.com>2010-07-28 09:58:54 -0400
commitef5e2b785fb3216269e6d0656d38ec286b98dbe5 (patch)
tree6adf1f49a217c77e9ab28a5c57d1ed2c150008a8
parent000285deb99a5e0636fdd3c6a2483a5d039ee2c2 (diff)
dnotify: rename mark_entry to mark
nomenclature change.  Used to call things 'entries' but now we just call
them 'marks.'  Do those changes for dnotify.

Signed-off-by: Eric Paris <eparis@redhat.com>
-rw-r--r--  fs/notify/dnotify/dnotify.c | 170 ++++++++++++++++++++++----------------------
1 file changed, 85 insertions(+), 85 deletions(-)
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index b202bc590c61..3efb8b9a572d 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -29,7 +29,7 @@
 int dir_notify_enable __read_mostly = 1;
 
 static struct kmem_cache *dnotify_struct_cache __read_mostly;
-static struct kmem_cache *dnotify_mark_entry_cache __read_mostly;
+static struct kmem_cache *dnotify_mark_cache __read_mostly;
 static struct fsnotify_group *dnotify_group __read_mostly;
 static DEFINE_MUTEX(dnotify_mark_mutex);
 
@@ -38,8 +38,8 @@ static DEFINE_MUTEX(dnotify_mark_mutex);
  * is being watched by dnotify. If multiple userspace applications are watching
  * the same directory with dnotify their information is chained in dn
  */
-struct dnotify_mark_entry {
-	struct fsnotify_mark fsn_entry;
+struct dnotify_mark {
+	struct fsnotify_mark fsn_mark;
 	struct dnotify_struct *dn;
 };
 
@@ -51,27 +51,27 @@ struct dnotify_mark_entry {
  * it calls the fsnotify function so it can update the set of all events relevant
  * to this inode.
  */
-static void dnotify_recalc_inode_mask(struct fsnotify_mark *entry)
+static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
 {
 	__u32 new_mask, old_mask;
 	struct dnotify_struct *dn;
-	struct dnotify_mark_entry *dnentry = container_of(entry,
-						     struct dnotify_mark_entry,
-						     fsn_entry);
+	struct dnotify_mark *dn_mark = container_of(fsn_mark,
+						    struct dnotify_mark,
+						    fsn_mark);
 
-	assert_spin_locked(&entry->lock);
+	assert_spin_locked(&fsn_mark->lock);
 
-	old_mask = entry->mask;
+	old_mask = fsn_mark->mask;
 	new_mask = 0;
-	for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next)
+	for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
 		new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
-	entry->mask = new_mask;
+	fsn_mark->mask = new_mask;
 
 	if (old_mask == new_mask)
 		return;
 
-	if (entry->i.inode)
-		fsnotify_recalc_inode_mask(entry->i.inode);
+	if (fsn_mark->i.inode)
+		fsnotify_recalc_inode_mask(fsn_mark->i.inode);
 }
 
 /*
@@ -85,8 +85,8 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *entry)
 static int dnotify_handle_event(struct fsnotify_group *group,
 				struct fsnotify_event *event)
 {
-	struct fsnotify_mark *entry = NULL;
-	struct dnotify_mark_entry *dnentry;
+	struct fsnotify_mark *fsn_mark = NULL;
+	struct dnotify_mark *dn_mark;
 	struct inode *to_tell;
 	struct dnotify_struct *dn;
 	struct dnotify_struct **prev;
@@ -96,16 +96,16 @@ static int dnotify_handle_event(struct fsnotify_group *group,
 	to_tell = event->to_tell;
 
 	spin_lock(&to_tell->i_lock);
-	entry = fsnotify_find_mark(group, to_tell);
+	fsn_mark = fsnotify_find_mark(group, to_tell);
 	spin_unlock(&to_tell->i_lock);
 
 	/* unlikely since we alreay passed dnotify_should_send_event() */
-	if (unlikely(!entry))
+	if (unlikely(!fsn_mark))
 		return 0;
-	dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+	dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
 
-	spin_lock(&entry->lock);
-	prev = &dnentry->dn;
+	spin_lock(&fsn_mark->lock);
+	prev = &dn_mark->dn;
 	while ((dn = *prev) != NULL) {
 		if ((dn->dn_mask & test_mask) == 0) {
 			prev = &dn->dn_next;
@@ -118,12 +118,12 @@ static int dnotify_handle_event(struct fsnotify_group *group,
 		else {
 			*prev = dn->dn_next;
 			kmem_cache_free(dnotify_struct_cache, dn);
-			dnotify_recalc_inode_mask(entry);
+			dnotify_recalc_inode_mask(fsn_mark);
 		}
 	}
 
-	spin_unlock(&entry->lock);
-	fsnotify_put_mark(entry);
+	spin_unlock(&fsn_mark->lock);
+	fsnotify_put_mark(fsn_mark);
 
 	return 0;
 }
@@ -136,7 +136,7 @@ static bool dnotify_should_send_event(struct fsnotify_group *group,
 				      struct inode *inode, struct vfsmount *mnt,
 				      __u32 mask, void *data, int data_type)
 {
-	struct fsnotify_mark *entry;
+	struct fsnotify_mark *fsn_mark;
 	bool send;
 
 	/* !dir_notify_enable should never get here, don't waste time checking
@@ -148,30 +148,30 @@ static bool dnotify_should_send_event(struct fsnotify_group *group,
 		return false;
 
 	spin_lock(&inode->i_lock);
-	entry = fsnotify_find_mark(group, inode);
+	fsn_mark = fsnotify_find_mark(group, inode);
 	spin_unlock(&inode->i_lock);
 
 	/* no mark means no dnotify watch */
-	if (!entry)
+	if (!fsn_mark)
 		return false;
 
 	mask = (mask & ~FS_EVENT_ON_CHILD);
-	send = (mask & entry->mask);
+	send = (mask & fsn_mark->mask);
 
-	fsnotify_put_mark(entry);	/* matches fsnotify_find_mark */
+	fsnotify_put_mark(fsn_mark);	/* matches fsnotify_find_mark */
 
 	return send;
 }
 
-static void dnotify_free_mark(struct fsnotify_mark *entry)
+static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
 {
-	struct dnotify_mark_entry *dnentry = container_of(entry,
-						     struct dnotify_mark_entry,
-						     fsn_entry);
+	struct dnotify_mark *dn_mark = container_of(fsn_mark,
+						    struct dnotify_mark,
+						    fsn_mark);
 
-	BUG_ON(dnentry->dn);
+	BUG_ON(dn_mark->dn);
 
-	kmem_cache_free(dnotify_mark_entry_cache, dnentry);
+	kmem_cache_free(dnotify_mark_cache, dn_mark);
 }
 
 static struct fsnotify_ops dnotify_fsnotify_ops = {
@@ -191,8 +191,8 @@ static struct fsnotify_ops dnotify_fsnotify_ops = {
  */
 void dnotify_flush(struct file *filp, fl_owner_t id)
 {
-	struct fsnotify_mark *entry;
-	struct dnotify_mark_entry *dnentry;
+	struct fsnotify_mark *fsn_mark;
+	struct dnotify_mark *dn_mark;
 	struct dnotify_struct *dn;
 	struct dnotify_struct **prev;
 	struct inode *inode;
@@ -202,37 +202,37 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
 		return;
 
 	spin_lock(&inode->i_lock);
-	entry = fsnotify_find_mark(dnotify_group, inode);
+	fsn_mark = fsnotify_find_mark(dnotify_group, inode);
 	spin_unlock(&inode->i_lock);
-	if (!entry)
+	if (!fsn_mark)
 		return;
-	dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+	dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
 
 	mutex_lock(&dnotify_mark_mutex);
 
-	spin_lock(&entry->lock);
-	prev = &dnentry->dn;
+	spin_lock(&fsn_mark->lock);
+	prev = &dn_mark->dn;
 	while ((dn = *prev) != NULL) {
 		if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
 			*prev = dn->dn_next;
 			kmem_cache_free(dnotify_struct_cache, dn);
-			dnotify_recalc_inode_mask(entry);
+			dnotify_recalc_inode_mask(fsn_mark);
 			break;
 		}
 		prev = &dn->dn_next;
 	}
 
-	spin_unlock(&entry->lock);
+	spin_unlock(&fsn_mark->lock);
 
 	/* nothing else could have found us thanks to the dnotify_mark_mutex */
-	if (dnentry->dn == NULL)
-		fsnotify_destroy_mark(entry);
+	if (dn_mark->dn == NULL)
+		fsnotify_destroy_mark(fsn_mark);
 
 	fsnotify_recalc_group_mask(dnotify_group);
 
 	mutex_unlock(&dnotify_mark_mutex);
 
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(fsn_mark);
 }
 
 /* this conversion is done only at watch creation */
@@ -264,12 +264,12 @@ static __u32 convert_arg(unsigned long arg)
  * onto that mark.  This function either attaches the new dnotify_struct onto
  * that list, or it |= the mask onto an existing dnofiy_struct.
  */
-static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry,
+static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
 		     fl_owner_t id, int fd, struct file *filp, __u32 mask)
 {
 	struct dnotify_struct *odn;
 
-	odn = dnentry->dn;
+	odn = dn_mark->dn;
 	while (odn != NULL) {
 		/* adding more events to existing dnofiy_struct? */
 		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
@@ -284,8 +284,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
 	dn->dn_fd = fd;
 	dn->dn_filp = filp;
 	dn->dn_owner = id;
-	dn->dn_next = dnentry->dn;
-	dnentry->dn = dn;
+	dn->dn_next = dn_mark->dn;
+	dn_mark->dn = dn;
 
 	return 0;
 }
@@ -297,8 +297,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
  */
 int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 {
-	struct dnotify_mark_entry *new_dnentry, *dnentry;
-	struct fsnotify_mark *new_entry, *entry;
+	struct dnotify_mark *new_dn_mark, *dn_mark;
+	struct fsnotify_mark *new_fsn_mark, *fsn_mark;
 	struct dnotify_struct *dn;
 	struct inode *inode;
 	fl_owner_t id = current->files;
@@ -307,7 +307,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 	__u32 mask;
 
 	/* we use these to tell if we need to kfree */
-	new_entry = NULL;
+	new_fsn_mark = NULL;
 	dn = NULL;
 
 	if (!dir_notify_enable) {
@@ -337,8 +337,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 	}
 
 	/* new fsnotify mark, we expect most fcntl calls to add a new mark */
-	new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL);
-	if (!new_dnentry) {
+	new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
+	if (!new_dn_mark) {
 		error = -ENOMEM;
 		goto out_err;
 	}
@@ -346,29 +346,29 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 	/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
 	mask = convert_arg(arg);
 
-	/* set up the new_entry and new_dnentry */
-	new_entry = &new_dnentry->fsn_entry;
-	fsnotify_init_mark(new_entry, dnotify_free_mark);
-	new_entry->mask = mask;
-	new_dnentry->dn = NULL;
+	/* set up the new_fsn_mark and new_dn_mark */
+	new_fsn_mark = &new_dn_mark->fsn_mark;
+	fsnotify_init_mark(new_fsn_mark, dnotify_free_mark);
+	new_fsn_mark->mask = mask;
+	new_dn_mark->dn = NULL;
 
 	/* this is needed to prevent the fcntl/close race described below */
 	mutex_lock(&dnotify_mark_mutex);
 
-	/* add the new_entry or find an old one. */
+	/* add the new_fsn_mark or find an old one. */
 	spin_lock(&inode->i_lock);
-	entry = fsnotify_find_mark(dnotify_group, inode);
+	fsn_mark = fsnotify_find_mark(dnotify_group, inode);
 	spin_unlock(&inode->i_lock);
-	if (entry) {
-		dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
-		spin_lock(&entry->lock);
+	if (fsn_mark) {
+		dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
+		spin_lock(&fsn_mark->lock);
 	} else {
-		fsnotify_add_mark(new_entry, dnotify_group, inode, 0);
-		spin_lock(&new_entry->lock);
-		entry = new_entry;
-		dnentry = new_dnentry;
-		/* we used new_entry, so don't free it */
-		new_entry = NULL;
+		fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, 0);
+		spin_lock(&new_fsn_mark->lock);
+		fsn_mark = new_fsn_mark;
+		dn_mark = new_dn_mark;
+		/* we used new_fsn_mark, so don't free it */
+		new_fsn_mark = NULL;
 	}
 
 	rcu_read_lock();
@@ -377,17 +377,17 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 
 	/* if (f != filp) means that we lost a race and another task/thread
 	 * actually closed the fd we are still playing with before we grabbed
-	 * the dnotify_mark_mutex and entry->lock.  Since closing the fd is the
+	 * the dnotify_mark_mutex and fsn_mark->lock.  Since closing the fd is the
 	 * only time we clean up the marks we need to get our mark off
 	 * the list. */
 	if (f != filp) {
 		/* if we added ourselves, shoot ourselves, it's possible that
-		 * the flush actually did shoot this entry.  That's fine too
+		 * the flush actually did shoot this fsn_mark.  That's fine too
 		 * since multiple calls to destroy_mark is perfectly safe, if
-		 * we found a dnentry already attached to the inode, just sod
+		 * we found a dn_mark already attached to the inode, just sod
 		 * off silently as the flush at close time dealt with it.
 		 */
-		if (dnentry == new_dnentry)
+		if (dn_mark == new_dn_mark)
 			destroy = 1;
 		goto out;
 	}
@@ -395,13 +395,13 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 	if (error) {
 		/* if we added, we must shoot */
-		if (dnentry == new_dnentry)
+		if (dn_mark == new_dn_mark)
 			destroy = 1;
 		goto out;
 	}
 
-	error = attach_dn(dn, dnentry, id, fd, filp, mask);
-	/* !error means that we attached the dn to the dnentry, so don't free it */
+	error = attach_dn(dn, dn_mark, id, fd, filp, mask);
+	/* !error means that we attached the dn to the dn_mark, so don't free it */
 	if (!error)
 		dn = NULL;
 	/* -EEXIST means that we didn't add this new dn and used an old one.
@@ -409,20 +409,20 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 	else if (error == -EEXIST)
 		error = 0;
 
-	dnotify_recalc_inode_mask(entry);
+	dnotify_recalc_inode_mask(fsn_mark);
 out:
-	spin_unlock(&entry->lock);
+	spin_unlock(&fsn_mark->lock);
 
 	if (destroy)
-		fsnotify_destroy_mark(entry);
+		fsnotify_destroy_mark(fsn_mark);
 
 	fsnotify_recalc_group_mask(dnotify_group);
 
 	mutex_unlock(&dnotify_mark_mutex);
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(fsn_mark);
 out_err:
-	if (new_entry)
-		fsnotify_put_mark(new_entry);
+	if (new_fsn_mark)
+		fsnotify_put_mark(new_fsn_mark);
 	if (dn)
 		kmem_cache_free(dnotify_struct_cache, dn);
 	return error;
@@ -431,7 +431,7 @@ out_err:
 static int __init dnotify_init(void)
 {
 	dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
-	dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
+	dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC);
 
 	dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
 	if (IS_ERR(dnotify_group))