author     Eric Paris <eparis@redhat.com>    2009-12-17 21:24:24 -0500
committer  Eric Paris <eparis@redhat.com>    2010-07-28 09:58:54 -0400
commit     000285deb99a5e0636fdd3c6a2483a5d039ee2c2 (patch)
tree       45b13f2253265703a540bdd99685f4f56ac8e21b
parent     841bdc10f573aa010dd5818d35a5690b7d9f73ce (diff)
inotify: rename mark_entry to just mark
rename anything in inotify that deals with mark_entry to just be mark. It makes a lot more sense.

Signed-off-by: Eric Paris <eparis@redhat.com>
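The patch is a mechanical rename; no behaviour is intended to change. As a minimal illustration of the naming it introduces, here is a stand-alone C sketch (not part of the patch: container_of() is redefined locally and the structs are trimmed stand-ins for the real fsnotify types) showing the new struct inotify_inode_mark with its embedded fsn_mark member and the container_of() recovery pattern that the diff below rewrites in inotify_handle_event() and friends:

/*
 * Stand-alone sketch, not part of the patch: trimmed stand-ins for the
 * kernel types plus a local container_of(), just to show the renamed
 * struct inotify_inode_mark and its fsn_mark member in use.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fsnotify_mark {			/* reduced stand-in */
	unsigned int mask;
};

struct inotify_inode_mark {		/* was: struct inotify_inode_mark_entry */
	struct fsnotify_mark fsn_mark;	/* was: fsn_entry */
	int wd;
};

int main(void)
{
	struct inotify_inode_mark i_mark = { .fsn_mark = { .mask = 0 }, .wd = 42 };
	struct fsnotify_mark *fsn_mark = &i_mark.fsn_mark;

	/* recover the containing inotify mark from the generic mark --
	 * the same container_of() pattern the patch renames throughout */
	struct inotify_inode_mark *i_mark_p =
		container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	printf("wd=%d\n", i_mark_p->wd);
	return 0;
}

Compiled with any C99 compiler this prints wd=42; in the kernel itself the same recovery is done with the real fsnotify types.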
-rw-r--r--   fs/notify/inotify/inotify.h          |   7
-rw-r--r--   fs/notify/inotify/inotify_fsnotify.c |  48
-rw-r--r--   fs/notify/inotify/inotify_user.c     | 192
3 files changed, 123 insertions, 124 deletions
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
index 07be6df2428f..b6642e4de4bf 100644
--- a/fs/notify/inotify/inotify.h
+++ b/fs/notify/inotify/inotify.h
@@ -9,13 +9,12 @@ struct inotify_event_private_data {
 	int wd;
 };
 
-struct inotify_inode_mark_entry {
-	/* fsnotify_mark MUST be the first thing */
-	struct fsnotify_mark fsn_entry;
+struct inotify_inode_mark {
+	struct fsnotify_mark fsn_mark;
 	int wd;
 };
 
-extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *entry,
+extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
 					   struct fsnotify_group *group);
 extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
 
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index f8a2a6eda133..12dc72be992e 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -88,8 +88,8 @@ static int inotify_merge(struct list_head *list, struct fsnotify_event *event)
 
 static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
 {
-	struct fsnotify_mark *entry;
-	struct inotify_inode_mark_entry *ientry;
+	struct fsnotify_mark *fsn_mark;
+	struct inotify_inode_mark *i_mark;
 	struct inode *to_tell;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
@@ -98,14 +98,14 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
 	to_tell = event->to_tell;
 
 	spin_lock(&to_tell->i_lock);
-	entry = fsnotify_find_mark(group, to_tell);
+	fsn_mark = fsnotify_find_mark(group, to_tell);
 	spin_unlock(&to_tell->i_lock);
 	/* race with watch removal? We already passes should_send */
-	if (unlikely(!entry))
+	if (unlikely(!fsn_mark))
 		return 0;
-	ientry = container_of(entry, struct inotify_inode_mark_entry,
-			      fsn_entry);
-	wd = ientry->wd;
+	i_mark = container_of(fsn_mark, struct inotify_inode_mark,
+			      fsn_mark);
+	wd = i_mark->wd;
 
 	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
 	if (unlikely(!event_priv))
@@ -127,37 +127,37 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
 	}
 
 	/*
-	 * If we hold the entry until after the event is on the queue
+	 * If we hold the fsn_mark until after the event is on the queue
 	 * IN_IGNORED won't be able to pass this event in the queue
 	 */
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(fsn_mark);
 
 	return ret;
 }
 
-static void inotify_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
+static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
 {
-	inotify_ignored_and_remove_idr(entry, group);
+	inotify_ignored_and_remove_idr(fsn_mark, group);
 }
 
 static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
 				      struct vfsmount *mnt, __u32 mask, void *data,
 				      int data_type)
 {
-	struct fsnotify_mark *entry;
+	struct fsnotify_mark *fsn_mark;
 	bool send;
 
 	spin_lock(&inode->i_lock);
-	entry = fsnotify_find_mark(group, inode);
+	fsn_mark = fsnotify_find_mark(group, inode);
 	spin_unlock(&inode->i_lock);
-	if (!entry)
+	if (!fsn_mark)
 		return false;
 
 	mask = (mask & ~FS_EVENT_ON_CHILD);
-	send = (entry->mask & mask);
+	send = (fsn_mark->mask & mask);
 
 	/* find took a reference */
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(fsn_mark);
 
 	return send;
 }
@@ -171,18 +171,18 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
  */
 static int idr_callback(int id, void *p, void *data)
 {
-	struct fsnotify_mark *entry;
-	struct inotify_inode_mark_entry *ientry;
+	struct fsnotify_mark *fsn_mark;
+	struct inotify_inode_mark *i_mark;
 	static bool warned = false;
 
 	if (warned)
 		return 0;
 
 	warned = true;
-	entry = p;
-	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	fsn_mark = p;
+	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
 
-	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
 		"idr. Probably leaking memory\n", id, p, data);
 
 	/*
@@ -191,9 +191,9 @@ static int idr_callback(int id, void *p, void *data)
 	 * out why we got here and the panic is no worse than the original
 	 * BUG() that was here.
 	 */
-	if (entry)
-		printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
-			entry->group, entry->i.inode, ientry->wd);
+	if (fsn_mark)
+		printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
+			fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
 	return 0;
 }
 
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 118085c9d2d9..80d102acb86b 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -353,7 +353,7 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
 
 static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
 			      int *last_wd,
-			      struct inotify_inode_mark_entry *ientry)
+			      struct inotify_inode_mark *i_mark)
 {
 	int ret;
 
@@ -362,12 +362,12 @@ static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
 			return -ENOMEM;
 
 		spin_lock(idr_lock);
-		ret = idr_get_new_above(idr, ientry, *last_wd + 1,
-					&ientry->wd);
+		ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
+					&i_mark->wd);
 		/* we added the mark to the idr, take a reference */
 		if (!ret) {
-			fsnotify_get_mark(&ientry->fsn_entry);
-			*last_wd = ientry->wd;
+			*last_wd = i_mark->wd;
+			fsnotify_get_mark(&i_mark->fsn_mark);
 		}
 		spin_unlock(idr_lock);
 	} while (ret == -EAGAIN);
@@ -375,53 +375,53 @@ static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
 	return ret;
 }
 
-static struct inotify_inode_mark_entry *inotify_idr_find_locked(struct fsnotify_group *group,
+static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
 								int wd)
 {
 	struct idr *idr = &group->inotify_data.idr;
 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
-	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark *i_mark;
 
 	assert_spin_locked(idr_lock);
 
-	ientry = idr_find(idr, wd);
-	if (ientry) {
-		struct fsnotify_mark *fsn_entry = &ientry->fsn_entry;
+	i_mark = idr_find(idr, wd);
+	if (i_mark) {
+		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
 
-		fsnotify_get_mark(fsn_entry);
+		fsnotify_get_mark(fsn_mark);
 		/* One ref for being in the idr, one ref we just took */
-		BUG_ON(atomic_read(&fsn_entry->refcnt) < 2);
+		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
 	}
 
-	return ientry;
+	return i_mark;
 }
 
-static struct inotify_inode_mark_entry *inotify_idr_find(struct fsnotify_group *group,
+static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
 							 int wd)
 {
-	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark *i_mark;
 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
 
 	spin_lock(idr_lock);
-	ientry = inotify_idr_find_locked(group, wd);
+	i_mark = inotify_idr_find_locked(group, wd);
 	spin_unlock(idr_lock);
 
-	return ientry;
+	return i_mark;
 }
 
 static void do_inotify_remove_from_idr(struct fsnotify_group *group,
-				       struct inotify_inode_mark_entry *ientry)
+				       struct inotify_inode_mark *i_mark)
 {
 	struct idr *idr = &group->inotify_data.idr;
 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
-	int wd = ientry->wd;
+	int wd = i_mark->wd;
 
 	assert_spin_locked(idr_lock);
 
 	idr_remove(idr, wd);
 
 	/* removed from the idr, drop that ref */
-	fsnotify_put_mark(&ientry->fsn_entry);
+	fsnotify_put_mark(&i_mark->fsn_mark);
 }
 
 /*
@@ -429,48 +429,48 @@ static void do_inotify_remove_from_idr(struct fsnotify_group *group,
  * on the mark because it was in the idr.
  */
 static void inotify_remove_from_idr(struct fsnotify_group *group,
-				    struct inotify_inode_mark_entry *ientry)
+				    struct inotify_inode_mark *i_mark)
 {
 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
-	struct inotify_inode_mark_entry *found_ientry = NULL;
+	struct inotify_inode_mark *found_i_mark = NULL;
 	int wd;
 
 	spin_lock(idr_lock);
-	wd = ientry->wd;
+	wd = i_mark->wd;
 
 	/*
-	 * does this ientry think it is in the idr? we shouldn't get called
+	 * does this i_mark think it is in the idr? we shouldn't get called
 	 * if it wasn't....
 	 */
 	if (wd == -1) {
-		WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p"
-			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
-			ientry->fsn_entry.group, ientry->fsn_entry.i.inode);
+		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
 		goto out;
 	}
 
 	/* Lets look in the idr to see if we find it */
-	found_ientry = inotify_idr_find_locked(group, wd);
-	if (unlikely(!found_ientry)) {
-		WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p"
-			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
-			ientry->fsn_entry.group, ientry->fsn_entry.i.inode);
+	found_i_mark = inotify_idr_find_locked(group, wd);
+	if (unlikely(!found_i_mark)) {
+		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
 		goto out;
 	}
 
 	/*
-	 * We found an entry in the idr at the right wd, but it's
-	 * not the entry we were told to remove. eparis seriously
+	 * We found an mark in the idr at the right wd, but it's
+	 * not the mark we were told to remove. eparis seriously
 	 * fucked up somewhere.
 	 */
-	if (unlikely(found_ientry != ientry)) {
-		WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p "
-			"entry->inode=%p found_ientry=%p found_ientry->wd=%d "
-			"found_ientry->group=%p found_ientry->inode=%p\n",
-			__func__, ientry, ientry->wd, ientry->fsn_entry.group,
-			ientry->fsn_entry.i.inode, found_ientry, found_ientry->wd,
-			found_ientry->fsn_entry.group,
-			found_ientry->fsn_entry.i.inode);
+	if (unlikely(found_i_mark != i_mark)) {
+		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
+			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
+			"found_i_mark->group=%p found_i_mark->inode=%p\n",
+			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
+			i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
+			found_i_mark->fsn_mark.group,
+			found_i_mark->fsn_mark.i.inode);
 		goto out;
 	}
 
@@ -479,30 +479,30 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
 	 * one ref held by the caller trying to kill us
 	 * one ref grabbed by inotify_idr_find
 	 */
-	if (unlikely(atomic_read(&ientry->fsn_entry.refcnt) < 3)) {
-		printk(KERN_ERR "%s: ientry=%p ientry->wd=%d ientry->group=%p"
-			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
-			ientry->fsn_entry.group, ientry->fsn_entry.i.inode);
+	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
+		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
 		/* we can't really recover with bad ref cnting.. */
 		BUG();
 	}
 
-	do_inotify_remove_from_idr(group, ientry);
+	do_inotify_remove_from_idr(group, i_mark);
 out:
 	/* match the ref taken by inotify_idr_find_locked() */
-	if (found_ientry)
-		fsnotify_put_mark(&found_ientry->fsn_entry);
-	ientry->wd = -1;
+	if (found_i_mark)
+		fsnotify_put_mark(&found_i_mark->fsn_mark);
+	i_mark->wd = -1;
 	spin_unlock(idr_lock);
 }
 
 /*
  * Send IN_IGNORED for this wd, remove this wd from the idr.
  */
-void inotify_ignored_and_remove_idr(struct fsnotify_mark *entry,
+void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
 				    struct fsnotify_group *group)
 {
-	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark *i_mark;
 	struct fsnotify_event *ignored_event;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
@@ -514,7 +514,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *entry,
 	if (!ignored_event)
 		return;
 
-	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
 
 	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
 	if (unlikely(!event_priv))
@@ -523,7 +523,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *entry,
 	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
 
 	fsn_event_priv->group = group;
-	event_priv->wd = ientry->wd;
+	event_priv->wd = i_mark->wd;
 
 	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
 	if (ret)
@@ -534,28 +534,28 @@ skip_send_ignore:
 	/* matches the reference taken when the event was created */
 	fsnotify_put_event(ignored_event);
 
-	/* remove this entry from the idr */
-	inotify_remove_from_idr(group, ientry);
+	/* remove this mark from the idr */
+	inotify_remove_from_idr(group, i_mark);
 
 	atomic_dec(&group->inotify_data.user->inotify_watches);
 }
 
 /* ding dong the mark is dead */
-static void inotify_free_mark(struct fsnotify_mark *entry)
+static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
 {
-	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark *i_mark;
 
-	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
 
-	kmem_cache_free(inotify_inode_mark_cachep, ientry);
+	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
 }
 
 static int inotify_update_existing_watch(struct fsnotify_group *group,
 					 struct inode *inode,
 					 u32 arg)
 {
-	struct fsnotify_mark *entry;
-	struct inotify_inode_mark_entry *ientry;
+	struct fsnotify_mark *fsn_mark;
+	struct inotify_inode_mark *i_mark;
 	__u32 old_mask, new_mask;
 	__u32 mask;
 	int add = (arg & IN_MASK_ADD);
@@ -567,35 +567,35 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
 		return -EINVAL;
 
 	spin_lock(&inode->i_lock);
-	entry = fsnotify_find_mark(group, inode);
+	fsn_mark = fsnotify_find_mark(group, inode);
 	spin_unlock(&inode->i_lock);
-	if (!entry)
+	if (!fsn_mark)
 		return -ENOENT;
 
-	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
 
-	spin_lock(&entry->lock);
+	spin_lock(&fsn_mark->lock);
 
-	old_mask = entry->mask;
+	old_mask = fsn_mark->mask;
 	if (add) {
-		entry->mask |= mask;
-		new_mask = entry->mask;
+		fsn_mark->mask |= mask;
+		new_mask = fsn_mark->mask;
 	} else {
-		entry->mask = mask;
-		new_mask = entry->mask;
+		fsn_mark->mask = mask;
+		new_mask = fsn_mark->mask;
 	}
 
-	spin_unlock(&entry->lock);
+	spin_unlock(&fsn_mark->lock);
 
 	if (old_mask != new_mask) {
 		/* more bits in old than in new? */
 		int dropped = (old_mask & ~new_mask);
-		/* more bits in this entry than the inode's mask? */
+		/* more bits in this fsn_mark than the inode's mask? */
 		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
-		/* more bits in this entry than the group? */
+		/* more bits in this fsn_mark than the group? */
 		int do_group = (new_mask & ~group->mask);
 
-		/* update the inode with this new entry */
+		/* update the inode with this new fsn_mark */
 		if (dropped || do_inode)
 			fsnotify_recalc_inode_mask(inode);
 
@@ -605,10 +605,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
 	}
 
 	/* return the wd */
-	ret = ientry->wd;
+	ret = i_mark->wd;
 
 	/* match the get from fsnotify_find_mark() */
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(fsn_mark);
 
 	return ret;
 }
@@ -617,7 +617,7 @@ static int inotify_new_watch(struct fsnotify_group *group,
 			     struct inode *inode,
 			     u32 arg)
 {
-	struct inotify_inode_mark_entry *tmp_ientry;
+	struct inotify_inode_mark *tmp_i_mark;
 	__u32 mask;
 	int ret;
 	struct idr *idr = &group->inotify_data.idr;
@@ -628,44 +628,44 @@ static int inotify_new_watch(struct fsnotify_group *group,
 	if (unlikely(!mask))
 		return -EINVAL;
 
-	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!tmp_ientry))
+	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_i_mark))
 		return -ENOMEM;
 
-	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
-	tmp_ientry->fsn_entry.mask = mask;
-	tmp_ientry->wd = -1;
+	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
+	tmp_i_mark->fsn_mark.mask = mask;
+	tmp_i_mark->wd = -1;
 
 	ret = -ENOSPC;
 	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
 		goto out_err;
 
 	ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
-				 tmp_ientry);
+				 tmp_i_mark);
 	if (ret)
 		goto out_err;
 
 	/* we are on the idr, now get on the inode */
-	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode, 0);
+	ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, 0);
 	if (ret) {
 		/* we failed to get on the inode, get off the idr */
-		inotify_remove_from_idr(group, tmp_ientry);
+		inotify_remove_from_idr(group, tmp_i_mark);
 		goto out_err;
 	}
 
 	/* increment the number of watches the user has */
 	atomic_inc(&group->inotify_data.user->inotify_watches);
 
-	/* return the watch descriptor for this new entry */
-	ret = tmp_ientry->wd;
+	/* return the watch descriptor for this new mark */
+	ret = tmp_i_mark->wd;
 
 	/* if this mark added a new event update the group mask */
 	if (mask & ~group->mask)
 		fsnotify_recalc_group_mask(group);
 
 out_err:
-	/* match the ref from fsnotify_init_markentry() */
-	fsnotify_put_mark(&tmp_ientry->fsn_entry);
+	/* match the ref from fsnotify_init_mark() */
+	fsnotify_put_mark(&tmp_i_mark->fsn_mark);
 
 	return ret;
 }
@@ -801,7 +801,7 @@ fput_and_out:
 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 {
 	struct fsnotify_group *group;
-	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark *i_mark;
 	struct file *filp;
 	int ret = 0, fput_needed;
 
@@ -817,16 +817,16 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 	group = filp->private_data;
 
 	ret = -EINVAL;
-	ientry = inotify_idr_find(group, wd);
-	if (unlikely(!ientry))
+	i_mark = inotify_idr_find(group, wd);
+	if (unlikely(!i_mark))
 		goto out;
 
 	ret = 0;
 
-	fsnotify_destroy_mark(&ientry->fsn_entry);
+	fsnotify_destroy_mark(&i_mark->fsn_mark);
 
 	/* match ref taken by inotify_idr_find */
-	fsnotify_put_mark(&ientry->fsn_entry);
+	fsnotify_put_mark(&i_mark->fsn_mark);
 
 out:
 	fput_light(filp, fput_needed);
@@ -840,7 +840,7 @@ out:
  */
 static int __init inotify_user_setup(void)
 {
-	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
+	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
 
 	inotify_max_queued_events = 16384;