Diffstat (limited to 'fs/notify/inotify')

-rw-r--r--	fs/notify/inotify/Kconfig            |   2
-rw-r--r--	fs/notify/inotify/inotify_fsnotify.c |  46
-rw-r--r--	fs/notify/inotify/inotify_user.c     | 271
3 files changed, 226 insertions, 93 deletions
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 5356884289a1..3e56dbffe729 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -15,7 +15,7 @@ config INOTIFY
 
 config INOTIFY_USER
 	bool "Inotify support for userspace"
-	depends on FSNOTIFY
+	select FSNOTIFY
 	default y
 	---help---
 	  Say Y here to enable inotify support for userspace, including the
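A note on the Kconfig hunk: "depends on FSNOTIFY" made INOTIFY_USER selectable only on configs where FSNOTIFY had already been enabled by hand, whereas "select FSNOTIFY" reverses the relationship, so enabling INOTIFY_USER (which defaults to y) now pulls the fsnotify backend in automatically.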
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 47cd258fd24d..c9ee67b442e1 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -62,13 +62,14 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
 	event_priv->wd = wd;
 
 	ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
-	/* EEXIST is not an error */
-	if (ret == -EEXIST)
-		ret = 0;
-
-	/* did event_priv get attached? */
-	if (list_empty(&fsn_event_priv->event_list))
+	if (ret) {
 		inotify_free_event_priv(fsn_event_priv);
+		/* EEXIST says we tail matched, EOVERFLOW isn't something
+		 * to report up the stack. */
+		if ((ret == -EEXIST) ||
+		    (ret == -EOVERFLOW))
+			ret = 0;
+	}
 
 	/*
 	 * If we hold the entry until after the event is on the queue
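Some context for the -EOVERFLOW branch above: a full notification queue is not reported to userspace as a failing system call; the kernel queues a single IN_Q_OVERFLOW event and the syscall path treats -EOVERFLOW as success. A minimal userspace sketch of how that surfaces to a reader (the watched path /tmp is arbitrary and most error handling is omitted):

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	/* align the buffer so casting to struct inotify_event is safe */
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd = inotify_init();

	inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);

	for (;;) {
		ssize_t len = read(fd, buf, sizeof(buf));

		if (len <= 0)
			break;
		for (char *p = buf; p < buf + len; ) {
			struct inotify_event *ev = (struct inotify_event *)p;

			if (ev->mask & IN_Q_OVERFLOW)
				fprintf(stderr, "queue overflowed, events were dropped\n");
			else if (ev->len)
				printf("wd=%d name=%s\n", ev->wd, ev->name);
			p += sizeof(struct inotify_event) + ev->len;
		}
	}
	return 0;
}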
@@ -104,16 +105,45 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
 	return send;
 }
 
+/*
+ * This is NEVER supposed to be called.  Inotify marks should either have been
+ * removed from the idr when the watch was removed or in the
+ * fsnotify_destroy_mark_by_group() call when the inotify instance was being
+ * torn down.  This is only called if the idr is about to be freed but there
+ * are still marks in it.
+ */
 static int idr_callback(int id, void *p, void *data)
 {
-	BUG();
+	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark_entry *ientry;
+	static bool warned = false;
+
+	if (warned)
+		return 0;
+
+	warned = true;
+	entry = p;
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+
+	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+		"idr.  Probably leaking memory\n", id, p, data);
+
+	/*
+	 * I'm taking the liberty of assuming that the mark in question is a
+	 * valid address and I'm dereferencing it.  This might help to figure
+	 * out why we got here and the panic is no worse than the original
+	 * BUG() that was here.
+	 */
+	if (entry)
+		printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
+			entry->group, entry->inode, ientry->wd);
 	return 0;
 }
 
 static void inotify_free_group_priv(struct fsnotify_group *group)
 {
 	/* ideally the idr is empty and we won't hit the BUG in the callback */
-	idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
+	idr_for_each(&group->inotify_data.idr, idr_callback, group);
 	idr_remove_all(&group->inotify_data.idr);
 	idr_destroy(&group->inotify_data.idr);
 }
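Two details of the idr_callback() rewrite are easy to miss: idr_for_each() now passes the group as the callback's data argument so the WARN can print it, and the function-local static flag turns the diagnostic into a warn-once so a teardown with many leaked marks does not flood the log. The idiom in isolation (plain userspace C, illustrative names):

#include <stdio.h>
#include <stdbool.h>

/* The first leaked id is reported; the static flag silences the rest. */
static void report_leak(int id)
{
	static bool warned = false;

	if (warned)
		return;
	warned = true;

	fprintf(stderr, "leaked entry id=%d (suppressing further reports)\n", id);
}

int main(void)
{
	int id;

	for (id = 1; id <= 3; id++)
		report_leak(id);	/* only id=1 produces output */
	return 0;
}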
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ff27a2965844..dcd2040d330c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -47,9 +47,6 @@
 
 static struct vfsmount *inotify_mnt __read_mostly;
 
-/* this just sits here and wastes global memory.  used to just pad userspace messages with zeros */
-static struct inotify_event nul_inotify_event;
-
 /* these are configurable via /proc/sys/fs/inotify/ */
 static int inotify_max_user_instances __read_mostly;
 static int inotify_max_queued_events __read_mostly;
@@ -57,7 +54,6 @@ int inotify_max_user_watches __read_mostly;
 
 static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 struct kmem_cache *event_priv_cachep __read_mostly;
-static struct fsnotify_event *inotify_ignored_event;
 
 /*
  * When inotify registers a new group it increments this and uses that
@@ -158,7 +154,8 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 
 	event = fsnotify_peek_notify_event(group);
 
-	event_size += roundup(event->name_len, event_size);
+	if (event->name_len)
+		event_size += roundup(event->name_len + 1, event_size);
 
 	if (event_size > count)
 		return ERR_PTR(-EINVAL);
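The extra byte is the point of this hunk: the name slot is rounded up to a multiple of sizeof(struct inotify_event), so a name whose length was already an exact multiple used to fill its slot completely, leaving no room for a terminating '\0'. A worked example of the new arithmetic (userspace sketch; sizeof(struct inotify_event) is typically 16 bytes, and ROUNDUP mirrors the kernel's roundup()):

#include <stdio.h>
#include <string.h>
#include <sys/inotify.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	const char *name = "foo";
	size_t name_len = strlen(name);		/* 3 */
	size_t event_size = sizeof(struct inotify_event);

	/* new scheme: reserve name_len + 1 bytes, then round up */
	if (name_len)
		event_size += ROUNDUP(name_len + 1, sizeof(struct inotify_event));

	/* "foo": 16 + roundup(4, 16) = 32 bytes; a 16-byte name now costs
	 * 16 + roundup(17, 16) = 48, where the old code would have packed
	 * it into 32 with no room for the trailing '\0'. */
	printf("record for \"%s\": %zu bytes\n", name, event_size);
	return 0;
}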
@@ -184,7 +181,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 	struct fsnotify_event_private_data *fsn_priv;
 	struct inotify_event_private_data *priv;
 	size_t event_size = sizeof(struct inotify_event);
-	size_t name_len;
+	size_t name_len = 0;
 
 	/* we get the inotify watch descriptor from the event private data */
 	spin_lock(&event->lock);
@@ -200,8 +197,12 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 		inotify_free_event_priv(fsn_priv);
 	}
 
-	/* round up event->name_len so it is a multiple of event_size */
-	name_len = roundup(event->name_len, event_size);
+	/*
+	 * round up event->name_len so it is a multiple of event_size
+	 * plus an extra byte for the terminating '\0'.
+	 */
+	if (event->name_len)
+		name_len = roundup(event->name_len + 1, event_size);
 	inotify_event.len = name_len;
 
 	inotify_event.mask = inotify_mask_to_arg(event->mask);
@@ -225,8 +226,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 			return -EFAULT;
 		buf += event->name_len;
 
-		/* fill userspace with 0's from nul_inotify_event */
-		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
+		/* fill userspace with 0's */
+		if (clear_user(buf, len_to_zero))
 			return -EFAULT;
 		buf += len_to_zero;
 		event_size += name_len;
@@ -327,8 +328,9 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
 		list_for_each_entry(holder, &group->notification_list, event_list) {
 			event = holder->event;
 			send_len += sizeof(struct inotify_event);
-			send_len += roundup(event->name_len,
-					    sizeof(struct inotify_event));
+			if (event->name_len)
+				send_len += roundup(event->name_len + 1,
+						sizeof(struct inotify_event));
 		}
 		mutex_unlock(&group->notification_mutex);
 		ret = put_user(send_len, (int __user *) p);
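This hunk keeps FIONREAD consistent with the two changes above: the ioctl has to account for names exactly the way get_one_event() and copy_event_to_user() do, otherwise a buffer sized from FIONREAD could come up one byte short per event. A sketch of the usage it protects (the path and the sleep are placeholders):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/inotify.h>

int main(void)
{
	int pending = 0;
	int fd = inotify_init();

	inotify_add_watch(fd, "/tmp", IN_ALL_EVENTS);
	sleep(5);	/* let some events queue up */

	if (ioctl(fd, FIONREAD, &pending) == 0 && pending > 0) {
		char *buf = malloc(pending);
		ssize_t len = read(fd, buf, pending);

		printf("FIONREAD said %d, read() returned %zd\n", pending, len);
		free(buf);
	}
	return 0;
}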
@@ -366,20 +368,71 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
 }
 
 /*
- * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
- * internal reference help on the mark because it is in the idr.
+ * Remove the mark from the idr (if present) and drop the reference
+ * on the mark because it was in the idr.
+ */
+static void inotify_remove_from_idr(struct fsnotify_group *group,
+				    struct inotify_inode_mark_entry *ientry)
+{
+	struct idr *idr;
+	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark_entry *found_ientry;
+	int wd;
+
+	spin_lock(&group->inotify_data.idr_lock);
+	idr = &group->inotify_data.idr;
+	wd = ientry->wd;
+
+	if (wd == -1)
+		goto out;
+
+	entry = idr_find(&group->inotify_data.idr, wd);
+	if (unlikely(!entry))
+		goto out;
+
+	found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	if (unlikely(found_ientry != ientry)) {
+		/* We found an entry in the idr with the right wd, but it's
+		 * not the entry we were told to remove.  eparis seriously
+		 * fucked up somewhere. */
+		WARN_ON(1);
+		ientry->wd = -1;
+		goto out;
+	}
+
+	/* One ref for being in the idr, one ref held by the caller */
+	BUG_ON(atomic_read(&entry->refcnt) < 2);
+
+	idr_remove(idr, wd);
+	ientry->wd = -1;
+
+	/* removed from the idr, drop that ref */
+	fsnotify_put_mark(entry);
+out:
+	spin_unlock(&group->inotify_data.idr_lock);
+}
+
+/*
+ * Send IN_IGNORED for this wd, remove this wd from the idr.
  */
 void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 				    struct fsnotify_group *group)
 {
 	struct inotify_inode_mark_entry *ientry;
+	struct fsnotify_event *ignored_event;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
-	struct idr *idr;
+	int ret;
+
+	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
+					      FSNOTIFY_EVENT_NONE, NULL, 0,
+					      GFP_NOFS);
+	if (!ignored_event)
+		return;
 
 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
-	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
+	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
 	if (unlikely(!event_priv))
 		goto skip_send_ignore;
 
@@ -388,22 +441,19 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 	fsn_event_priv->group = group;
 	event_priv->wd = ientry->wd;
 
-	fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
-
-	/* did the private data get added? */
-	if (list_empty(&fsn_event_priv->event_list))
+	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
+	if (ret)
 		inotify_free_event_priv(fsn_event_priv);
 
 skip_send_ignore:
 
+	/* matches the reference taken when the event was created */
+	fsnotify_put_event(ignored_event);
+
 	/* remove this entry from the idr */
-	spin_lock(&group->inotify_data.idr_lock);
-	idr = &group->inotify_data.idr;
-	idr_remove(idr, ientry->wd);
-	spin_unlock(&group->inotify_data.idr_lock);
+	inotify_remove_from_idr(group, ientry);
 
-	/* removed from idr, drop that reference */
-	fsnotify_put_mark(entry);
+	atomic_dec(&group->inotify_data.user->inotify_watches);
 }
 
 /* ding dong the mark is dead */
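The rework above also pins down the userspace contract: every watch teardown funnels through inotify_ignored_and_remove_idr(), so the reader always sees IN_IGNORED before the kernel is free to recycle the wd. A sketch of the matching userspace bookkeeping (wd_table and MAX_WD are illustrative, not part of any API):

#include <stdio.h>
#include <sys/inotify.h>

#define MAX_WD 1024

static const char *wd_table[MAX_WD];	/* wd -> watched path */

void handle_event(const struct inotify_event *ev)
{
	if (ev->wd < 0 || ev->wd >= MAX_WD)
		return;

	if (ev->mask & IN_IGNORED) {
		/* the watch is gone (removed explicitly or with its inode)
		 * and the kernel may now hand this wd out again */
		printf("watch on %s is gone\n", wd_table[ev->wd]);
		wd_table[ev->wd] = NULL;
		return;
	}
	/* ... ordinary event handling ... */
}

int main(void)
{
	struct inotify_event ev = { .wd = 1, .mask = IN_IGNORED };

	wd_table[1] = "/tmp";	/* pretend wd 1 watched /tmp */
	handle_event(&ev);
	return 0;
}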
@@ -414,67 +464,29 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
 	kmem_cache_free(inotify_inode_mark_cachep, ientry);
 }
 
-static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+static int inotify_update_existing_watch(struct fsnotify_group *group,
+					 struct inode *inode,
+					 u32 arg)
 {
-	struct fsnotify_mark_entry *entry = NULL;
+	struct fsnotify_mark_entry *entry;
 	struct inotify_inode_mark_entry *ientry;
-	int ret = 0;
-	int add = (arg & IN_MASK_ADD);
-	__u32 mask;
 	__u32 old_mask, new_mask;
+	__u32 mask;
+	int add = (arg & IN_MASK_ADD);
+	int ret;
 
 	/* don't allow invalid bits: we don't want flags set */
 	mask = inotify_arg_to_mask(arg);
 	if (unlikely(!mask))
 		return -EINVAL;
 
-	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!ientry))
-		return -ENOMEM;
-	/* we set the mask at the end after attaching it */
-	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
-	ientry->wd = 0;
-
-find_entry:
 	spin_lock(&inode->i_lock);
 	entry = fsnotify_find_mark_entry(group, inode);
 	spin_unlock(&inode->i_lock);
-	if (entry) {
-		kmem_cache_free(inotify_inode_mark_cachep, ientry);
-		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
-	} else {
-		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
-			ret = -ENOSPC;
-			goto out_err;
-		}
-
-		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
-		if (ret == -EEXIST)
-			goto find_entry;
-		else if (ret)
-			goto out_err;
+	if (!entry)
+		return -ENOENT;
 
-		entry = &ientry->fsn_entry;
-retry:
-		ret = -ENOMEM;
-		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
-			goto out_err;
-
-		spin_lock(&group->inotify_data.idr_lock);
-		/* if entry is added to the idr we keep the reference obtained
-		 * through fsnotify_mark_add.  remember to drop this reference
-		 * when entry is removed from idr */
-		ret = idr_get_new_above(&group->inotify_data.idr, entry,
-					++group->inotify_data.last_wd,
-					&ientry->wd);
-		spin_unlock(&group->inotify_data.idr_lock);
-		if (ret) {
-			if (ret == -EAGAIN)
-				goto retry;
-			goto out_err;
-		}
-		atomic_inc(&group->inotify_data.user->inotify_watches);
-	}
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
 	spin_lock(&entry->lock);
 
@@ -506,14 +518,108 @@ retry:
 		fsnotify_recalc_group_mask(group);
 	}
 
-	return ientry->wd;
+	/* return the wd */
+	ret = ientry->wd;
 
-out_err:
-	/* see this isn't supposed to happen, just kill the watch */
-	if (entry) {
-		fsnotify_destroy_mark_by_entry(entry);
-		fsnotify_put_mark(entry);
-	}
+	/* match the get from fsnotify_find_mark_entry() */
+	fsnotify_put_mark(entry);
+
+	return ret;
+}
+
+static int inotify_new_watch(struct fsnotify_group *group,
+			     struct inode *inode,
+			     u32 arg)
+{
+	struct inotify_inode_mark_entry *tmp_ientry;
+	__u32 mask;
+	int ret;
+
+	/* don't allow invalid bits: we don't want flags set */
+	mask = inotify_arg_to_mask(arg);
+	if (unlikely(!mask))
+		return -EINVAL;
+
+	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_ientry))
+		return -ENOMEM;
+
+	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+	tmp_ientry->fsn_entry.mask = mask;
+	tmp_ientry->wd = -1;
+
+	ret = -ENOSPC;
+	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
+		goto out_err;
+retry:
+	ret = -ENOMEM;
+	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
+		goto out_err;
+
+	spin_lock(&group->inotify_data.idr_lock);
+	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+				group->inotify_data.last_wd,
+				&tmp_ientry->wd);
+	spin_unlock(&group->inotify_data.idr_lock);
+	if (ret) {
+		/* idr was out of memory, allocate and try again */
+		if (ret == -EAGAIN)
+			goto retry;
+		goto out_err;
+	}
+
+	/* we put the mark on the idr, take a reference */
+	fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
+	/* we are on the idr, now get on the inode */
+	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+	if (ret) {
+		/* we failed to get on the inode, get off the idr */
+		inotify_remove_from_idr(group, tmp_ientry);
+		goto out_err;
 	}
+
+	/* update the idr hint, who cares about races, it's just a hint */
+	group->inotify_data.last_wd = tmp_ientry->wd;
+
+	/* increment the number of watches the user has */
+	atomic_inc(&group->inotify_data.user->inotify_watches);
+
+	/* return the watch descriptor for this new entry */
+	ret = tmp_ientry->wd;
+
+	/* match the ref from fsnotify_init_markentry() */
+	fsnotify_put_mark(&tmp_ientry->fsn_entry);
+
+	/* if this mark added a new event update the group mask */
+	if (mask & ~group->mask)
+		fsnotify_recalc_group_mask(group);
+
+out_err:
+	if (ret < 0)
+		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
+
+	return ret;
+}
+
+static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+{
+	int ret = 0;
+
+retry:
+	/* try to update an existing watch with the new arg */
+	ret = inotify_update_existing_watch(group, inode, arg);
+	/* no mark present, try to add a new one */
+	if (ret == -ENOENT)
+		ret = inotify_new_watch(group, inode, arg);
+	/*
+	 * inotify_new_watch could race with another thread which did an
+	 * inotify_new_watch between the update_existing and the add watch
+	 * here, go back and try to update an existing mark again.
+	 */
+	if (ret == -EEXIST)
+		goto retry;
+
 	return ret;
 }
 
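The retry loop exists because the split into update and create opens a window: another task's inotify_new_watch() can install a mark on the same inode between our failed update and our own create, which then fails with -EEXIST, and updating the now-present mark is the right response. A toy, single-threaded model of that control flow (slot_mask stands in for the per-inode mark; the race itself only arises with concurrent callers, as the comments note):

#include <stdio.h>
#include <errno.h>

static int slot_mask = -1;		/* -1: no mark on the "inode" */

static int update_existing(int mask)
{
	if (slot_mask == -1)
		return -ENOENT;
	slot_mask |= mask;
	return 0;
}

static int create_new(int mask)
{
	if (slot_mask != -1)
		return -EEXIST;		/* somebody beat us to it */
	slot_mask = mask;
	return 0;
}

static int update_watch(int mask)
{
	int ret;
retry:
	ret = update_existing(mask);
	if (ret == -ENOENT)
		ret = create_new(mask);
	if (ret == -EEXIST)		/* lost the race: mark exists now */
		goto retry;		/* so the update path will succeed */
	return ret;
}

int main(void)
{
	printf("first add: %d\n", update_watch(0x1));	/* creates */
	printf("second add: %d\n", update_watch(0x2));	/* updates */
	printf("mask now 0x%x\n", slot_mask);		/* 0x3 */
	return 0;
}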
@@ -532,7 +638,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
 
 	spin_lock_init(&group->inotify_data.idr_lock);
 	idr_init(&group->inotify_data.idr);
-	group->inotify_data.last_wd = 0;
+	group->inotify_data.last_wd = 1;
 	group->inotify_data.user = user;
 	group->inotify_data.fa = NULL;
 
@@ -721,9 +827,6 @@ static int __init inotify_user_setup(void)
 
 	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
-	inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
-	if (!inotify_ignored_event)
-		panic("unable to allocate the inotify ignored event\n");
 
 	inotify_max_queued_events = 16384;
 	inotify_max_user_instances = 128;