author		Linus Torvalds <torvalds@linux-foundation.org>	2009-08-27 15:26:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-08-27 15:26:02 -0400
commit		9c504cadc443a3d002fa581ec5109c0ef02d7b14 (patch)
tree		0b44c60843062e5ee8d91a636dab67ada5286156 /fs
parent		4f8ee2c9cc0e885d2bb50ef26db66150ab25213e (diff)
parent		0db501bd0610ee0c0aca84d927f90bcccd09e2bd (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify
* 'for-linus' of git://git.infradead.org/users/eparis/notify:
inotify: Ensure we alwasy write the terminating NULL.
inotify: fix locking around inotify watching in the idr
inotify: do not BUG on idr entries at inotify destruction
inotify: seperate new watch creation updating existing watches
Diffstat (limited to 'fs')
-rw-r--r--	fs/notify/inotify/inotify_fsnotify.c	 33
-rw-r--r--	fs/notify/inotify/inotify_user.c	229
2 files changed, 177 insertions(+), 85 deletions(-)
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 5dcbafe72d71..c9ee67b442e1 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -105,16 +105,45 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
 	return send;
 }
 
+/*
+ * This is NEVER supposed to be called. Inotify marks should either have been
+ * removed from the idr when the watch was removed or in the
+ * fsnotify_destroy_mark_by_group() call when the inotify instance was being
+ * torn down. This is only called if the idr is about to be freed but there
+ * are still marks in it.
+ */
 static int idr_callback(int id, void *p, void *data)
 {
-	BUG();
+	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark_entry *ientry;
+	static bool warned = false;
+
+	if (warned)
+		return 0;
+
+	warned = false;
+	entry = p;
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+
+	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+		"idr. Probably leaking memory\n", id, p, data);
+
+	/*
+	 * I'm taking the liberty of assuming that the mark in question is a
+	 * valid address and I'm dereferencing it. This might help to figure
+	 * out why we got here and the panic is no worse than the original
+	 * BUG() that was here.
+	 */
+	if (entry)
+		printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
+			entry->group, entry->inode, ientry->wd);
 	return 0;
 }
 
 static void inotify_free_group_priv(struct fsnotify_group *group)
 {
 	/* ideally the idr is empty and we won't hit the BUG in teh callback */
-	idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
+	idr_for_each(&group->inotify_data.idr, idr_callback, group);
 	idr_remove_all(&group->inotify_data.idr);
 	idr_destroy(&group->inotify_data.idr);
 }
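The new idr_callback() above downgrades the old hard BUG() to a one-time diagnostic, and inotify_free_group_priv() now passes the group as the opaque data argument so the callback can print it. As a general illustration of that teardown-audit pattern, here is a hypothetical sketch, not from the patch: leak_check() and audit_and_destroy() are invented names, only idr_for_each(), idr_destroy() and WARN() are real kernel interfaces, and this only builds in a kernel tree.

#include <linux/idr.h>
#include <linux/kernel.h>

/* Called once per entry left in the idr; 'data' is the last argument
 * that was handed to idr_for_each(). */
static int leak_check(int id, void *p, void *data)
{
	WARN(1, "id=%d entry=%p still present at teardown (owner=%p)\n",
	     id, p, data);
	return 0;	/* keep walking; a non-zero return stops the iteration */
}

/* Warn about anything still in the idr, then release its internal memory. */
static void audit_and_destroy(struct idr *idr, void *owner)
{
	idr_for_each(idr, leak_check, owner);
	idr_destroy(idr);
}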
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index dc32ed8323ba..0e781bc88d1e 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -47,9 +47,6 @@
 
 static struct vfsmount *inotify_mnt __read_mostly;
 
-/* this just sits here and wastes global memory. used to just pad userspace messages with zeros */
-static struct inotify_event nul_inotify_event;
-
 /* these are configurable via /proc/sys/fs/inotify/ */
 static int inotify_max_user_instances __read_mostly;
 static int inotify_max_queued_events __read_mostly;
@@ -199,8 +196,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 		inotify_free_event_priv(fsn_priv);
 	}
 
-	/* round up event->name_len so it is a multiple of event_size */
-	name_len = roundup(event->name_len, event_size);
+	/* round up event->name_len so it is a multiple of event_size
+	 * plus an extra byte for the terminating '\0'.
+	 */
+	name_len = roundup(event->name_len + 1, event_size);
 	inotify_event.len = name_len;
 
 	inotify_event.mask = inotify_mask_to_arg(event->mask);
@@ -224,8 +223,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 			return -EFAULT;
 		buf += event->name_len;
 
-		/* fill userspace with 0's from nul_inotify_event */
-		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
+		/* fill userspace with 0's */
+		if (clear_user(buf, len_to_zero))
 			return -EFAULT;
 		buf += len_to_zero;
 		event_size += name_len;
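The two hunks above are the user-visible part of the NUL-termination fix: name_len is now rounded up from name_len + 1, and the padding is zeroed with clear_user() instead of being copied from a dummy event, so the name a reader sees is always NUL-terminated. A small, hypothetical userspace consumer that relies on exactly that contract (the watch path and mask are chosen arbitrarily for illustration):

/* Iterate the variable-length records returned by read() on an inotify fd.
 * ev->len counts the name plus its zero padding, so ev->name can be treated
 * as an ordinary NUL-terminated string whenever ev->len != 0. */
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd = inotify_init();

	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
		perror("inotify");
		return 1;
	}

	for (;;) {
		ssize_t len = read(fd, buf, sizeof(buf));
		if (len <= 0)
			break;
		for (char *p = buf; p < buf + len; ) {
			struct inotify_event *ev = (struct inotify_event *)p;

			if (ev->len)	/* name is NUL-terminated by the kernel */
				printf("wd=%d mask=%#x name=%s\n",
				       ev->wd, (unsigned int)ev->mask, ev->name);
			p += sizeof(*ev) + ev->len;	/* len includes padding */
		}
	}
	close(fd);
	return 0;
}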
@@ -364,20 +363,53 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
 	return error;
 }
 
+/*
+ * Remove the mark from the idr (if present) and drop the reference
+ * on the mark because it was in the idr.
+ */
 static void inotify_remove_from_idr(struct fsnotify_group *group,
 				    struct inotify_inode_mark_entry *ientry)
 {
 	struct idr *idr;
+	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark_entry *found_ientry;
+	int wd;
 
 	spin_lock(&group->inotify_data.idr_lock);
 	idr = &group->inotify_data.idr;
-	idr_remove(idr, ientry->wd);
-	spin_unlock(&group->inotify_data.idr_lock);
+	wd = ientry->wd;
+
+	if (wd == -1)
+		goto out;
+
+	entry = idr_find(&group->inotify_data.idr, wd);
+	if (unlikely(!entry))
+		goto out;
+
+	found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	if (unlikely(found_ientry != ientry)) {
+		/* We found an entry in the idr with the right wd, but it's
+		 * not the entry we were told to remove. eparis seriously
+		 * fucked up somewhere. */
+		WARN_ON(1);
+		ientry->wd = -1;
+		goto out;
+	}
+
+	/* One ref for being in the idr, one ref held by the caller */
+	BUG_ON(atomic_read(&entry->refcnt) < 2);
+
+	idr_remove(idr, wd);
 	ientry->wd = -1;
+
+	/* removed from the idr, drop that ref */
+	fsnotify_put_mark(entry);
+out:
+	spin_unlock(&group->inotify_data.idr_lock);
 }
+
 /*
- * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
- * internal reference help on the mark because it is in the idr.
+ * Send IN_IGNORED for this wd, remove this wd from the idr.
  */
 void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 				    struct fsnotify_group *group)
@@ -417,9 +449,6 @@ skip_send_ignore:
 	/* remove this entry from the idr */
 	inotify_remove_from_idr(group, ientry);
 
-	/* removed from idr, drop that reference */
-	fsnotify_put_mark(entry);
-
 	atomic_dec(&group->inotify_data.user->inotify_watches);
 }
 
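The two hunks above encode one ownership rule: the reference the idr holds on the mark is now dropped by inotify_remove_from_idr() itself, under the idr lock, so the extra fsnotify_put_mark() that used to follow it would have become a double put. Below is a toy, purely illustrative userspace model of that rule, not kernel code (mark, mark_get(), mark_put() and watch_table are invented names): every container that stores a pointer owns one reference, and whoever removes the pointer drops exactly that one reference.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mark {
	atomic_int refcnt;
	int wd;
};

static struct mark *watch_table[16];	/* stands in for the kernel's idr */

static void mark_get(struct mark *m)
{
	atomic_fetch_add(&m->refcnt, 1);
}

static void mark_put(struct mark *m)
{
	/* previous value was 1: this was the last reference, free it */
	if (atomic_fetch_sub(&m->refcnt, 1) == 1) {
		printf("freeing mark (wd already cleared: %d)\n", m->wd);
		free(m);
	}
}

static void table_insert(struct mark *m, int wd)
{
	m->wd = wd;
	watch_table[wd] = m;
	mark_get(m);		/* the table now owns one reference */
}

static void table_remove(struct mark *m)
{
	/* mirror the patch: check that the slot really holds this mark */
	if (m->wd == -1 || watch_table[m->wd] != m)
		return;
	watch_table[m->wd] = NULL;
	m->wd = -1;
	mark_put(m);		/* drop the table's reference exactly once */
}

int main(void)
{
	struct mark *m = malloc(sizeof(*m));

	atomic_init(&m->refcnt, 1);	/* the caller's reference */
	m->wd = -1;
	table_insert(m, 3);
	table_remove(m);	/* drops the table's ref; caller's ref remains */
	table_remove(m);	/* second call is a no-op, not a double put */
	mark_put(m);		/* caller drops its ref; the mark is freed now */
	return 0;
}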
@@ -431,80 +460,29 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
 	kmem_cache_free(inotify_inode_mark_cachep, ientry);
 }
 
-static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+static int inotify_update_existing_watch(struct fsnotify_group *group,
+					 struct inode *inode,
+					 u32 arg)
 {
-	struct fsnotify_mark_entry *entry = NULL;
+	struct fsnotify_mark_entry *entry;
 	struct inotify_inode_mark_entry *ientry;
-	struct inotify_inode_mark_entry *tmp_ientry;
-	int ret = 0;
-	int add = (arg & IN_MASK_ADD);
-	__u32 mask;
 	__u32 old_mask, new_mask;
+	__u32 mask;
+	int add = (arg & IN_MASK_ADD);
+	int ret;
 
 	/* don't allow invalid bits: we don't want flags set */
 	mask = inotify_arg_to_mask(arg);
 	if (unlikely(!mask))
 		return -EINVAL;
 
-	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!tmp_ientry))
-		return -ENOMEM;
-	/* we set the mask at the end after attaching it */
-	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
-	tmp_ientry->wd = -1;
-
-find_entry:
 	spin_lock(&inode->i_lock);
 	entry = fsnotify_find_mark_entry(group, inode);
 	spin_unlock(&inode->i_lock);
-	if (entry) {
-		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
-	} else {
-		ret = -ENOSPC;
-		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
-			goto out_err;
-retry:
-		ret = -ENOMEM;
-		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
-			goto out_err;
-
-		spin_lock(&group->inotify_data.idr_lock);
-		ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
-					group->inotify_data.last_wd,
-					&tmp_ientry->wd);
-		spin_unlock(&group->inotify_data.idr_lock);
-		if (ret) {
-			if (ret == -EAGAIN)
-				goto retry;
-			goto out_err;
-		}
-
-		ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
-		if (ret) {
-			inotify_remove_from_idr(group, tmp_ientry);
-			if (ret == -EEXIST)
-				goto find_entry;
-			goto out_err;
-		}
-
-		/* tmp_ientry has been added to the inode, so we are all set up.
-		 * now we just need to make sure tmp_ientry doesn't get freed and
-		 * we need to set up entry and ientry so the generic code can
-		 * do its thing. */
-		ientry = tmp_ientry;
-		entry = &ientry->fsn_entry;
-		tmp_ientry = NULL;
-
-		atomic_inc(&group->inotify_data.user->inotify_watches);
-
-		/* update the idr hint */
-		group->inotify_data.last_wd = ientry->wd;
-
-		/* we put the mark on the idr, take a reference */
-		fsnotify_get_mark(entry);
-	}
+	if (!entry)
+		return -ENOENT;
 
-	ret = ientry->wd;
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
 	spin_lock(&entry->lock);
 
@@ -536,18 +514,103 @@ retry:
 		fsnotify_recalc_group_mask(group);
 	}
 
-	/* this either matches fsnotify_find_mark_entry, or init_mark_entry
-	 * depending on which path we took... */
+	/* return the wd */
+	ret = ientry->wd;
+
+	/* match the get from fsnotify_find_mark_entry() */
 	fsnotify_put_mark(entry);
 
+	return ret;
+}
+
+static int inotify_new_watch(struct fsnotify_group *group,
+			     struct inode *inode,
+			     u32 arg)
+{
+	struct inotify_inode_mark_entry *tmp_ientry;
+	__u32 mask;
+	int ret;
+
+	/* don't allow invalid bits: we don't want flags set */
+	mask = inotify_arg_to_mask(arg);
+	if (unlikely(!mask))
+		return -EINVAL;
+
+	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_ientry))
+		return -ENOMEM;
+
+	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+	tmp_ientry->fsn_entry.mask = mask;
+	tmp_ientry->wd = -1;
+
+	ret = -ENOSPC;
+	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
+		goto out_err;
+retry:
+	ret = -ENOMEM;
+	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
+		goto out_err;
+
+	spin_lock(&group->inotify_data.idr_lock);
+	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+				group->inotify_data.last_wd,
+				&tmp_ientry->wd);
+	spin_unlock(&group->inotify_data.idr_lock);
+	if (ret) {
+		/* idr was out of memory allocate and try again */
+		if (ret == -EAGAIN)
+			goto retry;
+		goto out_err;
+	}
+
+	/* we put the mark on the idr, take a reference */
+	fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
+	/* we are on the idr, now get on the inode */
+	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+	if (ret) {
+		/* we failed to get on the inode, get off the idr */
+		inotify_remove_from_idr(group, tmp_ientry);
+		goto out_err;
+	}
+
+	/* update the idr hint, who cares about races, it's just a hint */
+	group->inotify_data.last_wd = tmp_ientry->wd;
+
+	/* increment the number of watches the user has */
+	atomic_inc(&group->inotify_data.user->inotify_watches);
+
+	/* return the watch descriptor for this new entry */
+	ret = tmp_ientry->wd;
+
+	/* match the ref from fsnotify_init_markentry() */
+	fsnotify_put_mark(&tmp_ientry->fsn_entry);
+
 out_err:
-	/* could be an error, could be that we found an existing mark */
-	if (tmp_ientry) {
-		/* on the idr but didn't make it on the inode */
-		if (tmp_ientry->wd != -1)
-			inotify_remove_from_idr(group, tmp_ientry);
+	if (ret < 0)
 		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
-	}
+
+	return ret;
+}
+
+static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+{
+	int ret = 0;
+
+retry:
+	/* try to update and existing watch with the new arg */
+	ret = inotify_update_existing_watch(group, inode, arg);
+	/* no mark present, try to add a new one */
+	if (ret == -ENOENT)
+		ret = inotify_new_watch(group, inode, arg);
+	/*
+	 * inotify_new_watch could race with another thread which did an
+	 * inotify_new_watch between the update_existing and the add watch
+	 * here, go back and try to update an existing mark again.
+	 */
+	if (ret == -EEXIST)
+		goto retry;
 
 	return ret;
 }
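The new inotify_update_watch() above first tries inotify_update_existing_watch() and falls back to inotify_new_watch(), retrying when the -EEXIST race is lost. From userspace the behaviour is unchanged: calling inotify_add_watch() again on the same inode returns the same watch descriptor, and the second mask either replaces the first or is OR'd into it when IN_MASK_ADD is set. A small, hypothetical demonstration of that contract (paths and masks chosen arbitrarily):

/* A second inotify_add_watch() on the same path updates the existing watch:
 * the kernel returns the same wd, and IN_MASK_ADD controls whether the new
 * mask replaces the old one or is OR'd into it. */
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	int fd = inotify_init();

	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}

	int wd1 = inotify_add_watch(fd, "/tmp", IN_CREATE);
	/* replaces the mask: now only IN_DELETE is watched */
	int wd2 = inotify_add_watch(fd, "/tmp", IN_DELETE);
	/* IN_MASK_ADD: ORs IN_MODIFY into the existing mask instead */
	int wd3 = inotify_add_watch(fd, "/tmp", IN_MODIFY | IN_MASK_ADD);

	/* all three calls refer to one watch, so the wd is the same */
	printf("wd1=%d wd2=%d wd3=%d\n", wd1, wd2, wd3);

	close(fd);
	return 0;
}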