diff options
Diffstat (limited to 'fs/notify/inotify/inotify_user.c')
-rw-r--r-- | fs/notify/inotify/inotify_user.c | 238 |
1 files changed, 150 insertions, 88 deletions
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index f30d9bbc2e1..0e781bc88d1 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
@@ -47,9 +47,6 @@ | |||
47 | 47 | ||
48 | static struct vfsmount *inotify_mnt __read_mostly; | 48 | static struct vfsmount *inotify_mnt __read_mostly; |
49 | 49 | ||
50 | /* this just sits here and wastes global memory. used to just pad userspace messages with zeros */ | ||
51 | static struct inotify_event nul_inotify_event; | ||
52 | |||
53 | /* these are configurable via /proc/sys/fs/inotify/ */ | 50 | /* these are configurable via /proc/sys/fs/inotify/ */ |
54 | static int inotify_max_user_instances __read_mostly; | 51 | static int inotify_max_user_instances __read_mostly; |
55 | static int inotify_max_queued_events __read_mostly; | 52 | static int inotify_max_queued_events __read_mostly; |
@@ -199,8 +196,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, | |||
199 | inotify_free_event_priv(fsn_priv); | 196 | inotify_free_event_priv(fsn_priv); |
200 | } | 197 | } |
201 | 198 | ||
202 | /* round up event->name_len so it is a multiple of event_size */ | 199 | /* round up event->name_len so it is a multiple of event_size |
203 | name_len = roundup(event->name_len, event_size); | 200 | * plus an extra byte for the terminating '\0'. |
201 | */ | ||
202 | name_len = roundup(event->name_len + 1, event_size); | ||
204 | inotify_event.len = name_len; | 203 | inotify_event.len = name_len; |
205 | 204 | ||
206 | inotify_event.mask = inotify_mask_to_arg(event->mask); | 205 | inotify_event.mask = inotify_mask_to_arg(event->mask); |
@@ -224,8 +223,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, | |||
224 | return -EFAULT; | 223 | return -EFAULT; |
225 | buf += event->name_len; | 224 | buf += event->name_len; |
226 | 225 | ||
227 | /* fill userspace with 0's from nul_inotify_event */ | 226 | /* fill userspace with 0's */ |
228 | if (copy_to_user(buf, &nul_inotify_event, len_to_zero)) | 227 | if (clear_user(buf, len_to_zero)) |
229 | return -EFAULT; | 228 | return -EFAULT; |
230 | buf += len_to_zero; | 229 | buf += len_to_zero; |
231 | event_size += name_len; | 230 | event_size += name_len; |
@@ -364,20 +363,53 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns | |||
364 | return error; | 363 | return error; |
365 | } | 364 | } |
366 | 365 | ||
366 | /* | ||
367 | * Remove the mark from the idr (if present) and drop the reference | ||
368 | * on the mark because it was in the idr. | ||
369 | */ | ||
367 | static void inotify_remove_from_idr(struct fsnotify_group *group, | 370 | static void inotify_remove_from_idr(struct fsnotify_group *group, |
368 | struct inotify_inode_mark_entry *ientry) | 371 | struct inotify_inode_mark_entry *ientry) |
369 | { | 372 | { |
370 | struct idr *idr; | 373 | struct idr *idr; |
374 | struct fsnotify_mark_entry *entry; | ||
375 | struct inotify_inode_mark_entry *found_ientry; | ||
376 | int wd; | ||
371 | 377 | ||
372 | spin_lock(&group->inotify_data.idr_lock); | 378 | spin_lock(&group->inotify_data.idr_lock); |
373 | idr = &group->inotify_data.idr; | 379 | idr = &group->inotify_data.idr; |
374 | idr_remove(idr, ientry->wd); | 380 | wd = ientry->wd; |
375 | spin_unlock(&group->inotify_data.idr_lock); | 381 | |
382 | if (wd == -1) | ||
383 | goto out; | ||
384 | |||
385 | entry = idr_find(&group->inotify_data.idr, wd); | ||
386 | if (unlikely(!entry)) | ||
387 | goto out; | ||
388 | |||
389 | found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); | ||
390 | if (unlikely(found_ientry != ientry)) { | ||
391 | /* We found an entry in the idr with the right wd, but it's | ||
392 | * not the entry we were told to remove. eparis seriously | ||
393 | * fucked up somewhere. */ | ||
394 | WARN_ON(1); | ||
395 | ientry->wd = -1; | ||
396 | goto out; | ||
397 | } | ||
398 | |||
399 | /* One ref for being in the idr, one ref held by the caller */ | ||
400 | BUG_ON(atomic_read(&entry->refcnt) < 2); | ||
401 | |||
402 | idr_remove(idr, wd); | ||
376 | ientry->wd = -1; | 403 | ientry->wd = -1; |
404 | |||
405 | /* removed from the idr, drop that ref */ | ||
406 | fsnotify_put_mark(entry); | ||
407 | out: | ||
408 | spin_unlock(&group->inotify_data.idr_lock); | ||
377 | } | 409 | } |
410 | |||
378 | /* | 411 | /* |
379 | * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the | 412 | * Send IN_IGNORED for this wd, remove this wd from the idr. |
380 | * internal reference help on the mark because it is in the idr. | ||
381 | */ | 413 | */ |
382 | void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, | 414 | void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, |
383 | struct fsnotify_group *group) | 415 | struct fsnotify_group *group) |
@@ -386,6 +418,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, | |||
386 | struct fsnotify_event *ignored_event; | 418 | struct fsnotify_event *ignored_event; |
387 | struct inotify_event_private_data *event_priv; | 419 | struct inotify_event_private_data *event_priv; |
388 | struct fsnotify_event_private_data *fsn_event_priv; | 420 | struct fsnotify_event_private_data *fsn_event_priv; |
421 | int ret; | ||
389 | 422 | ||
390 | ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, | 423 | ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, |
391 | FSNOTIFY_EVENT_NONE, NULL, 0, | 424 | FSNOTIFY_EVENT_NONE, NULL, 0, |
@@ -404,10 +437,8 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, | |||
404 | fsn_event_priv->group = group; | 437 | fsn_event_priv->group = group; |
405 | event_priv->wd = ientry->wd; | 438 | event_priv->wd = ientry->wd; |
406 | 439 | ||
407 | fsnotify_add_notify_event(group, ignored_event, fsn_event_priv); | 440 | ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv); |
408 | 441 | if (ret) | |
409 | /* did the private data get added? */ | ||
410 | if (list_empty(&fsn_event_priv->event_list)) | ||
411 | inotify_free_event_priv(fsn_event_priv); | 442 | inotify_free_event_priv(fsn_event_priv); |
412 | 443 | ||
413 | skip_send_ignore: | 444 | skip_send_ignore: |
@@ -418,9 +449,6 @@ skip_send_ignore: | |||
418 | /* remove this entry from the idr */ | 449 | /* remove this entry from the idr */ |
419 | inotify_remove_from_idr(group, ientry); | 450 | inotify_remove_from_idr(group, ientry); |
420 | 451 | ||
421 | /* removed from idr, drop that reference */ | ||
422 | fsnotify_put_mark(entry); | ||
423 | |||
424 | atomic_dec(&group->inotify_data.user->inotify_watches); | 452 | atomic_dec(&group->inotify_data.user->inotify_watches); |
425 | } | 453 | } |
426 | 454 | ||
@@ -432,80 +460,29 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry) | |||
432 | kmem_cache_free(inotify_inode_mark_cachep, ientry); | 460 | kmem_cache_free(inotify_inode_mark_cachep, ientry); |
433 | } | 461 | } |
434 | 462 | ||
435 | static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) | 463 | static int inotify_update_existing_watch(struct fsnotify_group *group, |
464 | struct inode *inode, | ||
465 | u32 arg) | ||
436 | { | 466 | { |
437 | struct fsnotify_mark_entry *entry = NULL; | 467 | struct fsnotify_mark_entry *entry; |
438 | struct inotify_inode_mark_entry *ientry; | 468 | struct inotify_inode_mark_entry *ientry; |
439 | struct inotify_inode_mark_entry *tmp_ientry; | ||
440 | int ret = 0; | ||
441 | int add = (arg & IN_MASK_ADD); | ||
442 | __u32 mask; | ||
443 | __u32 old_mask, new_mask; | 469 | __u32 old_mask, new_mask; |
470 | __u32 mask; | ||
471 | int add = (arg & IN_MASK_ADD); | ||
472 | int ret; | ||
444 | 473 | ||
445 | /* don't allow invalid bits: we don't want flags set */ | 474 | /* don't allow invalid bits: we don't want flags set */ |
446 | mask = inotify_arg_to_mask(arg); | 475 | mask = inotify_arg_to_mask(arg); |
447 | if (unlikely(!mask)) | 476 | if (unlikely(!mask)) |
448 | return -EINVAL; | 477 | return -EINVAL; |
449 | 478 | ||
450 | tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); | ||
451 | if (unlikely(!tmp_ientry)) | ||
452 | return -ENOMEM; | ||
453 | /* we set the mask at the end after attaching it */ | ||
454 | fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark); | ||
455 | tmp_ientry->wd = -1; | ||
456 | |||
457 | find_entry: | ||
458 | spin_lock(&inode->i_lock); | 479 | spin_lock(&inode->i_lock); |
459 | entry = fsnotify_find_mark_entry(group, inode); | 480 | entry = fsnotify_find_mark_entry(group, inode); |
460 | spin_unlock(&inode->i_lock); | 481 | spin_unlock(&inode->i_lock); |
461 | if (entry) { | 482 | if (!entry) |
462 | ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); | 483 | return -ENOENT; |
463 | } else { | ||
464 | ret = -ENOSPC; | ||
465 | if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) | ||
466 | goto out_err; | ||
467 | retry: | ||
468 | ret = -ENOMEM; | ||
469 | if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) | ||
470 | goto out_err; | ||
471 | |||
472 | spin_lock(&group->inotify_data.idr_lock); | ||
473 | ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, | ||
474 | group->inotify_data.last_wd, | ||
475 | &tmp_ientry->wd); | ||
476 | spin_unlock(&group->inotify_data.idr_lock); | ||
477 | if (ret) { | ||
478 | if (ret == -EAGAIN) | ||
479 | goto retry; | ||
480 | goto out_err; | ||
481 | } | ||
482 | 484 | ||
483 | ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); | 485 | ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); |
484 | if (ret) { | ||
485 | inotify_remove_from_idr(group, tmp_ientry); | ||
486 | if (ret == -EEXIST) | ||
487 | goto find_entry; | ||
488 | goto out_err; | ||
489 | } | ||
490 | |||
491 | /* tmp_ientry has been added to the inode, so we are all set up. | ||
492 | * now we just need to make sure tmp_ientry doesn't get freed and | ||
493 | * we need to set up entry and ientry so the generic code can | ||
494 | * do its thing. */ | ||
495 | ientry = tmp_ientry; | ||
496 | entry = &ientry->fsn_entry; | ||
497 | tmp_ientry = NULL; | ||
498 | |||
499 | atomic_inc(&group->inotify_data.user->inotify_watches); | ||
500 | |||
501 | /* update the idr hint */ | ||
502 | group->inotify_data.last_wd = ientry->wd; | ||
503 | |||
504 | /* we put the mark on the idr, take a reference */ | ||
505 | fsnotify_get_mark(entry); | ||
506 | } | ||
507 | |||
508 | ret = ientry->wd; | ||
509 | 486 | ||
510 | spin_lock(&entry->lock); | 487 | spin_lock(&entry->lock); |
511 | 488 | ||
@@ -537,18 +514,103 @@ retry: | |||
537 | fsnotify_recalc_group_mask(group); | 514 | fsnotify_recalc_group_mask(group); |
538 | } | 515 | } |
539 | 516 | ||
540 | /* this either matches fsnotify_find_mark_entry, or init_mark_entry | 517 | /* return the wd */ |
541 | * depending on which path we took... */ | 518 | ret = ientry->wd; |
519 | |||
520 | /* match the get from fsnotify_find_mark_entry() */ | ||
542 | fsnotify_put_mark(entry); | 521 | fsnotify_put_mark(entry); |
543 | 522 | ||
523 | return ret; | ||
524 | } | ||
525 | |||
526 | static int inotify_new_watch(struct fsnotify_group *group, | ||
527 | struct inode *inode, | ||
528 | u32 arg) | ||
529 | { | ||
530 | struct inotify_inode_mark_entry *tmp_ientry; | ||
531 | __u32 mask; | ||
532 | int ret; | ||
533 | |||
534 | /* don't allow invalid bits: we don't want flags set */ | ||
535 | mask = inotify_arg_to_mask(arg); | ||
536 | if (unlikely(!mask)) | ||
537 | return -EINVAL; | ||
538 | |||
539 | tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); | ||
540 | if (unlikely(!tmp_ientry)) | ||
541 | return -ENOMEM; | ||
542 | |||
543 | fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark); | ||
544 | tmp_ientry->fsn_entry.mask = mask; | ||
545 | tmp_ientry->wd = -1; | ||
546 | |||
547 | ret = -ENOSPC; | ||
548 | if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) | ||
549 | goto out_err; | ||
550 | retry: | ||
551 | ret = -ENOMEM; | ||
552 | if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) | ||
553 | goto out_err; | ||
554 | |||
555 | spin_lock(&group->inotify_data.idr_lock); | ||
556 | ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, | ||
557 | group->inotify_data.last_wd, | ||
558 | &tmp_ientry->wd); | ||
559 | spin_unlock(&group->inotify_data.idr_lock); | ||
560 | if (ret) { | ||
561 | /* idr was out of memory allocate and try again */ | ||
562 | if (ret == -EAGAIN) | ||
563 | goto retry; | ||
564 | goto out_err; | ||
565 | } | ||
566 | |||
567 | /* we put the mark on the idr, take a reference */ | ||
568 | fsnotify_get_mark(&tmp_ientry->fsn_entry); | ||
569 | |||
570 | /* we are on the idr, now get on the inode */ | ||
571 | ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); | ||
572 | if (ret) { | ||
573 | /* we failed to get on the inode, get off the idr */ | ||
574 | inotify_remove_from_idr(group, tmp_ientry); | ||
575 | goto out_err; | ||
576 | } | ||
577 | |||
578 | /* update the idr hint, who cares about races, it's just a hint */ | ||
579 | group->inotify_data.last_wd = tmp_ientry->wd; | ||
580 | |||
581 | /* increment the number of watches the user has */ | ||
582 | atomic_inc(&group->inotify_data.user->inotify_watches); | ||
583 | |||
584 | /* return the watch descriptor for this new entry */ | ||
585 | ret = tmp_ientry->wd; | ||
586 | |||
587 | /* match the ref from fsnotify_init_markentry() */ | ||
588 | fsnotify_put_mark(&tmp_ientry->fsn_entry); | ||
589 | |||
544 | out_err: | 590 | out_err: |
545 | /* could be an error, could be that we found an existing mark */ | 591 | if (ret < 0) |
546 | if (tmp_ientry) { | ||
547 | /* on the idr but didn't make it on the inode */ | ||
548 | if (tmp_ientry->wd != -1) | ||
549 | inotify_remove_from_idr(group, tmp_ientry); | ||
550 | kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry); | 592 | kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry); |
551 | } | 593 | |
594 | return ret; | ||
595 | } | ||
596 | |||
597 | static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) | ||
598 | { | ||
599 | int ret = 0; | ||
600 | |||
601 | retry: | ||
602 | /* try to update an existing watch with the new arg */ | ||
603 | ret = inotify_update_existing_watch(group, inode, arg); | ||
604 | /* no mark present, try to add a new one */ | ||
605 | if (ret == -ENOENT) | ||
606 | ret = inotify_new_watch(group, inode, arg); | ||
607 | /* | ||
608 | * inotify_new_watch could race with another thread which did an | ||
609 | * inotify_new_watch between the update_existing and the add watch | ||
610 | * here, go back and try to update an existing mark again. | ||
611 | */ | ||
612 | if (ret == -EEXIST) | ||
613 | goto retry; | ||
552 | 614 | ||
553 | return ret; | 615 | return ret; |
554 | } | 616 | } |
@@ -568,7 +630,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign | |||
568 | 630 | ||
569 | spin_lock_init(&group->inotify_data.idr_lock); | 631 | spin_lock_init(&group->inotify_data.idr_lock); |
570 | idr_init(&group->inotify_data.idr); | 632 | idr_init(&group->inotify_data.idr); |
571 | group->inotify_data.last_wd = 0; | 633 | group->inotify_data.last_wd = 1; |
572 | group->inotify_data.user = user; | 634 | group->inotify_data.user = user; |
573 | group->inotify_data.fa = NULL; | 635 | group->inotify_data.fa = NULL; |
574 | 636 | ||