author     Ingo Molnar <mingo@elte.hu>   2009-03-23 11:53:20 -0400
committer  Ingo Molnar <mingo@elte.hu>   2009-03-23 11:53:20 -0400
commit     efd247fa34084d9b162f485004ae6d8a04059f0c (patch)
tree       417dcbe06d5cce1353a4c19cbda480ae67652b5c /fs/aio.c
parent     af66df5ecf9c9e2d2ff86e8203510c1c4519d64c (diff)
parent     59fcbddaff6f862cc1584b488866d9c4a5579085 (diff)

Merge branches 'sched/debug' and 'linus' into sched/core
Diffstat (limited to 'fs/aio.c')

-rw-r--r--  fs/aio.c  42
1 file changed, 30 insertions(+), 12 deletions(-)
@@ -443,7 +443,7 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
 	req->private = NULL;
 	req->ki_iovec = NULL;
 	INIT_LIST_HEAD(&req->ki_run_list);
-	req->ki_eventfd = ERR_PTR(-EINVAL);
+	req->ki_eventfd = NULL;
 
 	/* Check if the completion queue has enough free space to
 	 * accept an event from this io.
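
The hunk above drops the ERR_PTR(-EINVAL) sentinel for a request with no eventfd attached and uses plain NULL instead, so consumers can test the pointer directly rather than through IS_ERR(). For readers unfamiliar with the convention, here is a minimal userspace re-implementation of the ERR_PTR()/IS_ERR()/PTR_ERR() helpers, assuming the usual kernel encoding of small negative errno values at the top of the pointer range; it is an illustration, not the kernel source:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    static int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            /* Old style: "no eventfd" was the error pointer ERR_PTR(-EINVAL),
             * so every consumer had to check it with IS_ERR(). */
            void *eventfd = ERR_PTR(-22);

            if (IS_ERR(eventfd))
                    printf("sentinel is an error pointer: %ld\n", PTR_ERR(eventfd));

            /* New style: plain NULL, plain check. */
            eventfd = NULL;
            if (eventfd != NULL)
                    printf("unreachable: no eventfd attached\n");
            return 0;
    }

With NULL as the "absent" value, a request that carries no eventfd can never be mistaken for one holding a poisoned error pointer, which is what the later hunks rely on.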
@@ -485,8 +485,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
 	assert_spin_locked(&ctx->ctx_lock);
 
-	if (!IS_ERR(req->ki_eventfd))
-		fput(req->ki_eventfd);
 	if (req->ki_dtor)
 		req->ki_dtor(req);
 	if (req->ki_iovec != &req->ki_inline_vec)
@@ -508,8 +506,11 @@ static void aio_fput_routine(struct work_struct *data)
 		list_del(&req->ki_list);
 		spin_unlock_irq(&fput_lock);
 
-		/* Complete the fput */
-		__fput(req->ki_filp);
+		/* Complete the fput(s) */
+		if (req->ki_filp != NULL)
+			__fput(req->ki_filp);
+		if (req->ki_eventfd != NULL)
+			__fput(req->ki_eventfd);
 
 		/* Link the iocb into the context's free list */
 		spin_lock_irq(&ctx->ctx_lock);
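
This hunk teaches the deferred-work routine to complete the put of both the backing file and the eventfd file, matching the previous hunk, which removed the eventfd fput() from really_put_req() (a path that runs under ctx_lock). The underlying pattern: dropping the last reference triggers expensive teardown (__fput()) that must run in process context, so the object is parked on a list (fput_head here) and a work item finishes the job later. Below is a minimal single-threaded sketch of that pattern; the names put_object and deferred_work are hypothetical stand-ins for the aio code:

    #include <stdio.h>
    #include <stdlib.h>

    struct object {
            int refcount;
            struct object *next;    /* link for the deferred-free list */
            const char *name;
    };

    static struct object *deferred_head;

    /* Cheap part: drop a reference; if it was the last one, queue the
     * object for later destruction instead of tearing it down inline. */
    static void put_object(struct object *obj)
    {
            if (--obj->refcount == 0) {
                    obj->next = deferred_head;
                    deferred_head = obj;
            }
    }

    /* Heavy part: runs later, in a context where destruction is safe
     * (in the kernel this is the workqueue running aio_fput_routine). */
    static void deferred_work(void)
    {
            while (deferred_head) {
                    struct object *obj = deferred_head;

                    deferred_head = obj->next;
                    printf("destroying %s\n", obj->name);
                    free(obj);
            }
    }

    int main(void)
    {
            struct object *obj = malloc(sizeof(*obj));

            obj->refcount = 1;
            obj->next = NULL;
            obj->name = "iocb-like object";
            put_object(obj);        /* last ref: queued, not freed */
            deferred_work();        /* the "workqueue" completes teardown */
            return 0;
    }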
@@ -527,12 +528,14 @@ static void aio_fput_routine(struct work_struct *data)
  */
 static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
+	int schedule_putreq = 0;
+
 	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
 		req, atomic_long_read(&req->ki_filp->f_count));
 
 	assert_spin_locked(&ctx->ctx_lock);
 
-	req->ki_users --;
+	req->ki_users--;
 	BUG_ON(req->ki_users < 0);
 	if (likely(req->ki_users))
 		return 0;
@@ -540,10 +543,23 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
-	/* Must be done under the lock to serialise against cancellation.
-	 * Call this aio_fput as it duplicates fput via the fput_work.
+	/*
+	 * Try to optimize the aio and eventfd file* puts, by avoiding to
+	 * schedule work in case it is not __fput() time. In normal cases,
+	 * we would not be holding the last reference to the file*, so
+	 * this function will be executed w/out any aio kthread wakeup.
 	 */
-	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
+	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
+		schedule_putreq++;
+	else
+		req->ki_filp = NULL;
+	if (req->ki_eventfd != NULL) {
+		if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
+			schedule_putreq++;
+		else
+			req->ki_eventfd = NULL;
+	}
+	if (unlikely(schedule_putreq)) {
 		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
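
The new __aio_put_req() logic decrements each file's reference count inline and only schedules the workqueue when one of the counts actually hits zero; otherwise it clears the pointer so aio_fput_routine() skips it. The in-code comment spells out the rationale: in the common case the request does not hold the last reference, so no kthread wakeup is needed. A hedged sketch of the dec-and-test idiom, using C11 atomics in place of the kernel's atomic_long_dec_and_test():

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for the kernel's atomic_long_dec_and_test(): returns
     * nonzero iff this call dropped the count to zero. */
    static int dec_and_test(atomic_long *count)
    {
            return atomic_fetch_sub(count, 1) == 1;
    }

    int main(void)
    {
            atomic_long file_refs = 2;      /* someone else still holds a ref */
            atomic_long eventfd_refs = 1;   /* we hold the last reference */
            int schedule_putreq = 0;

            if (dec_and_test(&file_refs))
                    schedule_putreq++;      /* not taken: fast path */
            if (dec_and_test(&eventfd_refs))
                    schedule_putreq++;      /* taken: defer the real put */
            printf("deferred puts to schedule: %d\n", schedule_putreq);
            return 0;
    }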
@@ -571,7 +587,7 @@ int aio_put_req(struct kiocb *req)
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct mm_struct *mm = current->mm;
-	struct kioctx *ctx = NULL;
+	struct kioctx *ctx, *ret = NULL;
 	struct hlist_node *n;
 
 	rcu_read_lock();
@@ -579,12 +595,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
 		if (ctx->user_id == ctx_id && !ctx->dead) {
 			get_ioctx(ctx);
+			ret = ctx;
 			break;
 		}
 	}
 
 	rcu_read_unlock();
-	return ctx;
+	return ret;
 }
 
 /*
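
The lookup_ioctx() change fixes a subtle iterator bug: with an entry-style macro such as hlist_for_each_entry_rcu(), the cursor variable is recomputed from each list node, so when the loop runs to completion without a break it is left pointing at the last entry visited, not at NULL. Returning ctx directly could therefore hand back a non-matching ioctx. The sketch below reproduces the bug with a simplified userspace iterator macro (for_each_entry and container_of are re-implemented locally and are illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; };

    struct ctx {
            int user_id;
            struct node list;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Simplified analogue of hlist_for_each_entry(): 'pos' walks the raw
     * nodes, 'entry' is recomputed from each node.  When the walk falls
     * off the end, 'entry' still holds the LAST entry visited. */
    #define for_each_entry(entry, pos, head)                                \
            for (pos = (head);                                              \
                 pos && ((entry = container_of(pos, struct ctx, list)), 1); \
                 pos = pos->next)

    static struct ctx c2 = { 2, { NULL } };
    static struct ctx c1 = { 1, { &c2.list } };

    static struct ctx *lookup_buggy(int id)
    {
            struct ctx *entry = NULL;
            struct node *pos;

            for_each_entry(entry, pos, &c1.list) {
                    if (entry->user_id == id)
                            break;
            }
            return entry;   /* no match: returns &c2, not NULL! */
    }

    static struct ctx *lookup_fixed(int id)
    {
            struct ctx *entry, *ret = NULL;
            struct node *pos;

            for_each_entry(entry, pos, &c1.list) {
                    if (entry->user_id == id) {
                            ret = entry;    /* record the match explicitly */
                            break;
                    }
            }
            return ret;     /* no match: NULL, as callers expect */
    }

    int main(void)
    {
            printf("buggy lookup(99): %p\n", (void *)lookup_buggy(99));
            printf("fixed lookup(99): %p\n", (void *)lookup_fixed(99));
            return 0;
    }

Copying the match into ret, as the patch does, keeps the loop cursor's post-loop value out of the function's return path.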
@@ -1009,7 +1026,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	 * eventfd. The eventfd_signal() function is safe to be called
 	 * from IRQ context.
 	 */
-	if (!IS_ERR(iocb->ki_eventfd))
+	if (iocb->ki_eventfd != NULL)
 		eventfd_signal(iocb->ki_eventfd, 1);
 
 put_rq:
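
aio_complete() can run from IRQ context, and the in-code comment notes that eventfd_signal() is safe there; the NULL test simply replaces the old IS_ERR() test on the new sentinel. From userspace, the counter that eventfd_signal() bumps is observed with an ordinary read() on the eventfd. A minimal sketch of that userspace side (the in-kernel hook itself cannot be reproduced here):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t value = 1;
            int efd = eventfd(0, 0);

            if (efd < 0) {
                    perror("eventfd");
                    return 1;
            }
            /* Userspace stand-in for eventfd_signal(ctx, 1): add to the counter. */
            if (write(efd, &value, sizeof(value)) != (ssize_t)sizeof(value))
                    perror("write");
            /* A completion watcher would typically poll() and then read(). */
            if (read(efd, &value, sizeof(value)) == (ssize_t)sizeof(value))
                    printf("counter drained: %llu\n", (unsigned long long)value);
            close(efd);
            return 0;
    }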
@@ -1608,6 +1625,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
 		if (IS_ERR(req->ki_eventfd)) {
 			ret = PTR_ERR(req->ki_eventfd);
+			req->ki_eventfd = NULL;
 			goto out_put_req;
 		}
 	}
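
Resetting ki_eventfd to NULL after harvesting PTR_ERR() matters because the shared out_put_req teardown now assumes the field is either NULL or a valid file pointer; leaving the poisoned ERR_PTR value in place would trip the eventfd put added earlier. A hedged sketch of the same "normalize the sentinel before goto cleanup" pattern, with hypothetical get_resource and submit helpers:

    #include <stdio.h>
    #include <stdlib.h>

    #define ERR_PTR(e) ((void *)(long)(e))
    #define PTR_ERR(p) ((long)(p))
    #define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-4095L)

    /* Hypothetical resource getter: fails with an encoded errno pointer. */
    static void *get_resource(int fail)
    {
            return fail ? ERR_PTR(-22) : malloc(16);
    }

    static long submit(int fail)
    {
            long ret = 0;
            void *res = get_resource(fail);

            if (IS_ERR(res)) {
                    ret = PTR_ERR(res);
                    res = NULL;     /* normalize: teardown must never see ERR_PTR */
                    goto out_put;
            }
    out_put:
            free(res);              /* free(NULL) is a defined no-op */
            return ret;
    }

    int main(void)
    {
            printf("ok: %ld, fail: %ld\n", submit(0), submit(1));
            return 0;
    }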