path: root/fs
author	Kent Overstreet <koverstreet@google.com>	2013-05-07 19:18:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-07 21:38:28 -0400
commit	2d68449e86168744513ca4f13477f081ce167130 (patch)
tree	93fd1853637a41f2906ed96662a15168c251ec4b /fs
parent	162934de51e0271f6e2955075735656ea5092ea9 (diff)
aio: kill return value of aio_complete()
Nothing used the return value, and it probably wasn't possible to use it
safely for the locked versions (aio_complete(), aio_put_req()).  Just kill it.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Acked-by: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
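For context, this is roughly what a caller-side completion path looks like after the change. It is a minimal sketch with a hypothetical driver request type (my_async_req and my_async_req_done are illustrative, not part of this patch): the caller simply reports the result and never acts on a "was this the last put" answer, which could not be used safely once the ctx lock is dropped anyway.

	#include <linux/aio.h>

	/* Hypothetical request type, for illustration only. */
	struct my_async_req {
		struct kiocb	*iocb;
		long		bytes_done;
	};

	static void my_async_req_done(struct my_async_req *req)
	{
		/* Before this patch: int last = aio_complete(...); -- nothing used it. */
		aio_complete(req->iocb, req->bytes_done, 0);	/* now returns void */

		/*
		 * The kiocb may already have been freed inside aio_complete(),
		 * so a "that was the last user" return value could not be
		 * acted on safely here in any case.
		 */
	}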
Diffstat (limited to 'fs')
-rw-r--r--	fs/aio.c	21
1 file changed, 7 insertions, 14 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 6e095a95a7c6..8b43d6bf8c50 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -531,7 +531,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 /* __aio_put_req
  *	Returns true if this put was the last user of the request.
  */
-static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+static void __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
 	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
 		req, atomic_long_read(&req->ki_filp->f_count));
@@ -541,7 +541,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_users--;
 	BUG_ON(req->ki_users < 0);
 	if (likely(req->ki_users))
-		return 0;
+		return;
 	list_del(&req->ki_list);		/* remove from active_reqs */
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
@@ -549,21 +549,18 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	fput(req->ki_filp);
 	req->ki_filp = NULL;
 	really_put_req(ctx, req);
-	return 1;
 }
 
 /* aio_put_req
  *	Returns true if this put was the last user of the kiocb,
  *	false if the request is still in use.
  */
-int aio_put_req(struct kiocb *req)
+void aio_put_req(struct kiocb *req)
 {
 	struct kioctx *ctx = req->ki_ctx;
-	int ret;
 	spin_lock_irq(&ctx->ctx_lock);
-	ret = __aio_put_req(ctx, req);
+	__aio_put_req(ctx, req);
 	spin_unlock_irq(&ctx->ctx_lock);
-	return ret;
 }
 EXPORT_SYMBOL(aio_put_req);
 
@@ -593,10 +590,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
- *	Returns true if this is the last user of the request.  The
- *	only other user of the request can be the cancellation code.
  */
-int aio_complete(struct kiocb *iocb, long res, long res2)
+void aio_complete(struct kiocb *iocb, long res, long res2)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring_info	*info;
@@ -604,7 +599,6 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	struct io_event	*event;
 	unsigned long	flags;
 	unsigned long	tail;
-	int		ret;
 
 	/*
 	 * Special case handling for sync iocbs:
@@ -618,7 +612,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 		iocb->ki_user_data = res;
 		iocb->ki_users = 0;
 		wake_up_process(iocb->ki_obj.tsk);
-		return 1;
+		return;
 	}
 
 	info = &ctx->ring_info;
@@ -677,7 +671,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
-	ret = __aio_put_req(ctx, iocb);
+	__aio_put_req(ctx, iocb);
 
 	/*
 	 * We have to order our ring_info tail store above and test
@@ -691,7 +685,6 @@ put_rq:
 	wake_up(&ctx->wait);
 
 	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-	return ret;
 }
 EXPORT_SYMBOL(aio_complete);
 
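Only fs/ is shown above because of the path filter on this diffstat; the exported prototypes live in include/linux/aio.h and need the matching int-to-void change as part of the same patch. That portion is not shown here, but presumably it reads along these lines:

-extern int aio_put_req(struct kiocb *iocb);
-extern int aio_complete(struct kiocb *iocb, long res, long res2);
+extern void aio_put_req(struct kiocb *iocb);
+extern void aio_complete(struct kiocb *iocb, long res, long res2);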