about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorBenjamin LaHaise <bcrl@linux.intel.com>2005-09-09 16:02:09 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2005-09-09 16:57:32 -0400
commitac0b1bc1edbe81c0cb36cad7e7f5b91f4d9e12ed (patch)
treeb982e728eafeebc226882f91482ff7d0d9ee5cc3
parent8f58202bf6b915656e116ece3bc4ace14bfe533a (diff)
[PATCH] aio: kiocb locking to serialise retry and cancel
Implement a per-kiocb lock to serialise retry operations and cancel. This is done using wait_on_bit_lock() on the KIF_LOCKED bit of kiocb->ki_flags. Also, make the cancellation path lock the kiocb and subsequently release all references to it if the cancel was successful. This version includes a fix for the deadlock with __aio_run_iocbs.

Signed-off-by: Benjamin LaHaise <bcrl@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--fs/aio.c29
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 769791df36b4..201c1847fa07 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -546,6 +546,24 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id)
546 return ioctx; 546 return ioctx;
547} 547}
548 548
549static int lock_kiocb_action(void *param)
550{
551 schedule();
552 return 0;
553}
554
555static inline void lock_kiocb(struct kiocb *iocb)
556{
557 wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action,
558 TASK_UNINTERRUPTIBLE);
559}
560
561static inline void unlock_kiocb(struct kiocb *iocb)
562{
563 kiocbClearLocked(iocb);
564 wake_up_bit(&iocb->ki_flags, KIF_LOCKED);
565}
566
549/* 567/*
550 * use_mm 568 * use_mm
551 * Makes the calling kernel thread take on the specified 569 * Makes the calling kernel thread take on the specified
@@ -786,7 +804,9 @@ static int __aio_run_iocbs(struct kioctx *ctx)
786 * Hold an extra reference while retrying i/o. 804 * Hold an extra reference while retrying i/o.
787 */ 805 */
788 iocb->ki_users++; /* grab extra reference */ 806 iocb->ki_users++; /* grab extra reference */
807 lock_kiocb(iocb);
789 aio_run_iocb(iocb); 808 aio_run_iocb(iocb);
809 unlock_kiocb(iocb);
790 if (__aio_put_req(ctx, iocb)) /* drop extra ref */ 810 if (__aio_put_req(ctx, iocb)) /* drop extra ref */
791 put_ioctx(ctx); 811 put_ioctx(ctx);
792 } 812 }
@@ -1527,10 +1547,9 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1527 goto out_put_req; 1547 goto out_put_req;
1528 1548
1529 spin_lock_irq(&ctx->ctx_lock); 1549 spin_lock_irq(&ctx->ctx_lock);
1530 if (likely(list_empty(&ctx->run_list))) { 1550 aio_run_iocb(req);
1531 aio_run_iocb(req); 1551 unlock_kiocb(req);
1532 } else { 1552 if (!list_empty(&ctx->run_list)) {
1533 list_add_tail(&req->ki_run_list, &ctx->run_list);
1534 /* drain the run list */ 1553 /* drain the run list */
1535 while (__aio_run_iocbs(ctx)) 1554 while (__aio_run_iocbs(ctx))
1536 ; 1555 ;
@@ -1661,6 +1680,7 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
1661 if (NULL != cancel) { 1680 if (NULL != cancel) {
1662 struct io_event tmp; 1681 struct io_event tmp;
1663 pr_debug("calling cancel\n"); 1682 pr_debug("calling cancel\n");
1683 lock_kiocb(kiocb);
1664 memset(&tmp, 0, sizeof(tmp)); 1684 memset(&tmp, 0, sizeof(tmp));
1665 tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; 1685 tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
1666 tmp.data = kiocb->ki_user_data; 1686 tmp.data = kiocb->ki_user_data;
@@ -1672,6 +1692,7 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
1672 if (copy_to_user(result, &tmp, sizeof(tmp))) 1692 if (copy_to_user(result, &tmp, sizeof(tmp)))
1673 ret = -EFAULT; 1693 ret = -EFAULT;
1674 } 1694 }
1695 unlock_kiocb(kiocb);
1675 } else 1696 } else
1676 ret = -EINVAL; 1697 ret = -EINVAL;
1677 1698