author     Kent Overstreet <koverstreet@google.com>   2013-05-13 17:45:08 -0400
committer  Benjamin LaHaise <bcrl@kvack.org>          2013-07-30 11:53:11 -0400
commit     bec68faaf3ba74ed0dcd5dc3a881b30aec542973 (patch)
tree       fc8704d99fea0108346de7c1ca2500ac7199f1ae /fs/aio.c
parent     723be6e39d14254bb5bb9f422b434566d359fa6e (diff)
aio: io_cancel() no longer returns the io_event
Originally, io_cancel() was documented to return the io_event if cancellation
succeeded - the io_event wouldn't be delivered via the ring buffer like it
normally would.

But this isn't what the implementation was actually doing; the only driver
implementing cancellation, the usb gadget code, never returned an io_event in
its cancel function. And aio_complete() was recently changed to no longer
suppress event delivery if the kiocb had been cancelled.

This gets rid of the unused io_event argument to kiocb_cancel() and
kiocb->ki_cancel(), and changes io_cancel() to return -EINPROGRESS if
kiocb->ki_cancel() returned success.

Also tweak the refcounting in kiocb_cancel() to make more sense.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
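The user-visible effect is easiest to see from the calling side. Below is a
minimal userspace sketch (not part of the patch) of the new io_cancel()
contract: the result buffer passed to the syscall is no longer filled in, a
cancellation that was accepted reports EINPROGRESS, and the io_event is reaped
through io_getevents() like any other completion. The sys_io_cancel() and
sys_io_getevents() wrappers are hand-rolled here, and ctx/iocb are assumed to
come from earlier io_setup()/io_submit() calls.

/* Sketch only: exercising the post-patch io_cancel() return convention. */
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static int sys_io_cancel(aio_context_t ctx, struct iocb *iocb,
			 struct io_event *result)
{
	return syscall(SYS_io_cancel, ctx, iocb, result);
}

static int sys_io_getevents(aio_context_t ctx, long min_nr, long nr,
			    struct io_event *events, struct timespec *timeout)
{
	return syscall(SYS_io_getevents, ctx, min_nr, nr, events, timeout);
}

/* ctx and iocb are assumed to come from io_setup()/io_submit() earlier on. */
static void cancel_and_reap(aio_context_t ctx, struct iocb *iocb)
{
	struct io_event ev;

	if (sys_io_cancel(ctx, iocb, &ev) == 0)
		return;		/* not expected after this patch */

	if (errno == EINPROGRESS) {
		/* Cancellation was accepted; the event still arrives normally. */
		if (sys_io_getevents(ctx, 1, 1, &ev, NULL) == 1)
			printf("reaped event: res=%lld res2=%lld\n",
			       (long long)ev.res, (long long)ev.res2);
	} else {
		/* e.g. EINVAL: request not found (it may already have completed) */
		perror("io_cancel");
	}
}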
Diffstat (limited to 'fs/aio.c')
-rw-r--r--  fs/aio.c | 40
1 file changed, 10 insertions(+), 30 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 7b470bfbf891..12b37689dd2c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -358,8 +358,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
-			struct io_event *res)
+static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
 	int ret = -EINVAL;
@@ -381,12 +380,10 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
 	atomic_inc(&kiocb->ki_users);
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	memset(res, 0, sizeof(*res));
-	res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
-	res->data = kiocb->ki_user_data;
-	ret = cancel(kiocb, res);
+	ret = cancel(kiocb);
 
 	spin_lock_irq(&ctx->ctx_lock);
+	aio_put_req(kiocb);
 
 	return ret;
 }
@@ -408,7 +405,6 @@ static void free_ioctx(struct work_struct *work)
 {
 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
 	struct aio_ring *ring;
-	struct io_event res;
 	struct kiocb *req;
 	unsigned cpu, head, avail;
 
@@ -419,7 +415,7 @@ static void free_ioctx(struct work_struct *work)
 				       struct kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
-		kiocb_cancel(ctx, req, &res);
+		kiocb_cancel(ctx, req);
 	}
 
 	spin_unlock_irq(&ctx->ctx_lock);
@@ -796,21 +792,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	}
 
 	/*
-	 * cancelled requests don't get events, userland was given one
-	 * when the event got cancelled.
-	 */
-	if (unlikely(xchg(&iocb->ki_cancel,
-			  KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
-		/*
-		 * Can't use the percpu reqs_available here - could race with
-		 * free_ioctx()
-		 */
-		atomic_inc(&ctx->reqs_available);
-		/* Still need the wake_up in case free_ioctx is waiting */
-		goto put_rq;
-	}
-
-	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
 	 * pointer since we might be called from irq context.
@@ -862,7 +843,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	if (iocb->ki_eventfd != NULL)
 		eventfd_signal(iocb->ki_eventfd, 1);
 
-put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	aio_put_req(iocb);
 
@@ -1439,7 +1419,6 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		struct io_event __user *, result)
 {
-	struct io_event res;
 	struct kioctx *ctx;
 	struct kiocb *kiocb;
 	u32 key;
@@ -1457,18 +1436,19 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 
 	kiocb = lookup_kiocb(ctx, iocb, key);
 	if (kiocb)
-		ret = kiocb_cancel(ctx, kiocb, &res);
+		ret = kiocb_cancel(ctx, kiocb);
 	else
 		ret = -EINVAL;
 
 	spin_unlock_irq(&ctx->ctx_lock);
 
 	if (!ret) {
-		/* Cancellation succeeded -- copy the result
-		 * into the user's buffer.
-		 */
-		if (copy_to_user(result, &res, sizeof(res)))
-			ret = -EFAULT;
+		/*
+		 * The result argument is no longer used - the io_event is
+		 * always delivered via the ring buffer. -EINPROGRESS indicates
+		 * cancellation is progress:
+		 */
+		ret = -EINPROGRESS;
 	}
 
 	percpu_ref_put(&ctx->users);
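On the kernel side, the visible change for drivers is that kiocb_cancel_fn now
takes only the kiocb: the callback no longer fills in an io_event, and a zero
return is what makes io_cancel() report -EINPROGRESS to userspace. A rough
sketch of registering a cancel method under the new signature follows; struct
my_request, my_hw_abort() and the use of kiocb->private are illustrative
stand-ins, not code from this patch or from the usb gadget driver.

/* Sketch only: a driver-side cancel callback under the one-argument API. */
#include <linux/aio.h>

struct my_request;				/* illustrative driver-private type */
void my_hw_abort(struct my_request *req);	/* illustrative */

static int my_driver_cancel(struct kiocb *iocb)
{
	struct my_request *req = iocb->private;	/* illustrative */

	/*
	 * Abort the in-flight operation. No io_event is built here; the
	 * normal aio_complete() path still delivers it via the ring buffer.
	 */
	my_hw_abort(req);

	return 0;	/* success -> io_cancel() returns -EINPROGRESS */
}

static void my_driver_submit(struct kiocb *iocb, struct my_request *req)
{
	iocb->private = req;			/* illustrative */
	kiocb_set_cancel_fn(iocb, my_driver_cancel);
	/* ... hand the request to the hardware ... */
}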