Diffstat (limited to 'fs/aio.c')

 fs/aio.c | 40 ++++++++++------------------------------
 1 file changed, 10 insertions(+), 30 deletions(-)
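
kiocb_cancel() loses its struct io_event argument: a cancelled request no longer reports its completion through io_cancel(2)'s result buffer, but through the ring buffer, via the ordinary aio_complete() path. The per-method cancel callback is accordingly invoked as cancel(kiocb), aio_complete() drops its KIOCB_CANCELLED special case, and a successful io_cancel(2) now returns -EINPROGRESS instead of copying an event back to userspace.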
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -358,8 +358,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
-			struct io_event *res)
+static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
 	int ret = -EINVAL;
@@ -381,12 +380,10 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
 	atomic_inc(&kiocb->ki_users);
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	memset(res, 0, sizeof(*res));
-	res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
-	res->data = kiocb->ki_user_data;
-	ret = cancel(kiocb, res);
+	ret = cancel(kiocb);
 
 	spin_lock_irq(&ctx->ctx_lock);
+	aio_put_req(kiocb);
 
 	return ret;
 }
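
The refcount choreography is worth noting: kiocb_cancel() still takes a reference (atomic_inc(&kiocb->ki_users)) before dropping ctx_lock to call into the driver, and now drops it itself via the added aio_put_req() once the lock is retaken. The cancel callback shrinks to one argument; the matching kiocb_cancel_fn typedef change lives in include/linux/aio.h, which this view (limited to fs/aio.c) does not show. A minimal sketch of a driver-side callback under the new convention — the driver structure and abort helper are hypothetical, only the calling convention comes from this patch:

/* Hypothetical driver cancel callback under the one-argument convention. */
static int my_driver_cancel(struct kiocb *iocb)
{
	struct my_request *req = iocb->private;	/* driver-owned state */

	my_hw_abort(req);	/* hypothetical: tear down the in-flight op */

	/*
	 * Nothing to fill in here any more: the io_event for the cancelled
	 * request is generated later, through the usual aio_complete() path.
	 */
	return 0;
}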
@@ -408,7 +405,6 @@ static void free_ioctx(struct work_struct *work)
 {
 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
 	struct aio_ring *ring;
-	struct io_event res;
 	struct kiocb *req;
 	unsigned cpu, head, avail;
 
@@ -419,7 +415,7 @@ static void free_ioctx(struct work_struct *work)
 				       struct kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
-		kiocb_cancel(ctx, req, &res);
+		kiocb_cancel(ctx, req);
 	}
 
 	spin_unlock_irq(&ctx->ctx_lock);
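
free_ioctx() follows suit: with no event to collect there is nothing to pass back, so the on-stack io_event disappears and the teardown loop simply calls kiocb_cancel(ctx, req).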
@@ -796,21 +792,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	}
 
 	/*
-	 * cancelled requests don't get events, userland was given one
-	 * when the event got cancelled.
-	 */
-	if (unlikely(xchg(&iocb->ki_cancel,
-			  KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
-		/*
-		 * Can't use the percpu reqs_available here - could race with
-		 * free_ioctx()
-		 */
-		atomic_inc(&ctx->reqs_available);
-		/* Still need the wake_up in case free_ioctx is waiting */
-		goto put_rq;
-	}
-
-	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
 	 * pointer since we might be called from irq context.
@@ -862,7 +843,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	if (iocb->ki_eventfd != NULL)
 		eventfd_signal(iocb->ki_eventfd, 1);
 
-put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	aio_put_req(iocb);
 
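
With cancelled requests now posting events like any other completion, aio_complete() no longer needs the xchg()-based KIOCB_CANCELLED check that bumped reqs_available and skipped event delivery, and the put_rq: label it jumped to goes away with it.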
@@ -1439,7 +1419,6 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		struct io_event __user *, result)
 {
-	struct io_event res;
 	struct kioctx *ctx;
 	struct kiocb *kiocb;
 	u32 key;
@@ -1457,18 +1436,19 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 
 	kiocb = lookup_kiocb(ctx, iocb, key);
 	if (kiocb)
-		ret = kiocb_cancel(ctx, kiocb, &res);
+		ret = kiocb_cancel(ctx, kiocb);
 	else
 		ret = -EINVAL;
 
 	spin_unlock_irq(&ctx->ctx_lock);
 
 	if (!ret) {
-		/* Cancellation succeeded -- copy the result
-		 * into the user's buffer.
+		/*
+		 * The result argument is no longer used - the io_event is
+		 * always delivered via the ring buffer. -EINPROGRESS indicates
+		 * that cancellation is in progress.
 		 */
-		if (copy_to_user(result, &res, sizeof(res)))
-			ret = -EFAULT;
+		ret = -EINPROGRESS;
 	}
 
 	percpu_ref_put(&ctx->users);
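
For userspace, the visible change is the return convention: a successful cancellation no longer returns 0 with the result buffer filled in; io_cancel(2) returns -EINPROGRESS and the event for the cancelled iocb is reaped through io_getevents(2) like any other completion. A minimal sketch of the new pattern, using raw syscalls to stay independent of any particular libaio version (the io_setup()/io_submit() setup producing ctx and iocb is assumed):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

/* Thin raw-syscall wrappers; glibc's syscall() returns -1 and sets errno. */
static int sys_io_cancel(aio_context_t ctx, struct iocb *iocb,
			 struct io_event *result)
{
	return syscall(SYS_io_cancel, ctx, iocb, result);
}

static int sys_io_getevents(aio_context_t ctx, long min_nr, long nr,
			    struct io_event *events, struct timespec *timeout)
{
	return syscall(SYS_io_getevents, ctx, min_nr, nr, events, timeout);
}

/* ctx and iocb are assumed to come from earlier io_setup()/io_submit() calls. */
static void cancel_and_reap(aio_context_t ctx, struct iocb *iocb)
{
	struct io_event ev;

	if (sys_io_cancel(ctx, iocb, &ev) == 0)
		return;	/* pre-change kernels: ev already holds the result */

	if (errno == EINPROGRESS) {
		/* Post-change kernels: the completion arrives in the ring. */
		if (sys_io_getevents(ctx, 1, 1, &ev, NULL) == 1) {
			/* A real program would match ev.obj against iocb. */
			printf("cancelled iocb done, res=%lld\n",
			       (long long)ev.res);
		}
	} else {
		perror("io_cancel");
	}
}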