 fs/aio.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
@@ -298,17 +298,23 @@ static void wait_for_all_aios(struct kioctx *ctx)
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
+	spin_lock_irq(&ctx->ctx_lock);
 	if (!ctx->reqs_active)
-		return;
+		goto out;
 
 	add_wait_queue(&ctx->wait, &wait);
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (ctx->reqs_active) {
+		spin_unlock_irq(&ctx->ctx_lock);
 		schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		spin_lock_irq(&ctx->ctx_lock);
 	}
 	__set_task_state(tsk, TASK_RUNNING);
 	remove_wait_queue(&ctx->wait, &wait);
+
+out:
+	spin_unlock_irq(&ctx->ctx_lock);
 }
 
 /* wait_on_sync_kiocb:
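The locking pattern this first hunk introduces (sample reqs_active only while
holding ctx_lock, and drop the lock around the sleep so the completion path can
get in) is easier to see in a userspace analogue. A minimal sketch with
pthreads, where the condition variable does what the hunk does by hand around
schedule(); ioctx_like, reqs_active, and the helpers in the later sketches are
invented for illustration, not taken from fs/aio.c:

#include <pthread.h>

struct ioctx_like {
	pthread_mutex_t lock;		/* plays the role of ctx->ctx_lock */
	pthread_cond_t wait;		/* plays the role of ctx->wait */
	int reqs_active;		/* in-flight request count */
};

/* Wait until every request has completed. As in the patched
 * wait_for_all_aios(), the counter is only ever read with the lock
 * held, and the lock is dropped while asleep: pthread_cond_wait()
 * unlocks and relocks for us, which is what the added
 * spin_unlock_irq()/spin_lock_irq() pair around schedule() does by
 * hand. */
static void wait_for_all_reqs(struct ioctx_like *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->reqs_active)
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
}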
@@ -424,7 +430,6 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
 	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
 		list_add(&req->ki_list, &ctx->active_reqs);
-		get_ioctx(ctx);
 		ctx->reqs_active++;
 		okay = 1;
 	}
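With get_ioctx() deleted from the allocation path, what keeps the ioctx alive
while requests are in flight is apparently the nonzero reqs_active count
itself, maintained under ctx_lock, rather than a reference taken per request.
Continuing the sketch (submit_req is an invented name; the request object
itself is elided, since only the accounting matters here):

/* Account a new request under the lock. No context refcount is taken;
 * the bare increment replaces the deleted get_ioctx(ctx). */
static void submit_req(struct ioctx_like *c)
{
	pthread_mutex_lock(&c->lock);
	c->reqs_active++;
	pthread_mutex_unlock(&c->lock);
}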
@@ -536,8 +541,6 @@ int fastcall aio_put_req(struct kiocb *req)
 	spin_lock_irq(&ctx->ctx_lock);
 	ret = __aio_put_req(ctx, req);
 	spin_unlock_irq(&ctx->ctx_lock);
-	if (ret)
-		put_ioctx(ctx);
 	return ret;
 }
 
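Same story on release: __aio_put_req() still reports whether the request died,
but that fact no longer obliges the caller to drop a context reference, so the
"if (ret) put_ioctx(ctx)" tail can simply go. Extending the sketch with a
per-request refcount as a rough stand-in for ki_users (req_like and
put_req_locked are invented; assume a request is malloc()ed with users = 1 and
accounted with submit_req() at submission):

#include <stdlib.h>

struct req_like {
	struct ioctx_like *ctx;
	int users;			/* rough stand-in for ki_users */
};

/* Drop one reference; call with ctx->lock held, like __aio_put_req().
 * A dying request un-accounts itself, and that is the whole story:
 * there is no context refcount left for the caller to release. */
static int put_req_locked(struct req_like *r)
{
	if (--r->users)
		return 0;		/* request still referenced */
	r->ctx->reqs_active--;
	free(r);
	return 1;			/* died; no put_ioctx() needed */
}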
@@ -779,8 +782,7 @@ static int __aio_run_iocbs(struct kioctx *ctx)
 		 */
 		iocb->ki_users++;	/* grab extra reference */
 		aio_run_iocb(iocb);
-		if (__aio_put_req(ctx, iocb))	/* drop extra ref */
-			put_ioctx(ctx);
+		__aio_put_req(ctx, iocb);
 	}
 	if (!list_empty(&ctx->run_list))
 		return 1;
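The retry loop gets the same simplification: it still pins the iocb with an
extra ki_users reference across the retry, but dropping that reference no
longer has a context-refcount side effect, so the return value of
__aio_put_req() can be ignored. In sketch form (run_one_req is invented; the
lock is held by the caller, as it appears to be for __aio_run_iocbs()):

/* Retry one request while holding the lock: grab an extra reference so
 * the request survives the retry, then drop it and ignore the result. */
static void run_one_req(struct req_like *r)
{
	r->users++;			/* grab extra reference */
	/* ... retry the i/o here ... */
	put_req_locked(r);		/* drop extra ref, result unused */
}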
@@ -997,14 +999,10 @@ put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
 
-	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 
-	if (ret)
-		put_ioctx(ctx);
-
+	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 	return ret;
 }
 
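This aio_complete() hunk looks like the heart of the fix: the unlock moves
below the wake_up() and the post-unlock put_ioctx() disappears, so nothing
touches the ioctx after ctx_lock is released, and a waiter cannot re-check
reqs_active between the final decrement and the wakeup. The completion side of
the sketch, with the same caveat that the names are invented:

/* Complete a request: the final put, the counter check and the wakeup
 * all happen under the one lock, mirroring the reordered code above. */
static void complete_req(struct req_like *r)
{
	struct ioctx_like *c = r->ctx;	/* r may be freed by the put below */

	pthread_mutex_lock(&c->lock);
	put_req_locked(r);
	if (c->reqs_active == 0)
		pthread_cond_broadcast(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

Paired with wait_for_all_reqs() above, every completion finishes with the
context before the waiter can return, which is the property the patch appears
to restore in fs/aio.c.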
