Diffstat (limited to 'fs/aio.c')
-rw-r--r--  fs/aio.c | 27 ++++++++++++---------------
1 file changed, 12 insertions(+), 15 deletions(-)
@@ -298,17 +298,23 @@ static void wait_for_all_aios(struct kioctx *ctx)
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
+	spin_lock_irq(&ctx->ctx_lock);
 	if (!ctx->reqs_active)
-		return;
+		goto out;
 
 	add_wait_queue(&ctx->wait, &wait);
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (ctx->reqs_active) {
+		spin_unlock_irq(&ctx->ctx_lock);
 		schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		spin_lock_irq(&ctx->ctx_lock);
 	}
 	__set_task_state(tsk, TASK_RUNNING);
 	remove_wait_queue(&ctx->wait, &wait);
+
+out:
+	spin_unlock_irq(&ctx->ctx_lock);
 }
 
 /* wait_on_sync_kiocb:
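
The rewritten wait loop above is the standard shape for sleeping on a condition guarded by a spinlock: check the condition only with the lock held, and drop the lock just around schedule() so the completion path can take it and issue the wakeup. A minimal self-contained sketch of that shape, with a hypothetical struct foo standing in for struct kioctx (illustrative only, not the patched function):

/* Illustrative stand-in for the fields wait_for_all_aios() relies on. */
struct foo {
	spinlock_t		lock;	/* plays the role of ctx->ctx_lock */
	int			active;	/* plays the role of ctx->reqs_active */
	wait_queue_head_t	waitq;	/* plays the role of ctx->wait */
};

/* Sleep until the lock-protected count drains to zero. */
static void foo_wait_idle(struct foo *f)
{
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&f->lock);
	if (!f->active)
		goto out;

	add_wait_queue(&f->waitq, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (f->active) {
		spin_unlock_irq(&f->lock);	/* let wakers take the lock */
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&f->lock);	/* re-check under the lock */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&f->waitq, &wait);
out:
	spin_unlock_irq(&f->lock);
}

Because the count is only read and written under the lock, a completer that decrements it and wakes the queue while holding the same lock cannot slip into the window between the check and the sleep.
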
@@ -424,7 +430,6 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
 	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
 		list_add(&req->ki_list, &ctx->active_reqs);
-		get_ioctx(ctx);
 		ctx->reqs_active++;
 		okay = 1;
 	}
@@ -536,8 +541,6 @@ int fastcall aio_put_req(struct kiocb *req)
 	spin_lock_irq(&ctx->ctx_lock);
 	ret = __aio_put_req(ctx, req);
 	spin_unlock_irq(&ctx->ctx_lock);
-	if (ret)
-		put_ioctx(ctx);
 	return ret;
 }
 
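Both hunks above drop the per-request ioctx reference: once wait_for_all_aios() synchronizes on ctx_lock, what keeps the context alive for an in-flight request is the reqs_active count itself, taken and released under the lock, rather than a get_ioctx()/put_ioctx() pair. Reusing the hypothetical struct foo from the sketch above, the submission side reduces to something like this (condensed, illustrative):

#define FOO_MAX_ACTIVE	128	/* hypothetical cap, cf. aio_ring_avail() */

/* Admit one request: the incremented count, not a refcount on the
 * context, is what keeps foo_wait_idle() from returning.
 */
static int foo_req_start(struct foo *f)
{
	int ok = 0;

	spin_lock_irq(&f->lock);
	if (f->active < FOO_MAX_ACTIVE) {
		f->active++;
		ok = 1;
	}
	spin_unlock_irq(&f->lock);
	return ok;
}
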
@@ -599,9 +602,6 @@ static void use_mm(struct mm_struct *mm)
  * by the calling kernel thread
  * (Note: this routine is intended to be called only
  * from a kernel thread context)
- *
- * Comments: Called with ctx->ctx_lock held. This nests
- * task_lock instead ctx_lock.
  */
 static void unuse_mm(struct mm_struct *mm)
 {
@@ -782,8 +782,7 @@ static int __aio_run_iocbs(struct kioctx *ctx)
 		 */
 		iocb->ki_users++;	/* grab extra reference */
 		aio_run_iocb(iocb);
-		if (__aio_put_req(ctx, iocb))	/* drop extra ref */
-			put_ioctx(ctx);
+		__aio_put_req(ctx, iocb);
 	}
 	if (!list_empty(&ctx->run_list))
 		return 1;
@@ -850,14 +849,16 @@ static void aio_kick_handler(struct work_struct *work)
 {
 	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 	mm_segment_t oldfs = get_fs();
+	struct mm_struct *mm;
 	int requeue;
 
 	set_fs(USER_DS);
 	use_mm(ctx->mm);
 	spin_lock_irq(&ctx->ctx_lock);
 	requeue =__aio_run_iocbs(ctx);
-	unuse_mm(ctx->mm);
+	mm = ctx->mm;
 	spin_unlock_irq(&ctx->ctx_lock);
+	unuse_mm(mm);
 	set_fs(oldfs);
 	/*
 	 * we're in a worker thread already, don't use queue_delayed_work,
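
unuse_mm() takes task_lock(), so calling it with ctx->ctx_lock held, as the old code did, nests task_lock inside a spinlock held with interrupts off; that ordering is exactly what the comment deleted from unuse_mm()'s header used to document. The hunk instead samples ctx->mm while the lock still pins the context, then calls unuse_mm() after the unlock. The general shape, with hypothetical names throughout:

struct bar_mm;				/* opaque, stands in for struct mm_struct */
struct bar {
	spinlock_t	lock;
	struct bar_mm	*mm;
};

void bar_run_locked(struct bar *b);	/* hypothetical: requires b->lock */
void bar_unuse(struct bar_mm *mm);	/* hypothetical: takes further locks */

static void bar_kick(struct bar *b)
{
	struct bar_mm *mm;

	spin_lock_irq(&b->lock);
	bar_run_locked(b);
	mm = b->mm;		/* snapshot: b->lock keeps it stable */
	spin_unlock_irq(&b->lock);
	bar_unuse(mm);		/* its locking no longer nests under b->lock */
}
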
@@ -998,14 +999,10 @@ put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
 
-	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 
-	if (ret)
-		put_ioctx(ctx);
-
+	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 	return ret;
 }
 
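With the wakeup moved inside the critical section, the final spin_unlock_irqrestore() is the last access this completion path makes to the context: a waiter that observes reqs_active == 0 under the same lock is free to tear the context down the moment the lock is released. In terms of the struct foo sketch earlier, the completion side pairs with the wait loop like this (illustrative):

/* Retire one request and wake any waiter, all under f->lock; after the
 * unlock, 'f' must not be touched, since the waiter may free it.
 */
static void foo_req_end(struct foo *f)
{
	unsigned long flags;

	spin_lock_irqsave(&f->lock, flags);
	f->active--;
	if (waitqueue_active(&f->waitq))
		wake_up(&f->waitq);
	spin_unlock_irqrestore(&f->lock, flags);
	/* no further use of 'f' here */
}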