Diffstat (limited to 'fs/aio.c')
 fs/aio.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 969beb0e2231..b9d64d89a043 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline void get_ioctx(struct kioctx *kioctx)
-{
-	BUG_ON(atomic_read(&kioctx->users) <= 0);
-	atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
 	return atomic_inc_not_zero(&kioctx->users);
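Note: this hunk drops get_ioctx(), which bumped the refcount unconditionally, leaving try_get_ioctx() as the only way to pin a context from a lookup. atomic_inc_not_zero() refuses to bump a counter that has already hit zero, so a dying ioctx can no longer be resurrected. A minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's atomic_t (struct and helper names are illustrative, not fs/aio.c):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int users;
};

/* Sketch of atomic_inc_not_zero(): take a reference only while live. */
static bool try_get(struct obj *o)
{
	int old = atomic_load(&o->users);

	do {
		if (old == 0)
			return false;	/* already dying, don't resurrect it */
	} while (!atomic_compare_exchange_weak(&o->users, &old, old + 1));

	return true;
}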
@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	mm = ctx->mm = current->mm;
 	atomic_inc(&mm->mm_count);
 
-	atomic_set(&ctx->users, 1);
+	atomic_set(&ctx->users, 2);
 	spin_lock_init(&ctx->ctx_lock);
 	spin_lock_init(&ctx->ring_info.ring_lock);
 	init_waitqueue_head(&ctx->wait);
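Note: with get_ioctx() gone, ioctx_alloc() now hands back a context pinned twice: one reference for the context's slot on the mm's context list, one for the caller, which io_setup() drops explicitly (final hunk below). A userspace sketch of the two-references-at-birth idea (illustrative names, not the kernel's kioctx):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int users;
	struct obj *next;	/* linkage on a per-process list */
};

static struct obj *ctx_list;	/* stand-in for the mm's context list */

/* The allocator hands the object back already pinned twice. */
static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	atomic_store(&o->users, 2);	/* one for ctx_list, one for the caller */
	o->next = ctx_list;
	ctx_list = o;
	return o;
}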
@@ -490,6 +484,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 		kmem_cache_free(kiocb_cachep, req);
 		ctx->reqs_active--;
 	}
+	if (unlikely(!ctx->reqs_active && ctx->dead))
+		wake_up_all(&ctx->wait);
 	spin_unlock_irq(&ctx->ctx_lock);
 }
 
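Note: the two added lines close a teardown hang. The destruction path sleeps on ctx->wait until reqs_active reaches zero, and batch-freeing requests can now be what takes the count to zero on an already-dead context, so it has to deliver the wakeup. A userspace sketch with pthreads standing in for the kernel waitqueue (names illustrative):

#include <pthread.h>
#include <stdbool.h>

struct ctx {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int reqs_active;
	bool dead;
};

/* Mirrors the kiocb_batch_free() change: drop nr requests at once. */
static void batch_free(struct ctx *ctx, int nr)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->reqs_active -= nr;
	/* teardown sleeps until reqs_active hits zero; wake it if we got there */
	if (ctx->reqs_active == 0 && ctx->dead)
		pthread_cond_broadcast(&ctx->wait);
	pthread_mutex_unlock(&ctx->lock);
}

/* The waiting side, for completeness: what teardown does in spirit. */
static void wait_for_requests(struct ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	while (ctx->reqs_active > 0)
		pthread_cond_wait(&ctx->wait, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
}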
@@ -607,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
 		fput(req->ki_filp);
 
 		/* Link the iocb into the context's free list */
+		rcu_read_lock();
 		spin_lock_irq(&ctx->ctx_lock);
 		really_put_req(ctx, req);
+		/*
+		 * at that point ctx might've been killed, but actual
+		 * freeing is RCU'd
+		 */
 		spin_unlock_irq(&ctx->ctx_lock);
+		rcu_read_unlock();
 
-		put_ioctx(ctx);
 		spin_lock_irq(&fput_lock);
 	}
 	spin_unlock_irq(&fput_lock);
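Note: this hunk is the heart of the patch. Instead of pinning the ioctx with get_ioctx()/put_ioctx() around every deferred fput, aio_fput_routine() leans on RCU: the context may be killed while the request is in flight, but __put_ioctx() frees it through call_rcu() (ctx_rcu_free, first hunk), so holding rcu_read_lock() across the ctx_lock critical section keeps the memory valid. A kernel-style sketch of that lifetime rule (the struct and helpers are illustrative, not the real fs/aio.c code):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	spinlock_t lock;
	struct rcu_head rcu_head;
};

static void obj_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu_head));
}

/* Last reference gone: the actual kfree() is deferred past all readers. */
static void obj_release(struct obj *o)
{
	call_rcu(&o->rcu_head, obj_rcu_free);
}

/* Reader side, the shape of the aio_fput_routine() hunk above. */
static void obj_touch(struct obj *o)
{
	rcu_read_lock();
	spin_lock_irq(&o->lock);
	/* o may have been "killed" already, but it cannot be freed here */
	spin_unlock_irq(&o->lock);
	rcu_read_unlock();
}

The same rule is why the next hunk can delete the get_ioctx() call in __aio_put_req(): a request queued for deferred fput no longer needs to hold its own reference on the context.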
@@ -642,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	 * this function will be executed w/out any aio kthread wakeup.
 	 */
 	if (unlikely(!fput_atomic(req->ki_filp))) {
-		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
 		spin_unlock(&fput_lock);
@@ -1336,10 +1336,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 	ret = PTR_ERR(ioctx);
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
-		if (!ret)
+		if (!ret) {
+			put_ioctx(ioctx);
 			return 0;
-
-		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+		}
 		io_destroy(ioctx);
 	}
 
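Note: the resulting reference flow: ioctx_alloc() returns with users == 2; on success io_setup() drops the caller's reference and only the list reference survives; on failure io_destroy() now consumes the caller's reference itself, so the old get_ioctx() before it is unnecessary. A compact userspace sketch of that balance (hypothetical helper names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int users;
};

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->users, 1) == 1)
		free(o);	/* the 1 -> 0 transition frees the object */
}

/* Shape of io_setup() after the patch: o arrives with users == 2. */
static int setup(struct obj *o, bool copied_to_user)
{
	if (copied_to_user) {
		obj_put(o);	/* drop the caller's ref; the list ref survives */
		return 0;
	}
	obj_put(o);	/* destroy path: unpin the list reference... */
	obj_put(o);	/* ...and consume the caller's reference */
	return -1;
}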