Diffstat (limited to 'fs/aio.c')
 fs/aio.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline void get_ioctx(struct kioctx *kioctx)
-{
-	BUG_ON(atomic_read(&kioctx->users) <= 0);
-	atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
 	return atomic_inc_not_zero(&kioctx->users);
@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	mm = ctx->mm = current->mm;
 	atomic_inc(&mm->mm_count);
 
-	atomic_set(&ctx->users, 1);
+	atomic_set(&ctx->users, 2);
 	spin_lock_init(&ctx->ctx_lock);
 	spin_lock_init(&ctx->ring_info.ring_lock);
 	init_waitqueue_head(&ctx->wait);
@@ -609,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
 		fput(req->ki_filp);
 
 		/* Link the iocb into the context's free list */
+		rcu_read_lock();
 		spin_lock_irq(&ctx->ctx_lock);
 		really_put_req(ctx, req);
+		/*
+		 * at that point ctx might've been killed, but actual
+		 * freeing is RCU'd
+		 */
 		spin_unlock_irq(&ctx->ctx_lock);
+		rcu_read_unlock();
 
-		put_ioctx(ctx);
 		spin_lock_irq(&fput_lock);
 	}
 	spin_unlock_irq(&fput_lock);
@@ -644,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	 * this function will be executed w/out any aio kthread wakeup.
 	 */
 	if (unlikely(!fput_atomic(req->ki_filp))) {
-		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
 		spin_unlock(&fput_lock);
@@ -1338,10 +1336,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 	ret = PTR_ERR(ioctx);
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
-		if (!ret)
+		if (!ret) {
+			put_ioctx(ioctx);
 			return 0;
-
-		get_ioctx(ioctx);	/* io_destroy() expects us to hold a ref */
+		}
 		io_destroy(ioctx);
 	}
 