author    Al Viro <viro@zeniv.linux.org.uk>    2010-05-26 15:13:55 -0400
committer Al Viro <viro@zeniv.linux.org.uk>    2010-05-27 22:03:07 -0400
commit    d7065da038227a4d09a244e6014e0186a6bd21d0 (patch)
tree      0b3b30a6ec59aa03e5fb7084eed31f2a5dfc9686 /fs/aio.c
parent    176306f59ac7a35369cbba87aff13e14c5916074 (diff)
get rid of the magic around f_count in aio
__aio_put_req() plays sick games with the file refcount. What it wants is fput() from atomic context; it is almost always called with f_count > 1, so it only has to deal with delayed work in the rare case when its reference happens to be the last one.

The current code decrements f_count and, if it hasn't hit 0, everything is fine. Otherwise it keeps a pointer to the struct file (with zero f_count!) around and has delayed work do __fput() on it.

A better way to do it: use atomic_long_add_unless(..., -1, 1) instead of !atomic_long_dec_and_test(). IOW, decrement it only if it's not the last reference, and leave the refcount alone if it was. Then use a normal fput() in the delayed work.

I've made that atomic_long_add_unless() call a new helper - fput_atomic(). It drops a reference to a file if that is safe to do in atomic context (i.e. if it's not the last one), and tells the caller whether it was able to do so. aio.c is converted to it and the __fput() use is gone; req->ki_filp *always* contributes to the refcount now. And __fput() became static.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
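For reference, a minimal sketch of what fput_atomic() boils down to, going by the description above; the helper itself lives outside fs/aio.c, so its exact definition is not part of this diff and the body below is an assumption reconstructed from the commit message:

	#include <linux/fs.h>	/* struct file, f_count */

	/*
	 * Sketch of fput_atomic(): drop a reference to a file only if it
	 * is not the last one, so the caller never runs the final-fput
	 * path from atomic context.
	 *
	 * Returns nonzero if the reference was dropped, 0 if this was the
	 * last reference and f_count was left untouched.
	 */
	int fput_atomic(struct file *file)
	{
		/*
		 * atomic_long_add_unless(v, a, u) adds a to *v unless
		 * *v == u, and returns nonzero iff the add happened.
		 * With a = -1 and u = 1 it decrements f_count only
		 * while f_count > 1.
		 */
		return atomic_long_add_unless(&file->f_count, -1, 1);
	}

When it returns 0 the caller still holds its reference and can hand the file to delayed work, which then does a plain fput() from process context. Contrast this with the old scheme, where atomic_long_dec_and_test() could leave a struct file with f_count == 0 parked on a list, waiting for __fput().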
Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 48fdeebdb544..1ccf25cef1f0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -527,7 +527,7 @@ static void aio_fput_routine(struct work_struct *data)
 
 	/* Complete the fput(s) */
 	if (req->ki_filp != NULL)
-		__fput(req->ki_filp);
+		fput(req->ki_filp);
 
 	/* Link the iocb into the context's free list */
 	spin_lock_irq(&ctx->ctx_lock);
@@ -560,11 +560,11 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 
 	/*
 	 * Try to optimize the aio and eventfd file* puts, by avoiding to
-	 * schedule work in case it is not __fput() time. In normal cases,
+	 * schedule work in case it is not final fput() time. In normal cases,
 	 * we would not be holding the last reference to the file*, so
 	 * this function will be executed w/out any aio kthread wakeup.
 	 */
-	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
+	if (unlikely(!fput_atomic(req->ki_filp))) {
 		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);