author		Al Viro <viro@zeniv.linux.org.uk>	2012-06-24 02:00:10 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2012-07-22 15:57:59 -0400
commit		3ffa3c0e3f6e62f67fc2346ca60161dfb030083d (patch)
tree		d9db56bd658d3bc594ff90800c66596081d2239d /fs/aio.c
parent		4a9d4b024a3102fc083c925c242d98ac27b1c5f6 (diff)
aio: now fput() is OK from interrupt context; get rid of manual delayed __fput()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	73
1 file changed, 3 insertions, 70 deletions
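For context on why all of this machinery can go: as the subject line says, fput() is now safe to call from interrupt context, because a final reference drop defers the heavy __fput() work internally instead of running it in the caller. A minimal sketch of what an I/O completion path can therefore look like; the structure and function names below are hypothetical illustrations, not part of this patch:

/* Hedged illustration only: a made-up completion handler that may run
 * in irq context while holding the last reference to a struct file.
 * It can simply call fput(); if that happens to be the final drop,
 * fput() defers the real __fput() rather than doing it here.
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>

struct my_request {			/* hypothetical request object */
	struct file	*filp;
	void		*cookie;
};

static void my_complete_request(struct my_request *req)
{
	fput(req->filp);		/* OK even from interrupt context */
	req->filp = NULL;
	kfree(req);
}

This is exactly the shape the last hunk below gives __aio_put_req(): an unconditional fput() followed by the usual bookkeeping, with no private deferral list.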
@@ -56,13 +56,6 @@ static struct kmem_cache *kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
-/* Used for rare fput completion. */
-static void aio_fput_routine(struct work_struct *);
-static DECLARE_WORK(fput_work, aio_fput_routine);
-
-static DEFINE_SPINLOCK(fput_lock);
-static LIST_HEAD(fput_head);
-
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
@@ -479,7 +472,6 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 {
 	unsigned short allocated, to_alloc;
 	long avail;
-	bool called_fput = false;
 	struct kiocb *req, *n;
 	struct aio_ring *ring;
 
@@ -495,28 +487,11 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 	if (allocated == 0)
 		goto out;
 
-retry:
 	spin_lock_irq(&ctx->ctx_lock);
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
 
 	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
 	BUG_ON(avail < 0);
-	if (avail == 0 && !called_fput) {
-		/*
-		 * Handle a potential starvation case. It is possible that
-		 * we hold the last reference on a struct file, causing us
-		 * to delay the final fput to non-irq context. In this case,
-		 * ctx->reqs_active is artificially high. Calling the fput
-		 * routine here may free up a slot in the event completion
-		 * ring, allowing this allocation to succeed.
-		 */
-		kunmap_atomic(ring);
-		spin_unlock_irq(&ctx->ctx_lock);
-		aio_fput_routine(NULL);
-		called_fput = true;
-		goto retry;
-	}
-
 	if (avail < allocated) {
 		/* Trim back the number of requests. */
 		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
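The block removed above existed only because of the old deferral: a kiocb whose final fput() had been punted to the workqueue still counted against ctx->reqs_active, so the completion ring could look full until the worker ran. Distilled, the deleted logic was a flush-and-retry-once pattern roughly like the sketch below; the type and helper names are illustrative stand-ins, not the kernel's API.

/* Sketch of the removed workaround only, with made-up helpers standing
 * in for the aio internals: if no slot is free and deferred puts may
 * still be pending, drain them once and recount before giving up.
 */
#include <linux/types.h>

struct my_ctx {
	long	reqs_active;	/* requests still holding ring slots */
};

long ring_space(struct my_ctx *ctx);	/* stub: free slots in the ring */
void drain_deferred_puts(void);		/* stub: run pending fputs now */

static long reserve_slots(struct my_ctx *ctx, long wanted)
{
	bool flushed = false;
	long avail;

retry:
	avail = ring_space(ctx) - ctx->reqs_active;
	if (avail == 0 && !flushed) {
		drain_deferred_puts();	/* may lower reqs_active */
		flushed = true;
		goto retry;
	}
	return avail < wanted ? avail : wanted;
}

With fput() no longer parked on a private list, reqs_active is never artificially inflated and the retry becomes dead code, which is why the hunk drops it outright.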
@@ -570,36 +545,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 	wake_up_all(&ctx->wait);
 }
 
-static void aio_fput_routine(struct work_struct *data)
-{
-	spin_lock_irq(&fput_lock);
-	while (likely(!list_empty(&fput_head))) {
-		struct kiocb *req = list_kiocb(fput_head.next);
-		struct kioctx *ctx = req->ki_ctx;
-
-		list_del(&req->ki_list);
-		spin_unlock_irq(&fput_lock);
-
-		/* Complete the fput(s) */
-		if (req->ki_filp != NULL)
-			fput(req->ki_filp);
-
-		/* Link the iocb into the context's free list */
-		rcu_read_lock();
-		spin_lock_irq(&ctx->ctx_lock);
-		really_put_req(ctx, req);
-		/*
-		 * at that point ctx might've been killed, but actual
-		 * freeing is RCU'd
-		 */
-		spin_unlock_irq(&ctx->ctx_lock);
-		rcu_read_unlock();
-
-		spin_lock_irq(&fput_lock);
-	}
-	spin_unlock_irq(&fput_lock);
-}
-
 /* __aio_put_req
  *	Returns true if this put was the last user of the request.
  */
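One detail in the worker deleted above deserves a note: it wrapped the ctx->ctx_lock critical section in rcu_read_lock()/rcu_read_unlock() because, as its own comment says, the kioctx could already have been killed by then, with only the RCU grace period keeping its memory valid. In isolation that idiom (unlink the object immediately, free it via call_rcu(), so a read-side section may still lock it) looks roughly like the sketch below. Names are illustrative, and the sketch glosses over how the caller got the pointer, which in aio was via the still-live kiocb.

/* Generic sketch of the RCU-deferred-free idiom, not aio code: the
 * object is unlinked right away, but kfree() only happens after a
 * grace period, so a path that legitimately still holds the pointer
 * can take its lock inside rcu_read_lock() without use-after-free.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	spinlock_t	lock;
	struct rcu_head	rcu;
	int		dead;
};

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

static void obj_kill(struct obj *o)
{
	/* unlink o from whatever structure readers use to find it */
	spin_lock_irq(&o->lock);
	o->dead = 1;
	spin_unlock_irq(&o->lock);
	call_rcu(&o->rcu, obj_free_rcu);	/* reclaim after grace period */
}

static void obj_finish_work(struct obj *o)
{
	rcu_read_lock();		/* o's memory stays valid in here */
	spin_lock_irq(&o->lock);
	/* ... final bookkeeping, even if o->dead is already set ... */
	spin_unlock_irq(&o->lock);
	rcu_read_unlock();
}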
@@ -618,21 +563,9 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
-	/*
-	 * Try to optimize the aio and eventfd file* puts, by avoiding to
-	 * schedule work in case it is not final fput() time. In normal cases,
-	 * we would not be holding the last reference to the file*, so
-	 * this function will be executed w/out any aio kthread wakeup.
-	 */
-	if (unlikely(!fput_atomic(req->ki_filp))) {
-		spin_lock(&fput_lock);
-		list_add(&req->ki_list, &fput_head);
-		spin_unlock(&fput_lock);
-		schedule_work(&fput_work);
-	} else {
-		req->ki_filp = NULL;
-		really_put_req(ctx, req);
-	}
+	fput(req->ki_filp);
+	req->ki_filp = NULL;
+	really_put_req(ctx, req);
 	return 1;
 }
 