Diffstat (limited to 'fs/aio.c')
 -rw-r--r--  fs/aio.c | 47
 1 file changed, 22 insertions(+), 25 deletions(-)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -29,7 +29,6 @@
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
-#include <linux/rcuref.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -457,6 +456,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 
 static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
+	assert_spin_locked(&ctx->ctx_lock);
+
 	if (req->ki_dtor)
 		req->ki_dtor(req);
 	kmem_cache_free(kiocb_cachep, req);
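This hunk and the ones below all follow one pattern: replace "caller must hold ctx_lock" comments with assert_spin_locked(), so a caller that forgets the lock trips an immediate BUG instead of racing silently. The kernel assertion is a cheap check of the lock state, not a lock operation, which is why it can stand in for the comment. A self-contained userspace sketch of the same pattern, with hypothetical names playing the role of the kernel helpers:

/* sketch.c - "assert the caller holds the lock" in plain C.
 * Hypothetical types and helpers; assert_spin_locked() in the kernel
 * plays the role of the assert() inside __put_req() below.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	int locked;		/* bookkeeping for the assertion only */
	int nr_reqs;
};

static void ctx_lock(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->locked = 1;
}

static void ctx_unlock(struct ctx *c)
{
	c->locked = 0;
	pthread_mutex_unlock(&c->lock);
}

/* Like really_put_req(): only legal with the lock held. */
static void __put_req(struct ctx *c)
{
	assert(c->locked);	/* kernel: assert_spin_locked(&ctx->ctx_lock) */
	c->nr_reqs--;
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_reqs = 1 };

	ctx_lock(&c);
	__put_req(&c);		/* fine: lock held */
	ctx_unlock(&c);

	/* calling __put_req(&c) here would trip the assert, like BUG() in-kernel */
	printf("nr_reqs=%d\n", c.nr_reqs);
	return 0;
}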
@@ -498,6 +499,8 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
 		req, atomic_read(&req->ki_filp->f_count));
 
+	assert_spin_locked(&ctx->ctx_lock);
+
 	req->ki_users --;
 	if (unlikely(req->ki_users < 0))
 		BUG();
@@ -510,7 +513,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	/* Must be done under the lock to serialise against cancellation.
 	 * Call this aio_fput as it duplicates fput via the fput_work.
 	 */
-	if (unlikely(rcuref_dec_and_test(&req->ki_filp->f_count))) {
+	if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
 		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
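The rcuref_dec_and_test() to atomic_dec_and_test() switch only changes how f_count is represented (back to a plain atomic_t); the contract relied on here is unchanged: exactly one caller, the one that drops the count to zero, sees a true return and schedules the deferred fput. A userspace C11 sketch of that dec-and-test contract (an analogue only, not the kernel primitive):

/* dec_and_test.c - returns true only for the caller that drops the
 * count to zero, which is what makes "last ref does the fput" safe
 * without any lock around the refcount itself.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool dec_and_test(atomic_int *v)
{
	/* atomic_fetch_sub returns the value *before* the decrement */
	return atomic_fetch_sub(v, 1) == 1;
}

int main(void)
{
	atomic_int f_count = 2;

	printf("%d\n", dec_and_test(&f_count));	/* 0: count is now 1 */
	printf("%d\n", dec_and_test(&f_count));	/* 1: we dropped the last ref */
	return 0;
}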
@@ -619,14 +622,13 @@ static void unuse_mm(struct mm_struct *mm)
  * the kiocb (to tell the caller to activate the work
  * queue to process it), or 0, if it found that it was
  * already queued.
- *
- * Should be called with the spin lock iocb->ki_ctx->ctx_lock
- * held
  */
 static inline int __queue_kicked_iocb(struct kiocb *iocb)
 {
 	struct kioctx *ctx = iocb->ki_ctx;
 
+	assert_spin_locked(&ctx->ctx_lock);
+
 	if (list_empty(&iocb->ki_run_list)) {
 		list_add_tail(&iocb->ki_run_list,
 			&ctx->run_list);
@@ -771,13 +773,15 @@ out:
  * Process all pending retries queued on the ioctx
  * run list.
  * Assumes it is operating within the aio issuer's mm
- * context. Expects to be called with ctx->ctx_lock held
+ * context.
  */
 static int __aio_run_iocbs(struct kioctx *ctx)
 {
 	struct kiocb *iocb;
 	LIST_HEAD(run_list);
 
+	assert_spin_locked(&ctx->ctx_lock);
+
 	list_splice_init(&ctx->run_list, &run_list);
 	while (!list_empty(&run_list)) {
 		iocb = list_entry(run_list.next, struct kiocb,
@@ -937,28 +941,19 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 	unsigned long	tail;
 	int		ret;
 
-	/* Special case handling for sync iocbs: events go directly
-	 * into the iocb for fast handling.  Note that this will not
-	 * work if we allow sync kiocbs to be cancelled. in which
-	 * case the usage count checks will have to move under ctx_lock
-	 * for all cases.
+	/*
+	 * Special case handling for sync iocbs:
+	 *  - events go directly into the iocb for fast handling
+	 *  - the sync task with the iocb in its stack holds the single iocb
+	 *    ref, no other paths have a way to get another ref
+	 *  - the sync task helpfully left a reference to itself in the iocb
 	 */
 	if (is_sync_kiocb(iocb)) {
-		int ret;
-
+		BUG_ON(iocb->ki_users != 1);
 		iocb->ki_user_data = res;
-		if (iocb->ki_users == 1) {
-			iocb->ki_users = 0;
-			ret = 1;
-		} else {
-			spin_lock_irq(&ctx->ctx_lock);
-			iocb->ki_users--;
-			ret = (0 == iocb->ki_users);
-			spin_unlock_irq(&ctx->ctx_lock);
-		}
-		/* sync iocbs put the task here for us */
+		iocb->ki_users = 0;
 		wake_up_process(iocb->ki_obj.tsk);
-		return ret;
+		return 1;
 	}
 
 	info = &ctx->ring_info;
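The simplified sync path works because a sync kiocb lives on its issuer's stack and no other path can take a reference to it, so ki_users is always exactly 1 when aio_complete() runs; completion can therefore zero it and wake the waiting task without touching ctx_lock, and the waiter sleeps until ki_users drops to 0 before reading the result from ki_user_data (the in-tree waiter of this era is wait_on_sync_kiocb() in this file). A userspace sketch of that handshake, using a condvar in place of wake_up_process()/schedule() and hypothetical names:

/* sync_wait.c - submitter owns the only reference (users == 1); the
 * completer records the result, zeroes the count and wakes the waiter.
 * Userspace stand-in, not the kernel API.
 */
#include <pthread.h>
#include <stdio.h>

struct sync_iocb {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int users;		/* 1 while the I/O is in flight */
	long user_data;		/* result, like ki_user_data */
};

static void *complete_io(void *arg)
{
	struct sync_iocb *iocb = arg;

	pthread_mutex_lock(&iocb->lock);
	iocb->user_data = 4096;			/* e.g. bytes transferred */
	iocb->users = 0;			/* drop the single ref... */
	pthread_cond_signal(&iocb->wait);	/* ...and wake the waiter */
	pthread_mutex_unlock(&iocb->lock);
	return NULL;
}

int main(void)
{
	struct sync_iocb iocb = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.users = 1,
	};
	pthread_t t;

	pthread_create(&t, NULL, complete_io, &iocb);

	/* like the kernel waiter: sleep until the completer drops users */
	pthread_mutex_lock(&iocb.lock);
	while (iocb.users)
		pthread_cond_wait(&iocb.wait, &iocb.lock);
	pthread_mutex_unlock(&iocb.lock);

	pthread_join(t, NULL);
	printf("result=%ld\n", iocb.user_data);
	return 0;
}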
@@ -1613,12 +1608,14 @@ asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
 
 /* lookup_kiocb
  *	Finds a given iocb for cancellation.
- *	MUST be called with ctx->ctx_lock held.
  */
 static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 				  u32 key)
 {
 	struct list_head *pos;
+
+	assert_spin_locked(&ctx->ctx_lock);
+
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each(pos, &ctx->active_reqs) {
 		struct kiocb *kiocb = list_kiocb(pos);