author    | Zach Brown <zach.brown@oracle.com>    | 2005-11-13 19:07:34 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-13 21:14:16 -0500
commit    | d00689af6b3b6ba9e1fdefec3bd62edc860c385d (patch)
tree      | 823a0998010a3074f8ae3cd0874de0cb91be719c
parent    | 20dcae32439384b6863c626bb3b2a09bed65b33e (diff)
[PATCH] aio: replace locking comments with assert_spin_locked()
aio: replace locking comments with assert_spin_locked()
Signed-off-by: Zach Brown <zach.brown@oracle.com>
Acked-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | fs/aio.c | 17
1 file changed, 12 insertions, 5 deletions
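
The change below turns locking rules that previously lived only in comments into runtime checks. As a rough sketch of the pattern (using made-up foo_* names, not the actual fs/aio.c code), a function that requires its caller to hold a spinlock can assert that requirement instead of merely documenting it:

#include <linux/spinlock.h>
#include <linux/list.h>

struct foo_ctx {
        spinlock_t lock;
        struct list_head items;
};

/*
 * Before this kind of change, the rule "must be called with ctx->lock held"
 * lived only in a comment above the function.  assert_spin_locked() turns a
 * violated assumption into a loud failure on debug spinlock builds instead
 * of silent list corruption.
 */
static void foo_clear_items(struct foo_ctx *ctx)
{
        assert_spin_locked(&ctx->lock); /* was: "caller must hold ctx->lock" */

        while (!list_empty(&ctx->items))
                list_del_init(ctx->items.next);
}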
@@ -457,6 +457,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 
 static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
+        assert_spin_locked(&ctx->ctx_lock);
+
         if (req->ki_dtor)
                 req->ki_dtor(req);
         kmem_cache_free(kiocb_cachep, req);
@@ -498,6 +500,8 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
         dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
                 req, atomic_read(&req->ki_filp->f_count));
 
+        assert_spin_locked(&ctx->ctx_lock);
+
         req->ki_users --;
         if (unlikely(req->ki_users < 0))
                 BUG();
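
The double-underscore prefix on __aio_put_req() follows the usual kernel convention: the __ variant expects the caller to already hold the lock, while an un-prefixed wrapper acquires it around the call. A sketch of that pairing with illustrative foo_* names (not the actual aio functions):

#include <linux/spinlock.h>
#include <linux/list.h>

struct foo_req {
        int users;
        struct list_head list;
};

struct foo_ctx {
        spinlock_t lock;
};

/* Locked variant: the caller must already hold ctx->lock. */
static int __foo_put(struct foo_ctx *ctx, struct foo_req *req)
{
        assert_spin_locked(&ctx->lock);

        if (--req->users == 0) {
                list_del(&req->list);
                return 1;               /* last reference dropped */
        }
        return 0;
}

/* Unlocked wrapper: takes the lock itself (irq-disabling variant shown). */
static int foo_put(struct foo_ctx *ctx, struct foo_req *req)
{
        int ret;

        spin_lock_irq(&ctx->lock);
        ret = __foo_put(ctx, req);
        spin_unlock_irq(&ctx->lock);
        return ret;
}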
@@ -619,14 +623,13 @@ static void unuse_mm(struct mm_struct *mm)
  * the kiocb (to tell the caller to activate the work
  * queue to process it), or 0, if it found that it was
  * already queued.
- *
- * Should be called with the spin lock iocb->ki_ctx->ctx_lock
- * held
  */
 static inline int __queue_kicked_iocb(struct kiocb *iocb)
 {
         struct kioctx *ctx = iocb->ki_ctx;
 
+        assert_spin_locked(&ctx->ctx_lock);
+
         if (list_empty(&iocb->ki_run_list)) {
                 list_add_tail(&iocb->ki_run_list,
                         &ctx->run_list);
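
__queue_kicked_iocb() uses list_empty() on the iocb's own run_list node to detect "already queued"; that test is only safe because ctx_lock serializes it with the list_add_tail(). A sketch of the idiom with hypothetical names:

#include <linux/spinlock.h>
#include <linux/list.h>

struct kick_item {
        struct list_head run_node;      /* empty when not queued */
};

struct kick_ctx {
        spinlock_t lock;
        struct list_head run_list;
};

/*
 * Returns 1 if the item was newly queued (caller should kick the worker),
 * 0 if it was already on the run list.  The caller must hold ctx->lock:
 * the lock is what makes the list_empty() check and the list_add_tail()
 * atomic with respect to other queuers.  Nodes must be set up with
 * INIT_LIST_HEAD() and removed with list_del_init() for the emptiness
 * test to mean "not queued".
 */
static int queue_if_idle(struct kick_ctx *ctx, struct kick_item *item)
{
        assert_spin_locked(&ctx->lock);

        if (list_empty(&item->run_node)) {
                list_add_tail(&item->run_node, &ctx->run_list);
                return 1;
        }
        return 0;
}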
@@ -771,13 +774,15 @@ out:
  * Process all pending retries queued on the ioctx
  * run list.
  * Assumes it is operating within the aio issuer's mm
- * context. Expects to be called with ctx->ctx_lock held
+ * context.
  */
 static int __aio_run_iocbs(struct kioctx *ctx)
 {
         struct kiocb *iocb;
         LIST_HEAD(run_list);
 
+        assert_spin_locked(&ctx->ctx_lock);
+
         list_splice_init(&ctx->run_list, &run_list);
         while (!list_empty(&run_list)) {
                 iocb = list_entry(run_list.next, struct kiocb,
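
__aio_run_iocbs() also shows the splice-then-walk idiom: pending work is moved onto a stack-local list head in O(1) and then drained entry by entry. A generic sketch of that pattern, again with invented names rather than the aio structures (the actual processing of each item is elided):

#include <linux/spinlock.h>
#include <linux/list.h>

struct work_item {
        struct list_head node;
};

struct work_ctx {
        spinlock_t lock;
        struct list_head pending;
};

/* Caller holds ctx->lock, mirroring the assertion in __aio_run_iocbs(). */
static int run_pending(struct work_ctx *ctx)
{
        LIST_HEAD(local);               /* private list head on the stack */
        int count = 0;

        assert_spin_locked(&ctx->lock);

        /* Move every queued item onto the local list in one O(1) splice. */
        list_splice_init(&ctx->pending, &local);

        while (!list_empty(&local)) {
                struct work_item *item =
                        list_entry(local.next, struct work_item, node);

                list_del_init(&item->node);     /* detach before processing */
                count++;                        /* process item here */
        }
        return count;
}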
@@ -1604,12 +1609,14 @@ asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
 
 /* lookup_kiocb
  *        Finds a given iocb for cancellation.
- *        MUST be called with ctx->ctx_lock held.
  */
 static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
                                   u32 key)
 {
         struct list_head *pos;
+
+        assert_spin_locked(&ctx->ctx_lock);
+
         /* TODO: use a hash or array, this sucks. */
         list_for_each(pos, &ctx->active_reqs) {
                 struct kiocb *kiocb = list_kiocb(pos);
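
lookup_kiocb() is a linear scan of ctx->active_reqs performed while the caller holds ctx_lock, hence both the new assertion and the TODO about a hash or array. A sketch of that locked linear lookup with placeholder names and a simplified integer key:

#include <linux/spinlock.h>
#include <linux/list.h>

struct foo_req {
        struct list_head node;
        unsigned int key;               /* identifier supplied at submit time */
};

struct foo_ctx {
        spinlock_t lock;
        struct list_head active;
};

/* Caller must hold ctx->lock, as lookup_kiocb() now asserts. */
static struct foo_req *foo_lookup(struct foo_ctx *ctx, unsigned int key)
{
        struct list_head *pos;

        assert_spin_locked(&ctx->lock);

        list_for_each(pos, &ctx->active) {
                struct foo_req *req = list_entry(pos, struct foo_req, node);

                if (req->key == key)
                        return req;     /* still protected by ctx->lock */
        }
        return NULL;
}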