author     Zach Brown <zach.brown@oracle.com>        2005-09-30 14:58:54 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2005-09-30 15:41:17 -0400
commit     998765e5588b197737d457e16f72832d8036190f
tree       3a87877bffebc776a7102ab1b5f247a45d1c4ab3
parent     6e3254c4e2927c117044a02acf5f5b56e1373053
[PATCH] aio: lock around kiocbTryKick()
Only one of the run or kick paths is supposed to put an iocb on the run list. If both of them do it, then one of them can end up referencing a freed iocb. The kick path could set the Kicked bit before acquiring the ctx_lock and putting the iocb on the run list. The run path, while holding the ctx_lock, could see this partial kick and mistake it for a kick that was deferred while it was doing work with the run_list NULLed out. It would then race with the kick thread to add the iocb to the run list.

This patch moves the kick-bit setting under the ctx_lock so that only one of the kick or run paths queues the iocb on the run list, as intended.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
Signed-off-by: Benjamin LaHaise <bcrl@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
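To make the race easier to see outside the kernel, here is a minimal user-space sketch of the two orderings the message describes, assuming a pthread mutex in place of the ctx_lock spinlock and a plain bool in place of the Kicked bit; fake_iocb, kick_racy and kick_fixed are illustrative names, not kernel code.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_iocb {
        bool kicked;              /* stand-in for the Kicked bit          */
        struct fake_iocb *next;   /* stand-in for run-list membership     */
};

static pthread_mutex_t run_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_iocb *run_list;    /* head of the pending-work list */

/* Old ordering: the flag is tested and set before the lock is taken.
 * Another path that already holds the lock can observe the flag set,
 * assume the kick was deferred earlier, and queue the iocb itself --
 * both paths then reference (or free) the same iocb. */
static void kick_racy(struct fake_iocb *iocb)
{
        if (!iocb->kicked) {
                iocb->kicked = true;
                pthread_mutex_lock(&run_list_lock);
                iocb->next = run_list;
                run_list = iocb;
                pthread_mutex_unlock(&run_list_lock);
        }
}

/* Patched ordering: the flag is tested and set only while holding the
 * lock, so exactly one caller can win and queue the iocb. */
static void kick_fixed(struct fake_iocb *iocb)
{
        pthread_mutex_lock(&run_list_lock);
        if (!iocb->kicked) {
                iocb->kicked = true;
                iocb->next = run_list;
                run_list = iocb;
        }
        pthread_mutex_unlock(&run_list_lock);
}

int main(void)
{
        struct fake_iocb iocb = { 0 };

        kick_fixed(&iocb);
        kick_fixed(&iocb);      /* second kick is a no-op, not a double add */
        assert(run_list == &iocb && iocb.next == NULL);
        (void)kick_racy;        /* shown only for contrast with the old code */
        return 0;
}

The point of the patch is the second variant: the test-and-set happens inside the same critical section the run path uses, so only one path can ever see the flag clear and queue the iocb.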
-rw-r--r--  fs/aio.c | 19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 0e11e31dbb77..b8f296999c04 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -899,16 +899,24 @@ static void aio_kick_handler(void *data)
  * and if required activate the aio work queue to process
  * it
  */
-static void queue_kicked_iocb(struct kiocb *iocb)
+static void try_queue_kicked_iocb(struct kiocb *iocb)
 {
         struct kioctx   *ctx = iocb->ki_ctx;
         unsigned long flags;
         int run = 0;
 
-        WARN_ON((!list_empty(&iocb->ki_wait.task_list)));
+        /* We're supposed to be the only path putting the iocb back on the run
+         * list.  If we find that the iocb is *back* on a wait queue already
+         * than retry has happened before we could queue the iocb.  This also
+         * means that the retry could have completed and freed our iocb, no
+         * good. */
+        BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
 
         spin_lock_irqsave(&ctx->ctx_lock, flags);
-        run = __queue_kicked_iocb(iocb);
+        /* set this inside the lock so that we can't race with aio_run_iocb()
+         * testing it and putting the iocb on the run list under the lock */
+        if (!kiocbTryKick(iocb))
+                run = __queue_kicked_iocb(iocb);
         spin_unlock_irqrestore(&ctx->ctx_lock, flags);
         if (run)
                 aio_queue_work(ctx);
@@ -931,10 +939,7 @@ void fastcall kick_iocb(struct kiocb *iocb)
                 return;
         }
 
-        /* If its already kicked we shouldn't queue it again */
-        if (!kiocbTryKick(iocb)) {
-                queue_kicked_iocb(iocb);
-        }
+        try_queue_kicked_iocb(iocb);
 }
 EXPORT_SYMBOL(kick_iocb);
 