author	Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2009-12-10 18:52:17 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2009-12-10 18:52:17 -0500
commit	11a68244e16b0c35e122dd55b4e7c595e0fb67a1 (patch)
tree	ac982eb9abf6eca76ae35a9e0900323b54bb3ab9
parent	b4324feeae304ae39e631a254d238a7d63be004b (diff)
dm: refactor request based completion functions
This patch factors out the clone completion code, dm_done(), from dm_softirq_done() in preparation for a subsequent patch. No functional change.

dm_done() will be used in barrier completion, which can't use and doesn't need softirq. The softirq_done callback needs to get a clone from an original request, but it can't in the barrier case, where an original request is shared by multiple clones. On the other hand, completing barrier clones doesn't involve re-submitting requests, which was the primary reason softirq was needed.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
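For readers skimming the diff below, the return-value convention that the new dm_done() dispatches on can be modelled outside the kernel. The standalone C sketch that follows is an illustration of that control flow only: model_dm_done, example_rq_end_io and struct clone_rq are invented stand-ins, and the DM_ENDIO_* values are assumed to mirror include/linux/device-mapper.h; this is not the implementation in drivers/md/dm.c.

/*
 * Standalone model of the dispatch logic that dm_done() introduces.
 * All types, constants and helpers are simplified stand-ins for
 * illustration; they are not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define DM_ENDIO_INCOMPLETE 1	/* target will complete the I/O itself */
#define DM_ENDIO_REQUEUE    2	/* target asks for the I/O to be requeued */

struct clone_rq {
	int  error;		/* error carried over from the original request */
	bool mapped;		/* false if the original was flagged as failed */
};

/* Hypothetical per-target endio hook; returns <= 0, INCOMPLETE or REQUEUE. */
static int example_rq_end_io(struct clone_rq *clone, int error)
{
	(void)clone;		/* unused in this toy hook */
	return error ? DM_ENDIO_REQUEUE : 0;
}

/* Mirrors the shape of dm_done(clone, error, mapped). */
static void model_dm_done(struct clone_rq *clone, int error, bool mapped)
{
	int r = error;

	if (mapped)		/* consult the target only if the clone was mapped */
		r = example_rq_end_io(clone, error);

	if (r <= 0)
		printf("complete the I/O with status %d\n", r);
	else if (r == DM_ENDIO_INCOMPLETE)
		return;		/* the target will finish it later */
	else if (r == DM_ENDIO_REQUEUE)
		printf("requeue the unmapped clone\n");
	else
		printf("unimplemented endio return value: %d\n", r);
}

int main(void)
{
	struct clone_rq ok     = { .error = 0,  .mapped = true };
	struct clone_rq failed = { .error = -5, .mapped = false };	/* -EIO */

	model_dm_done(&ok, ok.error, ok.mapped);		/* completes normally */
	model_dm_done(&failed, failed.error, failed.mapped);	/* bypasses the endio hook */
	return 0;
}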
-rw-r--r--	drivers/md/dm.c	37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 01d741a0c079..c65be45a4c42 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -846,35 +846,46 @@ static void dm_end_request(struct request *clone, int error)
 	rq_completed(md, rw, 1);
 }
 
-/*
- * Request completion handler for request-based dm
- */
-static void dm_softirq_done(struct request *rq)
+static void dm_done(struct request *clone, int error, bool mapped)
 {
-	struct request *clone = rq->completion_data;
+	int r = error;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
-	int error = tio->error;
 
-	if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
-		error = rq_end_io(tio->ti, clone, error, &tio->info);
+	if (mapped && rq_end_io)
+		r = rq_end_io(tio->ti, clone, error, &tio->info);
 
-	if (error <= 0)
+	if (r <= 0)
 		/* The target wants to complete the I/O */
-		dm_end_request(clone, error);
-	else if (error == DM_ENDIO_INCOMPLETE)
+		dm_end_request(clone, r);
+	else if (r == DM_ENDIO_INCOMPLETE)
 		/* The target will handle the I/O */
 		return;
-	else if (error == DM_ENDIO_REQUEUE)
+	else if (r == DM_ENDIO_REQUEUE)
 		/* The target wants to requeue the I/O */
 		dm_requeue_unmapped_request(clone);
 	else {
-		DMWARN("unimplemented target endio return value: %d", error);
+		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
 	}
 }
 
 /*
+ * Request completion handler for request-based dm
+ */
+static void dm_softirq_done(struct request *rq)
+{
+	bool mapped = true;
+	struct request *clone = rq->completion_data;
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	if (rq->cmd_flags & REQ_FAILED)
+		mapped = false;
+
+	dm_done(clone, tio->error, mapped);
+}
+
+/*
  * Complete the clone and the original request with the error status
  * through softirq context.
  */