author	Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2009-12-10 18:52:17 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2009-12-10 18:52:17 -0500
commit	980691e5f3a1b5ebbb2d34014e028fd7f1c6e4fb (patch)
tree	a4d593cf3818a54c41e6a4115cad63128b738c0b /drivers/md
parent	11a68244e16b0c35e122dd55b4e7c595e0fb67a1 (diff)
dm: move dm_end_request
This patch moves dm_end_request() to make the next patch more readable.
No functional change.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm.c	62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c65be45a4c42..821a5dd6a8d1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -752,6 +752,37 @@ static void free_rq_clone(struct request *clone)
 	free_rq_tio(tio);
 }
 
+/*
+ * Complete the clone and the original request.
+ * Must be called without queue lock.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+	int rw = rq_data_dir(clone);
+	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
+
+	if (blk_pc_request(rq)) {
+		rq->errors = clone->errors;
+		rq->resid_len = clone->resid_len;
+
+		if (rq->sense)
+			/*
+			 * We are using the sense buffer of the original
+			 * request.
+			 * So setting the length of the sense data is enough.
+			 */
+			rq->sense_len = clone->sense_len;
+	}
+
+	free_rq_clone(clone);
+
+	blk_end_request_all(rq, error);
+
+	rq_completed(md, rw, 1);
+}
+
 static void dm_unprep_request(struct request *rq)
 {
 	struct request *clone = rq->special;
@@ -815,37 +846,6 @@ static void start_queue(struct request_queue *q)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-/*
- * Complete the clone and the original request.
- * Must be called without queue lock.
- */
-static void dm_end_request(struct request *clone, int error)
-{
-	int rw = rq_data_dir(clone);
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-	struct request *rq = tio->orig;
-
-	if (blk_pc_request(rq)) {
-		rq->errors = clone->errors;
-		rq->resid_len = clone->resid_len;
-
-		if (rq->sense)
-			/*
-			 * We are using the sense buffer of the original
-			 * request.
-			 * So setting the length of the sense data is enough.
-			 */
-			rq->sense_len = clone->sense_len;
-	}
-
-	free_rq_clone(clone);
-
-	blk_end_request_all(rq, error);
-
-	rq_completed(md, rw, 1);
-}
-
 static void dm_done(struct request *clone, int error, bool mapped)
 {
 	int r = error;
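
Note: the function being moved completes a cloned request and then the original request it was cloned from — completion status (and, for BLOCK_PC requests, the residual and sense lengths) is copied from the clone back to the original, the clone's per-request data is freed, and the original is finished with blk_end_request_all(). The user-space toy model below only illustrates that clone-to-original hand-off; all names here (toy_request, toy_tio, toy_end_request) are simplified stand-ins invented for this sketch, not kernel APIs.

/* Toy user-space model of the clone -> original completion hand-off
 * that dm_end_request() performs.  Names are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct toy_request {
	const char *name;
	int errors;
	unsigned int resid_len;
	void *end_io_data;		/* on a clone: points at its toy_tio */
};

struct toy_tio {
	struct toy_request *orig;	/* back-pointer to the original request */
};

static void toy_end_request(struct toy_request *clone, int error)
{
	struct toy_tio *tio = clone->end_io_data;
	struct toy_request *rq = tio->orig;

	/* propagate completion status from the clone to the original */
	rq->errors = clone->errors;
	rq->resid_len = clone->resid_len;

	free(tio);			/* stands in for free_rq_clone() */
	printf("%s finished with error=%d\n", rq->name, error);
}

int main(void)
{
	struct toy_request orig = { .name = "original request" };
	struct toy_tio *tio = malloc(sizeof(*tio));
	struct toy_request clone = { .name = "clone" };

	if (!tio)
		return 1;
	tio->orig = &orig;
	clone.end_io_data = tio;

	toy_end_request(&clone, 0);	/* completes both requests */
	return 0;
}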