author		Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2009-12-10 18:52:16 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2009-12-10 18:52:16 -0500
commit		b4324feeae304ae39e631a254d238a7d63be004b (patch)
tree		12495362c9301129912a8d706de0f6cdb7de8ccf /drivers
parent		9f518b27cf682dd5155a4c1679d52cd4b5be82f2 (diff)
dm: use md pending for in flight IO counting
This patch changes the counter for the number of in-flight I/Os from
q->in_flight to md->pending, in preparation for a later patch.  No
functional change.

Request-based dm used q->in_flight to count the number of in-flight
clones, assuming the counter is always incremented for an in-flight
original request and the original:clone relationship is 1:1.

However, this is no longer true for barrier requests.  So use
md->pending to count the number of in-flight clones instead.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
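
The counting scheme itself is simple: a pair of per-direction atomic
counters owned by the mapped_device, bumped when a clone is dispatched
and dropped when it completes, with suspend waiting for the sum to hit
zero.  A minimal userspace sketch of that scheme follows; it is
illustrative only.  The pending[]/in_flight() naming mirrors the patch,
while everything else (the mutex and condvar standing in for the
kernel's waitqueue and smp_mb() pattern) is invented for the sketch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

static atomic_int pending[2];	/* per-direction in-flight counters */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;

static int in_flight(void)	/* cf. md_in_flight() */
{
	return atomic_load(&pending[READ]) + atomic_load(&pending[WRITE]);
}

static void dispatch(int rw)	/* cf. dm_request_fn() */
{
	atomic_fetch_add(&pending[rw], 1);
	/* ... hand the clone to the underlying device ... */
}

static void complete(int rw)	/* cf. rq_completed() */
{
	atomic_fetch_sub(&pending[rw], 1);
	pthread_mutex_lock(&lock);
	if (!in_flight())	/* nudge anyone waiting on suspend */
		pthread_cond_broadcast(&all_done);
	pthread_mutex_unlock(&lock);
}

static void wait_for_completion(void)	/* cf. dm_wait_for_completion() */
{
	pthread_mutex_lock(&lock);
	while (in_flight())
		pthread_cond_wait(&all_done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	dispatch(WRITE);
	complete(WRITE);
	wait_for_completion();	/* returns at once: nothing in flight */
	printf("in flight: %d\n", in_flight());
	return 0;
}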
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm.c	46
1 file changed, 18 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 634b1daab2d4..01d741a0c079 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -727,23 +727,16 @@ static void end_clone_bio(struct bio *clone, int error)
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, int run_queue)
 {
-	int wakeup_waiters = 0;
-	struct request_queue *q = md->queue;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (!queue_in_flight(q))
-		wakeup_waiters = 1;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	atomic_dec(&md->pending[rw]);
 
 	/* nudge anyone waiting on suspend queue */
-	if (wakeup_waiters)
+	if (!md_in_flight(md))
 		wake_up(&md->wait);
 
 	if (run_queue)
-		blk_run_queue(q);
+		blk_run_queue(md->queue);
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
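
For reference, md_in_flight() predates this patch (md->pending was
previously used only by the bio-based path) and simply sums the two
directions; its definition in dm.c at this point reads:

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}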
@@ -774,6 +767,7 @@ static void dm_unprep_request(struct request *rq)
  */
 void dm_requeue_unmapped_request(struct request *clone)
 {
+	int rw = rq_data_dir(clone);
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
@@ -788,7 +782,7 @@ void dm_requeue_unmapped_request(struct request *clone)
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	rq_completed(md, 0);
+	rq_completed(md, rw, 0);
 }
 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
 
@@ -827,6 +821,7 @@ static void start_queue(struct request_queue *q)
  */
 static void dm_end_request(struct request *clone, int error)
 {
+	int rw = rq_data_dir(clone);
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
@@ -848,7 +843,7 @@ static void dm_end_request(struct request *clone, int error)
 
 	blk_end_request_all(rq, error);
 
-	rq_completed(md, 1);
+	rq_completed(md, rw, 1);
 }
 
 /*
@@ -1541,12 +1536,13 @@ static void dm_request_fn(struct request_queue *q)
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_table(md);
 	struct dm_target *ti;
-	struct request *rq;
+	struct request *rq, *clone;
 
 	/*
-	 * For suspend, check blk_queue_stopped() and don't increment
-	 * the number of in-flight I/Os after the queue is stopped
-	 * in dm_suspend().
+	 * For suspend, check blk_queue_stopped() and increment
+	 * ->pending within a single queue_lock not to increment the
+	 * number of in-flight I/Os after the queue is stopped in
+	 * dm_suspend().
 	 */
 	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
@@ -1558,8 +1554,11 @@ static void dm_request_fn(struct request_queue *q)
 			goto plug_and_out;
 
 		blk_start_request(rq);
+		clone = rq->special;
+		atomic_inc(&md->pending[rq_data_dir(clone)]);
+
 		spin_unlock(q->queue_lock);
-		map_request(ti, rq->special, md);
+		map_request(ti, clone, md);
 		spin_lock_irq(q->queue_lock);
 	}
 
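
The increment sits between blk_start_request() and the spin_unlock()
deliberately: dm_suspend() stops the queue under the same queue_lock,
so performing the blk_queue_stopped() check and the ->pending increment
inside one critical section means dm_wait_for_completion() can never
observe a zero count while a request is about to be mapped.
Schematically (a sketch of the ordering only, using names from the
patch; this is not the committed code):

	spin_lock(q->queue_lock);
	if (!blk_queue_stopped(q)) {
		/*
		 * Suspend also takes queue_lock, so it cannot run
		 * between this check and the increment.
		 */
		atomic_inc(&md->pending[rq_data_dir(clone)]);
	}
	spin_unlock(q->queue_lock);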
@@ -2071,8 +2070,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 {
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
-	struct request_queue *q = md->queue;
-	unsigned long flags;
 
 	dm_unplug_all(md->queue);
 
@@ -2082,14 +2079,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 		set_current_state(interruptible);
 
 		smp_mb();
-		if (dm_request_based(md)) {
-			spin_lock_irqsave(q->queue_lock, flags);
-			if (!queue_in_flight(q)) {
-				spin_unlock_irqrestore(q->queue_lock, flags);
-				break;
-			}
-			spin_unlock_irqrestore(q->queue_lock, flags);
-		} else if (!md_in_flight(md))
+		if (!md_in_flight(md))
 			break;
 
 		if (interruptible == TASK_INTERRUPTIBLE &&