aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/scsi_transport_fc.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-06-04 23:40:54 -0400
committerJens Axboe <axboe@kernel.dk>2012-06-25 05:53:48 -0400
commit86072d8112595ea1b6beeb33f578e7c2839e014e (patch)
treead49aefc355232e099c07c4a371b437e5241981f /drivers/scsi/scsi_transport_fc.c
parenta91a5ac6858fbf7477131e1210cb3e897b668e6f (diff)
block: drop custom queue draining used by scsi_transport_{iscsi|fc}
iscsi_remove_host() uses bsg_remove_queue() which implements custom queue draining. fc_bsg_remove() open-codes mostly identical logic. The draining logic isn't correct in that blk_stop_queue() doesn't prevent new requests from being queued - it just stops processing, so nothing prevents new requests from being queued after the logic determines that the queue is drained. blk_cleanup_queue() now implements proper queue draining and these custom draining logics aren't necessary. Drop them and use bsg_unregister_queue() + blk_cleanup_queue() instead. Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Mike Christie <michaelc@cs.wisc.edu> Acked-by: Vivek Goyal <vgoyal@redhat.com> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@emulex.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/scsi/scsi_transport_fc.c')
-rw-r--r--drivers/scsi/scsi_transport_fc.c38
1 file changed, 0 insertions, 38 deletions
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 579760420d53..a9617ad05f33 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -4130,45 +4130,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4130static void 4130static void
4131fc_bsg_remove(struct request_queue *q) 4131fc_bsg_remove(struct request_queue *q)
4132{ 4132{
4133 struct request *req; /* block request */
4134 int counts; /* totals for request_list count and starved */
4135
4136 if (q) { 4133 if (q) {
4137 /* Stop taking in new requests */
4138 spin_lock_irq(q->queue_lock);
4139 blk_stop_queue(q);
4140
4141 /* drain all requests in the queue */
4142 while (1) {
4143 /* need the lock to fetch a request
4144 * this may fetch the same request as the previous pass
4145 */
4146 req = blk_fetch_request(q);
4147 /* save requests in use and starved */
4148 counts = q->rq.count[0] + q->rq.count[1] +
4149 q->rq.starved[0] + q->rq.starved[1];
4150 spin_unlock_irq(q->queue_lock);
4151 /* any requests still outstanding? */
4152 if (counts == 0)
4153 break;
4154
4155 /* This may be the same req as the previous iteration,
4156 * always send the blk_end_request_all after a prefetch.
4157 * It is not okay to not end the request because the
4158 * prefetch started the request.
4159 */
4160 if (req) {
4161 /* return -ENXIO to indicate that this queue is
4162 * going away
4163 */
4164 req->errors = -ENXIO;
4165 blk_end_request_all(req, -ENXIO);
4166 }
4167
4168 msleep(200); /* allow bsg to possibly finish */
4169 spin_lock_irq(q->queue_lock);
4170 }
4171
4172 bsg_unregister_queue(q); 4134 bsg_unregister_queue(q);
4173 blk_cleanup_queue(q); 4135 blk_cleanup_queue(q);
4174 } 4136 }