aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/scsi_transport_fc.c
diff options
context:
space:
mode:
authorJames Smart <james.smart@emulex.com>2010-08-31 22:27:31 -0400
committerJames Bottomley <James.Bottomley@suse.de>2010-09-09 16:37:40 -0400
commit78d16341facf829a71b6f7c68ec5511b9c168060 (patch)
treeea63d438409b6b955f8514268bc368bf5a99c4d3 /drivers/scsi/scsi_transport_fc.c
parent91978465b1e5f89025cd43cd2102943160ec6dee (diff)
[SCSI] scsi_transport_fc: fix blocked bsg request when fc object deleted
When an rport is "blocked" and a bsg request is received, the bsg request gets placed on the queue but the queue stalls. If the fc object is then deleted - the bsg queue never restarts and keeps the reference on the object, and stops the overall teardown. This patch restarts the bsg queue on teardown and drains any pending requests, allowing the teardown to succeed. Signed-off-by: Carl Lajeunesse <carl.lajeunesse@emulex.com> Signed-off-by: James Smart <james.smart@emulex.com> Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/scsi_transport_fc.c')
-rw-r--r--drivers/scsi/scsi_transport_fc.c43
1 files changed, 43 insertions, 0 deletions
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 9f0f7d9c7422..78486d540652 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -4048,11 +4048,54 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4048 /**
4049  * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
4050  * @q: the request_queue that is to be torn down.
4051 *
4052 * Notes:
4053 * Before unregistering the queue empty any requests that are blocked
4054 *
4055 *
4056  */
4057 static void
4058 fc_bsg_remove(struct request_queue *q)
4059 {
4060 struct request *req; /* block request */
4061 int counts; /* totals for request_list count and starved */
4062
4063 	if (q) {
4064 /* Stop taking in new requests */
4065 spin_lock_irq(q->queue_lock);
4066 blk_stop_queue(q);
4067
4068 /* drain all requests in the queue */
4069 while (1) {
4070 /* need the lock to fetch a request
4071 * this may fetch the same request as the previous pass
4072 */
4073 req = blk_fetch_request(q);
4074 /* save requests in use and starved */
4075 counts = q->rq.count[0] + q->rq.count[1] +
4076 q->rq.starved[0] + q->rq.starved[1];
4077 spin_unlock_irq(q->queue_lock);
4078 /* any requests still outstanding? */
4079 if (counts == 0)
4080 break;
4081
4082 /* This may be the same req as the previous iteration,
4083 * always send the blk_end_request_all after a prefetch.
4084 * It is not okay to not end the request because the
4085 * prefetch started the request.
4086 */
4087 if (req) {
4088 /* return -ENXIO to indicate that this queue is
4089 * going away
4090 */
4091 req->errors = -ENXIO;
4092 blk_end_request_all(req, -ENXIO);
4093 }
4094
4095 msleep(200); /* allow bsg to possibly finish */
4096 spin_lock_irq(q->queue_lock);
4097 }
4098
4099 		bsg_unregister_queue(q);
4100 		blk_cleanup_queue(q);
4101 	}