about summary refs log tree commit diff stats
path: root/drivers/scsi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/scsi_lib.c           44
-rw-r--r--  drivers/scsi/scsi_transport_fc.c   2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c  6
3 files changed, 21 insertions, 31 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2d63c8ad1442..6d5c7ff43f5b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
67 67
68struct kmem_cache *scsi_sdb_cache; 68struct kmem_cache *scsi_sdb_cache;
69 69
70/*
71 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
72 * not change behaviour from the previous unplug mechanism, experimentation
73 * may prove this needs changing.
74 */
75#define SCSI_QUEUE_DELAY 3
76
70static void scsi_run_queue(struct request_queue *q); 77static void scsi_run_queue(struct request_queue *q);
71 78
72/* 79/*
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
149 /* 156 /*
150 * Requeue this command. It will go before all other commands 157 * Requeue this command. It will go before all other commands
151 * that are already in the queue. 158 * that are already in the queue.
152 * 159 */
153 * NOTE: there is magic here about the way the queue is plugged if
154 * we have no outstanding commands.
155 *
156 * Although we *don't* plug the queue, we call the request
157 * function. The SCSI request function detects the blocked condition
158 * and plugs the queue appropriately.
159 */
160 spin_lock_irqsave(q->queue_lock, flags); 160 spin_lock_irqsave(q->queue_lock, flags);
161 blk_requeue_request(q, cmd->request); 161 blk_requeue_request(q, cmd->request);
162 spin_unlock_irqrestore(q->queue_lock, flags); 162 spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1226,11 +1226,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1226 case BLKPREP_DEFER: 1226 case BLKPREP_DEFER:
1227 /* 1227 /*
1228 * If we defer, the blk_peek_request() returns NULL, but the 1228 * If we defer, the blk_peek_request() returns NULL, but the
1229 * queue must be restarted, so we plug here if no returning 1229 * queue must be restarted, so we schedule a callback to happen
1230 * command will automatically do that. 1230 * shortly.
1231 */ 1231 */
1232 if (sdev->device_busy == 0) 1232 if (sdev->device_busy == 0)
1233 blk_plug_device(q); 1233 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1234 break; 1234 break;
1235 default: 1235 default:
1236 req->cmd_flags |= REQ_DONTPREP; 1236 req->cmd_flags |= REQ_DONTPREP;
@@ -1269,7 +1269,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
1269 sdev_printk(KERN_INFO, sdev, 1269 sdev_printk(KERN_INFO, sdev,
1270 "unblocking device at zero depth\n")); 1270 "unblocking device at zero depth\n"));
1271 } else { 1271 } else {
1272 blk_plug_device(q); 1272 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1273 return 0; 1273 return 0;
1274 } 1274 }
1275 } 1275 }
@@ -1499,7 +1499,7 @@ static void scsi_request_fn(struct request_queue *q)
1499 * the host is no longer able to accept any more requests. 1499 * the host is no longer able to accept any more requests.
1500 */ 1500 */
1501 shost = sdev->host; 1501 shost = sdev->host;
1502 while (!blk_queue_plugged(q)) { 1502 for (;;) {
1503 int rtn; 1503 int rtn;
1504 /* 1504 /*
1505 * get next queueable request. We do this early to make sure 1505 * get next queueable request. We do this early to make sure
@@ -1578,15 +1578,8 @@ static void scsi_request_fn(struct request_queue *q)
1578 */ 1578 */
1579 rtn = scsi_dispatch_cmd(cmd); 1579 rtn = scsi_dispatch_cmd(cmd);
1580 spin_lock_irq(q->queue_lock); 1580 spin_lock_irq(q->queue_lock);
1581 if(rtn) { 1581 if (rtn)
1582 /* we're refusing the command; because of 1582 goto out_delay;
1583 * the way locks get dropped, we need to
1584 * check here if plugging is required */
1585 if(sdev->device_busy == 0)
1586 blk_plug_device(q);
1587
1588 break;
1589 }
1590 } 1583 }
1591 1584
1592 goto out; 1585 goto out;
@@ -1605,9 +1598,10 @@ static void scsi_request_fn(struct request_queue *q)
1605 spin_lock_irq(q->queue_lock); 1598 spin_lock_irq(q->queue_lock);
1606 blk_requeue_request(q, req); 1599 blk_requeue_request(q, req);
1607 sdev->device_busy--; 1600 sdev->device_busy--;
1608 if(sdev->device_busy == 0) 1601out_delay:
1609 blk_plug_device(q); 1602 if (sdev->device_busy == 0)
1610 out: 1603 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1604out:
1611 /* must be careful here...if we trigger the ->remove() function 1605 /* must be careful here...if we trigger the ->remove() function
1612 * we cannot be holding the q lock */ 1606 * we cannot be holding the q lock */
1613 spin_unlock_irq(q->queue_lock); 1607 spin_unlock_irq(q->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 5c3ccfc6b622..2941d2d92c94 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3913 if (!get_device(dev)) 3913 if (!get_device(dev))
3914 return; 3914 return;
3915 3915
3916 while (!blk_queue_plugged(q)) { 3916 while (1) {
3917 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && 3917 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
3918 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) 3918 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3919 break; 3919 break;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 927e99cb7225..c6fcf76cade5 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
173 int ret; 173 int ret;
174 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 174 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
175 175
176 while (!blk_queue_plugged(q)) { 176 while ((req = blk_fetch_request(q)) != NULL) {
177 req = blk_fetch_request(q);
178 if (!req)
179 break;
180
181 spin_unlock_irq(q->queue_lock); 177 spin_unlock_irq(q->queue_lock);
182 178
183 handler = to_sas_internal(shost->transportt)->f->smp_handler; 179 handler = to_sas_internal(shost->transportt)->f->smp_handler;