author    Jens Axboe <jaxboe@fusionio.com>  2010-04-16 15:13:15 -0400
committer Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:45:54 -0500
commit    a488e74976bf0a9bccecdd094378394942dacef1
tree      94b69a2dd1e7126b83e311dc7c85def32ecde6dd /drivers
parent    0a41e90bb7c931fd53d73ea770f5b251af6c91ce
scsi: convert to blk_delay_queue()

It was always an abuse to reuse the plugging infrastructure for this;
convert it to the (new) real API for delaying queueing a bit. A default
delay of 3 msec is defined, to match the previous behaviour.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'drivers')
 drivers/scsi/scsi_lib.c | 44 +++++++++++++++++++-------------------------
 1 file changed, 19 insertions(+), 25 deletions(-)
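For context before reading the diff: blk_delay_queue() is, in essence, a one-shot delayed re-run of the queue's request_fn. Below is a minimal sketch of how such a helper can be built on a delayed work item. It is an illustration of the mechanism, not the verbatim block-layer source; it assumes struct request_queue carries a delay_work member, as it did once this API landed.

	#include <linux/blkdev.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>
	#include <linux/kernel.h>

	/* Runs once the requested delay expires: re-invoke the driver's
	 * request_fn, which must be called with the queue lock held. */
	static void blk_delay_work(struct work_struct *work)
	{
		struct request_queue *q =
			container_of(work, struct request_queue, delay_work.work);

		spin_lock_irq(q->queue_lock);
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
	}

	/* Sketch: arrange for the queue to be re-run 'msecs' from now. */
	void blk_delay_queue(struct request_queue *q, unsigned long msecs)
	{
		schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
	}

With SCSI_QUEUE_DELAY defined as 3, every call site in the diff below therefore means "try this queue again in roughly 3 ms", matching the cadence the old plug/unplug path provided.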
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd25..5a0ae7a944ce 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 
 struct kmem_cache *scsi_sdb_cache;
 
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY	3
+
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 	/*
 	 * Requeue this command.  It will go before all other commands
 	 * that are already in the queue.
-	 *
-	 * NOTE: there is magic here about the way the queue is plugged if
-	 * we have no outstanding commands.
-	 *
-	 * Although we *don't* plug the queue, we call the request
-	 * function.  The SCSI request function detects the blocked condition
-	 * and plugs the queue appropriately.
 	 */
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, cmd->request);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1194,11 +1194,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 	case BLKPREP_DEFER:
 		/*
 		 * If we defer, the blk_peek_request() returns NULL, but the
-		 * queue must be restarted, so we plug here if no returning
-		 * command will automatically do that.
+		 * queue must be restarted, so we schedule a callback to happen
+		 * shortly.
 		 */
 		if (sdev->device_busy == 0)
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
 		req->cmd_flags |= REQ_DONTPREP;
@@ -1237,7 +1237,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 			sdev_printk(KERN_INFO, sdev,
 				    "unblocking device at zero depth\n"));
 		} else {
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 			return 0;
 		}
 	}
@@ -1467,7 +1467,7 @@ static void scsi_request_fn(struct request_queue *q)
 	 * the host is no longer able to accept any more requests.
 	 */
 	shost = sdev->host;
-	while (!blk_queue_plugged(q)) {
+	for (;;) {
 		int rtn;
 		/*
 		 * get next queueable request.  We do this early to make sure
@@ -1546,15 +1546,8 @@ static void scsi_request_fn(struct request_queue *q)
 		 */
 		rtn = scsi_dispatch_cmd(cmd);
 		spin_lock_irq(q->queue_lock);
-		if(rtn) {
-			/* we're refusing the command; because of
-			 * the way locks get dropped, we need to
-			 * check here if plugging is required */
-			if(sdev->device_busy == 0)
-				blk_plug_device(q);
-
-			break;
-		}
+		if (rtn)
+			goto out_delay;
 	}
 
 	goto out;
@@ -1573,9 +1566,10 @@ static void scsi_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
 	sdev->device_busy--;
-	if(sdev->device_busy == 0)
-		blk_plug_device(q);
- out:
+out_delay:
+	if (sdev->device_busy == 0)
+		blk_delay_queue(q, SCSI_QUEUE_DELAY);
+out:
 	/* must be careful here...if we trigger the ->remove() function
 	 * we cannot be holding the q lock */
 	spin_unlock_irq(q->queue_lock);