author     Christoph Hellwig <hch@lst.de>    2014-04-11 13:07:01 -0400
committer  Christoph Hellwig <hch@lst.de>    2014-07-25 07:43:45 -0400
commit     71e75c97f97a9645d25fbf3d8e4165a558f18747 (patch)
tree       fb85185386af55199c46499dc3ce366d227870e1
parent     74665016086615bbaa3fa6f83af410a0a4e029ee (diff)
scsi: convert device_busy to atomic_t
Avoid taking the queue_lock to check the per-device queue limit.  Instead
we do an atomic_inc_return early on to grab our slot in the queue, and if
necessary decrement it after finishing all checks.

Unlike the host and target busy counters this doesn't allow us to avoid the
queue_lock in the request_fn due to the way the interface works, but it'll
allow us to prepare for using the blk-mq code, which doesn't use the
queue_lock at all, and it at least avoids a queue_lock round trip in
scsi_device_unbusy, which is still important given how busy the queue_lock
is.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
-rw-r--r--  drivers/message/fusion/mptsas.c  |  2
-rw-r--r--  drivers/scsi/scsi_lib.c          | 50
-rw-r--r--  drivers/scsi/scsi_sysfs.c        | 10
-rw-r--r--  drivers/scsi/sg.c                |  2
-rw-r--r--  include/scsi/scsi_device.h       |  4
5 files changed, 40 insertions, 28 deletions
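
Before the per-file diffs, a minimal user-space sketch of the pattern the commit message describes: increment the busy counter first to claim a slot, then back the increment out if any of the subsequent checks fail. This is illustrative only; the struct and function names (fake_device, fake_dev_queue_ready, fake_dev_unbusy) are made up, it uses C11 <stdatomic.h> rather than the kernel's atomic_t API (atomic_inc_return/atomic_dec), and it reduces device_blocked to a plain flag instead of the kernel's countdown.

    /* Sketch of "claim a slot first, back out on failure" -- not kernel code. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_device {
            atomic_uint device_busy;   /* commands currently outstanding */
            unsigned int queue_depth;  /* per-device limit */
            bool blocked;              /* simplified stand-in for device_blocked */
    };

    /* Try to reserve a queue slot; returns true if the caller may issue. */
    static bool fake_dev_queue_ready(struct fake_device *dev)
    {
            /* Optimistically take a slot; "busy" is the count before us. */
            unsigned int busy = atomic_fetch_add(&dev->device_busy, 1);

            if (dev->blocked || busy >= dev->queue_depth) {
                    /* A check failed: give the slot back, as the patch does. */
                    atomic_fetch_sub(&dev->device_busy, 1);
                    return false;
            }
            return true;
    }

    /* Completion path: no lock needed, mirroring scsi_device_unbusy(). */
    static void fake_dev_unbusy(struct fake_device *dev)
    {
            atomic_fetch_sub(&dev->device_busy, 1);
    }

    int main(void)
    {
            struct fake_device dev = { .device_busy = 0, .queue_depth = 2 };

            printf("%d\n", fake_dev_queue_ready(&dev)); /* 1: slot 0 taken */
            printf("%d\n", fake_dev_queue_ready(&dev)); /* 1: slot 1 taken */
            printf("%d\n", fake_dev_queue_ready(&dev)); /* 0: depth reached */
            fake_dev_unbusy(&dev);
            printf("%d\n", fake_dev_queue_ready(&dev)); /* 1: slot free again */
            return 0;
    }

The value returned by the fetch-and-add is the count before this caller, so busy == 0 also tells the caller it was alone in the queue, which is what the real scsi_dev_queue_ready() below relies on for its device_blocked handling.
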
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 711fcb5cec87..d636dbe172a3 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -3763,7 +3763,7 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
 			printk(MYIOC_s_DEBUG_FMT
 			    "SDEV OUTSTANDING CMDS"
 			    "%d\n", ioc->name,
-			    sdev->device_busy));
+			    atomic_read(&sdev->device_busy)));
 		}
 
 	}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d0bd7e0ab7a8..1ddf0fb43b59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -302,9 +302,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
 		spin_unlock_irqrestore(shost->host_lock, flags);
 	}
 
-	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
-	sdev->device_busy--;
-	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+	atomic_dec(&sdev->device_busy);
 }
 
 /*
@@ -355,9 +353,9 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 
 static inline int scsi_device_is_busy(struct scsi_device *sdev)
 {
-	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
+	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth ||
+	    sdev->device_blocked)
 		return 1;
-
 	return 0;
 }
 
@@ -1204,7 +1202,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 		 * queue must be restarted, so we schedule a callback to happen
 		 * shortly.
 		 */
-		if (sdev->device_busy == 0)
+		if (atomic_read(&sdev->device_busy) == 0)
 			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
@@ -1255,26 +1253,33 @@ static void scsi_unprep_fn(struct request_queue *q, struct request *req)
 static inline int scsi_dev_queue_ready(struct request_queue *q,
 				  struct scsi_device *sdev)
 {
-	if (sdev->device_busy == 0 && sdev->device_blocked) {
+	unsigned int busy;
+
+	busy = atomic_inc_return(&sdev->device_busy) - 1;
+	if (sdev->device_blocked) {
+		if (busy)
+			goto out_dec;
+
 		/*
 		 * unblock after device_blocked iterates to zero
 		 */
-		if (--sdev->device_blocked == 0) {
-			SCSI_LOG_MLQUEUE(3,
-				   sdev_printk(KERN_INFO, sdev,
-				   "unblocking device at zero depth\n"));
-		} else {
+		if (--sdev->device_blocked != 0) {
 			blk_delay_queue(q, SCSI_QUEUE_DELAY);
-			return 0;
+			goto out_dec;
 		}
+		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
+				   "unblocking device at zero depth\n"));
 	}
-	if (scsi_device_is_busy(sdev))
-		return 0;
+
+	if (busy >= sdev->queue_depth)
+		goto out_dec;
 
 	return 1;
+out_dec:
+	atomic_dec(&sdev->device_busy);
+	return 0;
 }
 
-
 /*
  * scsi_target_queue_ready: checks if there we can send commands to target
  * @sdev: scsi device on starget to check.
@@ -1448,7 +1453,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	 * bump busy counts. To bump the counters, we need to dance
 	 * with the locks as normal issue path does.
 	 */
-	sdev->device_busy++;
+	atomic_inc(&sdev->device_busy);
 	atomic_inc(&shost->host_busy);
 	atomic_inc(&starget->target_busy);
 
@@ -1544,7 +1549,7 @@ static void scsi_request_fn(struct request_queue *q)
 		 * accept it.
 		 */
 		req = blk_peek_request(q);
-		if (!req || !scsi_dev_queue_ready(q, sdev))
+		if (!req)
 			break;
 
 		if (unlikely(!scsi_device_online(sdev))) {
@@ -1554,13 +1559,14 @@ static void scsi_request_fn(struct request_queue *q)
 			continue;
 		}
 
+		if (!scsi_dev_queue_ready(q, sdev))
+			break;
 
 		/*
 		 * Remove the request from the request list.
 		 */
 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
 			blk_start_request(req);
-		sdev->device_busy++;
 
 		spin_unlock_irq(q->queue_lock);
 		cmd = req->special;
@@ -1630,9 +1636,9 @@ static void scsi_request_fn(struct request_queue *q)
 	 */
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
-	sdev->device_busy--;
+	atomic_dec(&sdev->device_busy);
 out_delay:
-	if (sdev->device_busy == 0 && !scsi_device_blocked(sdev))
+	if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
 		blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
@@ -2371,7 +2377,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
 		return err;
 
 	scsi_run_queue(sdev->request_queue);
-	while (sdev->device_busy) {
+	while (atomic_read(&sdev->device_busy)) {
 		msleep_interruptible(200);
 		scsi_run_queue(sdev->request_queue);
 	}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index de57b8bca7be..79df9847edef 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -585,13 +585,21 @@ static int scsi_sdev_check_buf_bit(const char *buf)
  * Create the actual show/store functions and data structures.
  */
 sdev_rd_attr (device_blocked, "%d\n");
-sdev_rd_attr (device_busy, "%d\n");
 sdev_rd_attr (type, "%d\n");
 sdev_rd_attr (scsi_level, "%d\n");
 sdev_rd_attr (vendor, "%.8s\n");
 sdev_rd_attr (model, "%.16s\n");
 sdev_rd_attr (rev, "%.4s\n");
 
+static ssize_t
+sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+}
+static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
+
 /*
  * TODO: can we make these symlinks to the block layer ones?
  */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 7a291f5c7227..01cf88888797 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2574,7 +2574,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
 			      scsidp->id, scsidp->lun, (int) scsidp->type,
 			      1,
 			      (int) scsidp->queue_depth,
-			      (int) scsidp->device_busy,
+			      (int) atomic_read(&scsidp->device_busy),
 			      (int) scsi_device_online(scsidp));
 		}
 	read_unlock_irqrestore(&sg_index_lock, iflags);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 4e078b63a9e5..3329901c7243 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -81,9 +81,7 @@ struct scsi_device {
 	struct list_head siblings;   /* list of all devices on this host */
 	struct list_head same_target_siblings; /* just the devices sharing same target id */
 
-	/* this is now protected by the request_queue->queue_lock */
-	unsigned int device_busy;	/* commands actually active on
-					 * low-level. protected by queue_lock. */
+	atomic_t device_busy;		/* commands actually active on LLDD */
 	spinlock_t list_lock;
 	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
 	struct list_head starved_entry;