author	Mike Christie <michaelc@cs.wisc.edu>	2008-08-17 16:24:38 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-10-13 09:28:46 -0400
commit	f0c0a376d0fcd4c5579ecf5e95f88387cba85211 (patch)
tree	16b97ab71a22106cb1e5c1a177ab6c8103fe5a48
parent	4480f15b3306f43bbb0310d461142b4e897ca45b (diff)
[SCSI] Add helper code so transport classes/driver can control queueing (v3)
SCSI-ml manages the queueing limits for the device and host, but does not do so at the target level. However, something similar can come in useful when a driver is transitioning a transport object to the blocked state, because at that time we do not want to queue I/O and we do not want queuecommand to be called again.

The patch adds code similar to the existing SCSI_ML_*BUSY handlers. You can now return SCSI_MLQUEUE_TARGET_BUSY when we hit a transport-level queueing issue: the hw cannot allocate some resource at the iscsi session/connection level, the target has temporarily closed or shrunk the queueing window, or we are transitioning to the blocked state.

bnx2i, when they rework their firmware according to netdev developers' requests, will also need to be able to limit queueing at this level. bnx2i will hook into libiscsi, but will allocate a scsi host per netdevice/hba, so unlike pure software iscsi/iser, which allocates a host per session, it cannot set scsi_host->can_queue and return SCSI_MLQUEUE_HOST_BUSY to reflect queueing limits on the transport.

The iscsi class/driver can also set a scsi_target->can_queue value which reflects the max commands the driver/class can support. For iscsi this reflects the number of commands we can support for each session due to session/connection hw limits, driver limits, and also the session/target's queueing window.

Changes:
v1 - initial patch.
v2 - Fix scsi_run_queue handling of multiple blocked targets. Previously we would break from the main loop if a device was added back on the starved list. We now run over the list and check if any target is blocked.
v3 - Rediff for scsi-misc.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
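For illustration only, below is a minimal sketch of how an LLD might use the new target-level limit and return value. It is not part of this patch: the function names (example_slave_alloc, example_queuecommand), the fixed can_queue value, and the transport_window_full() check are hypothetical placeholders; only scsi_target->can_queue, SCSI_MLQUEUE_TARGET_BUSY, and the slave_alloc callout come from the patch itself. The queuecommand signature shown is the one used by drivers of this era.

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Hypothetical transport-specific check; stands in for e.g. an iscsi
 * session/connection resource or queueing-window test. */
static bool transport_window_full(struct scsi_device *sdev)
{
	return false;
}

/* Per the new scsi_target comment, an LLD would set starget->can_queue
 * from its slave_alloc callout; 0 means no target-level limit. */
static int example_slave_alloc(struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	/* hypothetical per-session/per-target queueing window */
	starget->can_queue = 32;
	return 0;
}

/* When a target/transport-level resource is exhausted, ask scsi-ml to
 * requeue and temporarily block only this target, not the whole host. */
static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	if (transport_window_full(cmd->device))
		return SCSI_MLQUEUE_TARGET_BUSY;

	/* ... hand the command to the hardware/transport here ... */
	done(cmd);
	return 0;
}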
-rw-r--r--	drivers/scsi/scsi.c	10
-rw-r--r--	drivers/scsi/scsi_lib.c	104
-rw-r--r--	drivers/scsi/scsi_scan.c	1
-rw-r--r--	include/scsi/scsi.h	1
-rw-r--r--	include/scsi/scsi_device.h	10
5 files changed, 108 insertions, 18 deletions
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ac3cb2b9081..f8b79d401d58 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -754,8 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	}
 	spin_unlock_irqrestore(host->host_lock, flags);
 	if (rtn) {
-		scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
-				rtn : SCSI_MLQUEUE_HOST_BUSY);
+		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
+		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
+			rtn = SCSI_MLQUEUE_HOST_BUSY;
+
+		scsi_queue_insert(cmd, rtn);
+
 		SCSI_LOG_MLQUEUE(3,
 			   printk("queuecommand : request rejected\n"));
 	}
@@ -800,6 +804,7 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 void scsi_finish_command(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
+	struct scsi_target *starget = scsi_target(sdev);
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_driver *drv;
 	unsigned int good_bytes;
@@ -815,6 +820,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 	 * XXX(hch): What about locking?
 	 */
 	shost->host_blocked = 0;
+	starget->target_blocked = 0;
 	sdev->device_blocked = 0;
 
 	/*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 98ee55ced592..91c74c55aa5e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
+	struct scsi_target *starget = scsi_target(device);
 	struct request_queue *q = device->request_queue;
 	unsigned long flags;
 
@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	 * if a command is requeued with no other commands outstanding
 	 * either for the device or for the host.
 	 */
-	if (reason == SCSI_MLQUEUE_HOST_BUSY)
+	switch (reason) {
+	case SCSI_MLQUEUE_HOST_BUSY:
 		host->host_blocked = host->max_host_blocked;
-	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
+		break;
+	case SCSI_MLQUEUE_DEVICE_BUSY:
 		device->device_blocked = device->max_device_blocked;
+		break;
+	case SCSI_MLQUEUE_TARGET_BUSY:
+		starget->target_blocked = starget->max_target_blocked;
+		break;
+	}
 
 	/*
 	 * Decrement the counters, since these commands are no longer
@@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 void scsi_device_unbusy(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
+	struct scsi_target *starget = scsi_target(sdev);
 	unsigned long flags;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
+	starget->target_busy--;
 	if (unlikely(scsi_host_in_recovery(shost) &&
 		     (shost->host_failed || shost->host_eh_scheduled)))
 		scsi_eh_wakeup(shost);
@@ -519,6 +529,13 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
+static inline int scsi_target_is_busy(struct scsi_target *starget)
+{
+	return ((starget->can_queue > 0 &&
+		 starget->target_busy >= starget->can_queue) ||
+		 starget->target_blocked);
+}
+
 /*
  * Function: scsi_run_queue()
  *
@@ -533,7 +550,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
  */
 static void scsi_run_queue(struct request_queue *q)
 {
-	struct scsi_device *sdev = q->queuedata;
+	struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;
 
@@ -560,6 +577,21 @@ static void scsi_run_queue(struct request_queue *q)
 		 */
 		sdev = list_entry(shost->starved_list.next,
 				  struct scsi_device, starved_entry);
+		/*
+		 * The *queue_ready functions can add a device back onto the
+		 * starved list's tail, so we must check for a infinite loop.
+		 */
+		if (sdev == starved_head)
+			break;
+		if (!starved_head)
+			starved_head = sdev;
+
+		if (scsi_target_is_busy(scsi_target(sdev))) {
+			list_move_tail(&sdev->starved_entry,
+				       &shost->starved_list);
+			continue;
+		}
+
 		list_del_init(&sdev->starved_entry);
 		spin_unlock(shost->host_lock);
 
@@ -575,13 +607,6 @@ static void scsi_run_queue(struct request_queue *q)
 		spin_unlock(sdev->request_queue->queue_lock);
 
 		spin_lock(shost->host_lock);
-		if (unlikely(!list_empty(&sdev->starved_entry)))
-			/*
-			 * sdev lost a race, and was put back on the
-			 * starved list. This is unlikely but without this
-			 * in theory we could loop forever.
-			 */
-			break;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
@@ -1344,6 +1369,52 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 	return 1;
 }
 
+
+/*
+ * scsi_target_queue_ready: checks if there we can send commands to target
+ * @sdev: scsi device on starget to check.
+ *
+ * Called with the host lock held.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+					   struct scsi_device *sdev)
+{
+	struct scsi_target *starget = scsi_target(sdev);
+
+	if (starget->single_lun) {
+		if (starget->starget_sdev_user &&
+		    starget->starget_sdev_user != sdev)
+			return 0;
+		starget->starget_sdev_user = sdev;
+	}
+
+	if (starget->target_busy == 0 && starget->target_blocked) {
+		/*
+		 * unblock after target_blocked iterates to zero
+		 */
+		if (--starget->target_blocked == 0) {
+			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+					 "unblocking target at zero depth\n"));
+		} else {
+			blk_plug_device(sdev->request_queue);
+			return 0;
+		}
+	}
+
+	if (scsi_target_is_busy(starget)) {
+		if (list_empty(&sdev->starved_entry)) {
+			list_add_tail(&sdev->starved_entry,
+				      &shost->starved_list);
+			return 0;
+		}
+	}
+
+	/* We're OK to process the command, so we can't be starved */
+	if (!list_empty(&sdev->starved_entry))
+		list_del_init(&sdev->starved_entry);
+	return 1;
+}
+
 /*
  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
  * return 0. We must end up running the queue again whenever 0 is
@@ -1390,6 +1461,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
 	struct scsi_cmnd *cmd = req->special;
 	struct scsi_device *sdev = cmd->device;
+	struct scsi_target *starget = scsi_target(sdev);
 	struct Scsi_Host *shost = sdev->host;
 
 	blkdev_dequeue_request(req);
@@ -1413,6 +1485,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	spin_unlock(sdev->request_queue->queue_lock);
 	spin_lock(shost->host_lock);
 	shost->host_busy++;
+	starget->target_busy++;
 	spin_unlock(shost->host_lock);
 	spin_lock(sdev->request_queue->queue_lock);
 
@@ -1550,14 +1623,13 @@ static void scsi_request_fn(struct request_queue *q)
 			goto not_ready;
 		}
 
+		if (!scsi_target_queue_ready(shost, sdev))
+			goto not_ready;
+
 		if (!scsi_host_queue_ready(q, shost, sdev))
 			goto not_ready;
-		if (scsi_target(sdev)->single_lun) {
-			if (scsi_target(sdev)->starget_sdev_user &&
-			    scsi_target(sdev)->starget_sdev_user != sdev)
-				goto not_ready;
-			scsi_target(sdev)->starget_sdev_user = sdev;
-		}
+
+		scsi_target(sdev)->target_busy++;
 		shost->host_busy++;
 
 		/*
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 334862e26a1b..b14dc02c3ded 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -419,6 +419,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 	dev->type = &scsi_target_type;
 	starget->id = id;
 	starget->channel = channel;
+	starget->can_queue = 0;
 	INIT_LIST_HEAD(&starget->siblings);
 	INIT_LIST_HEAD(&starget->devices);
 	starget->state = STARGET_CREATED;
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 192f8716aa9e..3a5662b2817e 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -426,6 +426,7 @@ static inline int scsi_is_wlun(unsigned int lun)
 #define SCSI_MLQUEUE_HOST_BUSY   0x1055
 #define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
 #define SCSI_MLQUEUE_EH_RETRY    0x1057
+#define SCSI_MLQUEUE_TARGET_BUSY 0x1058
 
 /*
  * Use these to separate status msg and our bytes
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b49e725be039..a37a8148a310 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -238,6 +238,16 @@ struct scsi_target {
 					 * for the device at a time. */
 	unsigned int		pdt_1f_for_no_lun;	/* PDT = 0x1f */
 						/* means no lun present */
+	/* commands actually active on LLD. protected by host lock. */
+	unsigned int		target_busy;
+	/*
+	 * LLDs should set this in the slave_alloc host template callout.
+	 * If set to zero then there is not limit.
+	 */
+	unsigned int		can_queue;
+	unsigned int		target_blocked;
+	unsigned int		max_target_blocked;
+#define SCSI_DEFAULT_TARGET_BLOCKED	3
 
 	char			scsi_level;
 	struct execute_work	ew;