Diffstat (limited to 'drivers/scsi/scsi_lib.c')

 -rw-r--r--  drivers/scsi/scsi_lib.c | 104 +++++++++++++++++++++++++-------
 1 file changed, 88 insertions(+), 16 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 98ee55ced592..91c74c55aa5e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
+	struct scsi_target *starget = scsi_target(device);
 	struct request_queue *q = device->request_queue;
 	unsigned long flags;
 
@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	 * if a command is requeued with no other commands outstanding
 	 * either for the device or for the host.
 	 */
-	if (reason == SCSI_MLQUEUE_HOST_BUSY)
+	switch (reason) {
+	case SCSI_MLQUEUE_HOST_BUSY:
 		host->host_blocked = host->max_host_blocked;
-	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
+		break;
+	case SCSI_MLQUEUE_DEVICE_BUSY:
 		device->device_blocked = device->max_device_blocked;
+		break;
+	case SCSI_MLQUEUE_TARGET_BUSY:
+		starget->target_blocked = starget->max_target_blocked;
+		break;
+	}
 
 	/*
 	 * Decrement the counters, since these commands are no longer
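With the new SCSI_MLQUEUE_TARGET_BUSY case, a low-level driver or transport class can report that a single target behind the host is saturated, instead of blocking the whole host or just one LUN. A hedged sketch of how a queuecommand handler might use it (the foo_* names are hypothetical and driver-private, not part of this patch or the kernel API):

	/*
	 * Hypothetical LLD queuecommand: foo_target_full() is an assumed
	 * driver-private congestion check. Returning the new value makes
	 * scsi_queue_insert() set starget->target_blocked as added above,
	 * holding off further commands to this target only.
	 */
	static int foo_queuecommand(struct scsi_cmnd *cmd,
				    void (*done)(struct scsi_cmnd *))
	{
		if (foo_target_full(scsi_target(cmd->device)))
			return SCSI_MLQUEUE_TARGET_BUSY;
		/* ... otherwise hand the command to the hardware ... */
		return 0;
	}

Commands to other targets on the same host keep flowing while this one backs off.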
@@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 void scsi_device_unbusy(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
+	struct scsi_target *starget = scsi_target(sdev);
 	unsigned long flags;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
+	starget->target_busy--;
 	if (unlikely(scsi_host_in_recovery(shost) &&
 		     (shost->host_failed || shost->host_eh_scheduled)))
 		scsi_eh_wakeup(shost);
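target_busy now tracks outstanding commands per target exactly as host_busy does per host: incremented at dispatch time (scsi_request_fn and scsi_kill_request below) and decremented here as each command completes.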
@@ -519,6 +529,13 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
+static inline int scsi_target_is_busy(struct scsi_target *starget)
+{
+	return ((starget->can_queue > 0 &&
+		 starget->target_busy >= starget->can_queue) ||
+		 starget->target_blocked);
+}
+
 /*
  * Function:	scsi_run_queue()
  *
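scsi_target_is_busy() is the single predicate both the queue-ready check and the starved-list walk below rely on: a target counts as busy when a positive can_queue limit is exhausted, or while the target_blocked countdown is still running; can_queue <= 0 means no per-target limit at all. A minimal userspace model of just this logic (mock struct, standard C, nothing kernel-specific assumed):

	#include <assert.h>

	/* Mirrors only the three fields the predicate reads. */
	struct mock_target {
		int can_queue;      /* <= 0: no per-target limit      */
		int target_busy;    /* commands currently outstanding */
		int target_blocked; /* requeue back-off countdown     */
	};

	static int target_is_busy(const struct mock_target *t)
	{
		return (t->can_queue > 0 && t->target_busy >= t->can_queue) ||
		       t->target_blocked;
	}

	int main(void)
	{
		struct mock_target t = { .can_queue = 2, .target_busy = 2 };

		assert(target_is_busy(&t));   /* at the queue-depth limit */
		t.target_busy = 1;
		assert(!target_is_busy(&t));  /* below the limit          */
		t.can_queue = 0;
		t.target_busy = 100;
		assert(!target_is_busy(&t));  /* no limit configured      */
		t.target_blocked = 3;
		assert(target_is_busy(&t));   /* still backing off        */
		return 0;
	}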
@@ -533,7 +550,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
  */
 static void scsi_run_queue(struct request_queue *q)
 {
-	struct scsi_device *sdev = q->queuedata;
+	struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;
 
@@ -560,6 +577,21 @@ static void scsi_run_queue(struct request_queue *q)
 		 */
 		sdev = list_entry(shost->starved_list.next,
 				  struct scsi_device, starved_entry);
+		/*
+		 * The *queue_ready functions can add a device back onto the
+		 * starved list's tail, so we must check for an infinite loop.
+		 */
+		if (sdev == starved_head)
+			break;
+		if (!starved_head)
+			starved_head = sdev;
+
+		if (scsi_target_is_busy(scsi_target(sdev))) {
+			list_move_tail(&sdev->starved_entry,
+				       &shost->starved_list);
+			continue;
+		}
+
 		list_del_init(&sdev->starved_entry);
 		spin_unlock(shost->host_lock);
 
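Because devices behind a busy target are now rotated to the tail rather than dispatched, the walk can no longer rely on the list shrinking on every pass; remembering the first device examined (starved_head) and stopping when it comes around again bounds the walk to one full lap. A standalone sketch of the same rotate-until-wraparound pattern (plain C, a singly linked list standing in for the kernel's list_head):

	#include <stdio.h>

	struct node { int id; int busy; struct node *next; };

	/*
	 * Dispatch non-busy nodes; rotate busy ones to the tail. Stopping
	 * when the first node examined reappears guarantees termination
	 * even though busy nodes are re-queued rather than removed.
	 */
	static void run_pass(struct node **head)
	{
		struct node *first_seen = NULL;

		while (*head) {
			struct node *n = *head;

			if (n == first_seen)
				break;          /* wrapped around: stop */
			if (!first_seen)
				first_seen = n;

			*head = n->next;        /* unlink from the head */
			n->next = NULL;
			if (n->busy) {          /* like list_move_tail() */
				struct node **tail = head;
				while (*tail)
					tail = &(*tail)->next;
				*tail = n;
				continue;
			}
			printf("dispatching node %d\n", n->id);
		}
	}

	int main(void)
	{
		struct node c = { 3, 1, NULL }, b = { 2, 0, &c }, a = { 1, 1, &b };
		struct node *head = &a;

		run_pass(&head);  /* dispatches node 2; 1 and 3 stay queued */
		return 0;
	}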
@@ -575,13 +607,6 @@ static void scsi_run_queue(struct request_queue *q)
 		spin_unlock(sdev->request_queue->queue_lock);
 
 		spin_lock(shost->host_lock);
-		if (unlikely(!list_empty(&sdev->starved_entry)))
-			/*
-			 * sdev lost a race, and was put back on the
-			 * starved list. This is unlikely but without this
-			 * in theory we could loop forever.
-			 */
-			break;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
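The in-loop race check removed here is subsumed by the starved_head guard added above: rather than bailing out the moment a device reappears on the starved list, the walk now tolerates re-queued entries and simply stops once it has come full circle.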
@@ -1344,6 +1369,52 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 	return 1;
 }
 
+
+/*
+ * scsi_target_queue_ready: checks if we can send commands to target
+ * @sdev: scsi device on starget to check.
+ *
+ * Called with the host lock held.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+					  struct scsi_device *sdev)
+{
+	struct scsi_target *starget = scsi_target(sdev);
+
+	if (starget->single_lun) {
+		if (starget->starget_sdev_user &&
+		    starget->starget_sdev_user != sdev)
+			return 0;
+		starget->starget_sdev_user = sdev;
+	}
+
+	if (starget->target_busy == 0 && starget->target_blocked) {
+		/*
+		 * unblock after target_blocked iterates to zero
+		 */
+		if (--starget->target_blocked == 0) {
+			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+					 "unblocking target at zero depth\n"));
+		} else {
+			blk_plug_device(sdev->request_queue);
+			return 0;
+		}
+	}
+
+	if (scsi_target_is_busy(starget)) {
+		if (list_empty(&sdev->starved_entry)) {
+			list_add_tail(&sdev->starved_entry,
+				      &shost->starved_list);
+			return 0;
+		}
+	}
+
+	/* We're OK to process the command, so we can't be starved */
+	if (!list_empty(&sdev->starved_entry))
+		list_del_init(&sdev->starved_entry);
+	return 1;
+}
+
 /*
  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
  * return 0. We must end up running the queue again whenever 0 is
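scsi_target_queue_ready() also implements the recovery half of the TARGET_BUSY handling: once the target has fully drained (target_busy == 0), each queue run decrements target_blocked, and dispatch resumes only when the count reaches zero; until then the queue is plugged. A compact userspace model of that countdown (mock fields only; the real function additionally enforces the single_lun and busy-limit rules shown above):

	#include <stdio.h>

	struct mock_target {
		int target_busy;
		int target_blocked;
	};

	/* Returns 1 when dispatch may proceed, 0 while still backing off. */
	static int target_queue_ready(struct mock_target *t)
	{
		if (t->target_busy == 0 && t->target_blocked) {
			if (--t->target_blocked == 0)
				printf("unblocking target at zero depth\n");
			else
				return 0; /* kernel also plugs the queue here */
		}
		return 1;
	}

	int main(void)
	{
		struct mock_target t = { .target_busy = 0, .target_blocked = 3 };

		/* Three queue runs are needed before commands flow again. */
		for (int i = 1; i <= 3; i++)
			printf("run %d: ready=%d\n", i, target_queue_ready(&t));
		return 0;
	}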
@@ -1390,6 +1461,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
 	struct scsi_cmnd *cmd = req->special;
 	struct scsi_device *sdev = cmd->device;
+	struct scsi_target *starget = scsi_target(sdev);
 	struct Scsi_Host *shost = sdev->host;
 
 	blkdev_dequeue_request(req);
@@ -1413,6 +1485,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	spin_unlock(sdev->request_queue->queue_lock);
 	spin_lock(shost->host_lock);
 	shost->host_busy++;
+	starget->target_busy++;
 	spin_unlock(shost->host_lock);
 	spin_lock(sdev->request_queue->queue_lock);
 
@@ -1550,14 +1623,13 @@ static void scsi_request_fn(struct request_queue *q)
 			goto not_ready;
 		}
 
+		if (!scsi_target_queue_ready(shost, sdev))
+			goto not_ready;
+
 		if (!scsi_host_queue_ready(q, shost, sdev))
 			goto not_ready;
-		if (scsi_target(sdev)->single_lun) {
-			if (scsi_target(sdev)->starget_sdev_user &&
-			    scsi_target(sdev)->starget_sdev_user != sdev)
-				goto not_ready;
-			scsi_target(sdev)->starget_sdev_user = sdev;
-		}
+
+		scsi_target(sdev)->target_busy++;
 		shost->host_busy++;
 
 		/*
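Note the symmetry this hunk completes: the single-LUN test it deletes now lives at the top of scsi_target_queue_ready(), so the dispatch path gates on device, target, and host readiness in that order before charging the command to both target_busy and host_busy.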