Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	| 603
1 file changed, 42 insertions(+), 561 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8b0345924a92..651be30ba96a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -168,8 +168,6 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 {
 	struct scsi_device *device = cmd->device;
-	struct request_queue *q = device->request_queue;
-	unsigned long flags;
 
 	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
 		"Inserting command %p into mlqueue\n", cmd));
@@ -190,26 +188,20 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 	 * before blk_cleanup_queue() finishes.
 	 */
 	cmd->result = 0;
-	if (q->mq_ops) {
-		/*
-		 * Before a SCSI command is dispatched,
-		 * get_device(&sdev->sdev_gendev) is called and the host,
-		 * target and device busy counters are increased. Since
-		 * requeuing a request causes these actions to be repeated and
-		 * since scsi_device_unbusy() has already been called,
-		 * put_device(&device->sdev_gendev) must still be called. Call
-		 * put_device() after blk_mq_requeue_request() to avoid that
-		 * removal of the SCSI device can start before requeueing has
-		 * happened.
-		 */
-		blk_mq_requeue_request(cmd->request, true);
-		put_device(&device->sdev_gendev);
-		return;
-	}
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, cmd->request);
-	kblockd_schedule_work(&device->requeue_work);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/*
+	 * Before a SCSI command is dispatched,
+	 * get_device(&sdev->sdev_gendev) is called and the host,
+	 * target and device busy counters are increased. Since
+	 * requeuing a request causes these actions to be repeated and
+	 * since scsi_device_unbusy() has already been called,
+	 * put_device(&device->sdev_gendev) must still be called. Call
+	 * put_device() after blk_mq_requeue_request() to avoid that
+	 * removal of the SCSI device can start before requeueing has
+	 * happened.
+	 */
+	blk_mq_requeue_request(cmd->request, true);
+	put_device(&device->sdev_gendev);
 }
 
 /*
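The ordering in the hunk above is the whole point: the device reference taken at dispatch time must not be dropped until the request is safely back on the requeue list, or device removal could begin mid-requeue. A minimal C sketch of the surviving pattern (illustrative only, it restates the two calls the hunk keeps; the demo_ name is invented):

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Requeue first, then drop the dispatch-time device reference. */
static void demo_requeue_then_put(struct scsi_cmnd *cmd)
{
	struct scsi_device *device = cmd->device;

	/* Put the request back and kick the requeue list ... */
	blk_mq_requeue_request(cmd->request, true);
	/*
	 * ... and only then release the reference taken at dispatch
	 * time, so device removal cannot start before the requeue
	 * has happened.
	 */
	put_device(&device->sdev_gendev);
}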
@@ -370,10 +362,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
 
 static void scsi_kick_queue(struct request_queue *q)
 {
-	if (q->mq_ops)
-		blk_mq_run_hw_queues(q, false);
-	else
-		blk_run_queue(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
 /*
@@ -534,10 +523,7 @@ static void scsi_run_queue(struct request_queue *q)
 	if (!list_empty(&sdev->host->starved_list))
 		scsi_starved_list_run(sdev->host);
 
-	if (q->mq_ops)
-		blk_mq_run_hw_queues(q, false);
-	else
-		blk_run_queue(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
 void scsi_requeue_run_queue(struct work_struct *work)
@@ -550,42 +536,6 @@ void scsi_requeue_run_queue(struct work_struct *work)
 	scsi_run_queue(q);
 }
 
-/*
- * Function:    scsi_requeue_command()
- *
- * Purpose:     Handle post-processing of completed commands.
- *
- * Arguments:   q       - queue to operate on
- *              cmd     - command that may need to be requeued.
- *
- * Returns:     Nothing
- *
- * Notes:       After command completion, there may be blocks left
- *              over which weren't finished by the previous command
- *              this can be for a number of reasons - the main one is
- *              I/O errors in the middle of the request, in which case
- *              we need to request the blocks that come after the bad
- *              sector.
- * Notes:       Upon return, cmd is a stale pointer.
- */
-static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
-{
-	struct scsi_device *sdev = cmd->device;
-	struct request *req = cmd->request;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_unprep_request(req);
-	req->special = NULL;
-	scsi_put_command(cmd);
-	blk_requeue_request(q, req);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	scsi_run_queue(q);
-
-	put_device(&sdev->sdev_gendev);
-}
-
 void scsi_run_host_queues(struct Scsi_Host *shost)
 {
 	struct scsi_device *sdev;
@@ -626,42 +576,6 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 	scsi_del_cmd_from_list(cmd);
 }
 
-/*
- * Function:    scsi_release_buffers()
- *
- * Purpose:     Free resources allocate for a scsi_command.
- *
- * Arguments:   cmd	- command that we are bailing.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       In the event that an upper level driver rejects a
- *		command, we must release resources allocated during
- *		the __init_io() function.  Primarily this would involve
- *		the scatter-gather table.
- */
-static void scsi_release_buffers(struct scsi_cmnd *cmd)
-{
-	if (cmd->sdb.table.nents)
-		sg_free_table_chained(&cmd->sdb.table, false);
-
-	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-	if (scsi_prot_sg_count(cmd))
-		sg_free_table_chained(&cmd->prot_sdb->table, false);
-}
-
-static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
-{
-	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
-
-	sg_free_table_chained(&bidi_sdb->table, false);
-	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-	cmd->request->next_rq->special = NULL;
-}
-
 /* Returns false when no more bytes to process, true if there are more */
 static bool scsi_end_request(struct request *req, blk_status_t error,
 		unsigned int bytes, unsigned int bidi_bytes)
@@ -687,37 +601,22 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
 		destroy_rcu_head(&cmd->rcu);
 	}
 
-	if (req->mq_ctx) {
-		/*
-		 * In the MQ case the command gets freed by __blk_mq_end_request,
-		 * so we have to do all cleanup that depends on it earlier.
-		 *
-		 * We also can't kick the queues from irq context, so we
-		 * will have to defer it to a workqueue.
-		 */
-		scsi_mq_uninit_cmd(cmd);
-
-		__blk_mq_end_request(req, error);
-
-		if (scsi_target(sdev)->single_lun ||
-		    !list_empty(&sdev->host->starved_list))
-			kblockd_schedule_work(&sdev->requeue_work);
-		else
-			blk_mq_run_hw_queues(q, true);
-	} else {
-		unsigned long flags;
-
-		if (bidi_bytes)
-			scsi_release_bidi_buffers(cmd);
-		scsi_release_buffers(cmd);
-		scsi_put_command(cmd);
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_finish_request(req, error);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-
-		scsi_run_queue(q);
-	}
+	/*
+	 * In the MQ case the command gets freed by __blk_mq_end_request,
+	 * so we have to do all cleanup that depends on it earlier.
+	 *
+	 * We also can't kick the queues from irq context, so we
+	 * will have to defer it to a workqueue.
+	 */
+	scsi_mq_uninit_cmd(cmd);
+
+	__blk_mq_end_request(req, error);
+
+	if (scsi_target(sdev)->single_lun ||
+	    !list_empty(&sdev->host->starved_list))
+		kblockd_schedule_work(&sdev->requeue_work);
+	else
+		blk_mq_run_hw_queues(q, true);
 
 	put_device(&sdev->sdev_gendev);
 	return false;
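The completion hunk above encodes a context restriction: queues cannot be run synchronously from (soft)irq context, so the kick is either punted to the kblockd workqueue or requested as an async blk-mq run. A sketch of the decision it keeps, using the same sdev and q fields as the diff (the demo_ name is invented):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Kick queues after completion without running them in irq context. */
static void demo_kick_after_completion(struct scsi_device *sdev,
				       struct request_queue *q)
{
	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list))
		kblockd_schedule_work(&sdev->requeue_work); /* workqueue */
	else
		blk_mq_run_hw_queues(q, true); /* async: no direct dispatch */
}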
@@ -766,13 +665,7 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
 				      struct request_queue *q)
 {
 	/* A new command will be prepared and issued. */
-	if (q->mq_ops) {
-		scsi_mq_requeue_cmd(cmd);
-	} else {
-		/* Unprep request and put it back at head of the queue. */
-		scsi_release_buffers(cmd);
-		scsi_requeue_command(q, cmd);
-	}
+	scsi_mq_requeue_cmd(cmd);
 }
 
 /* Helper for scsi_io_completion() when special action required. */
@@ -1147,9 +1040,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
  */
 int scsi_init_io(struct scsi_cmnd *cmd)
 {
-	struct scsi_device *sdev = cmd->device;
 	struct request *rq = cmd->request;
-	bool is_mq = (rq->mq_ctx != NULL);
 	int error = BLKPREP_KILL;
 
 	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
@@ -1160,17 +1051,6 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 		goto err_exit;
 
 	if (blk_bidi_rq(rq)) {
-		if (!rq->q->mq_ops) {
-			struct scsi_data_buffer *bidi_sdb =
-				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
-			if (!bidi_sdb) {
-				error = BLKPREP_DEFER;
-				goto err_exit;
-			}
-
-			rq->next_rq->special = bidi_sdb;
-		}
-
 		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
 		if (error)
 			goto err_exit;
@@ -1210,14 +1090,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 
 	return BLKPREP_OK;
 err_exit:
-	if (is_mq) {
-		scsi_mq_free_sgtables(cmd);
-	} else {
-		scsi_release_buffers(cmd);
-		cmd->request->special = NULL;
-		scsi_put_command(cmd);
-		put_device(&sdev->sdev_gendev);
-	}
+	scsi_mq_free_sgtables(cmd);
 	return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1423,75 +1296,6 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 	return ret;
 }
 
-static int
-scsi_prep_return(struct request_queue *q, struct request *req, int ret)
-{
-	struct scsi_device *sdev = q->queuedata;
-
-	switch (ret) {
-	case BLKPREP_KILL:
-	case BLKPREP_INVALID:
-		scsi_req(req)->result = DID_NO_CONNECT << 16;
-		/* release the command and kill it */
-		if (req->special) {
-			struct scsi_cmnd *cmd = req->special;
-			scsi_release_buffers(cmd);
-			scsi_put_command(cmd);
-			put_device(&sdev->sdev_gendev);
-			req->special = NULL;
-		}
-		break;
-	case BLKPREP_DEFER:
-		/*
-		 * If we defer, the blk_peek_request() returns NULL, but the
-		 * queue must be restarted, so we schedule a callback to happen
-		 * shortly.
-		 */
-		if (atomic_read(&sdev->device_busy) == 0)
-			blk_delay_queue(q, SCSI_QUEUE_DELAY);
-		break;
-	default:
-		req->rq_flags |= RQF_DONTPREP;
-	}
-
-	return ret;
-}
-
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	int ret;
-
-	ret = scsi_prep_state_check(sdev, req);
-	if (ret != BLKPREP_OK)
-		goto out;
-
-	if (!req->special) {
-		/* Bail if we can't get a reference to the device */
-		if (unlikely(!get_device(&sdev->sdev_gendev))) {
-			ret = BLKPREP_DEFER;
-			goto out;
-		}
-
-		scsi_init_command(sdev, cmd);
-		req->special = cmd;
-	}
-
-	cmd->tag = req->tag;
-	cmd->request = req;
-	cmd->prot_op = SCSI_PROT_NORMAL;
-
-	ret = scsi_setup_cmnd(sdev, req);
-out:
-	return scsi_prep_return(q, req, ret);
-}
-
-static void scsi_unprep_fn(struct request_queue *q, struct request *req)
-{
-	scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
-}
-
 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
  * return 0.
@@ -1511,14 +1315,8 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 		/*
 		 * unblock after device_blocked iterates to zero
 		 */
-		if (atomic_dec_return(&sdev->device_blocked) > 0) {
-			/*
-			 * For the MQ case we take care of this in the caller.
-			 */
-			if (!q->mq_ops)
-				blk_delay_queue(q, SCSI_QUEUE_DELAY);
+		if (atomic_dec_return(&sdev->device_blocked) > 0)
 			goto out_dec;
-		}
 		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
 				   "unblocking device at zero depth\n"));
 	}
@@ -1653,13 +1451,13 @@ out_dec:
  * needs to return 'not busy'. Otherwise, request stacking drivers
  * may hold requests forever.
  */
-static int scsi_lld_busy(struct request_queue *q)
+static bool scsi_mq_lld_busy(struct request_queue *q)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 
 	if (blk_queue_dying(q))
-		return 0;
+		return false;
 
 	shost = sdev->host;
 
@@ -1670,48 +1468,9 @@ static int scsi_lld_busy(struct request_queue *q)
 	 * in SCSI layer.
 	 */
 	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
-		return 1;
-
-	return 0;
-}
-
-static bool scsi_mq_lld_busy(struct request_queue *q)
-{
-	return scsi_lld_busy(q);
-}
-
-/*
- * Kill a request for a dead device
- */
-static void scsi_kill_request(struct request *req, struct request_queue *q)
-{
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	struct scsi_device *sdev;
-	struct scsi_target *starget;
-	struct Scsi_Host *shost;
-
-	blk_start_request(req);
-
-	scmd_printk(KERN_INFO, cmd, "killing request\n");
-
-	sdev = cmd->device;
-	starget = scsi_target(sdev);
-	shost = sdev->host;
-	scsi_init_cmd_errh(cmd);
-	cmd->result = DID_NO_CONNECT << 16;
-	atomic_inc(&cmd->device->iorequest_cnt);
-
-	/*
-	 * SCSI request completion path will do scsi_device_unbusy(),
-	 * bump busy counts.  To bump the counters, we need to dance
-	 * with the locks as normal issue path does.
-	 */
-	atomic_inc(&sdev->device_busy);
-	atomic_inc(&shost->host_busy);
-	if (starget->can_queue > 0)
-		atomic_inc(&starget->target_busy);
-
-	blk_complete_request(req);
+		return true;
+
+	return false;
 }
 
 static void scsi_softirq_done(struct request *rq)
@@ -1834,158 +1593,6 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	return 0;
 }
 
-/**
- * scsi_done - Invoke completion on finished SCSI command.
- * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
- * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
- *
- * Description: This function is the mid-level's (SCSI Core) interrupt routine,
- * which regains ownership of the SCSI command (de facto) from a LLDD, and
- * calls blk_complete_request() for further processing.
- *
- * This function is interrupt context safe.
- */
-static void scsi_done(struct scsi_cmnd *cmd)
-{
-	trace_scsi_dispatch_cmd_done(cmd);
-	blk_complete_request(cmd->request);
-}
-
-/*
- * Function:    scsi_request_fn()
- *
- * Purpose:     Main strategy routine for SCSI.
- *
- * Arguments:   q       - Pointer to actual queue.
- *
- * Returns:     Nothing
- *
- * Lock status: request queue lock assumed to be held when called.
- *
- * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
- * protection for ZBC disks.
- */
-static void scsi_request_fn(struct request_queue *q)
-	__releases(q->queue_lock)
-	__acquires(q->queue_lock)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct Scsi_Host *shost;
-	struct scsi_cmnd *cmd;
-	struct request *req;
-
-	/*
-	 * To start with, we keep looping until the queue is empty, or until
-	 * the host is no longer able to accept any more requests.
-	 */
-	shost = sdev->host;
-	for (;;) {
-		int rtn;
-		/*
-		 * get next queueable request.  We do this early to make sure
-		 * that the request is fully prepared even if we cannot
-		 * accept it.
-		 */
-		req = blk_peek_request(q);
-		if (!req)
-			break;
-
-		if (unlikely(!scsi_device_online(sdev))) {
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to offline device\n");
-			scsi_kill_request(req, q);
-			continue;
-		}
-
-		if (!scsi_dev_queue_ready(q, sdev))
-			break;
-
-		/*
-		 * Remove the request from the request list.
-		 */
-		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-			blk_start_request(req);
-
-		spin_unlock_irq(q->queue_lock);
-		cmd = blk_mq_rq_to_pdu(req);
-		if (cmd != req->special) {
-			printk(KERN_CRIT "impossible request in %s.\n"
-					 "please mail a stack trace to "
-					 "linux-scsi@vger.kernel.org\n",
-					 __func__);
-			blk_dump_rq_flags(req, "foo");
-			BUG();
-		}
-
-		/*
-		 * We hit this when the driver is using a host wide
-		 * tag map.  For device level tag maps the queue_depth check
-		 * in the device ready fn would prevent us from trying
-		 * to allocate a tag.  Since the map is a shared host resource
-		 * we add the dev to the starved list so it eventually gets
-		 * a run when a tag is freed.
-		 */
-		if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
-			spin_lock_irq(shost->host_lock);
-			if (list_empty(&sdev->starved_entry))
-				list_add_tail(&sdev->starved_entry,
-					      &shost->starved_list);
-			spin_unlock_irq(shost->host_lock);
-			goto not_ready;
-		}
-
-		if (!scsi_target_queue_ready(shost, sdev))
-			goto not_ready;
-
-		if (!scsi_host_queue_ready(q, shost, sdev))
-			goto host_not_ready;
-
-		if (sdev->simple_tags)
-			cmd->flags |= SCMD_TAGGED;
-		else
-			cmd->flags &= ~SCMD_TAGGED;
-
-		/*
-		 * Finally, initialize any error handling parameters, and set up
-		 * the timers for timeouts.
-		 */
-		scsi_init_cmd_errh(cmd);
-
-		/*
-		 * Dispatch the command to the low-level driver.
-		 */
-		cmd->scsi_done = scsi_done;
-		rtn = scsi_dispatch_cmd(cmd);
-		if (rtn) {
-			scsi_queue_insert(cmd, rtn);
-			spin_lock_irq(q->queue_lock);
-			goto out_delay;
-		}
-		spin_lock_irq(q->queue_lock);
-	}
-
-	return;
-
- host_not_ready:
-	if (scsi_target(sdev)->can_queue > 0)
-		atomic_dec(&scsi_target(sdev)->target_busy);
- not_ready:
-	/*
-	 * lock q, handle tag, requeue req, and decrement device_busy. We
-	 * must return with queue_lock held.
-	 *
-	 * Decrementing device_busy without checking it is OK, as all such
-	 * cases (host limits or settings) should run the queue at some
-	 * later time.
-	 */
-	spin_lock_irq(q->queue_lock);
-	blk_requeue_request(q, req);
-	atomic_dec(&sdev->device_busy);
-out_delay:
-	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
-		blk_delay_queue(q, SCSI_QUEUE_DELAY);
-}
-
 static inline blk_status_t prep_to_mq(int ret)
 {
 	switch (ret) {
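With scsi_request_fn() deleted, dispatch moves entirely from the legacy ->request_fn strategy-routine model (lock held, peek/start loop) to the blk-mq .queue_rq callback model. For contrast, a hypothetical minimal blk-mq ops table of this kernel generation; the demo_ names are invented, and the real SCSI table, scsi_mq_ops, appears in the next hunk's context:

#include <linux/blk-mq.h>

/* blk-mq calls this once per request; no queue_lock, no peek loop. */
static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* ... hand the request to the hardware here ... */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops demo_mq_ops = {
	.queue_rq	= demo_queue_rq,
};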
@@ -2248,77 +1855,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
-static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
-		gfp_t gfp)
-{
-	struct Scsi_Host *shost = q->rq_alloc_data;
-	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
-	memset(cmd, 0, sizeof(*cmd));
-
-	if (unchecked_isa_dma)
-		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
-	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
-						    NUMA_NO_NODE);
-	if (!cmd->sense_buffer)
-		goto fail;
-	cmd->req.sense = cmd->sense_buffer;
-
-	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
-		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
-		if (!cmd->prot_sdb)
-			goto fail_free_sense;
-	}
-
-	return 0;
-
-fail_free_sense:
-	scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
-fail:
-	return -ENOMEM;
-}
-
-static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
-{
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
-	if (cmd->prot_sdb)
-		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
-			       cmd->sense_buffer);
-}
-
-struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
-{
-	struct Scsi_Host *shost = sdev->host;
-	struct request_queue *q;
-
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
-	if (!q)
-		return NULL;
-	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
-	q->rq_alloc_data = shost;
-	q->request_fn = scsi_request_fn;
-	q->init_rq_fn = scsi_old_init_rq;
-	q->exit_rq_fn = scsi_old_exit_rq;
-	q->initialize_rq_fn = scsi_initialize_rq;
-
-	if (blk_init_allocated_queue(q) < 0) {
-		blk_cleanup_queue(q);
-		return NULL;
-	}
-
-	__scsi_init_queue(shost, q);
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
-	blk_queue_prep_rq(q, scsi_prep_fn);
-	blk_queue_unprep_rq(q, scsi_unprep_fn);
-	blk_queue_softirq_done(q, scsi_softirq_done);
-	blk_queue_rq_timed_out(q, scsi_times_out);
-	blk_queue_lld_busy(q, scsi_lld_busy);
-	return q;
-}
-
 static const struct blk_mq_ops scsi_mq_ops = {
 	.get_budget	= scsi_mq_get_budget,
 	.put_budget	= scsi_mq_put_budget,
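scsi_old_alloc_queue() needs no one-for-one replacement: under blk-mq, request queues are created from a tag set that already carries the ops table, command size, and per-request init/exit hooks, so the blk_alloc_queue_node() plus blk_init_allocated_queue() dance disappears. A hypothetical sketch of that allocation model, with an invented demo_ name and error handling trimmed:

#include <linux/err.h>
#include <linux/blk-mq.h>

/* Queues come from a shared tag set; ops and cmd sizing live in *set. */
static struct request_queue *demo_alloc_mq_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *q = blk_mq_init_queue(set);

	return IS_ERR(q) ? NULL : q;
}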
@@ -2386,10 +1922,7 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
 {
 	struct scsi_device *sdev = NULL;
 
-	if (q->mq_ops) {
-		if (q->mq_ops == &scsi_mq_ops)
-			sdev = q->queuedata;
-	} else if (q->request_fn == scsi_request_fn)
+	if (q->mq_ops == &scsi_mq_ops)
 		sdev = q->queuedata;
 	if (!sdev || !get_device(&sdev->sdev_gendev))
 		sdev = NULL;
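A short usage sketch for the simplified helper: with a single I/O path left, comparing q->mq_ops against scsi_mq_ops is enough to identify a SCSI-owned queue, and the helper still returns the device with a reference held that the caller must drop (the demo_ name is invented):

#include <scsi/scsi_device.h>

/* Peek at the owner of a request queue; drop the reference when done. */
static void demo_report_queue_owner(struct request_queue *q)
{
	struct scsi_device *sdev = scsi_device_from_queue(q);

	if (!sdev)
		return; /* not a SCSI blk-mq queue */

	sdev_printk(KERN_INFO, sdev, "queue owned by this device\n");
	put_device(&sdev->sdev_gendev);
}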
@@ -2993,39 +2526,6 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
 
 /**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
- */
-static int scsi_request_fn_active(struct scsi_device *sdev)
-{
-	struct request_queue *q = sdev->request_queue;
-	int request_fn_active;
-
-	WARN_ON_ONCE(sdev->host->use_blk_mq);
-
-	spin_lock_irq(q->queue_lock);
-	request_fn_active = q->request_fn_active;
-	spin_unlock_irq(q->queue_lock);
-
-	return request_fn_active;
-}
-
-/**
- * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
- * @sdev: SCSI device pointer.
- *
- * Wait until the ongoing shost->hostt->queuecommand() calls that are
- * invoked from scsi_request_fn() have finished.
- */
-static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
-{
-	WARN_ON_ONCE(sdev->host->use_blk_mq);
-
-	while (scsi_request_fn_active(sdev))
-		msleep(20);
-}
-
-/**
  * scsi_device_quiesce - Block user issued commands.
  * @sdev: scsi device to quiesce.
  *
@@ -3148,7 +2648,6 @@ EXPORT_SYMBOL(scsi_target_resume);
 int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
-	unsigned long flags;
 	int err = 0;
 
 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
@@ -3164,14 +2663,7 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 	 * block layer from calling the midlayer with this device's
 	 * request queue.
 	 */
-	if (q->mq_ops) {
-		blk_mq_quiesce_queue_nowait(q);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_stop_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-
+	blk_mq_quiesce_queue_nowait(q);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
@@ -3202,12 +2694,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 
 	mutex_lock(&sdev->state_mutex);
 	err = scsi_internal_device_block_nowait(sdev);
-	if (err == 0) {
-		if (q->mq_ops)
-			blk_mq_quiesce_queue(q);
-		else
-			scsi_wait_for_queuecommand(sdev);
-	}
+	if (err == 0)
+		blk_mq_quiesce_queue(q);
 	mutex_unlock(&sdev->state_mutex);
 
 	return err;
@@ -3216,15 +2704,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 void scsi_start_queue(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
-	unsigned long flags;
 
-	if (q->mq_ops) {
-		blk_mq_unquiesce_queue(q);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_start_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
+	blk_mq_unquiesce_queue(q);
 }
 
 /**
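The last three hunks converge on the same primitive pair: blk_mq_quiesce_queue() both stops dispatch and waits for in-flight .queue_rq calls, which is why the scsi_wait_for_queuecommand() polling loop could be deleted outright, and blk_mq_unquiesce_queue() restarts dispatch. A usage sketch of the pairing that replaces blk_stop_queue()/blk_start_queue() (the demo_ name is invented):

#include <linux/blk-mq.h>
#include <scsi/scsi_device.h>

/* Block dispatch, do maintenance, resume: replaces stop/wait/start. */
static void demo_quiesce_cycle(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_mq_quiesce_queue(q);   /* stops dispatch, waits for queue_rq */
	/* ... queue is quiet here; no polling helper needed ... */
	blk_mq_unquiesce_queue(q); /* dispatch resumes */
}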