diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-09 21:54:06 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-09 21:54:06 -0400 |
commit | 1c54fc1efe6922b4e7ffd591739d72050976ccd6 (patch) | |
tree | 0f7f0eaa91fa06bba11da240915eb6a4040b482a /drivers/scsi/scsi_lib.c | |
parent | f4f9b8fc73f9aa93744f0e91e18f367d7766f523 (diff) | |
parent | b4c43993f448d0e25fe40690d9e9c81a8ebda623 (diff) |
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This patch consists of the usual driver updates (qla2xxx, qla4xxx,
lpfc, be2iscsi, fnic, ufs, NCR5380). The NCR5380 is the addition to
maintained status of a long neglected driver for older hardware. In
addition there are a lot of minor fixes and cleanups and some more
updates to make scsi mq ready"
* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (130 commits)
include/scsi/osd_protocol.h: remove unnecessary __constant
mvsas: Recognise device/subsystem 9485/9485 as 88SE9485
Revert "be2iscsi: Fix processing cqe for cxn whose endpoint is freed"
mptfusion: fix msgContext in mptctl_hp_hostinfo
acornscsi: remove linked command support
scsi/NCR5380: dprintk macro
fusion: Remove use of DEF_SCSI_QCMD
fusion: Add free msg frames to the head, not tail of list
mpt2sas: Add free smids to the head, not tail of list
mpt2sas: Remove use of DEF_SCSI_QCMD
mpt2sas: Remove uses of serial_number
mpt3sas: Remove use of DEF_SCSI_QCMD
mpt3sas: Remove uses of serial_number
qla2xxx: Use kmemdup instead of kmalloc + memcpy
qla4xxx: Use kmemdup instead of kmalloc + memcpy
qla2xxx: fix incorrect debug printk
be2iscsi: Bump the driver version
be2iscsi: Fix processing cqe for cxn whose endpoint is freed
be2iscsi: Fix destroy MCC-CQ before MCC-EQ is destroyed
be2iscsi: Fix memory corruption in MBX path
...
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 225 |
1 file changed, 90 insertions, 135 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a0c95cac91f0..be0d5fad999d 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost) | |||
512 | scsi_run_queue(sdev->request_queue); | 512 | scsi_run_queue(sdev->request_queue); |
513 | } | 513 | } |
514 | 514 | ||
515 | static void __scsi_release_buffers(struct scsi_cmnd *, int); | ||
516 | |||
517 | /* | ||
518 | * Function: scsi_end_request() | ||
519 | * | ||
520 | * Purpose: Post-processing of completed commands (usually invoked at end | ||
521 | * of upper level post-processing and scsi_io_completion). | ||
522 | * | ||
523 | * Arguments: cmd - command that is complete. | ||
524 | * error - 0 if I/O indicates success, < 0 for I/O error. | ||
525 | * bytes - number of bytes of completed I/O | ||
526 | * requeue - indicates whether we should requeue leftovers. | ||
527 | * | ||
528 | * Lock status: Assumed that lock is not held upon entry. | ||
529 | * | ||
530 | * Returns: cmd if requeue required, NULL otherwise. | ||
531 | * | ||
532 | * Notes: This is called for block device requests in order to | ||
533 | * mark some number of sectors as complete. | ||
534 | * | ||
535 | * We are guaranteeing that the request queue will be goosed | ||
536 | * at some point during this call. | ||
537 | * Notes: If cmd was requeued, upon return it will be a stale pointer. | ||
538 | */ | ||
539 | static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, | ||
540 | int bytes, int requeue) | ||
541 | { | ||
542 | struct request_queue *q = cmd->device->request_queue; | ||
543 | struct request *req = cmd->request; | ||
544 | |||
545 | /* | ||
546 | * If there are blocks left over at the end, set up the command | ||
547 | * to queue the remainder of them. | ||
548 | */ | ||
549 | if (blk_end_request(req, error, bytes)) { | ||
550 | /* kill remainder if no retrys */ | ||
551 | if (error && scsi_noretry_cmd(cmd)) | ||
552 | blk_end_request_all(req, error); | ||
553 | else { | ||
554 | if (requeue) { | ||
555 | /* | ||
556 | * Bleah. Leftovers again. Stick the | ||
557 | * leftovers in the front of the | ||
558 | * queue, and goose the queue again. | ||
559 | */ | ||
560 | scsi_release_buffers(cmd); | ||
561 | scsi_requeue_command(q, cmd); | ||
562 | cmd = NULL; | ||
563 | } | ||
564 | return cmd; | ||
565 | } | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * This will goose the queue request function at the end, so we don't | ||
570 | * need to worry about launching another command. | ||
571 | */ | ||
572 | __scsi_release_buffers(cmd, 0); | ||
573 | scsi_next_command(cmd); | ||
574 | return NULL; | ||
575 | } | ||
576 | |||
577 | static inline unsigned int scsi_sgtable_index(unsigned short nents) | 515 | static inline unsigned int scsi_sgtable_index(unsigned short nents) |
578 | { | 516 | { |
579 | unsigned int index; | 517 | unsigned int index; |
@@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb) | |||
625 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); | 563 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); |
626 | } | 564 | } |
627 | 565 | ||
628 | static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) | ||
629 | { | ||
630 | |||
631 | if (cmd->sdb.table.nents) | ||
632 | scsi_free_sgtable(&cmd->sdb); | ||
633 | |||
634 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); | ||
635 | |||
636 | if (do_bidi_check && scsi_bidi_cmnd(cmd)) { | ||
637 | struct scsi_data_buffer *bidi_sdb = | ||
638 | cmd->request->next_rq->special; | ||
639 | scsi_free_sgtable(bidi_sdb); | ||
640 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); | ||
641 | cmd->request->next_rq->special = NULL; | ||
642 | } | ||
643 | |||
644 | if (scsi_prot_sg_count(cmd)) | ||
645 | scsi_free_sgtable(cmd->prot_sdb); | ||
646 | } | ||
647 | |||
648 | /* | 566 | /* |
649 | * Function: scsi_release_buffers() | 567 | * Function: scsi_release_buffers() |
650 | * | 568 | * |
651 | * Purpose: Completion processing for block device I/O requests. | 569 | * Purpose: Free resources allocate for a scsi_command. |
652 | * | 570 | * |
653 | * Arguments: cmd - command that we are bailing. | 571 | * Arguments: cmd - command that we are bailing. |
654 | * | 572 | * |
@@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) | |||
659 | * Notes: In the event that an upper level driver rejects a | 577 | * Notes: In the event that an upper level driver rejects a |
660 | * command, we must release resources allocated during | 578 | * command, we must release resources allocated during |
661 | * the __init_io() function. Primarily this would involve | 579 | * the __init_io() function. Primarily this would involve |
662 | * the scatter-gather table, and potentially any bounce | 580 | * the scatter-gather table. |
663 | * buffers. | ||
664 | */ | 581 | */ |
665 | void scsi_release_buffers(struct scsi_cmnd *cmd) | 582 | void scsi_release_buffers(struct scsi_cmnd *cmd) |
666 | { | 583 | { |
667 | __scsi_release_buffers(cmd, 1); | 584 | if (cmd->sdb.table.nents) |
585 | scsi_free_sgtable(&cmd->sdb); | ||
586 | |||
587 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); | ||
588 | |||
589 | if (scsi_prot_sg_count(cmd)) | ||
590 | scsi_free_sgtable(cmd->prot_sdb); | ||
668 | } | 591 | } |
669 | EXPORT_SYMBOL(scsi_release_buffers); | 592 | EXPORT_SYMBOL(scsi_release_buffers); |
670 | 593 | ||
594 | static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) | ||
595 | { | ||
596 | struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; | ||
597 | |||
598 | scsi_free_sgtable(bidi_sdb); | ||
599 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); | ||
600 | cmd->request->next_rq->special = NULL; | ||
601 | } | ||
602 | |||
671 | /** | 603 | /** |
672 | * __scsi_error_from_host_byte - translate SCSI error code into errno | 604 | * __scsi_error_from_host_byte - translate SCSI error code into errno |
673 | * @cmd: SCSI command (unused) | 605 | * @cmd: SCSI command (unused) |
@@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) | |||
725 | * | 657 | * |
726 | * Returns: Nothing | 658 | * Returns: Nothing |
727 | * | 659 | * |
728 | * Notes: This function is matched in terms of capabilities to | 660 | * Notes: We will finish off the specified number of sectors. If we |
729 | * the function that created the scatter-gather list. | 661 | * are done, the command block will be released and the queue |
730 | * In other words, if there are no bounce buffers | 662 | * function will be goosed. If we are not done then we have to |
731 | * (the normal case for most drivers), we don't need | ||
732 | * the logic to deal with cleaning up afterwards. | ||
733 | * | ||
734 | * We must call scsi_end_request(). This will finish off | ||
735 | * the specified number of sectors. If we are done, the | ||
736 | * command block will be released and the queue function | ||
737 | * will be goosed. If we are not done then we have to | ||
738 | * figure out what to do next: | 663 | * figure out what to do next: |
739 | * | 664 | * |
740 | * a) We can call scsi_requeue_command(). The request | 665 | * a) We can call scsi_requeue_command(). The request |
@@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) | |||
743 | * be used if we made forward progress, or if we want | 668 | * be used if we made forward progress, or if we want |
744 | * to switch from READ(10) to READ(6) for example. | 669 | * to switch from READ(10) to READ(6) for example. |
745 | * | 670 | * |
746 | * b) We can call scsi_queue_insert(). The request will | 671 | * b) We can call __scsi_queue_insert(). The request will |
747 | * be put back on the queue and retried using the same | 672 | * be put back on the queue and retried using the same |
748 | * command as before, possibly after a delay. | 673 | * command as before, possibly after a delay. |
749 | * | 674 | * |
@@ -801,6 +726,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
801 | req->next_rq->resid_len = scsi_in(cmd)->resid; | 726 | req->next_rq->resid_len = scsi_in(cmd)->resid; |
802 | 727 | ||
803 | scsi_release_buffers(cmd); | 728 | scsi_release_buffers(cmd); |
729 | scsi_release_bidi_buffers(cmd); | ||
730 | |||
804 | blk_end_request_all(req, 0); | 731 | blk_end_request_all(req, 0); |
805 | 732 | ||
806 | scsi_next_command(cmd); | 733 | scsi_next_command(cmd); |
@@ -840,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
840 | } | 767 | } |
841 | 768 | ||
842 | /* | 769 | /* |
843 | * A number of bytes were successfully read. If there | 770 | * If we finished all bytes in the request we are done now. |
844 | * are leftovers and there is some kind of error | ||
845 | * (result != 0), retry the rest. | ||
846 | */ | 771 | */ |
847 | if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) | 772 | if (!blk_end_request(req, error, good_bytes)) |
848 | return; | 773 | goto next_command; |
774 | |||
775 | /* | ||
776 | * Kill remainder if no retrys. | ||
777 | */ | ||
778 | if (error && scsi_noretry_cmd(cmd)) { | ||
779 | blk_end_request_all(req, error); | ||
780 | goto next_command; | ||
781 | } | ||
782 | |||
783 | /* | ||
784 | * If there had been no error, but we have leftover bytes in the | ||
785 | * requeues just queue the command up again. | ||
786 | */ | ||
787 | if (result == 0) | ||
788 | goto requeue; | ||
849 | 789 | ||
850 | error = __scsi_error_from_host_byte(cmd, result); | 790 | error = __scsi_error_from_host_byte(cmd, result); |
851 | 791 | ||
@@ -973,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
973 | switch (action) { | 913 | switch (action) { |
974 | case ACTION_FAIL: | 914 | case ACTION_FAIL: |
975 | /* Give up and fail the remainder of the request */ | 915 | /* Give up and fail the remainder of the request */ |
976 | scsi_release_buffers(cmd); | ||
977 | if (!(req->cmd_flags & REQ_QUIET)) { | 916 | if (!(req->cmd_flags & REQ_QUIET)) { |
978 | if (description) | 917 | if (description) |
979 | scmd_printk(KERN_INFO, cmd, "%s\n", | 918 | scmd_printk(KERN_INFO, cmd, "%s\n", |
@@ -983,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
983 | scsi_print_sense("", cmd); | 922 | scsi_print_sense("", cmd); |
984 | scsi_print_command(cmd); | 923 | scsi_print_command(cmd); |
985 | } | 924 | } |
986 | if (blk_end_request_err(req, error)) | 925 | if (!blk_end_request_err(req, error)) |
987 | scsi_requeue_command(q, cmd); | 926 | goto next_command; |
988 | else | 927 | /*FALLTHRU*/ |
989 | scsi_next_command(cmd); | ||
990 | break; | ||
991 | case ACTION_REPREP: | 928 | case ACTION_REPREP: |
929 | requeue: | ||
992 | /* Unprep the request and put it back at the head of the queue. | 930 | /* Unprep the request and put it back at the head of the queue. |
993 | * A new command will be prepared and issued. | 931 | * A new command will be prepared and issued. |
994 | */ | 932 | */ |
@@ -1004,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
1004 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); | 942 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); |
1005 | break; | 943 | break; |
1006 | } | 944 | } |
945 | return; | ||
946 | |||
947 | next_command: | ||
948 | scsi_release_buffers(cmd); | ||
949 | scsi_next_command(cmd); | ||
1007 | } | 950 | } |
1008 | 951 | ||
1009 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, | 952 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, |
@@ -1128,15 +1071,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, | |||
1128 | 1071 | ||
1129 | int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | 1072 | int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) |
1130 | { | 1073 | { |
1131 | struct scsi_cmnd *cmd; | 1074 | struct scsi_cmnd *cmd = req->special; |
1132 | int ret = scsi_prep_state_check(sdev, req); | ||
1133 | |||
1134 | if (ret != BLKPREP_OK) | ||
1135 | return ret; | ||
1136 | |||
1137 | cmd = scsi_get_cmd_from_req(sdev, req); | ||
1138 | if (unlikely(!cmd)) | ||
1139 | return BLKPREP_DEFER; | ||
1140 | 1075 | ||
1141 | /* | 1076 | /* |
1142 | * BLOCK_PC requests may transfer data, in which case they must | 1077 | * BLOCK_PC requests may transfer data, in which case they must |
@@ -1179,15 +1114,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); | |||
1179 | */ | 1114 | */ |
1180 | int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) | 1115 | int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) |
1181 | { | 1116 | { |
1182 | struct scsi_cmnd *cmd; | 1117 | struct scsi_cmnd *cmd = req->special; |
1183 | int ret = scsi_prep_state_check(sdev, req); | ||
1184 | |||
1185 | if (ret != BLKPREP_OK) | ||
1186 | return ret; | ||
1187 | 1118 | ||
1188 | if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh | 1119 | if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh |
1189 | && sdev->scsi_dh_data->scsi_dh->prep_fn)) { | 1120 | && sdev->scsi_dh_data->scsi_dh->prep_fn)) { |
1190 | ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); | 1121 | int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); |
1191 | if (ret != BLKPREP_OK) | 1122 | if (ret != BLKPREP_OK) |
1192 | return ret; | 1123 | return ret; |
1193 | } | 1124 | } |
@@ -1197,16 +1128,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) | |||
1197 | */ | 1128 | */ |
1198 | BUG_ON(!req->nr_phys_segments); | 1129 | BUG_ON(!req->nr_phys_segments); |
1199 | 1130 | ||
1200 | cmd = scsi_get_cmd_from_req(sdev, req); | ||
1201 | if (unlikely(!cmd)) | ||
1202 | return BLKPREP_DEFER; | ||
1203 | |||
1204 | memset(cmd->cmnd, 0, BLK_MAX_CDB); | 1131 | memset(cmd->cmnd, 0, BLK_MAX_CDB); |
1205 | return scsi_init_io(cmd, GFP_ATOMIC); | 1132 | return scsi_init_io(cmd, GFP_ATOMIC); |
1206 | } | 1133 | } |
1207 | EXPORT_SYMBOL(scsi_setup_fs_cmnd); | 1134 | EXPORT_SYMBOL(scsi_setup_fs_cmnd); |
1208 | 1135 | ||
1209 | int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | 1136 | static int |
1137 | scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | ||
1210 | { | 1138 | { |
1211 | int ret = BLKPREP_OK; | 1139 | int ret = BLKPREP_OK; |
1212 | 1140 | ||
@@ -1258,9 +1186,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | |||
1258 | } | 1186 | } |
1259 | return ret; | 1187 | return ret; |
1260 | } | 1188 | } |
1261 | EXPORT_SYMBOL(scsi_prep_state_check); | ||
1262 | 1189 | ||
1263 | int scsi_prep_return(struct request_queue *q, struct request *req, int ret) | 1190 | static int |
1191 | scsi_prep_return(struct request_queue *q, struct request *req, int ret) | ||
1264 | { | 1192 | { |
1265 | struct scsi_device *sdev = q->queuedata; | 1193 | struct scsi_device *sdev = q->queuedata; |
1266 | 1194 | ||
@@ -1291,18 +1219,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) | |||
1291 | 1219 | ||
1292 | return ret; | 1220 | return ret; |
1293 | } | 1221 | } |
1294 | EXPORT_SYMBOL(scsi_prep_return); | ||
1295 | 1222 | ||
1296 | int scsi_prep_fn(struct request_queue *q, struct request *req) | 1223 | static int scsi_prep_fn(struct request_queue *q, struct request *req) |
1297 | { | 1224 | { |
1298 | struct scsi_device *sdev = q->queuedata; | 1225 | struct scsi_device *sdev = q->queuedata; |
1299 | int ret = BLKPREP_KILL; | 1226 | struct scsi_cmnd *cmd; |
1227 | int ret; | ||
1300 | 1228 | ||
1301 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) | 1229 | ret = scsi_prep_state_check(sdev, req); |
1230 | if (ret != BLKPREP_OK) | ||
1231 | goto out; | ||
1232 | |||
1233 | cmd = scsi_get_cmd_from_req(sdev, req); | ||
1234 | if (unlikely(!cmd)) { | ||
1235 | ret = BLKPREP_DEFER; | ||
1236 | goto out; | ||
1237 | } | ||
1238 | |||
1239 | if (req->cmd_type == REQ_TYPE_FS) | ||
1240 | ret = scsi_cmd_to_driver(cmd)->init_command(cmd); | ||
1241 | else if (req->cmd_type == REQ_TYPE_BLOCK_PC) | ||
1302 | ret = scsi_setup_blk_pc_cmnd(sdev, req); | 1242 | ret = scsi_setup_blk_pc_cmnd(sdev, req); |
1243 | else | ||
1244 | ret = BLKPREP_KILL; | ||
1245 | |||
1246 | out: | ||
1303 | return scsi_prep_return(q, req, ret); | 1247 | return scsi_prep_return(q, req, ret); |
1304 | } | 1248 | } |
1305 | EXPORT_SYMBOL(scsi_prep_fn); | 1249 | |
1250 | static void scsi_unprep_fn(struct request_queue *q, struct request *req) | ||
1251 | { | ||
1252 | if (req->cmd_type == REQ_TYPE_FS) { | ||
1253 | struct scsi_cmnd *cmd = req->special; | ||
1254 | struct scsi_driver *drv = scsi_cmd_to_driver(cmd); | ||
1255 | |||
1256 | if (drv->uninit_command) | ||
1257 | drv->uninit_command(cmd); | ||
1258 | } | ||
1259 | } | ||
1306 | 1260 | ||
1307 | /* | 1261 | /* |
1308 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else | 1262 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else |
@@ -1723,6 +1677,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |||
1723 | return NULL; | 1677 | return NULL; |
1724 | 1678 | ||
1725 | blk_queue_prep_rq(q, scsi_prep_fn); | 1679 | blk_queue_prep_rq(q, scsi_prep_fn); |
1680 | blk_queue_unprep_rq(q, scsi_unprep_fn); | ||
1726 | blk_queue_softirq_done(q, scsi_softirq_done); | 1681 | blk_queue_softirq_done(q, scsi_softirq_done); |
1727 | blk_queue_rq_timed_out(q, scsi_times_out); | 1682 | blk_queue_rq_timed_out(q, scsi_times_out); |
1728 | blk_queue_lld_busy(q, scsi_lld_busy); | 1683 | blk_queue_lld_busy(q, scsi_lld_busy); |