Diffstat (limited to 'drivers/scsi/qla2xxx/qla_isr.c')
 drivers/scsi/qla2xxx/qla_isr.c | 444
 1 file changed, 221 insertions(+), 223 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b20a7169aac2..db539b0c3dae 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -7,7 +7,9 @@
 #include "qla_def.h"
 
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <scsi/scsi_tcq.h>
+#include <scsi/scsi_bsg_fc.h>
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -152,7 +154,7 @@ qla2300_intr_handler(int irq, void *dev_id)
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
 		if (stat & HSR_RISC_PAUSED) {
-			if (pci_channel_offline(ha->pdev))
+			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
 
 			hccr = RD_REG_WORD(&reg->hccr);
@@ -313,10 +315,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "10" };
 	char		*link_speed;
 	uint16_t	handle_cnt;
-	uint16_t	cnt;
+	uint16_t	cnt, mbx;
 	uint32_t	handles[5];
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	uint32_t	rscn_entry, host_pid;
 	uint8_t		rscn_queue_index;
 	unsigned long	flags;
@@ -395,9 +398,10 @@ skip_rio:
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
+		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
 		qla_printk(KERN_INFO, ha,
-		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
-		    mb[1], mb[2], mb[3]);
+		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
+		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
 
 		ha->isp_ops->fw_dump(vha, 1);
 
@@ -419,9 +423,10 @@ skip_rio:
 		break;
 
 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
-		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
-		    vha->host_no));
-		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
+		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
+		    vha->host_no, mb[1]));
+		qla_printk(KERN_WARNING, ha,
+		    "ISP Request Transfer Error (%x).\n", mb[1]);
 
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
@@ -485,10 +490,13 @@ skip_rio:
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
+		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
-		    "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
-		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
-		    mb[1], mb[2], mb[3]);
+		    "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
+		    mbx));
+		qla_printk(KERN_INFO, ha,
+		    "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
+		    mbx);
 
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -613,11 +621,10 @@ skip_rio:
 		 *	vp_idx does not match
 		 * Event is not global, vp_idx does not match
 		 */
-		if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
-			|| (mb[1] != 0xffff)) {
-			if (vha->vp_idx != (mb[3] & 0xff))
-				break;
-		}
+		if (IS_QLA2XXX_MIDTYPE(ha) &&
+		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
+			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
+			break;
 
 		/* Global event -- port logout or port unavailable. */
 		if (mb[1] == 0xffff && mb[2] == 0x7) {
@@ -805,78 +812,6 @@ skip_rio:
 		qla2x00_alert_all_vps(rsp, mb);
 }
 
-static void
-qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
-{
-	fc_port_t *fcport = data;
-	struct scsi_qla_host *vha = fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = NULL;
-
-	if (!ql2xqfulltracking)
-		return;
-
-	req = vha->req;
-	if (!req)
-		return;
-	if (req->max_q_depth <= sdev->queue_depth)
-		return;
-
-	if (sdev->ordered_tags)
-		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
-		    sdev->queue_depth + 1);
-	else
-		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
-		    sdev->queue_depth + 1);
-
-	fcport->last_ramp_up = jiffies;
-
-	DEBUG2(qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
-	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
-	    sdev->queue_depth));
-}
-
-static void
-qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
-{
-	fc_port_t *fcport = data;
-
-	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
-		return;
-
-	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
-	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
-	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
-	    sdev->queue_depth));
-}
-
-static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
-    srb_t *sp)
-{
-	fc_port_t *fcport;
-	struct scsi_device *sdev;
-
-	if (!ql2xqfulltracking)
-		return;
-
-	sdev = sp->cmd->device;
-	if (sdev->queue_depth >= req->max_q_depth)
-		return;
-
-	fcport = sp->fcport;
-	if (time_before(jiffies,
-	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
-		return;
-	if (time_before(jiffies,
-	    fcport->last_queue_full + ql2xqfullrampup * HZ))
-		return;
-
-	starget_for_each_device(sdev->sdev_target, fcport,
-	    qla2x00_adjust_sdev_qdepth_up);
-}
-
 /**
  * qla2x00_process_completed_request() - Process a Fast Post response.
  * @ha: SCSI driver HA context
@@ -907,8 +842,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 
 		/* Save ISP completion status */
 		sp->cmd->result = DID_OK << 16;
-
-		qla2x00_ramp_up_queue_depth(vha, req, sp);
 		qla2x00_sp_compl(ha, sp);
 	} else {
 		DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -949,7 +882,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
 		    index);
 		return NULL;
 	}
+
 	req->outstanding_cmds[index] = NULL;
+
 done:
 	return sp;
 }
@@ -1050,6 +985,100 @@ done_post_logio_done_work:
 }
 
 static void
+qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct sts_entry_24xx *pkt, int iocb_type)
+{
+	const char func[] = "ELS_CT_IOCB";
+	const char *type;
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	struct srb_bsg *sp_bsg;
+	struct fc_bsg_job *bsg_job;
+	uint16_t comp_status;
+	uint32_t fw_status[3];
+	uint8_t* fw_sts_ptr;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+	sp_bsg = (struct srb_bsg*)sp->ctx;
+	bsg_job = sp_bsg->bsg_job;
+
+	type = NULL;
+	switch (sp_bsg->ctx.type) {
+	case SRB_ELS_CMD_RPT:
+	case SRB_ELS_CMD_HST:
+		type = "els";
+		break;
+	case SRB_CT_CMD:
+		type = "ct pass-through";
+		break;
+	default:
+		qla_printk(KERN_WARNING, ha,
+		    "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
+		    sp_bsg->ctx.type);
+		return;
+	}
+
+	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
+	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+
+	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+	 * fc payload to the caller
+	 */
+	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
+
+	if (comp_status != CS_COMPLETE) {
+		if (comp_status == CS_DATA_UNDERRUN) {
+			bsg_job->reply->result = DID_OK << 16;
+			bsg_job->reply->reply_payload_rcv_len =
+			    le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
+
+			DEBUG2(qla_printk(KERN_WARNING, ha,
+			    "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
+			    vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
+			    le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
+			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
+		}
+		else {
+			DEBUG2(qla_printk(KERN_WARNING, ha,
+			    "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
+			    vha->host_no, sp->handle, type, comp_status,
+			    le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
+			    le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
+			bsg_job->reply->result = DID_ERROR << 16;
+			bsg_job->reply->reply_payload_rcv_len = 0;
+			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
+		}
+		DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+	}
+	else {
+		bsg_job->reply->result = DID_OK << 16;;
+		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+		bsg_job->reply_len = 0;
+	}
+
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
+	    (sp_bsg->ctx.type == SRB_CT_CMD))
+		kfree(sp->fcport);
+	kfree(sp->ctx);
+	mempool_free(sp, ha->srb_mempool);
+	bsg_job->job_done(bsg_job);
+}
+
+static void
 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
     struct logio_entry_24xx *logio)
 {
@@ -1347,16 +1376,22 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 
 	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
 	if (IS_FWI2_CAPABLE(ha)) {
-		sense_len = le32_to_cpu(sts24->sense_len);
-		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
-		resid_len = le32_to_cpu(sts24->rsp_residual_count);
-		fw_resid_len = le32_to_cpu(sts24->residual_len);
+		if (scsi_status & SS_SENSE_LEN_VALID)
+			sense_len = le32_to_cpu(sts24->sense_len);
+		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
+		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+			resid_len = le32_to_cpu(sts24->rsp_residual_count);
+		if (comp_status == CS_DATA_UNDERRUN)
+			fw_resid_len = le32_to_cpu(sts24->residual_len);
 		rsp_info = sts24->data;
 		sense_data = sts24->data;
 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
 	} else {
-		sense_len = le16_to_cpu(sts->req_sense_length);
-		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
+		if (scsi_status & SS_SENSE_LEN_VALID)
+			sense_len = le16_to_cpu(sts->req_sense_length);
+		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
 		resid_len = le32_to_cpu(sts->residual_length);
 		rsp_info = sts->rsp_info;
 		sense_data = sts->req_sense_data;
@@ -1423,13 +1458,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			    "scsi(%ld): QUEUE FULL status detected "
 			    "0x%x-0x%x.\n", vha->host_no, comp_status,
 			    scsi_status));
-
-			/* Adjust queue depth for all luns on the port. */
-			if (!ql2xqfulltracking)
-				break;
-			fcport->last_queue_full = jiffies;
-			starget_for_each_device(cp->device->sdev_target,
-			    fcport, qla2x00_adjust_sdev_qdepth_down);
 			break;
 		}
 		if (lscsi_status != SS_CHECK_CONDITION)
@@ -1443,54 +1471,67 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		break;
 
 	case CS_DATA_UNDERRUN:
-		resid = resid_len;
+		DEBUG2(printk(KERN_INFO
+		    "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
+		    "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
+		    vha->host_no, cp->device->id, cp->device->lun, comp_status,
+		    scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
+		    cp->underflow));
+
 		/* Use F/W calculated residual length. */
-		if (IS_FWI2_CAPABLE(ha)) {
-			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
-				lscsi_status = 0;
-			} else if (resid != fw_resid_len) {
-				scsi_status &= ~SS_RESIDUAL_UNDER;
-				lscsi_status = 0;
+		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
+		scsi_set_resid(cp, resid);
+		if (scsi_status & SS_RESIDUAL_UNDER) {
+			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
+				DEBUG2(printk(
+				    "scsi(%ld:%d:%d:%d) Dropped frame(s) "
+				    "detected (%x of %x bytes)...residual "
+				    "length mismatch...retrying command.\n",
+				    vha->host_no, cp->device->channel,
+				    cp->device->id, cp->device->lun, resid,
+				    scsi_bufflen(cp)));
+
+				cp->result = DID_ERROR << 16 | lscsi_status;
+				break;
 			}
-			resid = fw_resid_len;
-		}
 
-		if (scsi_status & SS_RESIDUAL_UNDER) {
-			scsi_set_resid(cp, resid);
-		} else {
-			DEBUG2(printk(KERN_INFO
-			    "scsi(%ld:%d:%d) UNDERRUN status detected "
-			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
-			    "os_underflow=0x%x\n", vha->host_no,
-			    cp->device->id, cp->device->lun, comp_status,
-			    scsi_status, resid_len, resid, cp->cmnd[0],
-			    cp->underflow));
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			    cp->underflow)) {
+				qla_printk(KERN_INFO, ha,
+				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+				    "detected (%x of %x bytes)...returning "
+				    "error status.\n", vha->host_no,
+				    cp->device->channel, cp->device->id,
+				    cp->device->lun, resid, scsi_bufflen(cp));
 
+				cp->result = DID_ERROR << 16;
+				break;
+			}
+		} else if (!lscsi_status) {
+			DEBUG2(printk(
+			    "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
+			    "(%x of %x bytes)...firmware reported underrun..."
+			    "retrying command.\n", vha->host_no,
+			    cp->device->channel, cp->device->id,
+			    cp->device->lun, resid, scsi_bufflen(cp)));
+
+			cp->result = DID_ERROR << 16;
+			break;
 		}
 
+		cp->result = DID_OK << 16 | lscsi_status;
+
 		/*
 		 * Check to see if SCSI Status is non zero. If so report SCSI
 		 * Status.
 		 */
 		if (lscsi_status != 0) {
-			cp->result = DID_OK << 16 | lscsi_status;
-
 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
 				DEBUG2(printk(KERN_INFO
 				    "scsi(%ld): QUEUE FULL status detected "
 				    "0x%x-0x%x.\n", vha->host_no, comp_status,
 				    scsi_status));
-
-				/*
-				 * Adjust queue depth for all luns on the
-				 * port.
-				 */
-				if (!ql2xqfulltracking)
-					break;
-				fcport->last_queue_full = jiffies;
-				starget_for_each_device(
-				    cp->device->sdev_target, fcport,
-				    qla2x00_adjust_sdev_qdepth_down);
 				break;
 			}
 			if (lscsi_status != SS_CHECK_CONDITION)
@@ -1501,42 +1542,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 				break;
 
 			qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
-		} else {
-			/*
-			 * If RISC reports underrun and target does not report
-			 * it then we must have a lost frame, so tell upper
-			 * layer to retry it by reporting an error.
-			 */
-			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
-				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
-				    "frame(s) detected (%x of %x bytes)..."
-				    "retrying command.\n",
-				    vha->host_no, cp->device->channel,
-				    cp->device->id, cp->device->lun, resid,
-				    scsi_bufflen(cp)));
-
-				scsi_set_resid(cp, resid);
-				cp->result = DID_ERROR << 16;
-				break;
-			}
-
-			/* Handle mid-layer underflow */
-			if ((unsigned)(scsi_bufflen(cp) - resid) <
-			    cp->underflow) {
-				qla_printk(KERN_INFO, ha,
-				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
-				    "detected (%x of %x bytes)...returning "
-				    "error status.\n", vha->host_no,
-				    cp->device->channel, cp->device->id,
-				    cp->device->lun, resid,
-				    scsi_bufflen(cp));
-
-				cp->result = DID_ERROR << 16;
-				break;
-			}
-
-			/* Everybody online, looking good... */
-			cp->result = DID_OK << 16;
 		}
 		break;
 
@@ -1841,6 +1846,13 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			qla24xx_logio_entry(vha, rsp->req,
 			    (struct logio_entry_24xx *)pkt);
 			break;
+		case CT_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+			clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
+			break;
+		case ELS_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+			break;
 		default:
 			/* Type Not Supported. */
 			DEBUG4(printk(KERN_WARNING
@@ -1938,12 +1950,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
 	reg = &ha->iobase->isp24;
 	status = 0;
 
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return IRQ_HANDLED;
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->host_status);
 		if (stat & HSRX_RISC_PAUSED) {
-			if (pci_channel_offline(ha->pdev))
+			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
 
 			hccr = RD_REG_DWORD(&reg->hccr);
@@ -2006,6 +2021,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	struct rsp_que *rsp;
 	struct device_reg_24xx __iomem *reg;
 	struct scsi_qla_host *vha;
+	unsigned long flags;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2016,15 +2032,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	ha = rsp->hw;
 	reg = &ha->iobase->isp24;
 
-	spin_lock_irq(&ha->hardware_lock);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	vha = qla25xx_get_host(rsp);
+	vha = pci_get_drvdata(ha->pdev);
 	qla24xx_process_response_queue(vha, rsp);
-	if (!ha->mqenable) {
+	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
-	spin_unlock_irq(&ha->hardware_lock);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -2034,6 +2050,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 {
 	struct qla_hw_data *ha;
 	struct rsp_que *rsp;
+	struct device_reg_24xx __iomem *reg;
+	unsigned long flags;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2043,6 +2061,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 	}
 	ha = rsp->hw;
 
+	/* Clear the interrupt, if enabled, for this response queue */
+	if (rsp->options & ~BIT_6) {
+		reg = &ha->iobase->isp24;
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+		RD_REG_DWORD_RELAXED(&reg->hccr);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
 	return IRQ_HANDLED;
@@ -2059,6 +2085,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 	uint32_t stat;
 	uint32_t hccr;
 	uint16_t mb[4];
+	unsigned long flags;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2070,12 +2097,12 @@ qla24xx_msix_default(int irq, void *dev_id)
 	reg = &ha->iobase->isp24;
 	status = 0;
 
-	spin_lock_irq(&ha->hardware_lock);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
 		if (stat & HSRX_RISC_PAUSED) {
-			if (pci_channel_offline(ha->pdev))
+			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
 
 			hccr = RD_REG_DWORD(&reg->hccr);
@@ -2119,14 +2146,13 @@ qla24xx_msix_default(int irq, void *dev_id)
 		}
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 	} while (0);
-	spin_unlock_irq(&ha->hardware_lock);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 		complete(&ha->mbx_intr_comp);
 	}
-
 	return IRQ_HANDLED;
 }
 
@@ -2246,30 +2272,28 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 
 	/* If possible, enable MSI-X. */
 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
 	    !IS_QLA8432(ha) && !IS_QLA8001(ha))
-		goto skip_msix;
+		goto skip_msi;
+
+	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+	    (ha->pdev->subsystem_device == 0x7040 ||
+	    ha->pdev->subsystem_device == 0x7041 ||
+	    ha->pdev->subsystem_device == 0x1705)) {
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
+		    ha->pdev->subsystem_vendor,
+		    ha->pdev->subsystem_device));
+		goto skip_msi;
+	}
 
 	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
 	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
 		DEBUG2(qla_printk(KERN_WARNING, ha,
 		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
 		    ha->pdev->revision, ha->fw_attributes));
-
 		goto skip_msix;
 	}
 
-	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
-	    (ha->pdev->subsystem_device == 0x7040 ||
-	    ha->pdev->subsystem_device == 0x7041 ||
-	    ha->pdev->subsystem_device == 0x1705)) {
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
-		    ha->pdev->subsystem_vendor,
-		    ha->pdev->subsystem_device));
-
-		goto skip_msi;
-	}
-
 	ret = qla24xx_enable_msix(ha, rsp);
 	if (!ret) {
 		DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2332,10 +2356,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
 
 	if (ha->flags.msix_enabled)
 		qla24xx_disable_msix(ha);
-	else if (ha->flags.inta_enabled) {
+	else if (ha->flags.msi_enabled) {
 		free_irq(ha->pdev->irq, rsp);
 		pci_disable_msi(ha->pdev);
-	}
+	} else
+		free_irq(ha->pdev->irq, rsp);
 }
 
 
@@ -2357,30 +2382,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
 	msix->rsp = rsp;
 	return ret;
 }
-
-struct scsi_qla_host *
-qla25xx_get_host(struct rsp_que *rsp)
-{
-	srb_t *sp;
-	struct qla_hw_data *ha = rsp->hw;
-	struct scsi_qla_host *vha = NULL;
-	struct sts_entry_24xx *pkt;
-	struct req_que *req;
-	uint16_t que;
-	uint32_t handle;
-
-	pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
-	que = MSW(pkt->handle);
-	handle = (uint32_t) LSW(pkt->handle);
-	req = ha->req_q_map[que];
-	if (handle < MAX_OUTSTANDING_COMMANDS) {
-		sp = req->outstanding_cmds[handle];
-		if (sp)
-			return sp->fcport->vha;
-		else
-			goto base_que;
-	}
-base_que:
-	vha = pci_get_drvdata(ha->pdev);
-	return vha;
-}