Diffstat (limited to 'drivers/scsi')
30 files changed, 1267 insertions, 480 deletions
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index ed0e3e55652a..538135783aab 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -646,7 +646,7 @@ static int aha1740_probe (struct device *dev)
 
 static __devexit int aha1740_remove (struct device *dev)
 {
-struct Scsi_Host *shpnt = dev->driver_data;
+struct Scsi_Host *shpnt = dev_get_drvdata(dev);
 struct aha1740_hostdata *host = HOSTDATA (shpnt);
 
 scsi_remove_host(shpnt);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0f829b3b8ab7..75b23317bd26 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -627,19 +627,15 @@ ahd_linux_target_alloc(struct scsi_target *starget)
 starget->id, &tstate);
 
 if ((flags & CFPACKETIZED) == 0) {
-/* Do not negotiate packetized transfers */
-spi_rd_strm(starget) = 0;
-spi_pcomp_en(starget) = 0;
-spi_rti(starget) = 0;
-spi_wr_flow(starget) = 0;
-spi_hold_mcs(starget) = 0;
+/* don't negotiate packetized (IU) transfers */
+spi_max_iu(starget) = 0;
 } else {
 if ((ahd->features & AHD_RTI) == 0)
 spi_rti(starget) = 0;
 }
 
 if ((flags & CFQAS) == 0)
-spi_qas(starget) = 0;
+spi_max_qas(starget) = 0;
 
 /* Transinfo values have been set to BIOS settings */
 spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 820d428ae839..b62b482e55e7 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
 tristate "Broadcom NetXtreme II iSCSI support"
 select SCSI_ISCSI_ATTRS
 select CNIC
+depends on PCI
 ---help---
 This driver supports iSCSI offload for the Broadcom NetXtreme II
 devices.
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 11d2602ae88e..869a11bdccbd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1877,7 +1877,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 unsigned long wait_switch = 0;
 int rc;
 
-vdev->dev.driver_data = NULL;
+dev_set_drvdata(&vdev->dev, NULL);
 
 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 if (!host) {
@@ -1949,7 +1949,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 scsi_scan_host(host);
 }
 
-vdev->dev.driver_data = hostdata;
+dev_set_drvdata(&vdev->dev, hostdata);
 return 0;
 
 add_srp_port_failed:
@@ -1968,7 +1968,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
-struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
 unmap_persist_bufs(hostdata);
 release_event_pool(&hostdata->pool, hostdata);
 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index e2dd6a45924a..d5eaf9727109 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -892,7 +892,7 @@ free_vport:
 
 static int ibmvstgt_remove(struct vio_dev *dev)
 {
-struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
+struct srp_target *target = dev_get_drvdata(&dev->dev);
 struct Scsi_Host *shost = target->shost;
 struct vio_port *vport = target->ldata;
 
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 15e2d132e8b9..2742ae8a3d09 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -135,7 +135,7 @@ int srp_target_alloc(struct srp_target *target, struct device *dev,
 INIT_LIST_HEAD(&target->cmd_queue);
 
 target->dev = dev;
-target->dev->driver_data = target;
+dev_set_drvdata(target->dev, target);
 
 target->srp_iu_size = iu_size;
 target->rx_ring_size = nr;
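Note: the aha1740, ibmvscsi, ibmvstgt and libsrp hunks above all make the same conversion, dropping direct dev->driver_data accesses in favour of the dev_get_drvdata()/dev_set_drvdata() accessors declared in <linux/device.h>. A minimal sketch of the resulting pattern, written against a hypothetical "foo" driver (the foo_* names are illustrative, not from this patch):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_host {
	int instance;
};

static int foo_probe(struct device *dev)
{
	struct foo_host *host = kzalloc(sizeof(*host), GFP_KERNEL);

	if (!host)
		return -ENOMEM;
	/* stash per-device state through the accessor, not dev->driver_data */
	dev_set_drvdata(dev, host);
	return 0;
}

static int foo_remove(struct device *dev)
{
	/* fetch the same pointer back in the teardown path */
	struct foo_host *host = dev_get_drvdata(dev);

	dev_set_drvdata(dev, NULL);
	kfree(host);
	return 0;
}

The calls mirror what the hunks above do with their struct Scsi_Host, ibmvscsi_host_data and srp_target pointers.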
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 540569849099..1877d9811831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -457,10 +457,6 @@ struct lpfc_hba {
 void (*lpfc_scsi_prep_cmnd)
 (struct lpfc_vport *, struct lpfc_scsi_buf *,
 struct lpfc_nodelist *);
-int (*lpfc_scsi_prep_task_mgmt_cmd)
-(struct lpfc_vport *, struct lpfc_scsi_buf *,
-unsigned int, uint8_t);
-
 /* IOCB interface function jump table entries */
 int (*__lpfc_sli_issue_iocb)
 (struct lpfc_hba *, uint32_t,
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d73e677201f8..fc07be5fbce9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3113,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 struct lpfc_hba *phba = vport->phba;
 
+if (phba->sli_rev >= LPFC_SLI_REV4)
+return -EPERM;
+
 if ((off + count) > FF_REG_AREA_SIZE)
 return -ERANGE;
 
@@ -3163,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 struct lpfc_hba *phba = vport->phba;
 
+if (phba->sli_rev >= LPFC_SLI_REV4)
+return -EPERM;
+
 if (off > FF_REG_AREA_SIZE)
 return -ERANGE;
 
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1dbccfd3d022..0e532f072eb3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1732,7 +1732,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 uint32_t *ptr, str[4];
 uint8_t *fwname;
 
-if (vp->rev.rBit) {
+if (phba->sli_rev == LPFC_SLI_REV4)
+sprintf(fwrevision, "%s", vp->rev.opFwName);
+else if (vp->rev.rBit) {
 if (psli->sli_flag & LPFC_SLI_ACTIVE)
 rev = vp->rev.sli2FwRev;
 else
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2b02b1fb39a0..8d0f0de76b63 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -53,8 +53,7 @@
 * debugfs interface
 *
 * To access this interface the user should:
-* # mkdir /debug
-* # mount -t debugfs none /debug
+* # mount -t debugfs none /sys/kernel/debug
 *
 * The lpfc debugfs directory hierarchy is:
 * lpfc/lpfcX/vportY
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6bdeb14878a2..f72fdf23bf1b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -168,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 if (elsiocb == NULL)
 return NULL;
 
+/*
+* If this command is for fabric controller and HBA running
+* in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
+*/
+if ((did == Fabric_DID) &&
+bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
+((elscmd == ELS_CMD_FLOGI) ||
+(elscmd == ELS_CMD_FDISC) ||
+(elscmd == ELS_CMD_LOGO)))
+elsiocb->iocb_flag |= LPFC_FIP_ELS;
+else
+elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
+
 icmd = &elsiocb->iocb;
 
 /* fill in BDEs for command */
@@ -6108,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 icmd->un.elsreq64.myID = 0;
 icmd->un.elsreq64.fl = 1;
 
-/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
-icmd->ulpCt_h = 1;
-icmd->ulpCt_l = 0;
+if (phba->sli_rev == LPFC_SLI_REV4) {
+/* FDISC needs to be 1 for WQE VPI */
+elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
+elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
+/* Set the ulpContext to the vpi */
+elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+} else {
+/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
+icmd->ulpCt_h = 1;
+icmd->ulpCt_l = 0;
+}
 
 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
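Note: the SLI4 branch added in the FDISC hunk above packs a two-bit context-tag value (SLI4_CT_VPI) into the two single-bit IOCB fields ulpCt_h and ulpCt_l, taking bit 1 and bit 0 respectively. A small stand-alone illustration of that split and its inverse; the actual value of SLI4_CT_VPI is defined elsewhere in the driver and is not assumed here:

#include <assert.h>

int main(void)
{
	unsigned int ct;

	for (ct = 0; ct < 4; ct++) {
		unsigned int ct_h = (ct >> 1) & 1;	/* what ulpCt_h receives */
		unsigned int ct_l = ct & 1;		/* what ulpCt_l receives */

		/* recombining the two bits always yields the original 2-bit code */
		assert(((ct_h << 1) | ct_l) == ct);
	}
	return 0;
}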
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 35c41ae75be2..ed46b24a3380 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1197,6 +1197,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 {
 struct lpfc_fcf_conn_entry *conn_entry;
 
+/* If FCF not available return 0 */
+if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
+!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
+return 0;
+
 if (!phba->cfg_enable_fip) {
 *boot_flag = 0;
 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
@@ -1216,6 +1221,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 *boot_flag = 0;
 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
 new_fcf_record);
+
+/*
+* When there are no FCF connect entries, use driver's default
+* addressing mode - FPMA.
+*/
+if (*addr_mode & LPFC_FCF_FPMA)
+*addr_mode = LPFC_FCF_FPMA;
+
 *vlan_id = 0xFFFF;
 return 1;
 }
@@ -1241,6 +1254,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 }
 
 /*
+* If connection record does not support any addressing mode,
+* skip the FCF record.
+*/
+if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
+& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
+continue;
+
+/*
 * Check if the connection record specifies a required
 * addressing mode.
 */
@@ -1272,6 +1293,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 else
 *boot_flag = 0;
 
+/*
+* If user did not specify any addressing mode, or if the
+* prefered addressing mode specified by user is not supported
+* by FCF, allow fabric to pick the addressing mode.
+*/
 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
 new_fcf_record);
 /*
@@ -1297,12 +1323,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
 (*addr_mode & LPFC_FCF_FPMA))
 *addr_mode = LPFC_FCF_FPMA;
-/*
-* If user did not specify any addressing mode, use FPMA if
-* possible else use SPMA.
-*/
-else if (*addr_mode & LPFC_FCF_FPMA)
-*addr_mode = LPFC_FCF_FPMA;
 
 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
 *vlan_id = conn_entry->conn_rec.vlan_tag;
@@ -1864,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 vport->fc_flag &= ~FC_BYPASSED_MODE;
 spin_unlock_irq(shost->host_lock);
 
-if (((phba->fc_eventTag + 1) < la->eventTag) ||
+if ((phba->fc_eventTag < la->eventTag) ||
 (phba->fc_eventTag == la->eventTag)) {
 phba->fc_stat.LinkMultiEvent++;
 if (la->attType == AT_LINK_UP)
@@ -2925,6 +2945,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 lpfc_no_rpi(phba, ndlp);
 ndlp->nlp_rpi = 0;
 ndlp->nlp_flag &= ~NLP_RPI_VALID;
+ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 return 1;
 }
 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 02aa016b93e9..8a3a026667e4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1183,7 +1183,6 @@ typedef struct {
 #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
 #define PCI_VENDOR_ID_SERVERENGINE 0x19a2
 #define PCI_DEVICE_ID_TIGERSHARK 0x0704
-#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
 
 #define JEDEC_ID_ADDRESS 0x0080001c
 #define FIREFLY_JEDEC_ID 0x1ACC
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 39c34b3ad29d..2995d128f07f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -422,9 +422,9 @@ struct lpfc_wqe_generic{
 #define lpfc_wqe_gen_pri_WORD word10
 uint32_t word11;
 #define lpfc_wqe_gen_cq_id_SHIFT 16
-#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
+#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
 #define lpfc_wqe_gen_cq_id_WORD word11
-#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
+#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
 #define lpfc_wqe_gen_wqec_SHIFT 7
 #define lpfc_wqe_gen_wqec_MASK 0x00000001
 #define lpfc_wqe_gen_wqec_WORD word11
@@ -1128,7 +1128,7 @@ struct fcf_record {
 #define lpfc_fcf_record_mac_5_WORD word4
 #define lpfc_fcf_record_fcf_avail_SHIFT 16
 #define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
-#define lpfc_fcf_record_fc_avail_WORD word4
+#define lpfc_fcf_record_fcf_avail_WORD word4
 #define lpfc_fcf_record_mac_addr_prov_SHIFT 24
 #define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
 #define lpfc_fcf_record_mac_addr_prov_WORD word4
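Note: the _SHIFT/_MASK/_WORD triplets edited above are consumed by lpfc's bf_set()/bf_get() token-pasting accessors; that is why widening lpfc_wqe_gen_cq_id_MASK to 0x0000FFFF and renaming lpfc_fcf_record_fc_avail_WORD to lpfc_fcf_record_fcf_avail_WORD (so the bf_get(lpfc_fcf_record_fcf_avail, ...) calls added in lpfc_hbadisc.c resolve) matter. The sketch below shows how accessors of that style behave; the demo_* names are made up for illustration and this is not the exact lpfc macro text:

#include <stdint.h>
#include <stdio.h>

/* Accessors in the style of bf_set()/bf_get(): a field "name" is described
 * by companion name##_SHIFT, name##_MASK and name##_WORD macros.
 */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* Hypothetical layout mirroring the cq_id field patched above. */
struct demo_wqe {
	uint32_t word11;
};
#define demo_cq_id_SHIFT	16
#define demo_cq_id_MASK		0x0000FFFFu	/* the widened 16-bit mask */
#define demo_cq_id_WORD		word11

int main(void)
{
	struct demo_wqe wqe = { 0 };

	bf_set(demo_cq_id, &wqe, 0xffff);	/* e.g. the new default CQ id */
	printf("cq_id = 0x%x\n", bf_get(demo_cq_id, &wqe));
	return 0;
}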
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2f5907f92eea..fc67cc65c63b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -428,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
 phba->cfg_hba_queue_depth =
-mb->un.varRdConfig.max_xri + 1;
+(mb->un.varRdConfig.max_xri + 1) -
+lpfc_sli4_get_els_iocb_cnt(phba);
 
 phba->lmt = mb->un.varRdConfig.lmt;
 
@@ -1646,10 +1647,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 oneConnect = 1;
 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
 break;
-case PCI_DEVICE_ID_TIGERSHARK_S:
-oneConnect = 1;
-m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
-break;
 default:
 m = (typeof(m)){ NULL };
 break;
@@ -3543,6 +3540,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
 /* Free the allocated rpi headers. */
 lpfc_sli4_remove_rpi_hdrs(phba);
+lpfc_sli4_remove_rpis(phba);
 
 /* Free the ELS sgl list */
 lpfc_free_active_sgl(phba);
@@ -7184,16 +7182,19 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 {
 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
 
-if (max_xri <= 100)
-return 4;
-else if (max_xri <= 256)
-return 8;
-else if (max_xri <= 512)
-return 16;
-else if (max_xri <= 1024)
-return 32;
-else
-return 48;
+if (phba->sli_rev == LPFC_SLI_REV4) {
+if (max_xri <= 100)
+return 4;
+else if (max_xri <= 256)
+return 8;
+else if (max_xri <= 512)
+return 16;
+else if (max_xri <= 1024)
+return 32;
+else
+return 48;
+} else
+return 0;
 }
 
 /**
@@ -7642,7 +7643,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 
 switch (dev_id) {
 case PCI_DEVICE_ID_TIGERSHARK:
-case PCI_DEVICE_ID_TIGERSHARK_S:
 rc = lpfc_pci_probe_one_s4(pdev, pid);
 break;
 default:
@@ -7941,8 +7941,6 @@ static struct pci_device_id lpfc_id_table[] = {
 PCI_ANY_ID, PCI_ANY_ID, },
 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
 PCI_ANY_ID, PCI_ANY_ID, },
-{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
-PCI_ANY_ID, PCI_ANY_ID, },
 { 0 }
 };
 
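Note: the lpfc_config_port_post() hunk above now subtracts lpfc_sli4_get_els_iocb_cnt() from the (max_xri + 1) queue depth, and the later hunk in the same file makes that helper return 0 unless sli_rev is LPFC_SLI_REV4. Worked through the ladder shown there, an SLI4 port reporting max_xri = 512 has 16 IOCBs reserved for ELS, leaving a host queue depth of (512 + 1) - 16 = 497, while an SLI3 port keeps the unreduced max_xri + 1.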
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b9b451c09010..3423571dd1b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1631,6 +1631,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
 /* In case of malloc fails, proceed with whatever we have */
 if (!viraddr)
 break;
+memset(viraddr, 0, PAGE_SIZE);
 mbox->sge_array->addr[pagen] = viraddr;
 /* Keep the first page for later sub-header construction */
 if (pagen == 0)
@@ -1715,8 +1716,10 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
 /* Set up host requested features. */
 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
 
-/* Virtual fabrics and FIPs are not supported yet. */
-bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+if (phba->cfg_enable_fip)
+bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+else
+bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
 
 /* Enable DIF (block guard) only if configured to do so. */
 if (phba->cfg_enable_bg)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 09f659f77bb3..3e74136f1ede 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -497,7 +497,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
 else
 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
-if ((ndlp->nlp_type & NLP_FABRIC) &&
+if ((ndlp->nlp_DID == Fabric_DID) &&
 vport->port_type == LPFC_NPIV_PORT) {
 lpfc_linkdown_port(vport);
 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7991ba1980ae..da59c4f0168f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -116,6 +116,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
 }
 
 /**
+* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
+* @phba: Pointer to HBA object.
+* @lpfc_cmd: lpfc scsi command object pointer.
+*
+* This function is called from the lpfc_prep_task_mgmt_cmd function to
+* set the last bit in the response sge entry.
+**/
+static void
+lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
+struct lpfc_scsi_buf *lpfc_cmd)
+{
+struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+if (sgl) {
+sgl += 1;
+sgl->word2 = le32_to_cpu(sgl->word2);
+bf_set(lpfc_sli4_sge_last, sgl, 1);
+sgl->word2 = cpu_to_le32(sgl->word2);
+}
+}
+
+/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
@@ -1978,7 +1999,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
 }
 
 /**
-* lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
+* lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
@@ -1986,7 +2007,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
 * field of @lpfc_cmd for device with SLI-3 interface spec.
 **/
 static void
-lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
 /*
 * There are only two special cases to consider. (1) the scsi command
@@ -2003,36 +2024,6 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 }
 
 /**
-* lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
-* @phba: The Hba for which this call is being executed.
-* @psb: The scsi buffer which is going to be un-mapped.
-*
-* This routine does DMA un-mapping of scatter gather list of scsi command
-* field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
-* remove the sgl for this scsi buffer then we will do it here. For now
-* we should be able to just call the sli3 unprep routine.
-**/
-static void
-lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
-{
-lpfc_scsi_unprep_dma_buf_s3(phba, psb);
-}
-
-/**
-* lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
-* @phba: The Hba for which this call is being executed.
-* @psb: The scsi buffer which is going to be un-mapped.
-*
-* This routine does DMA un-mapping of scatter gather list of scsi command
-* field of @lpfc_cmd for device with SLI-4 interface spec.
-**/
-static void
-lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
-{
-phba->lpfc_scsi_unprep_dma_buf(phba, psb);
-}
-
-/**
 * lpfc_handler_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -2461,7 +2452,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
 }
 
 /**
-* lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev
+* lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
@@ -2470,7 +2461,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
 * to transfer for device with SLI3 interface spec.
 **/
 static void
-lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 struct lpfc_nodelist *pnode)
 {
 struct lpfc_hba *phba = vport->phba;
@@ -2558,46 +2549,7 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 }
 
 /**
-* lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev
-* @vport: The virtual port for which this call is being executed.
-* @lpfc_cmd: The scsi command which needs to send.
-* @pnode: Pointer to lpfc_nodelist.
-*
-* This routine initializes fcp_cmnd and iocb data structure from scsi command
-* to transfer for device with SLI4 interface spec.
-**/
-static void
-lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
-struct lpfc_nodelist *pnode)
-{
-/*
-* The prep cmnd routines do not touch the sgl or its
-* entries. We may not have to do anything different.
-* I will leave this function in place until we can
-* run some IO through the driver and determine if changes
-* are needed.
-*/
-return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
-}
-
-/**
-* lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
-* @vport: The virtual port for which this call is being executed.
-* @lpfc_cmd: The scsi command which needs to send.
-* @pnode: Pointer to lpfc_nodelist.
-*
-* This routine wraps the actual convert SCSI cmnd function pointer from
-* the lpfc_hba struct.
-**/
-static inline void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
-struct lpfc_nodelist *pnode)
-{
-vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
-}
-
-/**
-* lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
+* lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
@@ -2611,7 +2563,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 * 1 - Success
 **/
 static int
-lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 struct lpfc_scsi_buf *lpfc_cmd,
 unsigned int lun,
 uint8_t task_mgmt_cmd)
@@ -2653,68 +2605,13 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
 * The driver will provide the timeout mechanism.
 */
 piocb->ulpTimeout = 0;
-} else {
+} else
 piocb->ulpTimeout = lpfc_cmd->timeout;
-}
-
-return 1;
-}
-
-/**
-* lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
-* @vport: The virtual port for which this call is being executed.
-* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
-* @lun: Logical unit number.
-* @task_mgmt_cmd: SCSI task management command.
-*
-* This routine creates FCP information unit corresponding to @task_mgmt_cmd
-* for device with SLI-4 interface spec.
-*
-* Return codes:
-* 0 - Error
-* 1 - Success
-**/
-static int
-lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
-struct lpfc_scsi_buf *lpfc_cmd,
-unsigned int lun,
-uint8_t task_mgmt_cmd)
-{
-/*
-* The prep cmnd routines do not touch the sgl or its
-* entries. We may not have to do anything different.
-* I will leave this function in place until we can
-* run some IO through the driver and determine if changes
-* are needed.
-*/
-return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
-task_mgmt_cmd);
-}
 
-/**
-* lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info
-* @vport: The virtual port for which this call is being executed.
-* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
-* @lun: Logical unit number.
-* @task_mgmt_cmd: SCSI task management command.
-*
-* This routine wraps the actual convert SCSI TM to FCP information unit
-* function pointer from the lpfc_hba struct.
-*
-* Return codes:
-* 0 - Error
-* 1 - Success
-**/
-static inline int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
-struct lpfc_scsi_buf *lpfc_cmd,
-unsigned int lun,
-uint8_t task_mgmt_cmd)
-{
-struct lpfc_hba *phba = vport->phba;
+if (vport->phba->sli_rev == LPFC_SLI_REV4)
+lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
 
-return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
-task_mgmt_cmd);
+return 1;
 }
 
 /**
@@ -2730,23 +2627,19 @@ int
 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 {
 
+phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
+phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
+phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
+
 switch (dev_grp) {
 case LPFC_PCI_DEV_LP:
 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
-phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
-phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
-phba->lpfc_scsi_prep_task_mgmt_cmd =
-lpfc_scsi_prep_task_mgmt_cmd_s3;
 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 break;
 case LPFC_PCI_DEV_OC:
 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
-phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
-phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
-phba->lpfc_scsi_prep_task_mgmt_cmd =
-lpfc_scsi_prep_task_mgmt_cmd_s4;
 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
 break;
 default:
@@ -2783,72 +2676,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
 }
 
 /**
-* lpfc_scsi_tgt_reset - Target reset handler
-* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
-* @vport: The virtual port for which this call is being executed.
-* @tgt_id: Target ID.
-* @lun: Lun number.
-* @rdata: Pointer to lpfc_rport_data.
-*
-* This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
-*
-* Return Code:
-* 0x2003 - Error
-* 0x2002 - Success.
-**/
-static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
-unsigned tgt_id, unsigned int lun,
-struct lpfc_rport_data *rdata)
-{
-struct lpfc_hba *phba = vport->phba;
-struct lpfc_iocbq *iocbq;
-struct lpfc_iocbq *iocbqrsp;
-int ret;
-int status;
-
-if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
-return FAILED;
-
-lpfc_cmd->rdata = rdata;
-status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
-FCP_TARGET_RESET);
-if (!status)
-return FAILED;
-
-iocbq = &lpfc_cmd->cur_iocbq;
-iocbqrsp = lpfc_sli_get_iocbq(phba);
-
-if (!iocbqrsp)
-return FAILED;
-
-/* Issue Target Reset to TGT <num> */
-lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-"0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
-tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
-status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
-iocbq, iocbqrsp, lpfc_cmd->timeout);
-if (status != IOCB_SUCCESS) {
-if (status == IOCB_TIMEDOUT) {
-iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
-ret = TIMEOUT_ERROR;
-} else
-ret = FAILED;
-lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-} else {
-ret = SUCCESS;
-lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
-lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
-if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
-(lpfc_cmd->result & IOERR_DRVR_MASK))
-lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-}
-
-lpfc_sli_release_iocbq(phba, iocbqrsp);
-return ret;
-}
-
-/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
@@ -3228,156 +3055,334 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 return ret;
 }
 
+static char *
+lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
+{
+switch (task_mgmt_cmd) {
+case FCP_ABORT_TASK_SET:
+return "ABORT_TASK_SET";
+case FCP_CLEAR_TASK_SET:
+return "FCP_CLEAR_TASK_SET";
+case FCP_BUS_RESET:
+return "FCP_BUS_RESET";
+case FCP_LUN_RESET:
+return "FCP_LUN_RESET";
+case FCP_TARGET_RESET:
+return "FCP_TARGET_RESET";
+case FCP_CLEAR_ACA:
+return "FCP_CLEAR_ACA";
+case FCP_TERMINATE_TASK:
+return "FCP_TERMINATE_TASK";
+default:
+return "unknown";
+}
+}
+
 /**
-* lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
-* @cmnd: Pointer to scsi_cmnd data structure.
+* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
+* @vport: The virtual port for which this call is being executed.
+* @rdata: Pointer to remote port local data
+* @tgt_id: Target ID of remote device.
+* @lun_id: Lun number for the TMF
+* @task_mgmt_cmd: type of TMF to send
 *
-* This routine does a device reset by sending a TARGET_RESET task management
-* command.
+* This routine builds and sends a TMF (SCSI Task Mgmt Function) to
+* a remote port.
 *
-* Return code :
+* Return Code:
 * 0x2003 - Error
-* 0x2002 - Success
+* 0x2002 - Success.
 **/
 static int
-lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
+unsigned tgt_id, unsigned int lun_id,
+uint8_t task_mgmt_cmd)
 {
-struct Scsi_Host *shost = cmnd->device->host;
-struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 struct lpfc_hba *phba = vport->phba;
 struct lpfc_scsi_buf *lpfc_cmd;
-struct lpfc_iocbq *iocbq, *iocbqrsp;
-struct lpfc_rport_data *rdata = cmnd->device->hostdata;
-struct lpfc_nodelist *pnode = rdata->pnode;
-unsigned long later;
-int ret = SUCCESS;
+struct lpfc_iocbq *iocbq;
+struct lpfc_iocbq *iocbqrsp;
+int ret;
 int status;
-int cnt;
-struct lpfc_scsi_event_header scsi_event;
 
-lpfc_block_error_handler(cmnd);
-/*
-* If target is not in a MAPPED state, delay the reset until
-* target is rediscovered or devloss timeout expires.
-*/
-later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
-while (time_after(later, jiffies)) {
-if (!pnode || !NLP_CHK_NODE_ACT(pnode))
-return FAILED;
-if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
-break;
-schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-rdata = cmnd->device->hostdata;
-if (!rdata)
-break;
-pnode = rdata->pnode;
-}
-
-scsi_event.event_type = FC_REG_SCSI_EVENT;
-scsi_event.subcategory = LPFC_EVENT_TGTRESET;
-scsi_event.lun = 0;
-memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
-memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
-
-fc_host_post_vendor_event(shost,
-fc_get_event_number(),
-sizeof(scsi_event),
-(char *)&scsi_event,
-LPFC_NL_VENDOR_ID);
-
-if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-"0721 LUN Reset rport "
-"failure: msec x%x rdata x%p\n",
-jiffies_to_msecs(jiffies - later), rdata);
+if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 return FAILED;
-}
+
 lpfc_cmd = lpfc_get_scsi_buf(phba);
 if (lpfc_cmd == NULL)
 return FAILED;
 lpfc_cmd->timeout = 60;
 lpfc_cmd->rdata = rdata;
 
-status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
-cmnd->device->lun,
-FCP_TARGET_RESET);
+status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+task_mgmt_cmd);
 if (!status) {
 lpfc_release_scsi_buf(phba, lpfc_cmd);
 return FAILED;
 }
-iocbq = &lpfc_cmd->cur_iocbq;
 
-/* get a buffer for this IOCB command response */
+iocbq = &lpfc_cmd->cur_iocbq;
 iocbqrsp = lpfc_sli_get_iocbq(phba);
 if (iocbqrsp == NULL) {
 lpfc_release_scsi_buf(phba, lpfc_cmd);
 return FAILED;
 }
+
 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-"0703 Issue target reset to TGT %d LUN %d "
-"rpi x%x nlp_flag x%x\n", cmnd->device->id,
-cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
+"0702 Issue %s to TGT %d LUN %d "
+"rpi x%x nlp_flag x%x\n",
+lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
+rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
+
 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
 iocbq, iocbqrsp, lpfc_cmd->timeout);
-if (status == IOCB_TIMEDOUT) {
-iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
-ret = TIMEOUT_ERROR;
-} else {
-if (status != IOCB_SUCCESS)
+if (status != IOCB_SUCCESS) {
+if (status == IOCB_TIMEDOUT) {
+iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+ret = TIMEOUT_ERROR;
+} else
 ret = FAILED;
-lpfc_release_scsi_buf(phba, lpfc_cmd);
-}
-lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-"0713 SCSI layer issued device reset (%d, %d) "
-"return x%x status x%x result x%x\n",
-cmnd->device->id, cmnd->device->lun, ret,
-iocbqrsp->iocb.ulpStatus,
+lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+"0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
+lpfc_taskmgmt_name(task_mgmt_cmd),
+tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
 iocbqrsp->iocb.un.ulpWord[4]);
+} else
+ret = SUCCESS;
+
 lpfc_sli_release_iocbq(phba, iocbqrsp);
-cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
-LPFC_CTX_TGT);
+
+if (ret != TIMEOUT_ERROR)
+lpfc_release_scsi_buf(phba, lpfc_cmd);
+
+return ret;
+}
+
+/**
+* lpfc_chk_tgt_mapped -
+* @vport: The virtual port to check on
+* @cmnd: Pointer to scsi_cmnd data structure.
+*
+* This routine delays until the scsi target (aka rport) for the
+* command exists (is present and logged in) or we declare it non-existent.
+*
+* Return code :
+* 0x2003 - Error
+* 0x2002 - Success
+**/
+static int
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+{
+struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+struct lpfc_nodelist *pnode = rdata->pnode;
+unsigned long later;
+
+/*
+* If target is not in a MAPPED state, delay until
+* target is rediscovered or devloss timeout expires.
+*/
+later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+while (time_after(later, jiffies)) {
+if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+return FAILED;
+if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
+return SUCCESS;
+schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+rdata = cmnd->device->hostdata;
+if (!rdata)
+return FAILED;
+pnode = rdata->pnode;
+}
+if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
+(pnode->nlp_state != NLP_STE_MAPPED_NODE))
+return FAILED;
+return SUCCESS;
+}
+
+/**
+* lpfc_reset_flush_io_context -
+* @vport: The virtual port (scsi_host) for the flush context
+* @tgt_id: If aborting by Target contect - specifies the target id
+* @lun_id: If aborting by Lun context - specifies the lun id
+* @context: specifies the context level to flush at.
+*
+* After a reset condition via TMF, we need to flush orphaned i/o
+* contexts from the adapter. This routine aborts any contexts
+* outstanding, then waits for their completions. The wait is
+* bounded by devloss_tmo though.
+*
+* Return code :
+* 0x2003 - Error
+* 0x2002 - Success
+**/
+static int
+lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+uint64_t lun_id, lpfc_ctx_cmd context)
+{
+struct lpfc_hba *phba = vport->phba;
+unsigned long later;
+int cnt;
+
+cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3339 | if (cnt) | 3228 | if (cnt) |
3340 | lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], | 3229 | lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], |
3341 | cmnd->device->id, cmnd->device->lun, | 3230 | tgt_id, lun_id, context); |
3342 | LPFC_CTX_TGT); | ||
3343 | later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; | 3231 | later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; |
3344 | while (time_after(later, jiffies) && cnt) { | 3232 | while (time_after(later, jiffies) && cnt) { |
3345 | schedule_timeout_uninterruptible(msecs_to_jiffies(20)); | 3233 | schedule_timeout_uninterruptible(msecs_to_jiffies(20)); |
3346 | cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, | 3234 | cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); |
3347 | cmnd->device->lun, LPFC_CTX_TGT); | ||
3348 | } | 3235 | } |
3349 | if (cnt) { | 3236 | if (cnt) { |
3350 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 3237 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
3351 | "0719 device reset I/O flush failure: " | 3238 | "0724 I/O flush failure for context %s : cnt x%x\n", |
3352 | "cnt x%x\n", cnt); | 3239 | ((context == LPFC_CTX_LUN) ? "LUN" : |
3353 | ret = FAILED; | 3240 | ((context == LPFC_CTX_TGT) ? "TGT" : |
3241 | ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))), | ||
3242 | cnt); | ||
3243 | return FAILED; | ||
3354 | } | 3244 | } |
3355 | return ret; | 3245 | return SUCCESS; |
3246 | } | ||
3247 | |||
3248 | /** | ||
3249 | * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point | ||
3250 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
3251 | * | ||
3252 | * This routine does a device reset by sending a LUN_RESET task management | ||
3253 | * command. | ||
3254 | * | ||
3255 | * Return code : | ||
3256 | * 0x2003 - Error | ||
3257 | * 0x2002 - Success | ||
3258 | **/ | ||
3259 | static int | ||
3260 | lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | ||
3261 | { | ||
3262 | struct Scsi_Host *shost = cmnd->device->host; | ||
3263 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
3264 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | ||
3265 | struct lpfc_nodelist *pnode = rdata->pnode; | ||
3266 | unsigned tgt_id = cmnd->device->id; | ||
3267 | unsigned int lun_id = cmnd->device->lun; | ||
3268 | struct lpfc_scsi_event_header scsi_event; | ||
3269 | int status; | ||
3270 | |||
3271 | lpfc_block_error_handler(cmnd); | ||
3272 | |||
3273 | status = lpfc_chk_tgt_mapped(vport, cmnd); | ||
3274 | if (status == FAILED) { | ||
3275 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
3276 | "0721 Device Reset rport failure: rdata x%p\n", rdata); | ||
3277 | return FAILED; | ||
3278 | } | ||
3279 | |||
3280 | scsi_event.event_type = FC_REG_SCSI_EVENT; | ||
3281 | scsi_event.subcategory = LPFC_EVENT_LUNRESET; | ||
3282 | scsi_event.lun = lun_id; | ||
3283 | memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); | ||
3284 | memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); | ||
3285 | |||
3286 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
3287 | sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); | ||
3288 | |||
3289 | status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, | ||
3290 | FCP_LUN_RESET); | ||
3291 | |||
3292 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
3293 | "0713 SCSI layer issued Device Reset (%d, %d) " | ||
3294 | "return x%x\n", tgt_id, lun_id, status); | ||
3295 | |||
3296 | /* | ||
3297 | * We have to clean up i/o as they may be orphaned by the TMF; | ||
3298 | * or if the TMF failed, they may be in an indeterminate state. | ||
3299 | * So, continue on. | ||
3300 | * We will report success if all the i/o aborts successfully. | ||
3301 | */ | ||
3302 | status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, | ||
3303 | LPFC_CTX_LUN); | ||
3304 | return status; | ||
3305 | } | ||
3306 | |||
3307 | /** | ||
3308 | * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point | ||
3309 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
3310 | * | ||
3311 | * This routine does a target reset by sending a TARGET_RESET task management | ||
3312 | * command. | ||
3313 | * | ||
3314 | * Return code : | ||
3315 | * 0x2003 - Error | ||
3316 | * 0x2002 - Success | ||
3317 | **/ | ||
3318 | static int | ||
3319 | lpfc_target_reset_handler(struct scsi_cmnd *cmnd) | ||
3320 | { | ||
3321 | struct Scsi_Host *shost = cmnd->device->host; | ||
3322 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
3323 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | ||
3324 | struct lpfc_nodelist *pnode = rdata->pnode; | ||
3325 | unsigned tgt_id = cmnd->device->id; | ||
3326 | unsigned int lun_id = cmnd->device->lun; | ||
3327 | struct lpfc_scsi_event_header scsi_event; | ||
3328 | int status; | ||
3329 | |||
3330 | lpfc_block_error_handler(cmnd); | ||
3331 | |||
3332 | status = lpfc_chk_tgt_mapped(vport, cmnd); | ||
3333 | if (status == FAILED) { | ||
3334 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
3335 | "0722 Target Reset rport failure: rdata x%p\n", rdata); | ||
3336 | return FAILED; | ||
3337 | } | ||
3338 | |||
3339 | scsi_event.event_type = FC_REG_SCSI_EVENT; | ||
3340 | scsi_event.subcategory = LPFC_EVENT_TGTRESET; | ||
3341 | scsi_event.lun = 0; | ||
3342 | memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); | ||
3343 | memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); | ||
3344 | |||
3345 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
3346 | sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); | ||
3347 | |||
3348 | status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, | ||
3349 | FCP_TARGET_RESET); | ||
3350 | |||
3351 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
3352 | "0723 SCSI layer issued Target Reset (%d, %d) " | ||
3353 | "return x%x\n", tgt_id, lun_id, status); | ||
3354 | |||
3355 | /* | ||
3356 | * We have to clean up i/o as they may be orphaned by the TMF; | ||
3357 | * or if the TMF failed, they may be in an indeterminate state. | ||
3358 | * So, continue on. | ||
3359 | * We will report success if all the i/o aborts successfully. | ||
3360 | */ | ||
3361 | status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, | ||
3362 | LPFC_CTX_TGT); | ||
3363 | return status; | ||
3356 | } | 3364 | } |
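Both eh handlers above share one shape: block the error handler, verify the rport is mapped, post the FC vendor event, send the task management function, then report success only once the affected I/O context drains. A condensed sketch of that common flow (event posting elided; tmf would be FCP_LUN_RESET or FCP_TARGET_RESET, ctx LPFC_CTX_LUN or LPFC_CTX_TGT; types simplified, not the literal driver code):

	/* Sketch of the shared device/target reset flow. */
	static int lpfc_reset_flow_sketch(struct scsi_cmnd *cmnd, uint8_t tmf, int ctx)
	{
		struct Scsi_Host *shost = cmnd->device->host;
		struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
		struct lpfc_rport_data *rdata = cmnd->device->hostdata;

		lpfc_block_error_handler(cmnd);
		if (lpfc_chk_tgt_mapped(vport, cmnd) == FAILED)
			return FAILED;		/* rport not in a usable state */

		/* ... build and post the FC_REG_SCSI_EVENT vendor event here ... */

		/* send the TMF; the real handlers log its return value */
		lpfc_send_taskmgmt(vport, rdata, cmnd->device->id,
				   cmnd->device->lun, tmf);

		/* report success only once the affected I/O context drains */
		return lpfc_reset_flush_io_context(vport, cmnd->device->id,
						   cmnd->device->lun, ctx);
	}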
3357 | 3365 | ||
3358 | /** | 3366 | /** |
3359 | * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point | 3367 | * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point |
3360 | * @cmnd: Pointer to scsi_cmnd data structure. | 3368 | * @cmnd: Pointer to scsi_cmnd data structure. |
3361 | * | 3369 | * |
3362 | * This routine does target reset to all target on @cmnd->device->host. | 3370 | * This routine does target reset to all targets on @cmnd->device->host. |
3371 | * This emulates Parallel SCSI Bus Reset Semantics. | ||
3363 | * | 3372 | * |
3364 | * Return Code: | 3373 | * Return code : |
3365 | * 0x2003 - Error | 3374 | * 0x2003 - Error |
3366 | * 0x2002 - Success | 3375 | * 0x2002 - Success |
3367 | **/ | 3376 | **/ |
3368 | static int | 3377 | static int |
3369 | lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | 3378 | lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) |
3370 | { | 3379 | { |
3371 | struct Scsi_Host *shost = cmnd->device->host; | 3380 | struct Scsi_Host *shost = cmnd->device->host; |
3372 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 3381 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
3373 | struct lpfc_hba *phba = vport->phba; | ||
3374 | struct lpfc_nodelist *ndlp = NULL; | 3382 | struct lpfc_nodelist *ndlp = NULL; |
3375 | int match; | ||
3376 | int ret = SUCCESS, status = SUCCESS, i; | ||
3377 | int cnt; | ||
3378 | struct lpfc_scsi_buf * lpfc_cmd; | ||
3379 | unsigned long later; | ||
3380 | struct lpfc_scsi_event_header scsi_event; | 3383 | struct lpfc_scsi_event_header scsi_event; |
3384 | int match; | ||
3385 | int ret = SUCCESS, status, i; | ||
3381 | 3386 | ||
3382 | scsi_event.event_type = FC_REG_SCSI_EVENT; | 3387 | scsi_event.event_type = FC_REG_SCSI_EVENT; |
3383 | scsi_event.subcategory = LPFC_EVENT_BUSRESET; | 3388 | scsi_event.subcategory = LPFC_EVENT_BUSRESET; |
@@ -3385,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
3385 | memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); | 3390 | memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); |
3386 | memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); | 3391 | memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); |
3387 | 3392 | ||
3388 | fc_host_post_vendor_event(shost, | 3393 | fc_host_post_vendor_event(shost, fc_get_event_number(), |
3389 | fc_get_event_number(), | 3394 | sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); |
3390 | sizeof(scsi_event), | ||
3391 | (char *)&scsi_event, | ||
3392 | LPFC_NL_VENDOR_ID); | ||
3393 | 3395 | ||
3394 | lpfc_block_error_handler(cmnd); | 3396 | lpfc_block_error_handler(cmnd); |
3397 | |||
3395 | /* | 3398 | /* |
3396 | * Since the driver manages a single bus device, reset all | 3399 | * Since the driver manages a single bus device, reset all |
3397 | * targets known to the driver. Should any target reset | 3400 | * targets known to the driver. Should any target reset |
@@ -3414,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
3414 | spin_unlock_irq(shost->host_lock); | 3417 | spin_unlock_irq(shost->host_lock); |
3415 | if (!match) | 3418 | if (!match) |
3416 | continue; | 3419 | continue; |
3417 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 3420 | |
3418 | if (lpfc_cmd) { | 3421 | status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data, |
3419 | lpfc_cmd->timeout = 60; | 3422 | i, 0, FCP_TARGET_RESET); |
3420 | status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, | 3423 | |
3421 | cmnd->device->lun, | 3424 | if (status != SUCCESS) { |
3422 | ndlp->rport->dd_data); | ||
3423 | if (status != TIMEOUT_ERROR) | ||
3424 | lpfc_release_scsi_buf(phba, lpfc_cmd); | ||
3425 | } | ||
3426 | if (!lpfc_cmd || status != SUCCESS) { | ||
3427 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 3425 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
3428 | "0700 Bus Reset on target %d failed\n", | 3426 | "0700 Bus Reset on target %d failed\n", |
3429 | i); | 3427 | i); |
@@ -3431,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
3431 | } | 3429 | } |
3432 | } | 3430 | } |
3433 | /* | 3431 | /* |
3434 | * All outstanding txcmplq I/Os should have been aborted by | 3432 | * We have to clean up i/o as they may be orphaned by the TMFs
3435 | * the targets. Unfortunately, some targets do not abide by | 3433 | * above; or if any of the TMFs failed, they may be in an |
3436 | * this forcing the driver to double check. | 3434 | * indeterminate state. |
3435 | * We will report success if all the i/o aborts successfully. | ||
3437 | */ | 3436 | */ |
3438 | cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); | 3437 | |
3439 | if (cnt) | 3438 | status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); |
3440 | lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], | 3439 | if (status != SUCCESS) |
3441 | 0, 0, LPFC_CTX_HOST); | ||
3442 | later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; | ||
3443 | while (time_after(later, jiffies) && cnt) { | ||
3444 | schedule_timeout_uninterruptible(msecs_to_jiffies(20)); | ||
3445 | cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); | ||
3446 | } | ||
3447 | if (cnt) { | ||
3448 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | ||
3449 | "0715 Bus Reset I/O flush failure: " | ||
3450 | "cnt x%x left x%x\n", cnt, i); | ||
3451 | ret = FAILED; | 3440 | ret = FAILED; |
3452 | } | 3441 | |
3453 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 3442 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
3454 | "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); | 3443 | "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); |
3455 | return ret; | 3444 | return ret; |
@@ -3582,7 +3571,8 @@ struct scsi_host_template lpfc_template = { | |||
3582 | .info = lpfc_info, | 3571 | .info = lpfc_info, |
3583 | .queuecommand = lpfc_queuecommand, | 3572 | .queuecommand = lpfc_queuecommand, |
3584 | .eh_abort_handler = lpfc_abort_handler, | 3573 | .eh_abort_handler = lpfc_abort_handler, |
3585 | .eh_device_reset_handler= lpfc_device_reset_handler, | 3574 | .eh_device_reset_handler = lpfc_device_reset_handler, |
3575 | .eh_target_reset_handler = lpfc_target_reset_handler, | ||
3586 | .eh_bus_reset_handler = lpfc_bus_reset_handler, | 3576 | .eh_bus_reset_handler = lpfc_bus_reset_handler, |
3587 | .slave_alloc = lpfc_slave_alloc, | 3577 | .slave_alloc = lpfc_slave_alloc, |
3588 | .slave_configure = lpfc_slave_configure, | 3578 | .slave_configure = lpfc_slave_configure, |
@@ -3602,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = { | |||
3602 | .info = lpfc_info, | 3592 | .info = lpfc_info, |
3603 | .queuecommand = lpfc_queuecommand, | 3593 | .queuecommand = lpfc_queuecommand, |
3604 | .eh_abort_handler = lpfc_abort_handler, | 3594 | .eh_abort_handler = lpfc_abort_handler, |
3605 | .eh_device_reset_handler= lpfc_device_reset_handler, | 3595 | .eh_device_reset_handler = lpfc_device_reset_handler, |
3596 | .eh_target_reset_handler = lpfc_target_reset_handler, | ||
3606 | .eh_bus_reset_handler = lpfc_bus_reset_handler, | 3597 | .eh_bus_reset_handler = lpfc_bus_reset_handler, |
3607 | .slave_alloc = lpfc_slave_alloc, | 3598 | .slave_alloc = lpfc_slave_alloc, |
3608 | .slave_configure = lpfc_slave_configure, | 3599 | .slave_configure = lpfc_slave_configure, |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index ff04daf18f48..acc43b061ba1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -4139,8 +4139,11 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, | |||
4139 | return -EIO; | 4139 | return -EIO; |
4140 | } | 4140 | } |
4141 | data_length = mqe->un.mb_words[5]; | 4141 | data_length = mqe->un.mb_words[5]; |
4142 | if (data_length > DMP_FCOEPARAM_RGN_SIZE) | 4142 | if (data_length > DMP_FCOEPARAM_RGN_SIZE) { |
4143 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | ||
4144 | kfree(mp); | ||
4143 | return -EIO; | 4145 | return -EIO; |
4146 | } | ||
4144 | 4147 | ||
4145 | lpfc_parse_fcoe_conf(phba, mp->virt, data_length); | 4148 | lpfc_parse_fcoe_conf(phba, mp->virt, data_length); |
4146 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 4149 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
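The two added lines above close a leak: the early -EIO return for an oversized FCoE parameter region previously skipped the lpfc_mbuf_free()/kfree() that the success path performs a few lines later. An equivalent way to keep the free in a single place is the usual goto-unwind style; a sketch of that alternative (not what the driver does, shown only to illustrate the error-path cleanup):

	/* Sketch: single exit point so every path releases the DMA buffer. */
	rc = 0;
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
		rc = -EIO;
		goto out_free;
	}
	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
out_free:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);	/* release the DMA buffer */
	kfree(mp);					/* and its descriptor */
	return rc;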
@@ -4211,27 +4214,6 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | |||
4211 | return -EIO; | 4214 | return -EIO; |
4212 | } | 4215 | } |
4213 | 4216 | ||
4214 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
4215 | "(%d):0380 Mailbox cmd x%x Status x%x " | ||
4216 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
4217 | "x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
4218 | "CQ: x%x x%x x%x x%x\n", | ||
4219 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4220 | bf_get(lpfc_mqe_command, mqe), | ||
4221 | bf_get(lpfc_mqe_status, mqe), | ||
4222 | mqe->un.mb_words[0], mqe->un.mb_words[1], | ||
4223 | mqe->un.mb_words[2], mqe->un.mb_words[3], | ||
4224 | mqe->un.mb_words[4], mqe->un.mb_words[5], | ||
4225 | mqe->un.mb_words[6], mqe->un.mb_words[7], | ||
4226 | mqe->un.mb_words[8], mqe->un.mb_words[9], | ||
4227 | mqe->un.mb_words[10], mqe->un.mb_words[11], | ||
4228 | mqe->un.mb_words[12], mqe->un.mb_words[13], | ||
4229 | mqe->un.mb_words[14], mqe->un.mb_words[15], | ||
4230 | mqe->un.mb_words[16], mqe->un.mb_words[50], | ||
4231 | mboxq->mcqe.word0, | ||
4232 | mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, | ||
4233 | mboxq->mcqe.trailer); | ||
4234 | |||
4235 | /* | 4217 | /* |
4236 | * The available vpd length cannot be bigger than the | 4218 | * The available vpd length cannot be bigger than the |
4237 | * DMA buffer passed to the port. Catch the less than | 4219 | * DMA buffer passed to the port. Catch the less than |
@@ -4337,21 +4319,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4337 | goto out_free_vpd; | 4319 | goto out_free_vpd; |
4338 | 4320 | ||
4339 | mqe = &mboxq->u.mqe; | 4321 | mqe = &mboxq->u.mqe; |
4340 | if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, | 4322 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); |
4341 | &mqe->un.read_rev) != LPFC_SLI_REV4) || | 4323 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) |
4342 | (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { | 4324 | phba->hba_flag |= HBA_FCOE_SUPPORT; |
4325 | if (phba->sli_rev != LPFC_SLI_REV4 || | ||
4326 | !(phba->hba_flag & HBA_FCOE_SUPPORT)) { | ||
4343 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 4327 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
4344 | "0376 READ_REV Error. SLI Level %d " | 4328 | "0376 READ_REV Error. SLI Level %d " |
4345 | "FCoE enabled %d\n", | 4329 | "FCoE enabled %d\n", |
4346 | bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), | 4330 | phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT); |
4347 | bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); | ||
4348 | rc = -EIO; | 4331 | rc = -EIO; |
4349 | goto out_free_vpd; | 4332 | goto out_free_vpd; |
4350 | } | 4333 | } |
4351 | /* Single threaded at this point, no need for lock */ | ||
4352 | spin_lock_irq(&phba->hbalock); | ||
4353 | phba->hba_flag |= HBA_FCOE_SUPPORT; | ||
4354 | spin_unlock_irq(&phba->hbalock); | ||
4355 | /* | 4334 | /* |
4356 | * Evaluate the read rev and vpd data. Populate the driver | 4335 | * Evaluate the read rev and vpd data. Populate the driver |
4357 | * state with the results. If this routine fails, the failure | 4336 | * state with the results. If this routine fails, the failure |
@@ -4365,8 +4344,32 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4365 | rc = 0; | 4344 | rc = 0; |
4366 | } | 4345 | } |
4367 | 4346 | ||
4368 | /* By now, we should determine the SLI revision, hard code for now */ | 4347 | /* Save information as VPD data */ |
4369 | phba->sli_rev = LPFC_SLI_REV4; | 4348 | phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; |
4349 | phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; | ||
4350 | phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; | ||
4351 | phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, | ||
4352 | &mqe->un.read_rev); | ||
4353 | phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, | ||
4354 | &mqe->un.read_rev); | ||
4355 | phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, | ||
4356 | &mqe->un.read_rev); | ||
4357 | phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, | ||
4358 | &mqe->un.read_rev); | ||
4359 | phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; | ||
4360 | memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); | ||
4361 | phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; | ||
4362 | memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); | ||
4363 | phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; | ||
4364 | memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); | ||
4365 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
4366 | "(%d):0380 READ_REV Status x%x " | ||
4367 | "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", | ||
4368 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4369 | bf_get(lpfc_mqe_status, mqe), | ||
4370 | phba->vpd.rev.opFwName, | ||
4371 | phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, | ||
4372 | phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); | ||
4370 | 4373 | ||
4371 | /* | 4374 | /* |
4372 | * Discover the port's supported feature set and match it against the | 4375 | * Discover the port's supported feature set and match it against the |
@@ -4491,8 +4494,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4491 | rc = -ENODEV; | 4494 | rc = -ENODEV; |
4492 | goto out_free_vpd; | 4495 | goto out_free_vpd; |
4493 | } | 4496 | } |
4494 | /* Temporary initialization of lpfc_fip_flag to non-fip */ | 4497 | if (phba->cfg_enable_fip) |
4495 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); | 4498 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1); |
4499 | else | ||
4500 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); | ||
4496 | 4501 | ||
4497 | /* Set up all the queues to the device */ | 4502 | /* Set up all the queues to the device */ |
4498 | rc = lpfc_sli4_queue_setup(phba); | 4503 | rc = lpfc_sli4_queue_setup(phba); |
@@ -5030,6 +5035,92 @@ out_not_finished: | |||
5030 | } | 5035 | } |
5031 | 5036 | ||
5032 | /** | 5037 | /** |
5038 | * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command | ||
5039 | * @phba: Pointer to HBA context object. | ||
5040 | * | ||
5041 | * The function blocks the posting of SLI4 asynchronous mailbox commands from | ||
5042 | * the driver internal pending mailbox queue. It will then try to wait out the | ||
5043 | * possible outstanding mailbox command before return. | ||
5044 | * | ||
5045 | * Returns: | ||
5046 | * 0 - the outstanding mailbox command completed; otherwise, the wait for | ||
5046 | * 0 - the outstanding mailbox command completed (or none was active); | ||
5047 | * 1 - the wait for the outstanding mailbox command timed out. | ||
5049 | static int | ||
5050 | lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) | ||
5051 | { | ||
5052 | struct lpfc_sli *psli = &phba->sli; | ||
5053 | uint8_t actcmd = MBX_HEARTBEAT; | ||
5054 | int rc = 0; | ||
5055 | unsigned long timeout; | ||
5056 | |||
5057 | /* Mark the asynchronous mailbox command posting as blocked */ | ||
5058 | spin_lock_irq(&phba->hbalock); | ||
5059 | psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; | ||
5060 | if (phba->sli.mbox_active) | ||
5061 | actcmd = phba->sli.mbox_active->u.mb.mbxCommand; | ||
5062 | spin_unlock_irq(&phba->hbalock); | ||
5063 | /* Determine how long we might wait for the active mailbox | ||
5064 | * command to be gracefully completed by firmware. | ||
5065 | */ | ||
5066 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + | ||
5067 | jiffies; | ||
5068 | /* Wait for the outstanding mailbox command to complete */ | ||
5069 | while (phba->sli.mbox_active) { | ||
5070 | /* Check active mailbox complete status every 2ms */ | ||
5071 | msleep(2); | ||
5072 | if (time_after(jiffies, timeout)) { | ||
5073 | /* Timeout, mark the outstanding cmd as not complete */ | ||
5074 | rc = 1; | ||
5075 | break; | ||
5076 | } | ||
5077 | } | ||
5078 | |||
5079 | /* Cannot cleanly block async mailbox commands, fail the attempt */ | ||
5080 | if (rc) { | ||
5081 | spin_lock_irq(&phba->hbalock); | ||
5082 | psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; | ||
5083 | spin_unlock_irq(&phba->hbalock); | ||
5084 | } | ||
5085 | return rc; | ||
5086 | } | ||
5087 | |||
5088 | /** | ||
5089 | * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command | ||
5090 | * @phba: Pointer to HBA context object. | ||
5091 | * | ||
5092 | * The function unblocks and resumes posting of SLI4 asynchronous mailbox | ||
5093 | * commands from the driver's internal pending mailbox queue. It makes sure | ||
5094 | * that there is no outstanding mailbox command before resuming posting of | ||
5095 | * asynchronous mailbox commands. If, for any reason, a mailbox command is | ||
5096 | * still outstanding, it will try to wait it out before resuming asynchronous | ||
5097 | * mailbox command posting. | ||
5098 | **/ | ||
5099 | static void | ||
5100 | lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) | ||
5101 | { | ||
5102 | struct lpfc_sli *psli = &phba->sli; | ||
5103 | |||
5104 | spin_lock_irq(&phba->hbalock); | ||
5105 | if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { | ||
5106 | /* Asynchronous mailbox posting is not blocked, do nothing */ | ||
5107 | spin_unlock_irq(&phba->hbalock); | ||
5108 | return; | ||
5109 | } | ||
5110 | |||
5111 | /* The outstanding synchronous mailbox command is guaranteed to be done, | ||
5112 | * either successfully or by timeout; after a timeout the outstanding | ||
5113 | * command is always removed. So just unblock posting of async mailbox | ||
5114 | * commands and resume. | ||
5115 | */ | ||
5116 | psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; | ||
5117 | spin_unlock_irq(&phba->hbalock); | ||
5118 | |||
5119 | /* wake up worker thread to post asynchronous mailbox command */ | ||
5120 | lpfc_worker_wake_up(phba); | ||
5121 | } | ||
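The block/unblock pair above is what the MBX_POLL hunk further down relies on: block posting of asynchronous mailbox commands, wait out any active command, issue the synchronous bootstrap mailbox, then unblock so the worker thread resumes posting. The calling pattern, condensed from that hunk (error logging omitted):

	/* Sketch: issue a polled (synchronous) mailbox while async posting is live. */
	rc = lpfc_sli4_async_mbox_block(phba);		/* waits out any active mbox */
	if (!rc) {
		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);	/* bootstrap mailbox */
		lpfc_sli4_async_mbox_unblock(phba);	/* wake worker, resume async posting */
	}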
5122 | |||
5123 | /** | ||
5033 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox | 5124 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox |
5034 | * @phba: Pointer to HBA context object. | 5125 | * @phba: Pointer to HBA context object. |
5035 | * @mboxq: Pointer to mailbox object. | 5126 | * @mboxq: Pointer to mailbox object. |
@@ -5204,14 +5295,35 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | |||
5204 | psli->sli_flag, flag); | 5295 | psli->sli_flag, flag); |
5205 | return rc; | 5296 | return rc; |
5206 | } else if (flag == MBX_POLL) { | 5297 | } else if (flag == MBX_POLL) { |
5207 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 5298 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
5208 | "(%d):2542 Mailbox command x%x (x%x) " | 5299 | "(%d):2542 Try to issue mailbox command " |
5209 | "cannot issue Data: x%x x%x\n", | 5300 | "x%x (x%x) synchronously ahead of async "
5301 | "mailbox command queue: x%x x%x\n", | ||
5210 | mboxq->vport ? mboxq->vport->vpi : 0, | 5302 | mboxq->vport ? mboxq->vport->vpi : 0, |
5211 | mboxq->u.mb.mbxCommand, | 5303 | mboxq->u.mb.mbxCommand, |
5212 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | 5304 | lpfc_sli4_mbox_opcode_get(phba, mboxq), |
5213 | psli->sli_flag, flag); | 5305 | psli->sli_flag, flag); |
5214 | return -EIO; | 5306 | /* Try to block the asynchronous mailbox posting */ |
5307 | rc = lpfc_sli4_async_mbox_block(phba); | ||
5308 | if (!rc) { | ||
5309 | /* Successfully blocked, now issue sync mbox cmd */ | ||
5310 | rc = lpfc_sli4_post_sync_mbox(phba, mboxq); | ||
5311 | if (rc != MBX_SUCCESS) | ||
5312 | lpfc_printf_log(phba, KERN_ERR, | ||
5313 | LOG_MBOX | LOG_SLI, | ||
5314 | "(%d):2597 Mailbox command " | ||
5315 | "x%x (x%x) cannot issue " | ||
5316 | "Data: x%x x%x\n", | ||
5317 | mboxq->vport ? | ||
5318 | mboxq->vport->vpi : 0, | ||
5319 | mboxq->u.mb.mbxCommand, | ||
5320 | lpfc_sli4_mbox_opcode_get(phba, | ||
5321 | mboxq), | ||
5322 | psli->sli_flag, flag); | ||
5323 | /* Unblock the async mailbox posting afterward */ | ||
5324 | lpfc_sli4_async_mbox_unblock(phba); | ||
5325 | } | ||
5326 | return rc; | ||
5215 | } | 5327 | } |
5216 | 5328 | ||
5217 | /* Now, interrupt mode asynchronous mailbox command */ | 5329 | /* Now, interrupt mode asynchronous mailbox command */
@@ -5749,18 +5861,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5749 | 5861 | ||
5750 | fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); | 5862 | fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); |
5751 | /* The fcp commands will set command type */ | 5863 | /* The fcp commands will set command type */ |
5752 | if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) | 5864 | if (iocbq->iocb_flag & LPFC_IO_FCP) |
5753 | command_type = ELS_COMMAND_NON_FIP; | ||
5754 | else if (!(iocbq->iocb_flag & LPFC_IO_FCP)) | ||
5755 | command_type = ELS_COMMAND_FIP; | ||
5756 | else if (iocbq->iocb_flag & LPFC_IO_FCP) | ||
5757 | command_type = FCP_COMMAND; | 5865 | command_type = FCP_COMMAND; |
5758 | else { | 5866 | else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) |
5759 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 5867 | command_type = ELS_COMMAND_FIP; |
5760 | "2019 Invalid cmd 0x%x\n", | 5868 | else |
5761 | iocbq->iocb.ulpCommand); | 5869 | command_type = ELS_COMMAND_NON_FIP; |
5762 | return IOCB_ERROR; | 5870 | |
5763 | } | ||
5764 | /* Some of the fields are in the right position already */ | 5871 | /* Some of the fields are in the right position already */ |
5765 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); | 5872 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); |
5766 | abort_tag = (uint32_t) iocbq->iotag; | 5873 | abort_tag = (uint32_t) iocbq->iotag; |
@@ -5814,11 +5921,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5814 | bf_set(lpfc_wqe_gen_context, &wqe->generic, | 5921 | bf_set(lpfc_wqe_gen_context, &wqe->generic, |
5815 | iocbq->iocb.ulpContext); | 5922 | iocbq->iocb.ulpContext); |
5816 | 5923 | ||
5817 | if (iocbq->vport->fc_myDID != 0) { | ||
5818 | bf_set(els_req64_sid, &wqe->els_req, | ||
5819 | iocbq->vport->fc_myDID); | ||
5820 | bf_set(els_req64_sp, &wqe->els_req, 1); | ||
5821 | } | ||
5822 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); | 5924 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); |
5823 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); | 5925 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); |
5824 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ | 5926 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ |
@@ -5877,14 +5979,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5877 | * is set and we are sending our 2nd or greater command on | 5979 | * is set and we are sending our 2nd or greater command on |
5878 | * this exchange. | 5980 | * this exchange. |
5879 | */ | 5981 | */ |
5982 | /* Always open the exchange */ | ||
5983 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); | ||
5880 | 5984 | ||
5881 | /* ALLOW read & write to fall through to ICMD64 */ | 5985 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ |
5986 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); | ||
5987 | break; | ||
5882 | case CMD_FCP_ICMND64_CR: | 5988 | case CMD_FCP_ICMND64_CR: |
5883 | /* Always open the exchange */ | 5989 | /* Always open the exchange */ |
5884 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); | 5990 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); |
5885 | 5991 | ||
5992 | wqe->words[4] = 0; | ||
5886 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ | 5993 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ |
5887 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); | 5994 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); |
5888 | break; | 5995 | break; |
5889 | case CMD_GEN_REQUEST64_CR: | 5996 | case CMD_GEN_REQUEST64_CR: |
5890 | /* word3 command length is described as byte offset to the | 5997 | /* word3 command length is described as byte offset to the |
@@ -7247,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, | |||
7247 | } | 7354 | } |
7248 | 7355 | ||
7249 | /** | 7356 | /** |
7357 | * lpfc_chk_iocb_flg - Test IOCB flag with lock held. | ||
7358 | * @phba: Pointer to HBA context object. | ||
7359 | * @piocbq: Pointer to command iocb. | ||
7360 | * @flag: Flag to test. | ||
7361 | * | ||
7362 | * This routine grabs the hbalock and then tests the iocb_flag to | ||
7363 | * see if the passed in flag is set. | ||
7364 | * Returns: | ||
7365 | * 1 if flag is set. | ||
7366 | * 0 if flag is not set. | ||
7367 | **/ | ||
7368 | static int | ||
7369 | lpfc_chk_iocb_flg(struct lpfc_hba *phba, | ||
7370 | struct lpfc_iocbq *piocbq, uint32_t flag) | ||
7371 | { | ||
7372 | unsigned long iflags; | ||
7373 | int ret; | ||
7374 | |||
7375 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
7376 | ret = piocbq->iocb_flag & flag; | ||
7377 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
7378 | return ret; | ||
7379 | |||
7380 | } | ||
7381 | |||
7382 | /** | ||
7250 | * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands | 7383 | * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands |
7251 | * @phba: Pointer to HBA context object.. | 7384 | * @phba: Pointer to HBA context object.. |
7252 | * @pring: Pointer to sli ring. | 7385 | * @pring: Pointer to sli ring. |
@@ -7313,7 +7446,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
7313 | if (retval == IOCB_SUCCESS) { | 7446 | if (retval == IOCB_SUCCESS) { |
7314 | timeout_req = timeout * HZ; | 7447 | timeout_req = timeout * HZ; |
7315 | timeleft = wait_event_timeout(done_q, | 7448 | timeleft = wait_event_timeout(done_q, |
7316 | piocb->iocb_flag & LPFC_IO_WAKE, | 7449 | lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), |
7317 | timeout_req); | 7450 | timeout_req); |
7318 | 7451 | ||
7319 | if (piocb->iocb_flag & LPFC_IO_WAKE) { | 7452 | if (piocb->iocb_flag & LPFC_IO_WAKE) { |
@@ -7498,20 +7631,16 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
7498 | if ((HS_FFER1 & phba->work_hs) && | 7631 | if ((HS_FFER1 & phba->work_hs) && |
7499 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | | 7632 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | |
7500 | HS_FFER6 | HS_FFER7) & phba->work_hs)) { | 7633 | HS_FFER6 | HS_FFER7) & phba->work_hs)) { |
7501 | spin_lock_irq(&phba->hbalock); | ||
7502 | phba->hba_flag |= DEFER_ERATT; | 7634 | phba->hba_flag |= DEFER_ERATT; |
7503 | spin_unlock_irq(&phba->hbalock); | ||
7504 | /* Clear all interrupt enable conditions */ | 7635 | /* Clear all interrupt enable conditions */ |
7505 | writel(0, phba->HCregaddr); | 7636 | writel(0, phba->HCregaddr); |
7506 | readl(phba->HCregaddr); | 7637 | readl(phba->HCregaddr); |
7507 | } | 7638 | } |
7508 | 7639 | ||
7509 | /* Set the driver HA work bitmap */ | 7640 | /* Set the driver HA work bitmap */ |
7510 | spin_lock_irq(&phba->hbalock); | ||
7511 | phba->work_ha |= HA_ERATT; | 7641 | phba->work_ha |= HA_ERATT; |
7512 | /* Indicate polling handles this ERATT */ | 7642 | /* Indicate polling handles this ERATT */ |
7513 | phba->hba_flag |= HBA_ERATT_HANDLED; | 7643 | phba->hba_flag |= HBA_ERATT_HANDLED; |
7514 | spin_unlock_irq(&phba->hbalock); | ||
7515 | return 1; | 7644 | return 1; |
7516 | } | 7645 | } |
7517 | return 0; | 7646 | return 0; |
@@ -7557,12 +7686,10 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
7557 | return 0; | 7686 | return 0; |
7558 | phba->work_status[0] = uerr_sta_lo; | 7687 | phba->work_status[0] = uerr_sta_lo; |
7559 | phba->work_status[1] = uerr_sta_hi; | 7688 | phba->work_status[1] = uerr_sta_hi; |
7560 | spin_lock_irq(&phba->hbalock); | ||
7561 | /* Set the driver HA work bitmap */ | 7689 | /* Set the driver HA work bitmap */ |
7562 | phba->work_ha |= HA_ERATT; | 7690 | phba->work_ha |= HA_ERATT; |
7563 | /* Indicate polling handles this ERATT */ | 7691 | /* Indicate polling handles this ERATT */ |
7564 | phba->hba_flag |= HBA_ERATT_HANDLED; | 7692 | phba->hba_flag |= HBA_ERATT_HANDLED; |
7565 | spin_unlock_irq(&phba->hbalock); | ||
7566 | return 1; | 7693 | return 1; |
7567 | } | 7694 | } |
7568 | } | 7695 | } |
@@ -9245,6 +9372,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, | |||
9245 | kfree(dmabuf); | 9372 | kfree(dmabuf); |
9246 | goto out_fail; | 9373 | goto out_fail; |
9247 | } | 9374 | } |
9375 | memset(dmabuf->virt, 0, PAGE_SIZE); | ||
9248 | dmabuf->buffer_tag = x; | 9376 | dmabuf->buffer_tag = x; |
9249 | list_add_tail(&dmabuf->list, &queue->page_list); | 9377 | list_add_tail(&dmabuf->list, &queue->page_list); |
9250 | /* initialize queue's entry array */ | 9378 | /* initialize queue's entry array */ |
@@ -9667,7 +9795,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
9667 | /* link the wq onto the parent cq child list */ | 9795 | /* link the wq onto the parent cq child list */ |
9668 | list_add_tail(&wq->list, &cq->child_list); | 9796 | list_add_tail(&wq->list, &cq->child_list); |
9669 | out: | 9797 | out: |
9670 | if (rc == MBX_TIMEOUT) | 9798 | if (rc != MBX_TIMEOUT) |
9671 | mempool_free(mbox, phba->mbox_mem_pool); | 9799 | mempool_free(mbox, phba->mbox_mem_pool); |
9672 | return status; | 9800 | return status; |
9673 | } | 9801 | } |
@@ -11020,10 +11148,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) | |||
11020 | rpi_page->start_rpi); | 11148 | rpi_page->start_rpi); |
11021 | hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); | 11149 | hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); |
11022 | hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); | 11150 | hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); |
11023 | if (!phba->sli4_hba.intr_enable) | 11151 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
11024 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
11025 | else | ||
11026 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); | ||
11027 | shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; | 11152 | shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; |
11028 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 11153 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
11029 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 11154 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
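This hunk can drop the intr_enable special case because of the MBX_POLL change earlier in the file: a polled mailbox issued while asynchronous posting is active now blocks the async queue, posts through the bootstrap mailbox, and unblocks, so the caller no longer has to choose between MBX_POLL and the wait variant. The call simply becomes:

	/* polled issue is now safe whether or not interrupts are enabled */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);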
@@ -11363,6 +11488,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, | |||
11363 | bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); | 11488 | bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); |
11364 | bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); | 11489 | bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); |
11365 | bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); | 11490 | bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); |
11491 | bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); | ||
11366 | bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); | 11492 | bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); |
11367 | bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, | 11493 | bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, |
11368 | LPFC_FCF_FPMA | LPFC_FCF_SPMA); | 11494 | LPFC_FCF_FPMA | LPFC_FCF_SPMA); |
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7d37eb7459bf..3c53316cf6d0 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -56,6 +56,7 @@ struct lpfc_iocbq { | |||
56 | #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ | 56 | #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ |
57 | #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ | 57 | #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ |
58 | #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ | 58 | #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ |
59 | #define LPFC_FIP_ELS 0x40 | ||
59 | 60 | ||
60 | uint8_t abort_count; | 61 | uint8_t abort_count; |
61 | uint8_t rsvd2; | 62 | uint8_t rsvd2; |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 5196b46608d7..3b276b47d18f 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -229,7 +229,7 @@ struct lpfc_bmbx { | |||
229 | 229 | ||
230 | #define LPFC_EQE_DEF_COUNT 1024 | 230 | #define LPFC_EQE_DEF_COUNT 1024 |
231 | #define LPFC_CQE_DEF_COUNT 256 | 231 | #define LPFC_CQE_DEF_COUNT 256 |
232 | #define LPFC_WQE_DEF_COUNT 64 | 232 | #define LPFC_WQE_DEF_COUNT 256 |
233 | #define LPFC_MQE_DEF_COUNT 16 | 233 | #define LPFC_MQE_DEF_COUNT 16 |
234 | #define LPFC_RQE_DEF_COUNT 512 | 234 | #define LPFC_RQE_DEF_COUNT 512 |
235 | 235 | ||
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 6b8a148f0a55..41094e02304b 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.2" | 21 | #define LPFC_DRIVER_VERSION "8.3.3" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 24 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index a6313ee84ac5..e0b49922193e 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -695,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
695 | } | 695 | } |
696 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 696 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
697 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 697 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
698 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | ||
699 | goto skip_logo; | ||
700 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) | 698 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) |
701 | while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) | 699 | while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) |
702 | timeout = schedule_timeout(timeout); | 700 | timeout = schedule_timeout(timeout); |
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 3b7240e40819..e3c482aa87b5 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c | |||
@@ -5444,7 +5444,7 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl | |||
5444 | ** input speed faster than the period. | 5444 | ** input speed faster than the period. |
5445 | */ | 5445 | */ |
5446 | kpc = per * clk; | 5446 | kpc = per * clk; |
5447 | while (--div >= 0) | 5447 | while (--div > 0) |
5448 | if (kpc >= (div_10M[div] << 2)) break; | 5448 | if (kpc >= (div_10M[div] << 2)) break; |
5449 | 5449 | ||
5450 | /* | 5450 | /* |
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 11a61ea8d5d9..70b60ade049e 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
@@ -530,7 +530,7 @@ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask, | |||
530 | if (reg == 0xff) { | 530 | if (reg == 0xff) { |
531 | break; | 531 | break; |
532 | } | 532 | } |
533 | } while ((time_out-- != 0) && (reg & mask) != 0); | 533 | } while ((--time_out != 0) && (reg & mask) != 0); |
534 | 534 | ||
535 | if (time_out == 0) { | 535 | if (time_out == 0) { |
536 | nsp_msg(KERN_DEBUG, " %s signal off timeout", str); | 536 | nsp_msg(KERN_DEBUG, " %s signal off timeout", str);
@@ -801,7 +801,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt) | |||
801 | 801 | ||
802 | data->FifoCount = ocount; | 802 | data->FifoCount = ocount; |
803 | 803 | ||
804 | if (time_out == 0) { | 804 | if (time_out < 0) { |
805 | nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", | 805 | nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", |
806 | scsi_get_resid(SCpnt), SCpnt->SCp.this_residual, | 806 | scsi_get_resid(SCpnt), SCpnt->SCp.this_residual, |
807 | SCpnt->SCp.buffers_residual); | 807 | SCpnt->SCp.buffers_residual); |
@@ -897,7 +897,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt) | |||
897 | 897 | ||
898 | data->FifoCount = ocount; | 898 | data->FifoCount = ocount; |
899 | 899 | ||
900 | if (time_out == 0) { | 900 | if (time_out < 0) { |
901 | nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", | 901 | nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", |
902 | scsi_get_resid(SCpnt)); | 902 | scsi_get_resid(SCpnt)); |
903 | } | 903 | } |
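The nsp_cs changes above are decrement-and-test fixes: time_out-- tests the value before decrementing, so a loop that runs to exhaustion leaves the counter at -1 and a later time_out == 0 check never fires; with the pre-decrement form the zero check is the right one, and where the post-decrement loop is kept the exit test has to become time_out < 0. A small standalone illustration of the difference (plain C, not driver code):

	#include <stdio.h>

	int main(void)
	{
		int a = 2, b = 2, post = 0, pre = 0;

		while (a-- != 0)	/* post-decrement: tests 2 and 1, ends with a == -1 */
			post++;
		while (--b != 0)	/* pre-decrement: tests 1 only, ends with b == 0 */
			pre++;

		/* prints: post=2 a=-1 pre=1 b=0 */
		printf("post=%d a=%d pre=%d b=%d\n", post, a, pre, b);
		return 0;
	}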
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index c8d0a176fea4..245e7afb4c4d 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -37,6 +37,7 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
37 | uint16_t hccr; | 37 | uint16_t hccr; |
38 | uint16_t mb[4]; | 38 | uint16_t mb[4]; |
39 | struct rsp_que *rsp; | 39 | struct rsp_que *rsp; |
40 | unsigned long flags; | ||
40 | 41 | ||
41 | rsp = (struct rsp_que *) dev_id; | 42 | rsp = (struct rsp_que *) dev_id; |
42 | if (!rsp) { | 43 | if (!rsp) { |
@@ -49,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
49 | reg = &ha->iobase->isp; | 50 | reg = &ha->iobase->isp; |
50 | status = 0; | 51 | status = 0; |
51 | 52 | ||
52 | spin_lock(&ha->hardware_lock); | 53 | spin_lock_irqsave(&ha->hardware_lock, flags); |
53 | vha = pci_get_drvdata(ha->pdev); | 54 | vha = pci_get_drvdata(ha->pdev); |
54 | for (iter = 50; iter--; ) { | 55 | for (iter = 50; iter--; ) { |
55 | hccr = RD_REG_WORD(®->hccr); | 56 | hccr = RD_REG_WORD(®->hccr); |
@@ -101,7 +102,7 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
101 | RD_REG_WORD(®->hccr); | 102 | RD_REG_WORD(®->hccr); |
102 | } | 103 | } |
103 | } | 104 | } |
104 | spin_unlock(&ha->hardware_lock); | 105 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
105 | 106 | ||
106 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 107 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
107 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 108 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
@@ -133,6 +134,7 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
133 | uint16_t mb[4]; | 134 | uint16_t mb[4]; |
134 | struct rsp_que *rsp; | 135 | struct rsp_que *rsp; |
135 | struct qla_hw_data *ha; | 136 | struct qla_hw_data *ha; |
137 | unsigned long flags; | ||
136 | 138 | ||
137 | rsp = (struct rsp_que *) dev_id; | 139 | rsp = (struct rsp_que *) dev_id; |
138 | if (!rsp) { | 140 | if (!rsp) { |
@@ -145,7 +147,7 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
145 | reg = &ha->iobase->isp; | 147 | reg = &ha->iobase->isp; |
146 | status = 0; | 148 | status = 0; |
147 | 149 | ||
148 | spin_lock(&ha->hardware_lock); | 150 | spin_lock_irqsave(&ha->hardware_lock, flags); |
149 | vha = pci_get_drvdata(ha->pdev); | 151 | vha = pci_get_drvdata(ha->pdev); |
150 | for (iter = 50; iter--; ) { | 152 | for (iter = 50; iter--; ) { |
151 | stat = RD_REG_DWORD(®->u.isp2300.host_status); | 153 | stat = RD_REG_DWORD(®->u.isp2300.host_status); |
@@ -216,7 +218,7 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
216 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); | 218 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); |
217 | RD_REG_WORD_RELAXED(®->hccr); | 219 | RD_REG_WORD_RELAXED(®->hccr); |
218 | } | 220 | } |
219 | spin_unlock(&ha->hardware_lock); | 221 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
220 | 222 | ||
221 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 223 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
222 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 224 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
@@ -1626,6 +1628,7 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1626 | uint32_t hccr; | 1628 | uint32_t hccr; |
1627 | uint16_t mb[4]; | 1629 | uint16_t mb[4]; |
1628 | struct rsp_que *rsp; | 1630 | struct rsp_que *rsp; |
1631 | unsigned long flags; | ||
1629 | 1632 | ||
1630 | rsp = (struct rsp_que *) dev_id; | 1633 | rsp = (struct rsp_que *) dev_id; |
1631 | if (!rsp) { | 1634 | if (!rsp) { |
@@ -1638,7 +1641,7 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1638 | reg = &ha->iobase->isp24; | 1641 | reg = &ha->iobase->isp24; |
1639 | status = 0; | 1642 | status = 0; |
1640 | 1643 | ||
1641 | spin_lock(&ha->hardware_lock); | 1644 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1642 | vha = pci_get_drvdata(ha->pdev); | 1645 | vha = pci_get_drvdata(ha->pdev); |
1643 | for (iter = 50; iter--; ) { | 1646 | for (iter = 50; iter--; ) { |
1644 | stat = RD_REG_DWORD(®->host_status); | 1647 | stat = RD_REG_DWORD(®->host_status); |
@@ -1688,7 +1691,7 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1688 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1691 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1689 | RD_REG_DWORD_RELAXED(®->hccr); | 1692 | RD_REG_DWORD_RELAXED(®->hccr); |
1690 | } | 1693 | } |
1691 | spin_unlock(&ha->hardware_lock); | 1694 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1692 | 1695 | ||
1693 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 1696 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
1694 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 1697 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
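The qla2xxx hunks above move hardware_lock from spin_lock()/spin_unlock() to the irqsave variants in all three interrupt handlers. That keeps the lock safe when a handler is entered from a context where interrupts are not already disabled (for example a polling path), because the saved flags restore whatever interrupt state the caller had. The pattern in isolation, a sketch using the names from the hunks above:

	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);	/* disable IRQs, save prior state */
	/* ... read host status, drain the response queue, ack the interrupt ... */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);	/* restore saved IRQ state */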
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 6260505dceb5..010e69b29afe 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -945,7 +945,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) | |||
945 | 945 | ||
946 | DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " | 946 | DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " |
947 | "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, | 947 | "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, |
948 | vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), | 948 | (unsigned long long)vid.port_name, |
949 | (unsigned long long)vid.node_name, | ||
950 | le16_to_cpu(entry->vf_id), | ||
949 | entry->q_qos, entry->f_qos)); | 951 | entry->q_qos, entry->f_qos)); |
950 | 952 | ||
951 | if (i < QLA_PRECONFIG_VPORTS) { | 953 | if (i < QLA_PRECONFIG_VPORTS) { |
@@ -954,7 +956,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) | |||
954 | qla_printk(KERN_INFO, ha, | 956 | qla_printk(KERN_INFO, ha, |
955 | "NPIV-Config: Failed to create vport [%02x]: " | 957 | "NPIV-Config: Failed to create vport [%02x]: " |
956 | "wwpn=%llx wwnn=%llx.\n", cnt, | 958 | "wwpn=%llx wwnn=%llx.\n", cnt, |
957 | vid.port_name, vid.node_name); | 959 | (unsigned long long)vid.port_name, |
960 | (unsigned long long)vid.node_name); | ||
958 | } | 961 | } |
959 | } | 962 | } |
960 | done: | 963 | done: |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index b13481369642..8821df9a277b 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -225,6 +225,7 @@ static struct { | |||
225 | {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 225 | {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
226 | {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 226 | {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
227 | {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 227 | {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
228 | {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
228 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, | 229 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, |
229 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, | 230 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, |
230 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ | 231 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index a152f89ae51c..3f64d93b6c8b 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/netlink.h> | 35 | #include <linux/netlink.h> |
36 | #include <net/netlink.h> | 36 | #include <net/netlink.h> |
37 | #include <scsi/scsi_netlink_fc.h> | 37 | #include <scsi/scsi_netlink_fc.h> |
38 | #include <scsi/scsi_bsg_fc.h> | ||
38 | #include "scsi_priv.h" | 39 | #include "scsi_priv.h" |
39 | #include "scsi_transport_fc_internal.h" | 40 | #include "scsi_transport_fc_internal.h" |
40 | 41 | ||
@@ -43,6 +44,10 @@ static void fc_vport_sched_delete(struct work_struct *work); | |||
43 | static int fc_vport_setup(struct Scsi_Host *shost, int channel, | 44 | static int fc_vport_setup(struct Scsi_Host *shost, int channel, |
44 | struct device *pdev, struct fc_vport_identifiers *ids, | 45 | struct device *pdev, struct fc_vport_identifiers *ids, |
45 | struct fc_vport **vport); | 46 | struct fc_vport **vport); |
47 | static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *); | ||
48 | static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *); | ||
49 | static void fc_bsg_remove(struct request_queue *); | ||
50 | static void fc_bsg_goose_queue(struct fc_rport *); | ||
46 | 51 | ||
47 | /* | 52 | /* |
48 | * Redefine so that we can have same named attributes in the | 53 | * Redefine so that we can have same named attributes in the |
@@ -411,13 +416,26 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, | |||
411 | return -ENOMEM; | 416 | return -ENOMEM; |
412 | } | 417 | } |
413 | 418 | ||
419 | fc_bsg_hostadd(shost, fc_host); | ||
420 | /* ignore any bsg add error - we just can't do sgio */ | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | static int fc_host_remove(struct transport_container *tc, struct device *dev, | ||
426 | struct device *cdev) | ||
427 | { | ||
428 | struct Scsi_Host *shost = dev_to_shost(dev); | ||
429 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); | ||
430 | |||
431 | fc_bsg_remove(fc_host->rqst_q); | ||
414 | return 0; | 432 | return 0; |
415 | } | 433 | } |
416 | 434 | ||
417 | static DECLARE_TRANSPORT_CLASS(fc_host_class, | 435 | static DECLARE_TRANSPORT_CLASS(fc_host_class, |
418 | "fc_host", | 436 | "fc_host", |
419 | fc_host_setup, | 437 | fc_host_setup, |
420 | NULL, | 438 | fc_host_remove, |
421 | NULL); | 439 | NULL); |
422 | 440 | ||
423 | /* | 441 | /* |
@@ -2375,6 +2393,7 @@ fc_rport_final_delete(struct work_struct *work) | |||
2375 | scsi_flush_work(shost); | 2393 | scsi_flush_work(shost); |
2376 | 2394 | ||
2377 | fc_terminate_rport_io(rport); | 2395 | fc_terminate_rport_io(rport); |
2396 | |||
2378 | /* | 2397 | /* |
2379 | * Cancel any outstanding timers. These should really exist | 2398 | * Cancel any outstanding timers. These should really exist |
2380 | * only when rmmod'ing the LLDD and we're asking for | 2399 | * only when rmmod'ing the LLDD and we're asking for |
@@ -2407,6 +2426,8 @@ fc_rport_final_delete(struct work_struct *work) | |||
2407 | (i->f->dev_loss_tmo_callbk)) | 2426 | (i->f->dev_loss_tmo_callbk)) |
2408 | i->f->dev_loss_tmo_callbk(rport); | 2427 | i->f->dev_loss_tmo_callbk(rport); |
2409 | 2428 | ||
2429 | fc_bsg_remove(rport->rqst_q); | ||
2430 | |||
2410 | transport_remove_device(dev); | 2431 | transport_remove_device(dev); |
2411 | device_del(dev); | 2432 | device_del(dev); |
2412 | transport_destroy_device(dev); | 2433 | transport_destroy_device(dev); |
@@ -2494,6 +2515,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel, | |||
2494 | transport_add_device(dev); | 2515 | transport_add_device(dev); |
2495 | transport_configure_device(dev); | 2516 | transport_configure_device(dev); |
2496 | 2517 | ||
2518 | fc_bsg_rportadd(shost, rport); | ||
2519 | /* ignore any bsg add error - we just can't do sgio */ | ||
2520 | |||
2497 | if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { | 2521 | if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { |
2498 | /* initiate a scan of the target */ | 2522 | /* initiate a scan of the target */ |
2499 | rport->flags |= FC_RPORT_SCAN_PENDING; | 2523 | rport->flags |= FC_RPORT_SCAN_PENDING; |
@@ -2658,6 +2682,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
2658 | spin_unlock_irqrestore(shost->host_lock, | 2682 | spin_unlock_irqrestore(shost->host_lock, |
2659 | flags); | 2683 | flags); |
2660 | 2684 | ||
2685 | fc_bsg_goose_queue(rport); | ||
2686 | |||
2661 | return rport; | 2687 | return rport; |
2662 | } | 2688 | } |
2663 | } | 2689 | } |
@@ -3343,6 +3369,592 @@ fc_vport_sched_delete(struct work_struct *work) | |||
3343 | } | 3369 | } |
3344 | 3370 | ||
3345 | 3371 | ||
3372 | /* | ||
3373 | * BSG support | ||
3374 | */ | ||
3375 | |||
3376 | |||
3377 | /** | ||
3378 | * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job | ||
3379 | * @job: fc_bsg_job that is to be torn down | ||
3380 | */ | ||
3381 | static void | ||
3382 | fc_destroy_bsgjob(struct fc_bsg_job *job) | ||
3383 | { | ||
3384 | unsigned long flags; | ||
3385 | |||
3386 | spin_lock_irqsave(&job->job_lock, flags); | ||
3387 | if (job->ref_cnt) { | ||
3388 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3389 | return; | ||
3390 | } | ||
3391 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3392 | |||
3393 | put_device(job->dev); /* release reference for the request */ | ||
3394 | |||
3395 | kfree(job->request_payload.sg_list); | ||
3396 | kfree(job->reply_payload.sg_list); | ||
3397 | kfree(job); | ||
3398 | } | ||
3399 | |||
3400 | |||
3401 | /** | ||
3402 | * fc_bsg_jobdone - completion routine for bsg requests that the LLD has | ||
3403 | * completed | ||
3404 | * @job: fc_bsg_job that is complete | ||
3405 | */ | ||
3406 | static void | ||
3407 | fc_bsg_jobdone(struct fc_bsg_job *job) | ||
3408 | { | ||
3409 | struct request *req = job->req; | ||
3410 | struct request *rsp = req->next_rq; | ||
3411 | unsigned long flags; | ||
3412 | int err; | ||
3413 | |||
3414 | spin_lock_irqsave(&job->job_lock, flags); | ||
3415 | job->state_flags |= FC_RQST_STATE_DONE; | ||
3416 | job->ref_cnt--; | ||
3417 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3418 | |||
3419 | err = job->req->errors = job->reply->result; | ||
3420 | if (err < 0) | ||
3421 | /* we're only returning the result field in the reply */ | ||
3422 | job->req->sense_len = sizeof(uint32_t); | ||
3423 | else | ||
3424 | job->req->sense_len = job->reply_len; | ||
3425 | |||
3426 | /* we assume all request payload was transferred, residual == 0 */ | ||
3427 | req->resid_len = 0; | ||
3428 | |||
3429 | if (rsp) { | ||
3430 | WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len); | ||
3431 | |||
3432 | /* set reply (bidi) residual */ | ||
3433 | rsp->resid_len -= min(job->reply->reply_payload_rcv_len, | ||
3434 | rsp->resid_len); | ||
3435 | } | ||
3436 | |||
3437 | blk_end_request_all(req, err); | ||
3438 | |||
3439 | fc_destroy_bsgjob(job); | ||
3440 | } | ||
3441 | |||
3442 | |||
3443 | /** | ||
3444 | * fc_bsg_job_timeout - handler for when a bsg request times out | ||
3445 | * @req: request that timed out | ||
3446 | */ | ||
3447 | static enum blk_eh_timer_return | ||
3448 | fc_bsg_job_timeout(struct request *req) | ||
3449 | { | ||
3450 | struct fc_bsg_job *job = (void *) req->special; | ||
3451 | struct Scsi_Host *shost = job->shost; | ||
3452 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
3453 | unsigned long flags; | ||
3454 | int err = 0, done = 0; | ||
3455 | |||
3456 | if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED) | ||
3457 | return BLK_EH_RESET_TIMER; | ||
3458 | |||
3459 | spin_lock_irqsave(&job->job_lock, flags); | ||
3460 | if (job->state_flags & FC_RQST_STATE_DONE) | ||
3461 | done = 1; | ||
3462 | else | ||
3463 | job->ref_cnt++; | ||
3464 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3465 | |||
3466 | if (!done && i->f->bsg_timeout) { | ||
3467 | /* call LLDD to abort the i/o as it has timed out */ | ||
3468 | err = i->f->bsg_timeout(job); | ||
3469 | if (err) | ||
3470 | printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " | ||
3471 | "abort failed with status %d\n", err); | ||
3472 | } | ||
3473 | |||
3474 | if (!done) { | ||
3475 | spin_lock_irqsave(&job->job_lock, flags); | ||
3476 | job->ref_cnt--; | ||
3477 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3478 | fc_destroy_bsgjob(job); | ||
3479 | } | ||
3480 | |||
3481 | /* the blk_end_sync_io() doesn't check the error */ | ||
3482 | return BLK_EH_HANDLED; | ||
3483 | } | ||
3484 | |||
3485 | |||
3486 | |||
3487 | static int | ||
3488 | fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req) | ||
3489 | { | ||
3490 | size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); | ||
3491 | |||
3492 | BUG_ON(!req->nr_phys_segments); | ||
3493 | |||
3494 | buf->sg_list = kzalloc(sz, GFP_KERNEL); | ||
3495 | if (!buf->sg_list) | ||
3496 | return -ENOMEM; | ||
3497 | sg_init_table(buf->sg_list, req->nr_phys_segments); | ||
3498 | buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); | ||
3499 | buf->payload_len = blk_rq_bytes(req); | ||
3500 | return 0; | ||
3501 | } | ||
3502 | |||
3503 | |||
3504 | /** | ||
3505 | * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the | ||
3506 | * bsg request | ||
3507 | * @shost: SCSI Host corresponding to the bsg object | ||
3508 | * @rport: (optional) FC Remote Port corresponding to the bsg object | ||
3509 | * @req: BSG request that needs a job structure | ||
3510 | */ | ||
3511 | static int | ||
3512 | fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport, | ||
3513 | struct request *req) | ||
3514 | { | ||
3515 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
3516 | struct request *rsp = req->next_rq; | ||
3517 | struct fc_bsg_job *job; | ||
3518 | int ret; | ||
3519 | |||
3520 | BUG_ON(req->special); | ||
3521 | |||
3522 | job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size, | ||
3523 | GFP_KERNEL); | ||
3524 | if (!job) | ||
3525 | return -ENOMEM; | ||
3526 | |||
3527 | /* | ||
3528 | * Note: this is a bit silly. | ||
3529 | * The request gets formatted as a SGIO v4 ioctl request, which | ||
3530 | * then gets reformatted as a blk request, which then gets | ||
3531 | * reformatted as a fc bsg request. And on completion, we have | ||
3532 | * to wrap return results such that SGIO v4 thinks it was a scsi | ||
3533 | * status. I hope this was all worth it. | ||
3534 | */ | ||
3535 | |||
3536 | req->special = job; | ||
3537 | job->shost = shost; | ||
3538 | job->rport = rport; | ||
3539 | job->req = req; | ||
3540 | if (i->f->dd_bsg_size) | ||
3541 | job->dd_data = (void *)&job[1]; | ||
3542 | spin_lock_init(&job->job_lock); | ||
3543 | job->request = (struct fc_bsg_request *)req->cmd; | ||
3544 | job->request_len = req->cmd_len; | ||
3545 | job->reply = req->sense; | ||
3546 | job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer | ||
3547 | * allocated */ | ||
3548 | if (req->bio) { | ||
3549 | ret = fc_bsg_map_buffer(&job->request_payload, req); | ||
3550 | if (ret) | ||
3551 | goto failjob_rls_job; | ||
3552 | } | ||
3553 | if (rsp && rsp->bio) { | ||
3554 | ret = fc_bsg_map_buffer(&job->reply_payload, rsp); | ||
3555 | if (ret) | ||
3556 | goto failjob_rls_rqst_payload; | ||
3557 | } | ||
3558 | job->job_done = fc_bsg_jobdone; | ||
3559 | if (rport) | ||
3560 | job->dev = &rport->dev; | ||
3561 | else | ||
3562 | job->dev = &shost->shost_gendev; | ||
3563 | get_device(job->dev); /* take a reference for the request */ | ||
3564 | |||
3565 | job->ref_cnt = 1; | ||
3566 | |||
3567 | return 0; | ||
3568 | |||
3569 | |||
3570 | failjob_rls_rqst_payload: | ||
3571 | kfree(job->request_payload.sg_list); | ||
3572 | failjob_rls_job: | ||
3573 | kfree(job); | ||
3574 | return -ENOMEM; | ||
3575 | } | ||
3576 | |||
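
The "silly" wrapping described in the comment starts in userspace: an application fills a struct fc_bsg_request, wraps it in an SG_IO v4 ioctl on the bsg node (typically /dev/bsg/fc_host<N>, matching the "fc_host%d" name registered in fc_bsg_hostadd() below), and the block layer turns that into the request unpacked here. A rough userspace sketch under those assumptions, with buffer construction and error handling omitted:

	/*
	 * Userspace sketch (assumption: the caller already built a CT payload
	 * and a struct fc_bsg_request with msgcode FC_BSG_HST_CT).
	 */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>		/* SG_IO */

	static int send_fc_ct(int fd, void *rqst, uint32_t rqst_len,
			      void *cmd, uint32_t cmd_len,
			      void *rsp, uint32_t rsp_len)
	{
		struct sg_io_v4 io;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)rqst;		/* struct fc_bsg_request */
		io.request_len = rqst_len;
		io.dout_xferp = (uintptr_t)cmd;		/* CT command out */
		io.dout_xfer_len = cmd_len;
		io.din_xferp = (uintptr_t)rsp;		/* CT response in */
		io.din_xfer_len = rsp_len;
		io.timeout = 30000;			/* ms */

		/* fd is an open /dev/bsg/fc_host<N> node */
		return ioctl(fd, SG_IO, &io);
	}
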
3577 | |||
3578 | enum fc_dispatch_result { | ||
3579 | FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */ | ||
3580 | FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */ | ||
3581 | FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */ | ||
3582 | }; | ||
3583 | |||
3584 | |||
3585 | /** | ||
3586 | * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD | ||
3587 | * @shost: scsi host the bsg request is for | ||
3588 | * @job: bsg job to be processed | ||
3589 | */ | ||
3590 | static enum fc_dispatch_result | ||
3591 | fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost, | ||
3592 | struct fc_bsg_job *job) | ||
3593 | { | ||
3594 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
3595 | int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ | ||
3596 | int ret; | ||
3597 | |||
3598 | /* Validate the host command */ | ||
3599 | switch (job->request->msgcode) { | ||
3600 | case FC_BSG_HST_ADD_RPORT: | ||
3601 | cmdlen += sizeof(struct fc_bsg_host_add_rport); | ||
3602 | break; | ||
3603 | |||
3604 | case FC_BSG_HST_DEL_RPORT: | ||
3605 | cmdlen += sizeof(struct fc_bsg_host_del_rport); | ||
3606 | break; | ||
3607 | |||
3608 | case FC_BSG_HST_ELS_NOLOGIN: | ||
3609 | cmdlen += sizeof(struct fc_bsg_host_els); | ||
3610 | /* there had better be xmt and rcv payloads */ | ||
3611 | if ((!job->request_payload.payload_len) || | ||
3612 | (!job->reply_payload.payload_len)) { | ||
3613 | ret = -EINVAL; | ||
3614 | goto fail_host_msg; | ||
3615 | } | ||
3616 | break; | ||
3617 | |||
3618 | case FC_BSG_HST_CT: | ||
3619 | cmdlen += sizeof(struct fc_bsg_host_ct); | ||
3620 | /* there had better be xmt and rcv payloads */ | ||
3621 | if ((!job->request_payload.payload_len) || | ||
3622 | (!job->reply_payload.payload_len)) { | ||
3623 | ret = -EINVAL; | ||
3624 | goto fail_host_msg; | ||
3625 | } | ||
3626 | break; | ||
3627 | |||
3628 | case FC_BSG_HST_VENDOR: | ||
3629 | cmdlen += sizeof(struct fc_bsg_host_vendor); | ||
3630 | if ((shost->hostt->vendor_id == 0L) || | ||
3631 | (job->request->rqst_data.h_vendor.vendor_id != | ||
3632 | shost->hostt->vendor_id)) { | ||
3633 | ret = -ESRCH; | ||
3634 | goto fail_host_msg; | ||
3635 | } | ||
3636 | break; | ||
3637 | |||
3638 | default: | ||
3639 | ret = -EBADR; | ||
3640 | goto fail_host_msg; | ||
3641 | } | ||
3642 | |||
3643 | /* check if we really have all the request data needed */ | ||
3644 | if (job->request_len < cmdlen) { | ||
3645 | ret = -ENOMSG; | ||
3646 | goto fail_host_msg; | ||
3647 | } | ||
3648 | |||
3649 | ret = i->f->bsg_request(job); | ||
3650 | if (!ret) | ||
3651 | return FC_DISPATCH_UNLOCKED; | ||
3652 | |||
3653 | fail_host_msg: | ||
3654 | /* return the errno failure code as the only status */ | ||
3655 | BUG_ON(job->reply_len < sizeof(uint32_t)); | ||
3656 | job->reply->result = ret; | ||
3657 | job->reply_len = sizeof(uint32_t); | ||
3658 | fc_bsg_jobdone(job); | ||
3659 | return FC_DISPATCH_UNLOCKED; | ||
3660 | } | ||
3661 | |||
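
On the driver side, the ->bsg_request() hook that both dispatchers call receives the validated job and is expected either to return 0 and complete the job later through job->job_done(), or to return a negative errno, which the fail path above folds into job->reply->result. A minimal sketch, again using hypothetical example_lld_* helpers:

	/*
	 * Hypothetical ->bsg_request() implementation.  example_lld_send_ct()
	 * and example_lld_send_els() are assumed helpers that start the I/O
	 * and later call job->job_done(job) from their completion handlers
	 * after filling in job->reply->result and
	 * job->reply->reply_payload_rcv_len.
	 */
	static int example_lld_bsg_request(struct fc_bsg_job *job)
	{
		switch (job->request->msgcode) {
		case FC_BSG_HST_CT:
		case FC_BSG_RPT_CT:
			return example_lld_send_ct(job);
		case FC_BSG_HST_ELS_NOLOGIN:
		case FC_BSG_RPT_ELS:
			return example_lld_send_els(job);
		default:
			/* dispatcher reports this errno in job->reply->result */
			return -EINVAL;
		}
	}
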
3662 | |||
3663 | /** | ||
3664 | * fc_bsg_goose_queue - restart rport queue in case it was stopped | ||
3665 | * @rport: rport to be restarted | ||
3666 | */ | ||
3667 | static void | ||
3668 | fc_bsg_goose_queue(struct fc_rport *rport) | ||
3669 | { | ||
3670 | int flagset; | ||
3671 | |||
3672 | if (!rport->rqst_q) | ||
3673 | return; | ||
3674 | |||
3675 | get_device(&rport->dev); | ||
3676 | |||
3677 | spin_lock(rport->rqst_q->queue_lock); | ||
3678 | flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && | ||
3679 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); | ||
3680 | if (flagset) | ||
3681 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); | ||
3682 | __blk_run_queue(rport->rqst_q); | ||
3683 | if (flagset) | ||
3684 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); | ||
3685 | spin_unlock(rport->rqst_q->queue_lock); | ||
3686 | |||
3687 | put_device(&rport->dev); | ||
3688 | } | ||
3689 | |||
3690 | |||
3691 | /** | ||
3692 | * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD | ||
3693 | * @shost: scsi host the rport is attached to | ||
3694 | * @rport: rport request destined to | ||
3695 | * @job: bsg job to be processed | ||
3696 | */ | ||
3697 | static enum fc_dispatch_result | ||
3698 | fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost, | ||
3699 | struct fc_rport *rport, struct fc_bsg_job *job) | ||
3700 | { | ||
3701 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
3702 | int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ | ||
3703 | int ret; | ||
3704 | |||
3705 | /* Validate the rport command */ | ||
3706 | switch (job->request->msgcode) { | ||
3707 | case FC_BSG_RPT_ELS: | ||
3708 | cmdlen += sizeof(struct fc_bsg_rport_els); | ||
3709 | goto check_bidi; | ||
3710 | |||
3711 | case FC_BSG_RPT_CT: | ||
3712 | cmdlen += sizeof(struct fc_bsg_rport_ct); | ||
3713 | check_bidi: | ||
3714 | /* there had better be xmt and rcv payloads */ | ||
3715 | if ((!job->request_payload.payload_len) || | ||
3716 | (!job->reply_payload.payload_len)) { | ||
3717 | ret = -EINVAL; | ||
3718 | goto fail_rport_msg; | ||
3719 | } | ||
3720 | break; | ||
3721 | default: | ||
3722 | ret = -EBADR; | ||
3723 | goto fail_rport_msg; | ||
3724 | } | ||
3725 | |||
3726 | /* check if we really have all the request data needed */ | ||
3727 | if (job->request_len < cmdlen) { | ||
3728 | ret = -ENOMSG; | ||
3729 | goto fail_rport_msg; | ||
3730 | } | ||
3731 | |||
3732 | ret = i->f->bsg_request(job); | ||
3733 | if (!ret) | ||
3734 | return FC_DISPATCH_UNLOCKED; | ||
3735 | |||
3736 | fail_rport_msg: | ||
3737 | /* return the errno failure code as the only status */ | ||
3738 | BUG_ON(job->reply_len < sizeof(uint32_t)); | ||
3739 | job->reply->result = ret; | ||
3740 | job->reply_len = sizeof(uint32_t); | ||
3741 | fc_bsg_jobdone(job); | ||
3742 | return FC_DISPATCH_UNLOCKED; | ||
3743 | } | ||
3744 | |||
3745 | |||
3746 | /** | ||
3747 | * fc_bsg_request_handler - generic handler for bsg requests | ||
3748 | * @q: request queue to manage | ||
3749 | * @shost: Scsi_Host related to the bsg object | ||
3750 | * @rport: FC remote port related to the bsg object (optional) | ||
3751 | * @dev: device structure for bsg object | ||
3752 | */ | ||
3753 | static void | ||
3754 | fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost, | ||
3755 | struct fc_rport *rport, struct device *dev) | ||
3756 | { | ||
3757 | struct request *req; | ||
3758 | struct fc_bsg_job *job; | ||
3759 | enum fc_dispatch_result ret; | ||
3760 | |||
3761 | if (!get_device(dev)) | ||
3762 | return; | ||
3763 | |||
3764 | while (!blk_queue_plugged(q)) { | ||
3765 | if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED)) | ||
3766 | break; | ||
3767 | |||
3768 | req = blk_fetch_request(q); | ||
3769 | if (!req) | ||
3770 | break; | ||
3771 | |||
3772 | if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) { | ||
3773 | req->errors = -ENXIO; | ||
3774 | spin_unlock_irq(q->queue_lock); | ||
3775 | blk_end_request(req, -ENXIO, blk_rq_bytes(req)); | ||
3776 | spin_lock_irq(q->queue_lock); | ||
3777 | continue; | ||
3778 | } | ||
3779 | |||
3780 | spin_unlock_irq(q->queue_lock); | ||
3781 | |||
3782 | ret = fc_req_to_bsgjob(shost, rport, req); | ||
3783 | if (ret) { | ||
3784 | req->errors = ret; | ||
3785 | blk_end_request(req, ret, blk_rq_bytes(req)); | ||
3786 | spin_lock_irq(q->queue_lock); | ||
3787 | continue; | ||
3788 | } | ||
3789 | |||
3790 | job = req->special; | ||
3791 | |||
3792 | /* check if we have the msgcode value at least */ | ||
3793 | if (job->request_len < sizeof(uint32_t)) { | ||
3794 | BUG_ON(job->reply_len < sizeof(uint32_t)); | ||
3795 | job->reply->result = -ENOMSG; | ||
3796 | job->reply_len = sizeof(uint32_t); | ||
3797 | fc_bsg_jobdone(job); | ||
3798 | spin_lock_irq(q->queue_lock); | ||
3799 | continue; | ||
3800 | } | ||
3801 | |||
3802 | /* the dispatch routines will unlock the queue_lock */ | ||
3803 | if (rport) | ||
3804 | ret = fc_bsg_rport_dispatch(q, shost, rport, job); | ||
3805 | else | ||
3806 | ret = fc_bsg_host_dispatch(q, shost, job); | ||
3807 | |||
3808 | /* did the dispatcher hit a state where it can't process any more? */ | ||
3809 | if (ret == FC_DISPATCH_BREAK) | ||
3810 | break; | ||
3811 | |||
3812 | /* did the dispatcher release the lock? */ | ||
3813 | if (ret == FC_DISPATCH_UNLOCKED) | ||
3814 | spin_lock_irq(q->queue_lock); | ||
3815 | } | ||
3816 | |||
3817 | spin_unlock_irq(q->queue_lock); | ||
3818 | put_device(dev); | ||
3819 | spin_lock_irq(q->queue_lock); | ||
3820 | } | ||
3821 | |||
3822 | |||
3823 | /** | ||
3824 | * fc_bsg_host_handler - handler for bsg requests for a fc host | ||
3825 | * @q: fc host request queue | ||
3826 | */ | ||
3827 | static void | ||
3828 | fc_bsg_host_handler(struct request_queue *q) | ||
3829 | { | ||
3830 | struct Scsi_Host *shost = q->queuedata; | ||
3831 | |||
3832 | fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev); | ||
3833 | } | ||
3834 | |||
3835 | |||
3836 | /** | ||
3837 | * fc_bsg_rport_handler - handler for bsg requests for a fc rport | ||
3838 | * @q: rport request queue | ||
3839 | */ | ||
3840 | static void | ||
3841 | fc_bsg_rport_handler(struct request_queue *q) | ||
3842 | { | ||
3843 | struct fc_rport *rport = q->queuedata; | ||
3844 | struct Scsi_Host *shost = rport_to_shost(rport); | ||
3845 | |||
3846 | fc_bsg_request_handler(q, shost, rport, &rport->dev); | ||
3847 | } | ||
3848 | |||
3849 | |||
3850 | /** | ||
3851 | * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests | ||
3852 | * @shost: shost for fc_host | ||
3853 | * @fc_host: fc_host the bsg structures are being added to | ||
3854 | */ | ||
3855 | static int | ||
3856 | fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) | ||
3857 | { | ||
3858 | struct device *dev = &shost->shost_gendev; | ||
3859 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
3860 | struct request_queue *q; | ||
3861 | int err; | ||
3862 | char bsg_name[BUS_ID_SIZE]; /* 20 bytes */ | ||
3863 | |||
3864 | fc_host->rqst_q = NULL; | ||
3865 | |||
3866 | if (!i->f->bsg_request) | ||
3867 | return -ENOTSUPP; | ||
3868 | |||
3869 | snprintf(bsg_name, sizeof(bsg_name), | ||
3870 | "fc_host%d", shost->host_no); | ||
3871 | |||
3872 | q = __scsi_alloc_queue(shost, fc_bsg_host_handler); | ||
3873 | if (!q) { | ||
3874 | printk(KERN_ERR "fc_host%d: bsg interface failed to " | ||
3875 | "initialize - no request queue\n", | ||
3876 | shost->host_no); | ||
3877 | return -ENOMEM; | ||
3878 | } | ||
3879 | |||
3880 | q->queuedata = shost; | ||
3881 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | ||
3882 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); | ||
3883 | blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); | ||
3884 | |||
3885 | err = bsg_register_queue(q, dev, bsg_name, NULL); | ||
3886 | if (err) { | ||
3887 | printk(KERN_ERR "fc_host%d: bsg interface failed to " | ||
3888 | "initialize - register queue\n", | ||
3889 | shost->host_no); | ||
3890 | blk_cleanup_queue(q); | ||
3891 | return err; | ||
3892 | } | ||
3893 | |||
3894 | fc_host->rqst_q = q; | ||
3895 | return 0; | ||
3896 | } | ||
3897 | |||
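
fc_bsg_hostadd() and fc_bsg_rportadd() only create the bsg queues when the driver's transport template provides a bsg_request handler; otherwise they bail out with -ENOTSUPP. A sketch of how a driver might wire in the hooks from the earlier sketches (the member names are the ones dereferenced above via to_fc_internal(); the example_lld_* symbols remain hypothetical):

	static struct fc_function_template example_lld_fc_functions = {
		/* ... existing show/set and rport handlers ... */
		.bsg_request	= example_lld_bsg_request,
		.bsg_timeout	= example_lld_bsg_timeout,
		/* per-job scratch area handed back as job->dd_data */
		.dd_bsg_size	= sizeof(struct example_lld_cmd),
	};
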
3898 | |||
3899 | /** | ||
3900 | * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests | ||
3901 | * @shost: shost that rport is attached to | ||
3902 | * @rport: rport that the bsg hooks are being attached to | ||
3903 | */ | ||
3904 | static int | ||
3905 | fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) | ||
3906 | { | ||
3907 | struct device *dev = &rport->dev; | ||
3908 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
3909 | struct request_queue *q; | ||
3910 | int err; | ||
3911 | |||
3912 | rport->rqst_q = NULL; | ||
3913 | |||
3914 | if (!i->f->bsg_request) | ||
3915 | return -ENOTSUPP; | ||
3916 | |||
3917 | q = __scsi_alloc_queue(shost, fc_bsg_rport_handler); | ||
3918 | if (!q) { | ||
3919 | printk(KERN_ERR "%s: bsg interface failed to " | ||
3920 | "initialize - no request queue\n", | ||
3921 | dev->kobj.name); | ||
3922 | return -ENOMEM; | ||
3923 | } | ||
3924 | |||
3925 | q->queuedata = rport; | ||
3926 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | ||
3927 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); | ||
3928 | blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); | ||
3929 | |||
3930 | err = bsg_register_queue(q, dev, NULL, NULL); | ||
3931 | if (err) { | ||
3932 | printk(KERN_ERR "%s: bsg interface failed to " | ||
3933 | "initialize - register queue\n", | ||
3934 | dev->kobj.name); | ||
3935 | blk_cleanup_queue(q); | ||
3936 | return err; | ||
3937 | } | ||
3938 | |||
3939 | rport->rqst_q = q; | ||
3940 | return 0; | ||
3941 | } | ||
3942 | |||
3943 | |||
3944 | /** | ||
3945 | * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports | ||
3946 | * @q: the request_queue that is to be torn down. | ||
3947 | */ | ||
3948 | static void | ||
3949 | fc_bsg_remove(struct request_queue *q) | ||
3950 | { | ||
3951 | if (q) { | ||
3952 | bsg_unregister_queue(q); | ||
3953 | blk_cleanup_queue(q); | ||
3954 | } | ||
3955 | } | ||
3956 | |||
3957 | |||
3346 | /* Original Author: Martin Hicks */ | 3958 | /* Original Author: Martin Hicks */ |
3347 | MODULE_AUTHOR("James Smart"); | 3959 | MODULE_AUTHOR("James Smart"); |
3348 | MODULE_DESCRIPTION("FC Transport Attributes"); | 3960 | MODULE_DESCRIPTION("FC Transport Attributes"); |
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index f49f55c6bfc8..654a34fb04cb 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -234,8 +234,10 @@ static int spi_setup_transport_attrs(struct transport_container *tc, | |||
234 | spi_width(starget) = 0; /* narrow */ | 234 | spi_width(starget) = 0; /* narrow */ |
235 | spi_max_width(starget) = 1; | 235 | spi_max_width(starget) = 1; |
236 | spi_iu(starget) = 0; /* no IU */ | 236 | spi_iu(starget) = 0; /* no IU */ |
237 | spi_max_iu(starget) = 1; | ||
237 | spi_dt(starget) = 0; /* ST */ | 238 | spi_dt(starget) = 0; /* ST */ |
238 | spi_qas(starget) = 0; | 239 | spi_qas(starget) = 0; |
240 | spi_max_qas(starget) = 1; | ||
239 | spi_wr_flow(starget) = 0; | 241 | spi_wr_flow(starget) = 0; |
240 | spi_rd_strm(starget) = 0; | 242 | spi_rd_strm(starget) = 0; |
241 | spi_rti(starget) = 0; | 243 | spi_rti(starget) = 0; |
@@ -360,9 +362,9 @@ static DEVICE_ATTR(field, S_IRUGO, \ | |||
360 | /* The Parallel SCSI Transport Attributes: */ | 362 | /* The Parallel SCSI Transport Attributes: */ |
361 | spi_transport_max_attr(offset, "%d\n"); | 363 | spi_transport_max_attr(offset, "%d\n"); |
362 | spi_transport_max_attr(width, "%d\n"); | 364 | spi_transport_max_attr(width, "%d\n"); |
363 | spi_transport_rd_attr(iu, "%d\n"); | 365 | spi_transport_max_attr(iu, "%d\n"); |
364 | spi_transport_rd_attr(dt, "%d\n"); | 366 | spi_transport_rd_attr(dt, "%d\n"); |
365 | spi_transport_rd_attr(qas, "%d\n"); | 367 | spi_transport_max_attr(qas, "%d\n"); |
366 | spi_transport_rd_attr(wr_flow, "%d\n"); | 368 | spi_transport_rd_attr(wr_flow, "%d\n"); |
367 | spi_transport_rd_attr(rd_strm, "%d\n"); | 369 | spi_transport_rd_attr(rd_strm, "%d\n"); |
368 | spi_transport_rd_attr(rti, "%d\n"); | 370 | spi_transport_rd_attr(rti, "%d\n"); |
@@ -874,13 +876,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
874 | 876 | ||
875 | /* try QAS requests; this should be harmless to set if the | 877 | /* try QAS requests; this should be harmless to set if the |
876 | * target supports it */ | 878 | * target supports it */ |
877 | if (scsi_device_qas(sdev)) { | 879 | if (scsi_device_qas(sdev) && spi_max_qas(starget)) { |
878 | DV_SET(qas, 1); | 880 | DV_SET(qas, 1); |
879 | } else { | 881 | } else { |
880 | DV_SET(qas, 0); | 882 | DV_SET(qas, 0); |
881 | } | 883 | } |
882 | 884 | ||
883 | if (scsi_device_ius(sdev) && min_period < 9) { | 885 | if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) { |
884 | /* This u320 (or u640). Set IU transfers */ | 886 | /* This u320 (or u640). Set IU transfers */ |
885 | DV_SET(iu, 1); | 887 | DV_SET(iu, 1); |
886 | /* Then set the optional parameters */ | 888 | /* Then set the optional parameters */ |
@@ -1412,12 +1414,18 @@ static mode_t target_attribute_is_visible(struct kobject *kobj, | |||
1412 | else if (attr == &dev_attr_iu.attr && | 1414 | else if (attr == &dev_attr_iu.attr && |
1413 | spi_support_ius(starget)) | 1415 | spi_support_ius(starget)) |
1414 | return TARGET_ATTRIBUTE_HELPER(iu); | 1416 | return TARGET_ATTRIBUTE_HELPER(iu); |
1417 | else if (attr == &dev_attr_max_iu.attr && | ||
1418 | spi_support_ius(starget)) | ||
1419 | return TARGET_ATTRIBUTE_HELPER(iu); | ||
1415 | else if (attr == &dev_attr_dt.attr && | 1420 | else if (attr == &dev_attr_dt.attr && |
1416 | spi_support_dt(starget)) | 1421 | spi_support_dt(starget)) |
1417 | return TARGET_ATTRIBUTE_HELPER(dt); | 1422 | return TARGET_ATTRIBUTE_HELPER(dt); |
1418 | else if (attr == &dev_attr_qas.attr && | 1423 | else if (attr == &dev_attr_qas.attr && |
1419 | spi_support_qas(starget)) | 1424 | spi_support_qas(starget)) |
1420 | return TARGET_ATTRIBUTE_HELPER(qas); | 1425 | return TARGET_ATTRIBUTE_HELPER(qas); |
1426 | else if (attr == &dev_attr_max_qas.attr && | ||
1427 | spi_support_qas(starget)) | ||
1428 | return TARGET_ATTRIBUTE_HELPER(qas); | ||
1421 | else if (attr == &dev_attr_wr_flow.attr && | 1429 | else if (attr == &dev_attr_wr_flow.attr && |
1422 | spi_support_ius(starget)) | 1430 | spi_support_ius(starget)) |
1423 | return TARGET_ATTRIBUTE_HELPER(wr_flow); | 1431 | return TARGET_ATTRIBUTE_HELPER(wr_flow); |
@@ -1447,8 +1455,10 @@ static struct attribute *target_attributes[] = { | |||
1447 | &dev_attr_width.attr, | 1455 | &dev_attr_width.attr, |
1448 | &dev_attr_max_width.attr, | 1456 | &dev_attr_max_width.attr, |
1449 | &dev_attr_iu.attr, | 1457 | &dev_attr_iu.attr, |
1458 | &dev_attr_max_iu.attr, | ||
1450 | &dev_attr_dt.attr, | 1459 | &dev_attr_dt.attr, |
1451 | &dev_attr_qas.attr, | 1460 | &dev_attr_qas.attr, |
1461 | &dev_attr_max_qas.attr, | ||
1452 | &dev_attr_wr_flow.attr, | 1462 | &dev_attr_wr_flow.attr, |
1453 | &dev_attr_rd_strm.attr, | 1463 | &dev_attr_rd_strm.attr, |
1454 | &dev_attr_rti.attr, | 1464 | &dev_attr_rti.attr, |