aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/message/fusion/mptbase.h4
-rw-r--r--drivers/message/fusion/mptsas.c4
-rw-r--r--drivers/message/fusion/mptscsih.c13
-rw-r--r--drivers/message/fusion/mptspi.c22
-rw-r--r--drivers/message/i2o/i2o_scsi.c4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c138
-rw-r--r--drivers/scsi/be2iscsi/be.h10
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c11
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h10
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c13
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h13
-rw-r--r--drivers/scsi/be2iscsi/be_main.c51
-rw-r--r--drivers/scsi/be2iscsi/be_main.h15
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c17
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h13
-rw-r--r--drivers/scsi/bfa/bfad.c61
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c33
-rw-r--r--drivers/scsi/bfa/bfad_im.h25
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c35
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c6
-rw-r--r--drivers/scsi/constants.c1
-rw-r--r--drivers/scsi/dc395x.c193
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c72
-rw-r--r--drivers/scsi/dpt_i2o.c6
-rw-r--r--drivers/scsi/eata.c66
-rw-r--r--drivers/scsi/eata_pio.c19
-rw-r--r--drivers/scsi/esp_scsi.c6
-rw-r--r--drivers/scsi/fcoe/fcoe.c202
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c6
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c56
-rw-r--r--drivers/scsi/hpsa.c496
-rw-r--r--drivers/scsi/hpsa.h15
-rw-r--r--drivers/scsi/hpsa_cmd.h11
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c3
-rw-r--r--drivers/scsi/in2000.c29
-rw-r--r--drivers/scsi/ipr.c158
-rw-r--r--drivers/scsi/ipr.h22
-rw-r--r--drivers/scsi/libfc/fc_fcp.c57
-rw-r--r--drivers/scsi/libfc/fc_lport.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c59
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h130
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c931
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h48
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c44
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c37
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/mesh.c3
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c42
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h49
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c617
-rw-r--r--drivers/scsi/mvsas/Kconfig1
-rw-r--r--drivers/scsi/mvsas/Makefile1
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c1
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h1
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c1
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h1
-rw-r--r--drivers/scsi/mvsas/mv_chips.h1
-rw-r--r--drivers/scsi/mvsas/mv_defs.h3
-rw-r--r--drivers/scsi/mvsas/mv_init.c67
-rw-r--r--drivers/scsi/mvsas/mv_sas.c383
-rw-r--r--drivers/scsi/mvsas/mv_sas.h8
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h18
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c54
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c151
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c97
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c5
-rw-r--r--drivers/scsi/scsi_error.c6
-rw-r--r--drivers/scsi/scsi_proc.c58
-rw-r--r--drivers/scsi/scsi_tgt_lib.c6
-rw-r--r--drivers/scsi/scsi_transport_fc.c11
-rw-r--r--drivers/scsi/tmscsim.c22
-rw-r--r--drivers/scsi/u14-34f.c61
-rw-r--r--drivers/scsi/wd33c93.c45
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile2
-rw-r--r--drivers/target/tcm_fc/Kconfig5
-rw-r--r--drivers/target/tcm_fc/Makefile15
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h215
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c696
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c677
-rw-r--r--drivers/target/tcm_fc/tfc_io.c374
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c541
-rw-r--r--drivers/usb/storage/isd200.c1
117 files changed, 6098 insertions, 1672 deletions
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 1735c84ff757..fe902338539b 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.18" 79#define MPT_LINUX_VERSION_COMMON "3.04.19"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.19"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 66f94125de4e..7596aecd5072 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5012,7 +5012,6 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
5012 (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) { 5012 (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
5013 VirtTarget *vtarget = NULL; 5013 VirtTarget *vtarget = NULL;
5014 u8 id, channel; 5014 u8 id, channel;
5015 u32 log_info = le32_to_cpu(reply->IOCLogInfo);
5016 5015
5017 id = sas_event_data->TargetID; 5016 id = sas_event_data->TargetID;
5018 channel = sas_event_data->Bus; 5017 channel = sas_event_data->Bus;
@@ -5023,7 +5022,8 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
5023 "LogInfo (0x%x) available for " 5022 "LogInfo (0x%x) available for "
5024 "INTERNAL_DEVICE_RESET" 5023 "INTERNAL_DEVICE_RESET"
5025 "fw_id %d fw_channel %d\n", ioc->name, 5024 "fw_id %d fw_channel %d\n", ioc->name,
5026 log_info, id, channel)); 5025 le32_to_cpu(reply->IOCLogInfo),
5026 id, channel));
5027 if (vtarget->raidVolume) { 5027 if (vtarget->raidVolume) {
5028 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT 5028 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5029 "Skipping Raid Volume for inDMD\n", 5029 "Skipping Raid Volume for inDMD\n",
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 0d9b82a44540..a1d4ee6671be 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1415,11 +1415,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1415 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", 1415 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
1416 ioc->name, SCpnt, done)); 1416 ioc->name, SCpnt, done));
1417 1417
1418 if (ioc->taskmgmt_quiesce_io) { 1418 if (ioc->taskmgmt_quiesce_io)
1419 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
1420 ioc->name, SCpnt));
1421 return SCSI_MLQUEUE_HOST_BUSY; 1419 return SCSI_MLQUEUE_HOST_BUSY;
1422 }
1423 1420
1424 /* 1421 /*
1425 * Put together a MPT SCSI request... 1422 * Put together a MPT SCSI request...
@@ -1773,7 +1770,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1773 int scpnt_idx; 1770 int scpnt_idx;
1774 int retval; 1771 int retval;
1775 VirtDevice *vdevice; 1772 VirtDevice *vdevice;
1776 ulong sn = SCpnt->serial_number;
1777 MPT_ADAPTER *ioc; 1773 MPT_ADAPTER *ioc;
1778 1774
1779 /* If we can't locate our host adapter structure, return FAILED status. 1775 /* If we can't locate our host adapter structure, return FAILED status.
@@ -1859,8 +1855,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1859 vdevice->vtarget->id, vdevice->lun, 1855 vdevice->vtarget->id, vdevice->lun,
1860 ctx2abort, mptscsih_get_tm_timeout(ioc)); 1856 ctx2abort, mptscsih_get_tm_timeout(ioc));
1861 1857
1862 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && 1858 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx) {
1863 SCpnt->serial_number == sn) {
1864 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT 1859 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1865 "task abort: command still in active list! (sc=%p)\n", 1860 "task abort: command still in active list! (sc=%p)\n",
1866 ioc->name, SCpnt)); 1861 ioc->name, SCpnt));
@@ -1873,9 +1868,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1873 } 1868 }
1874 1869
1875 out: 1870 out:
1876 printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", 1871 printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p)\n",
1877 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval, 1872 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
1878 SCpnt, SCpnt->serial_number); 1873 SCpnt);
1879 1874
1880 return retval; 1875 return retval;
1881} 1876}
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 6d9568d2ec59..8f61ba6aac23 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -867,6 +867,10 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
867 struct _x_config_parms cfg; 867 struct _x_config_parms cfg;
868 struct _CONFIG_PAGE_HEADER hdr; 868 struct _CONFIG_PAGE_HEADER hdr;
869 int err = -EBUSY; 869 int err = -EBUSY;
870 u32 nego_parms;
871 u32 period;
872 struct scsi_device *sdev;
873 int i;
870 874
871 /* don't allow updating nego parameters on RAID devices */ 875 /* don't allow updating nego parameters on RAID devices */
872 if (starget->channel == 0 && 876 if (starget->channel == 0 &&
@@ -904,6 +908,24 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
904 pg1->Header.PageNumber = hdr.PageNumber; 908 pg1->Header.PageNumber = hdr.PageNumber;
905 pg1->Header.PageType = hdr.PageType; 909 pg1->Header.PageType = hdr.PageType;
906 910
911 nego_parms = le32_to_cpu(pg1->RequestedParameters);
912 period = (nego_parms & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK) >>
913 MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
914 if (period == 8) {
915 /* Turn on inline data padding for TAPE when running U320 */
916 for (i = 0 ; i < 16; i++) {
917 sdev = scsi_device_lookup_by_target(starget, i);
918 if (sdev && sdev->type == TYPE_TAPE) {
919 sdev_printk(KERN_DEBUG, sdev, MYIOC_s_FMT
920 "IDP:ON\n", ioc->name);
921 nego_parms |= MPI_SCSIDEVPAGE1_RP_IDP;
922 pg1->RequestedParameters =
923 cpu_to_le32(nego_parms);
924 break;
925 }
926 }
927 }
928
907 mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters)); 929 mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters));
908 930
909 if (mpt_config(ioc, &cfg)) { 931 if (mpt_config(ioc, &cfg)) {
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index f003957e8e1c..74fbe56321ff 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -361,7 +361,7 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
361 */ 361 */
362 error = le32_to_cpu(msg->body[0]); 362 error = le32_to_cpu(msg->body[0]);
363 363
364 osm_debug("Completed %ld\n", cmd->serial_number); 364 osm_debug("Completed %0x%p\n", cmd);
365 365
366 cmd->result = error & 0xff; 366 cmd->result = error & 0xff;
367 /* 367 /*
@@ -678,7 +678,7 @@ static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt,
678 /* Queue the message */ 678 /* Queue the message */
679 i2o_msg_post(c, msg); 679 i2o_msg_post(c, msg);
680 680
681 osm_debug("Issued %ld\n", SCpnt->serial_number); 681 osm_debug("Issued %0x%p\n", SCpnt);
682 682
683 return 0; 683 return 0;
684 684
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index da7b9887ec48..f980600f78a8 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,8 +75,10 @@ MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
75MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter"); 75MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
76MODULE_LICENSE("Dual BSD/GPL"); 76MODULE_LICENSE("Dual BSD/GPL");
77MODULE_VERSION(ARCMSR_DRIVER_VERSION); 77MODULE_VERSION(ARCMSR_DRIVER_VERSION);
78static int sleeptime = 10; 78
79static int retrycount = 12; 79#define ARCMSR_SLEEPTIME 10
80#define ARCMSR_RETRYCOUNT 12
81
80wait_queue_head_t wait_q; 82wait_queue_head_t wait_q;
81static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 83static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
82 struct scsi_cmnd *cmd); 84 struct scsi_cmnd *cmd);
@@ -171,24 +173,6 @@ static struct pci_driver arcmsr_pci_driver = {
171**************************************************************************** 173****************************************************************************
172**************************************************************************** 174****************************************************************************
173*/ 175*/
174int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
175{
176 struct Scsi_Host *shost = NULL;
177 int i, isleep;
178 shost = cmd->device->host;
179 isleep = sleeptime / 10;
180 if (isleep > 0) {
181 for (i = 0; i < isleep; i++) {
182 msleep(10000);
183 }
184 }
185
186 isleep = sleeptime % 10;
187 if (isleep > 0) {
188 msleep(isleep*1000);
189 }
190 return 0;
191}
192 176
193static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb) 177static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
194{ 178{
@@ -323,66 +307,64 @@ static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
323 307
324 default: acb->adapter_type = ACB_ADAPTER_TYPE_A; 308 default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
325 } 309 }
326} 310}
327 311
328static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) 312static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
329{ 313{
330 struct MessageUnit_A __iomem *reg = acb->pmuA; 314 struct MessageUnit_A __iomem *reg = acb->pmuA;
331 uint32_t Index; 315 int i;
332 uint8_t Retries = 0x00; 316
333 do { 317 for (i = 0; i < 2000; i++) {
334 for (Index = 0; Index < 100; Index++) { 318 if (readl(&reg->outbound_intstatus) &
335 if (readl(&reg->outbound_intstatus) & 319 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
336 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 320 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
337 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, 321 &reg->outbound_intstatus);
338 &reg->outbound_intstatus); 322 return true;
339 return true; 323 }
340 } 324 msleep(10);
341 msleep(10); 325 } /* max 20 seconds */
342 }/*max 1 seconds*/
343 326
344 } while (Retries++ < 20);/*max 20 sec*/
345 return false; 327 return false;
346} 328}
347 329
348static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) 330static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
349{ 331{
350 struct MessageUnit_B *reg = acb->pmuB; 332 struct MessageUnit_B *reg = acb->pmuB;
351 uint32_t Index; 333 int i;
352 uint8_t Retries = 0x00; 334
353 do { 335 for (i = 0; i < 2000; i++) {
354 for (Index = 0; Index < 100; Index++) { 336 if (readl(reg->iop2drv_doorbell)
355 if (readl(reg->iop2drv_doorbell) 337 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
356 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 338 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
357 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN 339 reg->iop2drv_doorbell);
358 , reg->iop2drv_doorbell); 340 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
359 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); 341 reg->drv2iop_doorbell);
360 return true; 342 return true;
361 } 343 }
362 msleep(10); 344 msleep(10);
363 }/*max 1 seconds*/ 345 } /* max 20 seconds */
364 346
365 } while (Retries++ < 20);/*max 20 sec*/
366 return false; 347 return false;
367} 348}
368 349
369static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB) 350static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
370{ 351{
371 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; 352 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
372 unsigned char Retries = 0x00; 353 int i;
373 uint32_t Index; 354
374 do { 355 for (i = 0; i < 2000; i++) {
375 for (Index = 0; Index < 100; Index++) { 356 if (readl(&phbcmu->outbound_doorbell)
376 if (readl(&phbcmu->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 357 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
377 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &phbcmu->outbound_doorbell_clear);/*clear interrupt*/ 358 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
378 return true; 359 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
379 } 360 return true;
380 /* one us delay */ 361 }
381 msleep(10); 362 msleep(10);
382 } /*max 1 seconds*/ 363 } /* max 20 seconds */
383 } while (Retries++ < 20); /*max 20 sec*/ 364
384 return false; 365 return false;
385} 366}
367
386static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) 368static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
387{ 369{
388 struct MessageUnit_A __iomem *reg = acb->pmuA; 370 struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -459,10 +441,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
459 struct CommandControlBlock *ccb_tmp; 441 struct CommandControlBlock *ccb_tmp;
460 int i = 0, j = 0; 442 int i = 0, j = 0;
461 dma_addr_t cdb_phyaddr; 443 dma_addr_t cdb_phyaddr;
462 unsigned long roundup_ccbsize = 0, offset; 444 unsigned long roundup_ccbsize;
463 unsigned long max_xfer_len; 445 unsigned long max_xfer_len;
464 unsigned long max_sg_entrys; 446 unsigned long max_sg_entrys;
465 uint32_t firm_config_version; 447 uint32_t firm_config_version;
448
466 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 449 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
467 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 450 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
468 acb->devstate[i][j] = ARECA_RAID_GONE; 451 acb->devstate[i][j] = ARECA_RAID_GONE;
@@ -472,23 +455,20 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
472 firm_config_version = acb->firm_cfg_version; 455 firm_config_version = acb->firm_cfg_version;
473 if((firm_config_version & 0xFF) >= 3){ 456 if((firm_config_version & 0xFF) >= 3){
474 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */ 457 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
475 max_sg_entrys = (max_xfer_len/4096); 458 max_sg_entrys = (max_xfer_len/4096);
476 } 459 }
477 acb->host->max_sectors = max_xfer_len/512; 460 acb->host->max_sectors = max_xfer_len/512;
478 acb->host->sg_tablesize = max_sg_entrys; 461 acb->host->sg_tablesize = max_sg_entrys;
479 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); 462 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
480 acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM + 32; 463 acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
481 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); 464 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
482 if(!dma_coherent){ 465 if(!dma_coherent){
483 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error \n", acb->host->host_no); 466 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
484 return -ENOMEM; 467 return -ENOMEM;
485 } 468 }
486 acb->dma_coherent = dma_coherent; 469 acb->dma_coherent = dma_coherent;
487 acb->dma_coherent_handle = dma_coherent_handle; 470 acb->dma_coherent_handle = dma_coherent_handle;
488 memset(dma_coherent, 0, acb->uncache_size); 471 memset(dma_coherent, 0, acb->uncache_size);
489 offset = roundup((unsigned long)dma_coherent, 32) - (unsigned long)dma_coherent;
490 dma_coherent_handle = dma_coherent_handle + offset;
491 dma_coherent = (struct CommandControlBlock *)dma_coherent + offset;
492 ccb_tmp = dma_coherent; 472 ccb_tmp = dma_coherent;
493 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; 473 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
494 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){ 474 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
@@ -2602,12 +2582,8 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2602 if (cdb_phyaddr_hi32 != 0) { 2582 if (cdb_phyaddr_hi32 != 0) {
2603 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; 2583 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2604 2584
2605 if (cdb_phyaddr_hi32 != 0) { 2585 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
2606 unsigned char Retries = 0x00; 2586 acb->adapter_index, cdb_phyaddr_hi32);
2607 do {
2608 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x \n", acb->adapter_index, cdb_phyaddr_hi32);
2609 } while (Retries++ < 100);
2610 }
2611 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]); 2587 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
2612 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]); 2588 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
2613 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); 2589 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
@@ -2955,12 +2931,12 @@ static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2955 arcmsr_hardware_reset(acb); 2931 arcmsr_hardware_reset(acb);
2956 acb->acb_flags &= ~ACB_F_IOP_INITED; 2932 acb->acb_flags &= ~ACB_F_IOP_INITED;
2957sleep_again: 2933sleep_again:
2958 arcmsr_sleep_for_bus_reset(cmd); 2934 ssleep(ARCMSR_SLEEPTIME);
2959 if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { 2935 if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
2960 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count); 2936 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
2961 if (retry_count > retrycount) { 2937 if (retry_count > ARCMSR_RETRYCOUNT) {
2962 acb->fw_flag = FW_DEADLOCK; 2938 acb->fw_flag = FW_DEADLOCK;
2963 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no); 2939 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
2964 return FAILED; 2940 return FAILED;
2965 } 2941 }
2966 retry_count++; 2942 retry_count++;
@@ -3025,12 +3001,12 @@ sleep_again:
3025 arcmsr_hardware_reset(acb); 3001 arcmsr_hardware_reset(acb);
3026 acb->acb_flags &= ~ACB_F_IOP_INITED; 3002 acb->acb_flags &= ~ACB_F_IOP_INITED;
3027sleep: 3003sleep:
3028 arcmsr_sleep_for_bus_reset(cmd); 3004 ssleep(ARCMSR_SLEEPTIME);
3029 if ((readl(&reg->host_diagnostic) & 0x04) != 0) { 3005 if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
3030 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count); 3006 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
3031 if (retry_count > retrycount) { 3007 if (retry_count > ARCMSR_RETRYCOUNT) {
3032 acb->fw_flag = FW_DEADLOCK; 3008 acb->fw_flag = FW_DEADLOCK;
3033 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no); 3009 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3034 return FAILED; 3010 return FAILED;
3035 } 3011 }
3036 retry_count++; 3012 retry_count++;
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 1cb8a5e85c7f..1d7b976c850f 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#ifndef BEISCSI_H 18#ifndef BEISCSI_H
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index ad246369d373..b8a82f2c62c8 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
@@ -458,6 +458,7 @@ void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
458 req_hdr->opcode = opcode; 458 req_hdr->opcode = opcode;
459 req_hdr->subsystem = subsystem; 459 req_hdr->subsystem = subsystem;
460 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 460 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
461 req_hdr->timeout = 120;
461} 462}
462 463
463static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 464static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index fbd1dc2c15f7..497eb29e5c9e 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#ifndef BEISCSI_CMDS_H 18#ifndef BEISCSI_CMDS_H
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 868cc5590145..3cad10605023 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#include <scsi/libiscsi.h> 20#include <scsi/libiscsi.h>
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 9c532797c29e..ff60b7fd92d6 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#ifndef _BE_ISCSI_ 20#ifndef _BE_ISCSI_
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 24e20ba9633c..cea9b275965c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,16 +7,16 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
19
20#include <linux/reboot.h> 20#include <linux/reboot.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
@@ -420,7 +420,8 @@ static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
420 return 0; 420 return 0;
421 421
422free_kset: 422free_kset:
423 iscsi_boot_destroy_kset(phba->boot_kset); 423 if (phba->boot_kset)
424 iscsi_boot_destroy_kset(phba->boot_kset);
424 return -ENOMEM; 425 return -ENOMEM;
425} 426}
426 427
@@ -3464,23 +3465,23 @@ static void hwi_enable_intr(struct beiscsi_hba *phba)
3464 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3465 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3465 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3466 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3466 reg = ioread32(addr); 3467 reg = ioread32(addr);
3467 SE_DEBUG(DBG_LVL_8, "reg =x%08x\n", reg);
3468 3468
3469 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3469 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3470 if (!enabled) { 3470 if (!enabled) {
3471 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3471 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3472 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr); 3472 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
3473 iowrite32(reg, addr); 3473 iowrite32(reg, addr);
3474 if (!phba->msix_enabled) { 3474 }
3475 eq = &phwi_context->be_eq[0].q; 3475
3476 if (!phba->msix_enabled) {
3477 eq = &phwi_context->be_eq[0].q;
3478 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3479 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3480 } else {
3481 for (i = 0; i <= phba->num_cpus; i++) {
3482 eq = &phwi_context->be_eq[i].q;
3476 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id); 3483 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3477 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3484 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3478 } else {
3479 for (i = 0; i <= phba->num_cpus; i++) {
3480 eq = &phwi_context->be_eq[i].q;
3481 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3482 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3483 }
3484 } 3485 }
3485 } 3486 }
3486} 3487}
@@ -4019,12 +4020,17 @@ static int beiscsi_mtask(struct iscsi_task *task)
4019 hwi_write_buffer(pwrb, task); 4020 hwi_write_buffer(pwrb, task);
4020 break; 4021 break;
4021 case ISCSI_OP_NOOP_OUT: 4022 case ISCSI_OP_NOOP_OUT:
4022 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4023 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4023 INI_RD_CMD); 4024 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4024 if (task->hdr->ttt == ISCSI_RESERVED_TAG) 4025 TGT_DM_CMD);
4026 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4027 pwrb, 0);
4025 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4028 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4026 else 4029 } else {
4030 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4031 INI_RD_CMD);
4027 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); 4032 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
4033 }
4028 hwi_write_buffer(pwrb, task); 4034 hwi_write_buffer(pwrb, task);
4029 break; 4035 break;
4030 case ISCSI_OP_TEXT: 4036 case ISCSI_OP_TEXT:
@@ -4144,10 +4150,11 @@ static void beiscsi_remove(struct pci_dev *pcidev)
4144 phba->ctrl.mbox_mem_alloced.size, 4150 phba->ctrl.mbox_mem_alloced.size,
4145 phba->ctrl.mbox_mem_alloced.va, 4151 phba->ctrl.mbox_mem_alloced.va,
4146 phba->ctrl.mbox_mem_alloced.dma); 4152 phba->ctrl.mbox_mem_alloced.dma);
4153 if (phba->boot_kset)
4154 iscsi_boot_destroy_kset(phba->boot_kset);
4147 iscsi_host_remove(phba->shost); 4155 iscsi_host_remove(phba->shost);
4148 pci_dev_put(phba->pcidev); 4156 pci_dev_put(phba->pcidev);
4149 iscsi_host_free(phba->shost); 4157 iscsi_host_free(phba->shost);
4150 iscsi_boot_destroy_kset(phba->boot_kset);
4151} 4158}
4152 4159
4153static void beiscsi_msix_enable(struct beiscsi_hba *phba) 4160static void beiscsi_msix_enable(struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 90eb74f6bcab..081c171a1ed6 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#ifndef _BEISCSI_MAIN_ 20#ifndef _BEISCSI_MAIN_
@@ -35,7 +34,7 @@
35 34
36#include "be.h" 35#include "be.h"
37#define DRV_NAME "be2iscsi" 36#define DRV_NAME "be2iscsi"
38#define BUILD_STR "2.0.549.0" 37#define BUILD_STR "2.103.298.0"
39#define BE_NAME "ServerEngines BladeEngine2" \ 38#define BE_NAME "ServerEngines BladeEngine2" \
40 "Linux iSCSI Driver version" BUILD_STR 39 "Linux iSCSI Driver version" BUILD_STR
41#define DRV_DESC BE_NAME " " "Driver" 40#define DRV_DESC BE_NAME " " "Driver"
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 877324fc594c..44762cfa3e12 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#include "be_mgmt.h" 20#include "be_mgmt.h"
@@ -203,8 +202,8 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
203 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); 202 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
204 203
205 req->chute = chute; 204 req->chute = chute;
206 req->hdr_ring_id = 0; 205 req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba));
207 req->data_ring_id = 0; 206 req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba));
208 207
209 status = be_mcc_notify_wait(phba); 208 status = be_mcc_notify_wait(phba);
210 if (status) 209 if (status)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index b9acedf78653..08428824ace2 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#ifndef _BEISCSI_MGMT_ 20#ifndef _BEISCSI_MGMT_
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 0fd510a01561..59b5e9b61d71 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -57,9 +57,19 @@ int pcie_max_read_reqsz;
57int bfa_debugfs_enable = 1; 57int bfa_debugfs_enable = 1;
58int msix_disable_cb = 0, msix_disable_ct = 0; 58int msix_disable_cb = 0, msix_disable_ct = 0;
59 59
60/* Firmware releated */
60u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; 61u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
61u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; 62u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
62 63
64#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
65#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
66#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
67
68static u32 *bfad_load_fwimg(struct pci_dev *pdev);
69static void bfad_free_fwimg(void);
70static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
71 u32 *bfi_image_size, char *fw_name);
72
63static const char *msix_name_ct[] = { 73static const char *msix_name_ct[] = {
64 "cpe0", "cpe1", "cpe2", "cpe3", 74 "cpe0", "cpe1", "cpe2", "cpe3",
65 "rme0", "rme1", "rme2", "rme3", 75 "rme0", "rme1", "rme2", "rme3",
@@ -222,6 +232,9 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
222 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 232 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
223 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); 233 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
224 } else { 234 } else {
235 printk(KERN_WARNING
236 "bfa %s: bfa init failed\n",
237 bfad->pci_name);
225 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; 238 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
226 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); 239 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
227 } 240 }
@@ -991,10 +1004,6 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
991 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; 1004 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
992 } 1005 }
993 1006
994 /* Setup the debugfs node for this scsi_host */
995 if (bfa_debugfs_enable)
996 bfad_debugfs_init(&bfad->pport);
997
998 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; 1007 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
999 1008
1000out: 1009out:
@@ -1004,10 +1013,6 @@ out:
1004void 1013void
1005bfad_uncfg_pport(struct bfad_s *bfad) 1014bfad_uncfg_pport(struct bfad_s *bfad)
1006{ 1015{
1007 /* Remove the debugfs node for this scsi_host */
1008 kfree(bfad->regdata);
1009 bfad_debugfs_exit(&bfad->pport);
1010
1011 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && 1016 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
1012 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { 1017 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
1013 bfad_im_scsi_host_free(bfad, bfad->pport.im_port); 1018 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
@@ -1389,6 +1394,10 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1389 bfad->pport.bfad = bfad; 1394 bfad->pport.bfad = bfad;
1390 INIT_LIST_HEAD(&bfad->pbc_vport_list); 1395 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1391 1396
1397 /* Setup the debugfs node for this bfad */
1398 if (bfa_debugfs_enable)
1399 bfad_debugfs_init(&bfad->pport);
1400
1392 retval = bfad_drv_init(bfad); 1401 retval = bfad_drv_init(bfad);
1393 if (retval != BFA_STATUS_OK) 1402 if (retval != BFA_STATUS_OK)
1394 goto out_drv_init_failure; 1403 goto out_drv_init_failure;
@@ -1404,6 +1413,9 @@ out_bfad_sm_failure:
1404 bfa_detach(&bfad->bfa); 1413 bfa_detach(&bfad->bfa);
1405 bfad_hal_mem_release(bfad); 1414 bfad_hal_mem_release(bfad);
1406out_drv_init_failure: 1415out_drv_init_failure:
1416 /* Remove the debugfs node for this bfad */
1417 kfree(bfad->regdata);
1418 bfad_debugfs_exit(&bfad->pport);
1407 mutex_lock(&bfad_mutex); 1419 mutex_lock(&bfad_mutex);
1408 bfad_inst--; 1420 bfad_inst--;
1409 list_del(&bfad->list_entry); 1421 list_del(&bfad->list_entry);
@@ -1445,6 +1457,10 @@ bfad_pci_remove(struct pci_dev *pdev)
1445 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1457 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1446 bfad_hal_mem_release(bfad); 1458 bfad_hal_mem_release(bfad);
1447 1459
1460 /* Remove the debugfs node for this bfad */
1461 kfree(bfad->regdata);
1462 bfad_debugfs_exit(&bfad->pport);
1463
1448 /* Cleaning the BFAD instance */ 1464 /* Cleaning the BFAD instance */
1449 mutex_lock(&bfad_mutex); 1465 mutex_lock(&bfad_mutex);
1450 bfad_inst--; 1466 bfad_inst--;
@@ -1550,7 +1566,7 @@ bfad_exit(void)
1550} 1566}
1551 1567
1552/* Firmware handling */ 1568/* Firmware handling */
1553u32 * 1569static void
1554bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 1570bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1555 u32 *bfi_image_size, char *fw_name) 1571 u32 *bfi_image_size, char *fw_name)
1556{ 1572{
@@ -1558,27 +1574,25 @@ bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1558 1574
1559 if (request_firmware(&fw, fw_name, &pdev->dev)) { 1575 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1560 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); 1576 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1561 goto error; 1577 *bfi_image = NULL;
1578 goto out;
1562 } 1579 }
1563 1580
1564 *bfi_image = vmalloc(fw->size); 1581 *bfi_image = vmalloc(fw->size);
1565 if (NULL == *bfi_image) { 1582 if (NULL == *bfi_image) {
1566 printk(KERN_ALERT "Fail to allocate buffer for fw image " 1583 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1567 "size=%x!\n", (u32) fw->size); 1584 "size=%x!\n", (u32) fw->size);
1568 goto error; 1585 goto out;
1569 } 1586 }
1570 1587
1571 memcpy(*bfi_image, fw->data, fw->size); 1588 memcpy(*bfi_image, fw->data, fw->size);
1572 *bfi_image_size = fw->size/sizeof(u32); 1589 *bfi_image_size = fw->size/sizeof(u32);
1573 1590out:
1574 return *bfi_image; 1591 release_firmware(fw);
1575
1576error:
1577 return NULL;
1578} 1592}
1579 1593
1580u32 * 1594static u32 *
1581bfad_get_firmware_buf(struct pci_dev *pdev) 1595bfad_load_fwimg(struct pci_dev *pdev)
1582{ 1596{
1583 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { 1597 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
1584 if (bfi_image_ct_fc_size == 0) 1598 if (bfi_image_ct_fc_size == 0)
@@ -1598,6 +1612,17 @@ bfad_get_firmware_buf(struct pci_dev *pdev)
1598 } 1612 }
1599} 1613}
1600 1614
1615static void
1616bfad_free_fwimg(void)
1617{
1618 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
1619 vfree(bfi_image_ct_fc);
1620 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
1621 vfree(bfi_image_ct_cna);
1622 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
1623 vfree(bfi_image_cb_fc);
1624}
1625
1601module_init(bfad_init); 1626module_init(bfad_init);
1602module_exit(bfad_exit); 1627module_exit(bfad_exit);
1603MODULE_LICENSE("GPL"); 1628MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index c66e32eced7b..48be0c54f2de 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -28,10 +28,10 @@
28 * mount -t debugfs none /sys/kernel/debug 28 * mount -t debugfs none /sys/kernel/debug
29 * 29 *
30 * BFA Hierarchy: 30 * BFA Hierarchy:
31 * - bfa/host# 31 * - bfa/pci_dev:<pci_name>
32 * where the host number corresponds to the one under /sys/class/scsi_host/host# 32 * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bfa
33 * 33 *
34 * Debugging service available per host: 34 * Debugging service available per pci_dev:
35 * fwtrc: To collect current firmware trace. 35 * fwtrc: To collect current firmware trace.
36 * drvtrc: To collect current driver trace 36 * drvtrc: To collect current driver trace
37 * fwsave: To collect last saved fw trace as a result of firmware crash. 37 * fwsave: To collect last saved fw trace as a result of firmware crash.
@@ -489,11 +489,9 @@ static atomic_t bfa_debugfs_port_count;
489inline void 489inline void
490bfad_debugfs_init(struct bfad_port_s *port) 490bfad_debugfs_init(struct bfad_port_s *port)
491{ 491{
492 struct bfad_im_port_s *im_port = port->im_port; 492 struct bfad_s *bfad = port->bfad;
493 struct bfad_s *bfad = im_port->bfad;
494 struct Scsi_Host *shost = im_port->shost;
495 const struct bfad_debugfs_entry *file; 493 const struct bfad_debugfs_entry *file;
496 char name[16]; 494 char name[64];
497 int i; 495 int i;
498 496
499 if (!bfa_debugfs_enable) 497 if (!bfa_debugfs_enable)
@@ -510,17 +508,15 @@ bfad_debugfs_init(struct bfad_port_s *port)
510 } 508 }
511 } 509 }
512 510
513 /* 511 /* Setup the pci_dev debugfs directory for the port */
514 * Setup the host# directory for the port, 512 snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name);
515 * corresponds to the scsi_host num of this port.
516 */
517 snprintf(name, sizeof(name), "host%d", shost->host_no);
518 if (!port->port_debugfs_root) { 513 if (!port->port_debugfs_root) {
519 port->port_debugfs_root = 514 port->port_debugfs_root =
520 debugfs_create_dir(name, bfa_debugfs_root); 515 debugfs_create_dir(name, bfa_debugfs_root);
521 if (!port->port_debugfs_root) { 516 if (!port->port_debugfs_root) {
522 printk(KERN_WARNING 517 printk(KERN_WARNING
523 "BFA host root dir creation failed\n"); 518 "bfa %s: debugfs root creation failed\n",
519 bfad->pci_name);
524 goto err; 520 goto err;
525 } 521 }
526 522
@@ -536,8 +532,8 @@ bfad_debugfs_init(struct bfad_port_s *port)
536 file->fops); 532 file->fops);
537 if (!bfad->bfad_dentry_files[i]) { 533 if (!bfad->bfad_dentry_files[i]) {
538 printk(KERN_WARNING 534 printk(KERN_WARNING
539 "BFA host%d: create %s entry failed\n", 535 "bfa %s: debugfs %s creation failed\n",
540 shost->host_no, file->name); 536 bfad->pci_name, file->name);
541 goto err; 537 goto err;
542 } 538 }
543 } 539 }
@@ -550,8 +546,7 @@ err:
550inline void 546inline void
551bfad_debugfs_exit(struct bfad_port_s *port) 547bfad_debugfs_exit(struct bfad_port_s *port)
552{ 548{
553 struct bfad_im_port_s *im_port = port->im_port; 549 struct bfad_s *bfad = port->bfad;
554 struct bfad_s *bfad = im_port->bfad;
555 int i; 550 int i;
556 551
557 for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) { 552 for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
@@ -562,9 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
562 } 557 }
563 558
564 /* 559 /*
565 * Remove the host# directory for the port, 560 * Remove the pci_dev debugfs directory for the port */
566 * corresponds to the scsi_host num of this port.
567 */
568 if (port->port_debugfs_root) { 561 if (port->port_debugfs_root) {
569 debugfs_remove(port->port_debugfs_root); 562 debugfs_remove(port->port_debugfs_root);
570 port->port_debugfs_root = NULL; 563 port->port_debugfs_root = NULL;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index bfee63b16fa9..c296c8968511 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,29 +141,4 @@ extern struct device_attribute *bfad_im_vport_attrs[];
141 141
142irqreturn_t bfad_intx(int irq, void *dev_id); 142irqreturn_t bfad_intx(int irq, void *dev_id);
143 143
144/* Firmware releated */
145#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
146#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
147#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
148
149u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
150u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
151 u32 *bfi_image_size, char *fw_name);
152
153static inline u32 *
154bfad_load_fwimg(struct pci_dev *pdev)
155{
156 return bfad_get_firmware_buf(pdev);
157}
158
159static inline void
160bfad_free_fwimg(void)
161{
162 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
163 vfree(bfi_image_ct_fc);
164 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
165 vfree(bfi_image_ct_cna);
166 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
167 vfree(bfi_image_cb_fc);
168}
169#endif 144#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index b6d350ac4288..0a404bfb44fe 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -130,7 +130,7 @@
130#define BNX2FC_TM_TIMEOUT 60 /* secs */ 130#define BNX2FC_TM_TIMEOUT 60 /* secs */
131#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */ 131#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
132 132
133#define BNX2FC_WAIT_CNT 120 133#define BNX2FC_WAIT_CNT 1200
134#define BNX2FC_FW_TIMEOUT (3 * HZ) 134#define BNX2FC_FW_TIMEOUT (3 * HZ)
135#define PORT_MAX 2 135#define PORT_MAX 2
136 136
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e2e647509a73..662365676689 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1130,7 +1130,7 @@ static void bnx2fc_interface_release(struct kref *kref)
1130 struct net_device *phys_dev; 1130 struct net_device *phys_dev;
1131 1131
1132 hba = container_of(kref, struct bnx2fc_hba, kref); 1132 hba = container_of(kref, struct bnx2fc_hba, kref);
1133 BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n"); 1133 BNX2FC_MISC_DBG("Interface is being released\n");
1134 1134
1135 netdev = hba->netdev; 1135 netdev = hba->netdev;
1136 phys_dev = hba->phys_dev; 1136 phys_dev = hba->phys_dev;
@@ -1254,20 +1254,17 @@ setup_err:
1254static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 1254static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1255 struct device *parent, int npiv) 1255 struct device *parent, int npiv)
1256{ 1256{
1257 struct fc_lport *lport = NULL; 1257 struct fc_lport *lport, *n_port;
1258 struct fcoe_port *port; 1258 struct fcoe_port *port;
1259 struct Scsi_Host *shost; 1259 struct Scsi_Host *shost;
1260 struct fc_vport *vport = dev_to_vport(parent); 1260 struct fc_vport *vport = dev_to_vport(parent);
1261 int rc = 0; 1261 int rc = 0;
1262 1262
1263 /* Allocate Scsi_Host structure */ 1263 /* Allocate Scsi_Host structure */
1264 if (!npiv) { 1264 if (!npiv)
1265 lport = libfc_host_alloc(&bnx2fc_shost_template, 1265 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
1266 sizeof(struct fcoe_port)); 1266 else
1267 } else { 1267 lport = libfc_vport_create(vport, sizeof(*port));
1268 lport = libfc_vport_create(vport,
1269 sizeof(struct fcoe_port));
1270 }
1271 1268
1272 if (!lport) { 1269 if (!lport) {
1273 printk(KERN_ERR PFX "could not allocate scsi host structure\n"); 1270 printk(KERN_ERR PFX "could not allocate scsi host structure\n");
@@ -1285,7 +1282,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1285 goto lp_config_err; 1282 goto lp_config_err;
1286 1283
1287 if (npiv) { 1284 if (npiv) {
1288 vport = dev_to_vport(parent);
1289 printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", 1285 printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
1290 vport->node_name, vport->port_name); 1286 vport->node_name, vport->port_name);
1291 fc_set_wwnn(lport, vport->node_name); 1287 fc_set_wwnn(lport, vport->node_name);
@@ -1314,12 +1310,17 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1314 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; 1310 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1315 1311
1316 /* Allocate exchange manager */ 1312 /* Allocate exchange manager */
1317 if (!npiv) { 1313 if (!npiv)
1318 rc = bnx2fc_em_config(lport); 1314 rc = bnx2fc_em_config(lport);
1319 if (rc) { 1315 else {
1320 printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); 1316 shost = vport_to_shost(vport);
1321 goto shost_err; 1317 n_port = shost_priv(shost);
1322 } 1318 rc = fc_exch_mgr_list_clone(n_port, lport);
1319 }
1320
1321 if (rc) {
1322 printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
1323 goto shost_err;
1323 } 1324 }
1324 1325
1325 bnx2fc_interface_get(hba); 1326 bnx2fc_interface_get(hba);
@@ -1352,8 +1353,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1352 /* Free existing transmit skbs */ 1353 /* Free existing transmit skbs */
1353 fcoe_clean_pending_queue(lport); 1354 fcoe_clean_pending_queue(lport);
1354 1355
1355 bnx2fc_interface_put(hba);
1356
1357 /* Free queued packets for the receive thread */ 1356 /* Free queued packets for the receive thread */
1358 bnx2fc_clean_rx_queue(lport); 1357 bnx2fc_clean_rx_queue(lport);
1359 1358
@@ -1372,6 +1371,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1372 1371
1373 /* Release Scsi_Host */ 1372 /* Release Scsi_Host */
1374 scsi_host_put(lport->host); 1373 scsi_host_put(lport->host);
1374
1375 bnx2fc_interface_put(hba);
1375} 1376}
1376 1377
1377/** 1378/**
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 1b680e288c56..f756d5f85c7a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -522,6 +522,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
522 fp = fc_frame_alloc(lport, payload_len); 522 fp = fc_frame_alloc(lport, payload_len);
523 if (!fp) { 523 if (!fp) {
524 printk(KERN_ERR PFX "fc_frame_alloc failure\n"); 524 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
525 kfree(unsol_els);
525 return; 526 return;
526 } 527 }
527 528
@@ -547,6 +548,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
547 */ 548 */
548 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); 549 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
549 kfree_skb(skb); 550 kfree_skb(skb);
551 kfree(unsol_els);
550 return; 552 return;
551 } 553 }
552 } 554 }
@@ -563,6 +565,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
563 } else { 565 } else {
564 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); 566 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
565 kfree_skb(skb); 567 kfree_skb(skb);
568 kfree(unsol_els);
566 } 569 }
567} 570}
568 571
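
The three added kfree(unsol_els) calls plug a leak of the unsolicited-ELS work item on the early-return paths (frame allocation failure, dropped ELS, unexpected r_ctl). A minimal userspace sketch of the same free-on-every-exit pattern; the struct, helper and opcode check are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct unsol_work { int opcode; };

static int handle_frame(int opcode)
{
	struct unsol_work *w = malloc(sizeof(*w));

	if (!w)
		return -1;
	w->opcode = opcode;

	if (opcode == 0) {		/* stands in for "frame alloc failed" / "dropping ELS" */
		free(w);		/* the kfree(unsol_els) the hunks add */
		return -1;
	}

	printf("processed ELS 0x%x\n", w->opcode);
	free(w);			/* normal-path release */
	return 0;
}

int main(void)
{
	handle_frame(0x03);
	handle_frame(0);
	return 0;
}
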
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 1decefbf32e3..b5b5c346d779 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1663,6 +1663,12 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
1663 tgt = (struct bnx2fc_rport *)&rp[1]; 1663 tgt = (struct bnx2fc_rport *)&rp[1];
1664 1664
1665 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 1665 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1666 if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) {
1667 sc_cmd->result = DID_NO_CONNECT << 16;
1668 sc_cmd->scsi_done(sc_cmd);
1669 return 0;
1670
1671 }
1666 /* 1672 /*
1667 * Session is not offloaded yet. Let SCSI-ml retry 1673 * Session is not offloaded yet. Let SCSI-ml retry
1668 * the command. 1674 * the command.
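
The new test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL) branch completes the command immediately with DID_NO_CONNECT once the session has been uploaded, instead of letting SCSI-ml keep retrying a target that is gone. A hedged sketch of the decision only; the helper is illustrative and the host-byte values mirror include/scsi/scsi.h:

#include <stdbool.h>
#include <stdio.h>

#define DID_OK		0x00	/* issue the command */
#define DID_NO_CONNECT	0x01	/* complete immediately, target is gone */

/* Returns a host byte to complete with, or -1 to let SCSI-ml retry. */
static int queue_decision(bool session_ready, bool upload_done)
{
	if (session_ready)
		return DID_OK;
	if (upload_done)
		return DID_NO_CONNECT;	/* what the new branch does */
	return -1;			/* session not offloaded yet: retry */
}

int main(void)
{
	printf("%d %d %d\n",
	       queue_decision(true, false),
	       queue_decision(false, true),
	       queue_decision(false, false));
	return 0;
}
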
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index d0c82340f0e2..60d2ef291646 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -772,6 +772,7 @@ static const struct error_info additional[] =
772 {0x3802, "Esn - power management class event"}, 772 {0x3802, "Esn - power management class event"},
773 {0x3804, "Esn - media class event"}, 773 {0x3804, "Esn - media class event"},
774 {0x3806, "Esn - device busy class event"}, 774 {0x3806, "Esn - device busy class event"},
775 {0x3807, "Thin Provisioning soft threshold reached"},
775 776
776 {0x3900, "Saving parameters not supported"}, 777 {0x3900, "Saving parameters not supported"},
777 778
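
The new 0x3807 entry follows the table's (ASC << 8) | ASCQ keying, i.e. ASC 0x38 / ASCQ 0x07. A small compilable sketch of how such an entry is matched; the struct layout mirrors constants.c and the lookup helper is illustrative:

#include <stdio.h>

struct error_info { unsigned short code12; const char *text; };

static const struct error_info additional[] = {
	{0x3806, "Esn - device busy class event"},
	{0x3807, "Thin Provisioning soft threshold reached"},
};

static const char *asc_ascq_text(unsigned char asc, unsigned char ascq)
{
	unsigned short key = (asc << 8) | ascq;	/* table key encoding */
	unsigned int i;

	for (i = 0; i < sizeof(additional) / sizeof(additional[0]); i++)
		if (additional[i].code12 == key)
			return additional[i].text;
	return "(unknown)";
}

int main(void)
{
	printf("%s\n", asc_ascq_text(0x38, 0x07));
	return 0;
}
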
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index b10b3841535c..f5b718d3c31b 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -778,8 +778,8 @@ static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
778static void srb_waiting_insert(struct DeviceCtlBlk *dcb, 778static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
779 struct ScsiReqBlk *srb) 779 struct ScsiReqBlk *srb)
780{ 780{
781 dprintkdbg(DBG_0, "srb_waiting_insert: (pid#%li) <%02i-%i> srb=%p\n", 781 dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
782 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 782 srb->cmd, dcb->target_id, dcb->target_lun, srb);
783 list_add(&srb->list, &dcb->srb_waiting_list); 783 list_add(&srb->list, &dcb->srb_waiting_list);
784} 784}
785 785
@@ -787,16 +787,16 @@ static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
787static void srb_waiting_append(struct DeviceCtlBlk *dcb, 787static void srb_waiting_append(struct DeviceCtlBlk *dcb,
788 struct ScsiReqBlk *srb) 788 struct ScsiReqBlk *srb)
789{ 789{
790 dprintkdbg(DBG_0, "srb_waiting_append: (pid#%li) <%02i-%i> srb=%p\n", 790 dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
791 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 791 srb->cmd, dcb->target_id, dcb->target_lun, srb);
792 list_add_tail(&srb->list, &dcb->srb_waiting_list); 792 list_add_tail(&srb->list, &dcb->srb_waiting_list);
793} 793}
794 794
795 795
796static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) 796static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
797{ 797{
798 dprintkdbg(DBG_0, "srb_going_append: (pid#%li) <%02i-%i> srb=%p\n", 798 dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
799 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 799 srb->cmd, dcb->target_id, dcb->target_lun, srb);
800 list_add_tail(&srb->list, &dcb->srb_going_list); 800 list_add_tail(&srb->list, &dcb->srb_going_list);
801} 801}
802 802
@@ -805,8 +805,8 @@ static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
805{ 805{
806 struct ScsiReqBlk *i; 806 struct ScsiReqBlk *i;
807 struct ScsiReqBlk *tmp; 807 struct ScsiReqBlk *tmp;
808 dprintkdbg(DBG_0, "srb_going_remove: (pid#%li) <%02i-%i> srb=%p\n", 808 dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
809 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 809 srb->cmd, dcb->target_id, dcb->target_lun, srb);
810 810
811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list) 811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
812 if (i == srb) { 812 if (i == srb) {
@@ -821,8 +821,8 @@ static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
821{ 821{
822 struct ScsiReqBlk *i; 822 struct ScsiReqBlk *i;
823 struct ScsiReqBlk *tmp; 823 struct ScsiReqBlk *tmp;
824 dprintkdbg(DBG_0, "srb_waiting_remove: (pid#%li) <%02i-%i> srb=%p\n", 824 dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
825 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 825 srb->cmd, dcb->target_id, dcb->target_lun, srb);
826 826
827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list) 827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
828 if (i == srb) { 828 if (i == srb) {
@@ -836,8 +836,8 @@ static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
836 struct ScsiReqBlk *srb) 836 struct ScsiReqBlk *srb)
837{ 837{
838 dprintkdbg(DBG_0, 838 dprintkdbg(DBG_0,
839 "srb_going_to_waiting_move: (pid#%li) <%02i-%i> srb=%p\n", 839 "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
840 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 840 srb->cmd, dcb->target_id, dcb->target_lun, srb);
841 list_move(&srb->list, &dcb->srb_waiting_list); 841 list_move(&srb->list, &dcb->srb_waiting_list);
842} 842}
843 843
@@ -846,8 +846,8 @@ static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
846 struct ScsiReqBlk *srb) 846 struct ScsiReqBlk *srb)
847{ 847{
848 dprintkdbg(DBG_0, 848 dprintkdbg(DBG_0,
849 "srb_waiting_to_going_move: (pid#%li) <%02i-%i> srb=%p\n", 849 "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
850 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 850 srb->cmd, dcb->target_id, dcb->target_lun, srb);
851 list_move(&srb->list, &dcb->srb_going_list); 851 list_move(&srb->list, &dcb->srb_going_list);
852} 852}
853 853
@@ -982,8 +982,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
982{ 982{
983 int nseg; 983 int nseg;
984 enum dma_data_direction dir = cmd->sc_data_direction; 984 enum dma_data_direction dir = cmd->sc_data_direction;
985 dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n", 985 dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
986 cmd->serial_number, dcb->target_id, dcb->target_lun); 986 cmd, dcb->target_id, dcb->target_lun);
987 987
988 srb->dcb = dcb; 988 srb->dcb = dcb;
989 srb->cmd = cmd; 989 srb->cmd = cmd;
@@ -1086,8 +1086,8 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
1086 struct ScsiReqBlk *srb; 1086 struct ScsiReqBlk *srb;
1087 struct AdapterCtlBlk *acb = 1087 struct AdapterCtlBlk *acb =
1088 (struct AdapterCtlBlk *)cmd->device->host->hostdata; 1088 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1089 dprintkdbg(DBG_0, "queue_command: (pid#%li) <%02i-%i> cmnd=0x%02x\n", 1089 dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
1090 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 1090 cmd, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
1091 1091
1092 /* Assume BAD_TARGET; will be cleared later */ 1092 /* Assume BAD_TARGET; will be cleared later */
1093 cmd->result = DID_BAD_TARGET << 16; 1093 cmd->result = DID_BAD_TARGET << 16;
@@ -1140,7 +1140,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
1140 /* process immediately */ 1140 /* process immediately */
1141 send_srb(acb, srb); 1141 send_srb(acb, srb);
1142 } 1142 }
1143 dprintkdbg(DBG_1, "queue_command: (pid#%li) done\n", cmd->serial_number); 1143 dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
1144 return 0; 1144 return 0;
1145 1145
1146complete: 1146complete:
@@ -1203,9 +1203,9 @@ static void dump_register_info(struct AdapterCtlBlk *acb,
1203 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n", 1203 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
1204 srb, srb->cmd); 1204 srb, srb->cmd);
1205 else 1205 else
1206 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p (pid#%li) " 1206 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
1207 "cmnd=0x%02x <%02i-%i>\n", 1207 "cmnd=0x%02x <%02i-%i>\n",
1208 srb, srb->cmd, srb->cmd->serial_number, 1208 srb, srb->cmd,
1209 srb->cmd->cmnd[0], srb->cmd->device->id, 1209 srb->cmd->cmnd[0], srb->cmd->device->id,
1210 srb->cmd->device->lun); 1210 srb->cmd->device->lun);
1211 printk(" sglist=%p cnt=%i idx=%i len=%zu\n", 1211 printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
@@ -1301,8 +1301,8 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1301 struct AdapterCtlBlk *acb = 1301 struct AdapterCtlBlk *acb =
1302 (struct AdapterCtlBlk *)cmd->device->host->hostdata; 1302 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1303 dprintkl(KERN_INFO, 1303 dprintkl(KERN_INFO,
 1304 "eh_bus_reset: (pid#%li) target=<%02i-%i> cmd=%p\n", 1304 "eh_bus_reset: (0x%p) target=<%02i-%i> cmd=%p\n",
1305 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd); 1305 cmd, cmd->device->id, cmd->device->lun, cmd);
1306 1306
1307 if (timer_pending(&acb->waiting_timer)) 1307 if (timer_pending(&acb->waiting_timer))
1308 del_timer(&acb->waiting_timer); 1308 del_timer(&acb->waiting_timer);
@@ -1368,8 +1368,8 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
1368 (struct AdapterCtlBlk *)cmd->device->host->hostdata; 1368 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1369 struct DeviceCtlBlk *dcb; 1369 struct DeviceCtlBlk *dcb;
1370 struct ScsiReqBlk *srb; 1370 struct ScsiReqBlk *srb;
1371 dprintkl(KERN_INFO, "eh_abort: (pid#%li) target=<%02i-%i> cmd=%p\n", 1371 dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
1372 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd); 1372 cmd, cmd->device->id, cmd->device->lun, cmd);
1373 1373
1374 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); 1374 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1375 if (!dcb) { 1375 if (!dcb) {
@@ -1495,8 +1495,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1495 u16 s_stat2, return_code; 1495 u16 s_stat2, return_code;
1496 u8 s_stat, scsicommand, i, identify_message; 1496 u8 s_stat, scsicommand, i, identify_message;
1497 u8 *ptr; 1497 u8 *ptr;
1498 dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> srb=%p\n", 1498 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
 1499 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 1499 srb->cmd, dcb->target_id, dcb->target_lun, srb);
1500 1500
1501 srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */ 1501 srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
1502 1502
@@ -1505,8 +1505,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1505 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); 1505 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1506#if 1 1506#if 1
1507 if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) { 1507 if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
1508 dprintkdbg(DBG_KG, "start_scsi: (pid#%li) BUSY %02x %04x\n", 1508 dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
 1509 srb->cmd->serial_number, s_stat, s_stat2); 1509 srb->cmd, s_stat, s_stat2);
1510 /* 1510 /*
1511 * Try anyway? 1511 * Try anyway?
1512 * 1512 *
@@ -1522,16 +1522,15 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1522 } 1522 }
1523#endif 1523#endif
1524 if (acb->active_dcb) { 1524 if (acb->active_dcb) {
 1525 dprintkl(KERN_DEBUG, "start_scsi: (pid#%li) Attempt to start a" 1525 dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a "
1526 "command while another command (pid#%li) is active.", 1526 "command while another command (0x%p) is active.",
1527 srb->cmd->serial_number, 1527 srb->cmd,
1528 acb->active_dcb->active_srb ? 1528 acb->active_dcb->active_srb ?
1529 acb->active_dcb->active_srb->cmd->serial_number : 0); 1529 acb->active_dcb->active_srb->cmd : 0);
1530 return 1; 1530 return 1;
1531 } 1531 }
1532 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { 1532 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1533 dprintkdbg(DBG_KG, "start_scsi: (pid#%li) Failed (busy)\n", 1533 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1534 srb->cmd->serial_number);
1535 return 1; 1534 return 1;
1536 } 1535 }
1537 /* Allow starting of SCSI commands half a second before we allow the mid-level 1536 /* Allow starting of SCSI commands half a second before we allow the mid-level
@@ -1603,9 +1602,9 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1603 tag_number++; 1602 tag_number++;
1604 } 1603 }
1605 if (tag_number >= dcb->max_command) { 1604 if (tag_number >= dcb->max_command) {
1606 dprintkl(KERN_WARNING, "start_scsi: (pid#%li) " 1605 dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
 1607 "Out of tags target=<%02i-%i>)\n", 1606 "Out of tags target=<%02i-%i>\n",
1608 srb->cmd->serial_number, srb->cmd->device->id, 1607 srb->cmd, srb->cmd->device->id,
1609 srb->cmd->device->lun); 1608 srb->cmd->device->lun);
1610 srb->state = SRB_READY; 1609 srb->state = SRB_READY;
1611 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, 1610 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
@@ -1623,8 +1622,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1623#endif 1622#endif
1624/*polling:*/ 1623/*polling:*/
1625 /* Send CDB ..command block ......... */ 1624 /* Send CDB ..command block ......... */
1626 dprintkdbg(DBG_KG, "start_scsi: (pid#%li) <%02i-%i> cmnd=0x%02x tag=%i\n", 1625 dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
1627 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun, 1626 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
1628 srb->cmd->cmnd[0], srb->tag_number); 1627 srb->cmd->cmnd[0], srb->tag_number);
1629 if (srb->flag & AUTO_REQSENSE) { 1628 if (srb->flag & AUTO_REQSENSE) {
1630 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); 1629 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
@@ -1647,8 +1646,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1647 * we caught an interrupt (must be reset or reselection ... ) 1646 * we caught an interrupt (must be reset or reselection ... )
1648 * : Let's process it first! 1647 * : Let's process it first!
1649 */ 1648 */
1650 dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> Failed - busy\n", 1649 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
1651 srb->cmd->serial_number, dcb->target_id, dcb->target_lun); 1650 srb->cmd, dcb->target_id, dcb->target_lun);
1652 srb->state = SRB_READY; 1651 srb->state = SRB_READY;
1653 free_tag(dcb, srb); 1652 free_tag(dcb, srb);
1654 srb->msg_count = 0; 1653 srb->msg_count = 0;
@@ -1843,7 +1842,7 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
1843static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 1842static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1844 u16 *pscsi_status) 1843 u16 *pscsi_status)
1845{ 1844{
1846 dprintkdbg(DBG_0, "msgout_phase0: (pid#%li)\n", srb->cmd->serial_number); 1845 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1847 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) 1846 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1848 *pscsi_status = PH_BUS_FREE; /*.. initial phase */ 1847 *pscsi_status = PH_BUS_FREE; /*.. initial phase */
1849 1848
@@ -1857,18 +1856,18 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1857{ 1856{
1858 u16 i; 1857 u16 i;
1859 u8 *ptr; 1858 u8 *ptr;
1860 dprintkdbg(DBG_0, "msgout_phase1: (pid#%li)\n", srb->cmd->serial_number); 1859 dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
1861 1860
1862 clear_fifo(acb, "msgout_phase1"); 1861 clear_fifo(acb, "msgout_phase1");
1863 if (!(srb->state & SRB_MSGOUT)) { 1862 if (!(srb->state & SRB_MSGOUT)) {
1864 srb->state |= SRB_MSGOUT; 1863 srb->state |= SRB_MSGOUT;
1865 dprintkl(KERN_DEBUG, 1864 dprintkl(KERN_DEBUG,
1866 "msgout_phase1: (pid#%li) Phase unexpected\n", 1865 "msgout_phase1: (0x%p) Phase unexpected\n",
1867 srb->cmd->serial_number); /* So what ? */ 1866 srb->cmd); /* So what ? */
1868 } 1867 }
1869 if (!srb->msg_count) { 1868 if (!srb->msg_count) {
1870 dprintkdbg(DBG_0, "msgout_phase1: (pid#%li) NOP msg\n", 1869 dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
1871 srb->cmd->serial_number); 1870 srb->cmd);
1872 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP); 1871 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
1873 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ 1872 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
1874 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); 1873 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
@@ -1888,7 +1887,7 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1888static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 1887static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1889 u16 *pscsi_status) 1888 u16 *pscsi_status)
1890{ 1889{
1891 dprintkdbg(DBG_0, "command_phase0: (pid#%li)\n", srb->cmd->serial_number); 1890 dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
1892 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); 1891 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1893} 1892}
1894 1893
@@ -1899,7 +1898,7 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1899 struct DeviceCtlBlk *dcb; 1898 struct DeviceCtlBlk *dcb;
1900 u8 *ptr; 1899 u8 *ptr;
1901 u16 i; 1900 u16 i;
1902 dprintkdbg(DBG_0, "command_phase1: (pid#%li)\n", srb->cmd->serial_number); 1901 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1903 1902
1904 clear_fifo(acb, "command_phase1"); 1903 clear_fifo(acb, "command_phase1");
1905 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN); 1904 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
@@ -2041,8 +2040,8 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2041 struct DeviceCtlBlk *dcb = srb->dcb; 2040 struct DeviceCtlBlk *dcb = srb->dcb;
2042 u16 scsi_status = *pscsi_status; 2041 u16 scsi_status = *pscsi_status;
2043 u32 d_left_counter = 0; 2042 u32 d_left_counter = 0;
2044 dprintkdbg(DBG_0, "data_out_phase0: (pid#%li) <%02i-%i>\n", 2043 dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
2045 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2044 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2046 2045
2047 /* 2046 /*
2048 * KG: We need to drain the buffers before we draw any conclusions! 2047 * KG: We need to drain the buffers before we draw any conclusions!
@@ -2171,8 +2170,8 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2171static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2170static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2172 u16 *pscsi_status) 2171 u16 *pscsi_status)
2173{ 2172{
2174 dprintkdbg(DBG_0, "data_out_phase1: (pid#%li) <%02i-%i>\n", 2173 dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
2175 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2174 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2176 clear_fifo(acb, "data_out_phase1"); 2175 clear_fifo(acb, "data_out_phase1");
2177 /* do prepare before transfer when data out phase */ 2176 /* do prepare before transfer when data out phase */
2178 data_io_transfer(acb, srb, XFERDATAOUT); 2177 data_io_transfer(acb, srb, XFERDATAOUT);
@@ -2183,8 +2182,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2183{ 2182{
2184 u16 scsi_status = *pscsi_status; 2183 u16 scsi_status = *pscsi_status;
2185 2184
2186 dprintkdbg(DBG_0, "data_in_phase0: (pid#%li) <%02i-%i>\n", 2185 dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
2187 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2186 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2188 2187
2189 /* 2188 /*
2190 * KG: DataIn is much more tricky than DataOut. When the device is finished 2189 * KG: DataIn is much more tricky than DataOut. When the device is finished
@@ -2204,8 +2203,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2204 unsigned int sc, fc; 2203 unsigned int sc, fc;
2205 2204
2206 if (scsi_status & PARITYERROR) { 2205 if (scsi_status & PARITYERROR) {
2207 dprintkl(KERN_INFO, "data_in_phase0: (pid#%li) " 2206 dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
2208 "Parity Error\n", srb->cmd->serial_number); 2207 "Parity Error\n", srb->cmd);
2209 srb->status |= PARITY_ERROR; 2208 srb->status |= PARITY_ERROR;
2210 } 2209 }
2211 /* 2210 /*
@@ -2394,8 +2393,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2394static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2393static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2395 u16 *pscsi_status) 2394 u16 *pscsi_status)
2396{ 2395{
2397 dprintkdbg(DBG_0, "data_in_phase1: (pid#%li) <%02i-%i>\n", 2396 dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
2398 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2397 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2399 data_io_transfer(acb, srb, XFERDATAIN); 2398 data_io_transfer(acb, srb, XFERDATAIN);
2400} 2399}
2401 2400
@@ -2406,8 +2405,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
2406 struct DeviceCtlBlk *dcb = srb->dcb; 2405 struct DeviceCtlBlk *dcb = srb->dcb;
2407 u8 bval; 2406 u8 bval;
2408 dprintkdbg(DBG_0, 2407 dprintkdbg(DBG_0,
2409 "data_io_transfer: (pid#%li) <%02i-%i> %c len=%i, sg=(%i/%i)\n", 2408 "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
2410 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun, 2409 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
2411 ((io_dir & DMACMD_DIR) ? 'r' : 'w'), 2410 ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
2412 srb->total_xfer_length, srb->sg_index, srb->sg_count); 2411 srb->total_xfer_length, srb->sg_index, srb->sg_count);
2413 if (srb == acb->tmp_srb) 2412 if (srb == acb->tmp_srb)
@@ -2579,8 +2578,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
2579static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2578static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2580 u16 *pscsi_status) 2579 u16 *pscsi_status)
2581{ 2580{
2582 dprintkdbg(DBG_0, "status_phase0: (pid#%li) <%02i-%i>\n", 2581 dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
2583 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2582 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2584 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); 2583 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2585 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ 2584 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */
2586 srb->state = SRB_COMPLETED; 2585 srb->state = SRB_COMPLETED;
@@ -2593,8 +2592,8 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2593static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2592static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2594 u16 *pscsi_status) 2593 u16 *pscsi_status)
2595{ 2594{
2596 dprintkdbg(DBG_0, "status_phase1: (pid#%li) <%02i-%i>\n", 2595 dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
2597 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2596 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2598 srb->state = SRB_STATUS; 2597 srb->state = SRB_STATUS;
2599 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ 2598 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
2600 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); 2599 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
@@ -2635,8 +2634,8 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2635{ 2634{
2636 struct ScsiReqBlk *srb = NULL; 2635 struct ScsiReqBlk *srb = NULL;
2637 struct ScsiReqBlk *i; 2636 struct ScsiReqBlk *i;
 2638 dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) tag=%i srb=%p\n", 2637 dprintkdbg(DBG_0, "msgin_qtag: tag=%i srb=%p\n",
 2639 srb->cmd->serial_number, tag, srb); 2638 tag, srb);
2640 2639
2641 if (!(dcb->tag_mask & (1 << tag))) 2640 if (!(dcb->tag_mask & (1 << tag)))
2642 dprintkl(KERN_DEBUG, 2641 dprintkl(KERN_DEBUG,
@@ -2654,8 +2653,8 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2654 if (!srb) 2653 if (!srb)
2655 goto mingx0; 2654 goto mingx0;
2656 2655
2657 dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) <%02i-%i>\n", 2656 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
2658 srb->cmd->serial_number, srb->dcb->target_id, srb->dcb->target_lun); 2657 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2659 if (dcb->flag & ABORT_DEV_) { 2658 if (dcb->flag & ABORT_DEV_) {
2660 /*srb->state = SRB_ABORT_SENT; */ 2659 /*srb->state = SRB_ABORT_SENT; */
2661 enable_msgout_abort(acb, srb); 2660 enable_msgout_abort(acb, srb);
@@ -2865,7 +2864,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2865 u16 *pscsi_status) 2864 u16 *pscsi_status)
2866{ 2865{
2867 struct DeviceCtlBlk *dcb = acb->active_dcb; 2866 struct DeviceCtlBlk *dcb = acb->active_dcb;
2868 dprintkdbg(DBG_0, "msgin_phase0: (pid#%li)\n", srb->cmd->serial_number); 2867 dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
2869 2868
2870 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); 2869 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2871 if (msgin_completed(srb->msgin_buf, acb->msg_len)) { 2870 if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
@@ -2931,9 +2930,9 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2931 * SAVE POINTER may be ignored as we have the struct 2930 * SAVE POINTER may be ignored as we have the struct
2932 * ScsiReqBlk* associated with the scsi command. 2931 * ScsiReqBlk* associated with the scsi command.
2933 */ 2932 */
2934 dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) " 2933 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2935 "SAVE POINTER rem=%i Ignore\n", 2934 "SAVE POINTER rem=%i Ignore\n",
2936 srb->cmd->serial_number, srb->total_xfer_length); 2935 srb->cmd, srb->total_xfer_length);
2937 break; 2936 break;
2938 2937
2939 case RESTORE_POINTERS: 2938 case RESTORE_POINTERS:
@@ -2941,9 +2940,9 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2941 break; 2940 break;
2942 2941
2943 case ABORT: 2942 case ABORT:
2944 dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) " 2943 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2945 "<%02i-%i> ABORT msg\n", 2944 "<%02i-%i> ABORT msg\n",
2946 srb->cmd->serial_number, dcb->target_id, 2945 srb->cmd, dcb->target_id,
2947 dcb->target_lun); 2946 dcb->target_lun);
2948 dcb->flag |= ABORT_DEV_; 2947 dcb->flag |= ABORT_DEV_;
2949 enable_msgout_abort(acb, srb); 2948 enable_msgout_abort(acb, srb);
@@ -2975,7 +2974,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2975static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2974static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2976 u16 *pscsi_status) 2975 u16 *pscsi_status)
2977{ 2976{
2978 dprintkdbg(DBG_0, "msgin_phase1: (pid#%li)\n", srb->cmd->serial_number); 2977 dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
2979 clear_fifo(acb, "msgin_phase1"); 2978 clear_fifo(acb, "msgin_phase1");
2980 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); 2979 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2981 if (!(srb->state & SRB_MSGIN)) { 2980 if (!(srb->state & SRB_MSGIN)) {
@@ -3041,7 +3040,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
3041 } 3040 }
3042 srb = dcb->active_srb; 3041 srb = dcb->active_srb;
3043 acb->active_dcb = NULL; 3042 acb->active_dcb = NULL;
3044 dprintkdbg(DBG_0, "disconnect: (pid#%li)\n", srb->cmd->serial_number); 3043 dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
3045 3044
3046 srb->scsi_phase = PH_BUS_FREE; /* initial phase */ 3045 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
3047 clear_fifo(acb, "disconnect"); 3046 clear_fifo(acb, "disconnect");
@@ -3071,14 +3070,14 @@ static void disconnect(struct AdapterCtlBlk *acb)
3071 && srb->state != SRB_MSGOUT) { 3070 && srb->state != SRB_MSGOUT) {
3072 srb->state = SRB_READY; 3071 srb->state = SRB_READY;
3073 dprintkl(KERN_DEBUG, 3072 dprintkl(KERN_DEBUG,
3074 "disconnect: (pid#%li) Unexpected\n", 3073 "disconnect: (0x%p) Unexpected\n",
3075 srb->cmd->serial_number); 3074 srb->cmd);
3076 srb->target_status = SCSI_STAT_SEL_TIMEOUT; 3075 srb->target_status = SCSI_STAT_SEL_TIMEOUT;
3077 goto disc1; 3076 goto disc1;
3078 } else { 3077 } else {
3079 /* Normal selection timeout */ 3078 /* Normal selection timeout */
3080 dprintkdbg(DBG_KG, "disconnect: (pid#%li) " 3079 dprintkdbg(DBG_KG, "disconnect: (0x%p) "
3081 "<%02i-%i> SelTO\n", srb->cmd->serial_number, 3080 "<%02i-%i> SelTO\n", srb->cmd,
3082 dcb->target_id, dcb->target_lun); 3081 dcb->target_id, dcb->target_lun);
3083 if (srb->retry_count++ > DC395x_MAX_RETRIES 3082 if (srb->retry_count++ > DC395x_MAX_RETRIES
3084 || acb->scan_devices) { 3083 || acb->scan_devices) {
@@ -3089,8 +3088,8 @@ static void disconnect(struct AdapterCtlBlk *acb)
3089 free_tag(dcb, srb); 3088 free_tag(dcb, srb);
3090 srb_going_to_waiting_move(dcb, srb); 3089 srb_going_to_waiting_move(dcb, srb);
3091 dprintkdbg(DBG_KG, 3090 dprintkdbg(DBG_KG,
3092 "disconnect: (pid#%li) Retry\n", 3091 "disconnect: (0x%p) Retry\n",
3093 srb->cmd->serial_number); 3092 srb->cmd);
3094 waiting_set_timer(acb, HZ / 20); 3093 waiting_set_timer(acb, HZ / 20);
3095 } 3094 }
3096 } else if (srb->state & SRB_DISCONNECT) { 3095 } else if (srb->state & SRB_DISCONNECT) {
@@ -3142,9 +3141,9 @@ static void reselect(struct AdapterCtlBlk *acb)
3142 } 3141 }
3143 /* Why the if ? */ 3142 /* Why the if ? */
3144 if (!acb->scan_devices) { 3143 if (!acb->scan_devices) {
3145 dprintkdbg(DBG_KG, "reselect: (pid#%li) <%02i-%i> " 3144 dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
3146 "Arb lost but Resel win rsel=%i stat=0x%04x\n", 3145 "Arb lost but Resel win rsel=%i stat=0x%04x\n",
3147 srb->cmd->serial_number, dcb->target_id, 3146 srb->cmd, dcb->target_id,
3148 dcb->target_lun, rsel_tar_lun_id, 3147 dcb->target_lun, rsel_tar_lun_id,
3149 DC395x_read16(acb, TRM_S1040_SCSI_STATUS)); 3148 DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
3150 arblostflag = 1; 3149 arblostflag = 1;
@@ -3318,7 +3317,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3318 enum dma_data_direction dir = cmd->sc_data_direction; 3317 enum dma_data_direction dir = cmd->sc_data_direction;
3319 int ckc_only = 1; 3318 int ckc_only = 1;
3320 3319
3321 dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->serial_number, 3320 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3322 srb->cmd->device->id, srb->cmd->device->lun); 3321 srb->cmd->device->id, srb->cmd->device->lun);
3323 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", 3322 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3324 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, 3323 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
@@ -3497,9 +3496,9 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3497 cmd->SCp.buffers_residual = 0; 3496 cmd->SCp.buffers_residual = 0;
3498 if (debug_enabled(DBG_KG)) { 3497 if (debug_enabled(DBG_KG)) {
3499 if (srb->total_xfer_length) 3498 if (srb->total_xfer_length)
3500 dprintkdbg(DBG_KG, "srb_done: (pid#%li) <%02i-%i> " 3499 dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
3501 "cmnd=0x%02x Missed %i bytes\n", 3500 "cmnd=0x%02x Missed %i bytes\n",
3502 cmd->serial_number, cmd->device->id, cmd->device->lun, 3501 cmd, cmd->device->id, cmd->device->lun,
3503 cmd->cmnd[0], srb->total_xfer_length); 3502 cmd->cmnd[0], srb->total_xfer_length);
3504 } 3503 }
3505 3504
@@ -3508,8 +3507,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3508 if (srb == acb->tmp_srb) 3507 if (srb == acb->tmp_srb)
3509 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n"); 3508 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3510 else { 3509 else {
3511 dprintkdbg(DBG_0, "srb_done: (pid#%li) done result=0x%08x\n", 3510 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3512 cmd->serial_number, cmd->result); 3511 cmd, cmd->result);
3513 srb_free_insert(acb, srb); 3512 srb_free_insert(acb, srb);
3514 } 3513 }
3515 pci_unmap_srb(acb, srb); 3514 pci_unmap_srb(acb, srb);
@@ -3538,7 +3537,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3538 p = srb->cmd; 3537 p = srb->cmd;
3539 dir = p->sc_data_direction; 3538 dir = p->sc_data_direction;
3540 result = MK_RES(0, did_flag, 0, 0); 3539 result = MK_RES(0, did_flag, 0, 0);
3541 printk("G:%li(%02i-%i) ", p->serial_number, 3540 printk("G:%p(%02i-%i) ", p,
3542 p->device->id, p->device->lun); 3541 p->device->id, p->device->lun);
3543 srb_going_remove(dcb, srb); 3542 srb_going_remove(dcb, srb);
3544 free_tag(dcb, srb); 3543 free_tag(dcb, srb);
@@ -3568,7 +3567,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3568 p = srb->cmd; 3567 p = srb->cmd;
3569 3568
3570 result = MK_RES(0, did_flag, 0, 0); 3569 result = MK_RES(0, did_flag, 0, 0);
3571 printk("W:%li<%02i-%i>", p->serial_number, p->device->id, 3570 printk("W:%p<%02i-%i>", p, p->device->id,
3572 p->device->lun); 3571 p->device->lun);
3573 srb_waiting_remove(dcb, srb); 3572 srb_waiting_remove(dcb, srb);
3574 srb_free_insert(acb, srb); 3573 srb_free_insert(acb, srb);
@@ -3677,8 +3676,8 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3677 struct ScsiReqBlk *srb) 3676 struct ScsiReqBlk *srb)
3678{ 3677{
3679 struct scsi_cmnd *cmd = srb->cmd; 3678 struct scsi_cmnd *cmd = srb->cmd;
3680 dprintkdbg(DBG_1, "request_sense: (pid#%li) <%02i-%i>\n", 3679 dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
3681 cmd->serial_number, cmd->device->id, cmd->device->lun); 3680 cmd, cmd->device->id, cmd->device->lun);
3682 3681
3683 srb->flag |= AUTO_REQSENSE; 3682 srb->flag |= AUTO_REQSENSE;
3684 srb->adapter_status = 0; 3683 srb->adapter_status = 0;
@@ -3708,8 +3707,8 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3708 3707
3709 if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */ 3708 if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */
3710 dprintkl(KERN_DEBUG, 3709 dprintkl(KERN_DEBUG,
3711 "request_sense: (pid#%li) failed <%02i-%i>\n", 3710 "request_sense: (0x%p) failed <%02i-%i>\n",
3712 srb->cmd->serial_number, dcb->target_id, dcb->target_lun); 3711 srb->cmd, dcb->target_id, dcb->target_lun);
3713 srb_going_to_waiting_move(dcb, srb); 3712 srb_going_to_waiting_move(dcb, srb);
3714 waiting_set_timer(acb, HZ / 100); 3713 waiting_set_timer(acb, HZ / 100);
3715 } 3714 }
@@ -4717,13 +4716,13 @@ static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
4717 dcb->target_id, dcb->target_lun, 4716 dcb->target_id, dcb->target_lun,
4718 list_size(&dcb->srb_waiting_list)); 4717 list_size(&dcb->srb_waiting_list));
4719 list_for_each_entry(srb, &dcb->srb_waiting_list, list) 4718 list_for_each_entry(srb, &dcb->srb_waiting_list, list)
4720 SPRINTF(" %li", srb->cmd->serial_number); 4719 SPRINTF(" %p", srb->cmd);
4721 if (!list_empty(&dcb->srb_going_list)) 4720 if (!list_empty(&dcb->srb_going_list))
4722 SPRINTF("\nDCB (%02i-%i): Going : %i:", 4721 SPRINTF("\nDCB (%02i-%i): Going : %i:",
4723 dcb->target_id, dcb->target_lun, 4722 dcb->target_id, dcb->target_lun,
4724 list_size(&dcb->srb_going_list)); 4723 list_size(&dcb->srb_going_list));
4725 list_for_each_entry(srb, &dcb->srb_going_list, list) 4724 list_for_each_entry(srb, &dcb->srb_going_list, list)
4726 SPRINTF(" %li", srb->cmd->serial_number); 4725 SPRINTF(" %p", srb->cmd);
4727 if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) 4726 if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
4728 SPRINTF("\n"); 4727 SPRINTF("\n");
4729 } 4728 }
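
Every dc395x hunk applies one rule: scsi_cmnd::serial_number is going away, so debug output identifies a request by its scsi_cmnd pointer ("(0x%p)") instead. A compilable sketch of the resulting log format; the stub struct stands in for struct scsi_cmnd and is purely illustrative:

#include <stdio.h>

struct scsi_cmnd_stub { unsigned char cmnd[16]; };

static void dbg_queue_command(struct scsi_cmnd_stub *cmd, int id, int lun)
{
	/* identify the request by its pointer, as the converted driver does */
	printf("queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
	       (void *)cmd, id, lun, cmd->cmnd[0]);
}

int main(void)
{
	struct scsi_cmnd_stub c = { .cmnd = { 0x2a } };	/* WRITE(10) opcode */

	dbg_queue_command(&c, 0, 0);
	return 0;
}
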
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 42fe52902add..6fec9fe5dc39 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -782,7 +782,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
782 h->sdev = sdev; 782 h->sdev = sdev;
783 783
784 err = alua_initialize(sdev, h); 784 err = alua_initialize(sdev, h);
785 if (err != SCSI_DH_OK) 785 if ((err != SCSI_DH_OK) && (err != SCSI_DH_DEV_OFFLINED))
786 goto failed; 786 goto failed;
787 787
788 if (!try_module_get(THIS_MODULE)) 788 if (!try_module_get(THIS_MODULE))
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 293c183dfe6d..e7fc70d6b478 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -182,14 +182,24 @@ struct rdac_dh_data {
182 struct rdac_controller *ctlr; 182 struct rdac_controller *ctlr;
183#define UNINITIALIZED_LUN (1 << 8) 183#define UNINITIALIZED_LUN (1 << 8)
184 unsigned lun; 184 unsigned lun;
185
186#define RDAC_MODE 0
187#define RDAC_MODE_AVT 1
188#define RDAC_MODE_IOSHIP 2
189 unsigned char mode;
190
185#define RDAC_STATE_ACTIVE 0 191#define RDAC_STATE_ACTIVE 0
186#define RDAC_STATE_PASSIVE 1 192#define RDAC_STATE_PASSIVE 1
187 unsigned char state; 193 unsigned char state;
188 194
189#define RDAC_LUN_UNOWNED 0 195#define RDAC_LUN_UNOWNED 0
190#define RDAC_LUN_OWNED 1 196#define RDAC_LUN_OWNED 1
191#define RDAC_LUN_AVT 2
192 char lun_state; 197 char lun_state;
198
199#define RDAC_PREFERRED 0
200#define RDAC_NON_PREFERRED 1
201 char preferred;
202
193 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 203 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
194 union { 204 union {
195 struct c2_inquiry c2; 205 struct c2_inquiry c2;
@@ -199,11 +209,15 @@ struct rdac_dh_data {
199 } inq; 209 } inq;
200}; 210};
201 211
212static const char *mode[] = {
213 "RDAC",
214 "AVT",
215 "IOSHIP",
216};
202static const char *lun_state[] = 217static const char *lun_state[] =
203{ 218{
204 "unowned", 219 "unowned",
205 "owned", 220 "owned",
206 "owned (AVT mode)",
207}; 221};
208 222
209struct rdac_queue_data { 223struct rdac_queue_data {
@@ -458,25 +472,33 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
458 int err; 472 int err;
459 struct c9_inquiry *inqp; 473 struct c9_inquiry *inqp;
460 474
461 h->lun_state = RDAC_LUN_UNOWNED;
462 h->state = RDAC_STATE_ACTIVE; 475 h->state = RDAC_STATE_ACTIVE;
463 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); 476 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
464 if (err == SCSI_DH_OK) { 477 if (err == SCSI_DH_OK) {
465 inqp = &h->inq.c9; 478 inqp = &h->inq.c9;
466 if ((inqp->avte_cvp >> 7) == 0x1) { 479 /* detect the operating mode */
467 /* LUN in AVT mode */ 480 if ((inqp->avte_cvp >> 5) & 0x1)
468 sdev_printk(KERN_NOTICE, sdev, 481 h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
469 "%s: AVT mode detected\n", 482 else if (inqp->avte_cvp >> 7)
470 RDAC_NAME); 483 h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
471 h->lun_state = RDAC_LUN_AVT; 484 else
472 } else if ((inqp->avte_cvp & 0x1) != 0) { 485 h->mode = RDAC_MODE; /* LUN in RDAC mode */
473 /* LUN was owned by the controller */ 486
487 /* Update ownership */
488 if (inqp->avte_cvp & 0x1)
474 h->lun_state = RDAC_LUN_OWNED; 489 h->lun_state = RDAC_LUN_OWNED;
490 else {
491 h->lun_state = RDAC_LUN_UNOWNED;
492 if (h->mode == RDAC_MODE)
493 h->state = RDAC_STATE_PASSIVE;
475 } 494 }
476 }
477 495
478 if (h->lun_state == RDAC_LUN_UNOWNED) 496 /* Update path prio*/
479 h->state = RDAC_STATE_PASSIVE; 497 if (inqp->path_prio & 0x1)
498 h->preferred = RDAC_PREFERRED;
499 else
500 h->preferred = RDAC_NON_PREFERRED;
501 }
480 502
481 return err; 503 return err;
482} 504}
@@ -648,12 +670,27 @@ static int rdac_activate(struct scsi_device *sdev,
648{ 670{
649 struct rdac_dh_data *h = get_rdac_data(sdev); 671 struct rdac_dh_data *h = get_rdac_data(sdev);
650 int err = SCSI_DH_OK; 672 int err = SCSI_DH_OK;
673 int act = 0;
651 674
652 err = check_ownership(sdev, h); 675 err = check_ownership(sdev, h);
653 if (err != SCSI_DH_OK) 676 if (err != SCSI_DH_OK)
654 goto done; 677 goto done;
655 678
656 if (h->lun_state == RDAC_LUN_UNOWNED) { 679 switch (h->mode) {
680 case RDAC_MODE:
681 if (h->lun_state == RDAC_LUN_UNOWNED)
682 act = 1;
683 break;
684 case RDAC_MODE_IOSHIP:
685 if ((h->lun_state == RDAC_LUN_UNOWNED) &&
686 (h->preferred == RDAC_PREFERRED))
687 act = 1;
688 break;
689 default:
690 break;
691 }
692
693 if (act) {
657 err = queue_mode_select(sdev, fn, data); 694 err = queue_mode_select(sdev, fn, data);
658 if (err == SCSI_DH_OK) 695 if (err == SCSI_DH_OK)
659 return 0; 696 return 0;
@@ -836,8 +873,9 @@ static int rdac_bus_attach(struct scsi_device *sdev)
836 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 873 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
837 874
838 sdev_printk(KERN_NOTICE, sdev, 875 sdev_printk(KERN_NOTICE, sdev,
839 "%s: LUN %d (%s)\n", 876 "%s: LUN %d (%s) (%s)\n",
840 RDAC_NAME, h->lun, lun_state[(int)h->lun_state]); 877 RDAC_NAME, h->lun, mode[(int)h->mode],
878 lun_state[(int)h->lun_state]);
841 879
842 return 0; 880 return 0;
843 881
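
check_ownership() now derives three things from the C9 inquiry: bit 5 of avte_cvp selects IOSHIP mode, bit 7 AVT mode, otherwise classic RDAC; bit 0 gives LUN ownership, and bit 0 of path_prio marks the preferred path, which rdac_activate() then consults before queueing a mode select. A hedged standalone sketch of the mode decoding only, with sample inquiry bytes chosen for illustration:

#include <stdio.h>

enum rdac_mode { RDAC_MODE, RDAC_MODE_AVT, RDAC_MODE_IOSHIP };

static enum rdac_mode decode_mode(unsigned char avte_cvp)
{
	if ((avte_cvp >> 5) & 0x1)
		return RDAC_MODE_IOSHIP;	/* bit 5: I/O shipping */
	if (avte_cvp >> 7)
		return RDAC_MODE_AVT;		/* bit 7: AVT */
	return RDAC_MODE;			/* neither: classic RDAC */
}

int main(void)
{
	static const char *names[] = { "RDAC", "AVT", "IOSHIP" };

	printf("0x21 -> %s\n", names[decode_mode(0x21)]);	/* IOSHIP, owned */
	printf("0x80 -> %s\n", names[decode_mode(0x80)]);	/* AVT */
	printf("0x00 -> %s\n", names[decode_mode(0x00)]);	/* RDAC, unowned */
	return 0;
}
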
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index cffcb108ac96..b4f6c9a84e71 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -780,7 +780,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
780 return FAILED; 780 return FAILED;
781 } 781 }
782 pHba = (adpt_hba*) cmd->device->host->hostdata[0]; 782 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
783 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number); 783 printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
784 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) { 784 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
785 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name); 785 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
786 return FAILED; 786 return FAILED;
@@ -802,10 +802,10 @@ static int adpt_abort(struct scsi_cmnd * cmd)
802 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name); 802 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
803 return FAILED; 803 return FAILED;
804 } 804 }
805 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number); 805 printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
806 return FAILED; 806 return FAILED;
807 } 807 }
808 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number); 808 printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
809 return SUCCESS; 809 return SUCCESS;
810} 810}
811 811
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 0eb4fe6a4c8a..94de88955a99 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1766,8 +1766,8 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
1766 struct mscp *cpp; 1766 struct mscp *cpp;
1767 1767
1768 if (SCpnt->host_scribble) 1768 if (SCpnt->host_scribble)
1769 panic("%s: qcomm, pid %ld, SCpnt %p already active.\n", 1769 panic("%s: qcomm, SCpnt %p already active.\n",
1770 ha->board_name, SCpnt->serial_number, SCpnt); 1770 ha->board_name, SCpnt);
1771 1771
1772 /* i is the mailbox number, look for the first free mailbox 1772 /* i is the mailbox number, look for the first free mailbox
1773 starting from last_cp_used */ 1773 starting from last_cp_used */
@@ -1801,7 +1801,7 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
1801 1801
1802 if (do_trace) 1802 if (do_trace)
1803 scmd_printk(KERN_INFO, SCpnt, 1803 scmd_printk(KERN_INFO, SCpnt,
1804 "qcomm, mbox %d, pid %ld.\n", i, SCpnt->serial_number); 1804 "qcomm, mbox %d.\n", i);
1805 1805
1806 cpp->reqsen = 1; 1806 cpp->reqsen = 1;
1807 cpp->dispri = 1; 1807 cpp->dispri = 1;
@@ -1833,8 +1833,7 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
1833 if (do_dma(shost->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) { 1833 if (do_dma(shost->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) {
1834 unmap_dma(i, ha); 1834 unmap_dma(i, ha);
1835 SCpnt->host_scribble = NULL; 1835 SCpnt->host_scribble = NULL;
1836 scmd_printk(KERN_INFO, SCpnt, 1836 scmd_printk(KERN_INFO, SCpnt, "qcomm, adapter busy.\n");
1837 "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number);
1838 return 1; 1837 return 1;
1839 } 1838 }
1840 1839
@@ -1851,14 +1850,12 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
1851 unsigned int i; 1850 unsigned int i;
1852 1851
1853 if (SCarg->host_scribble == NULL) { 1852 if (SCarg->host_scribble == NULL) {
1854 scmd_printk(KERN_INFO, SCarg, 1853 scmd_printk(KERN_INFO, SCarg, "abort, cmd inactive.\n");
1855 "abort, pid %ld inactive.\n", SCarg->serial_number);
1856 return SUCCESS; 1854 return SUCCESS;
1857 } 1855 }
1858 1856
1859 i = *(unsigned int *)SCarg->host_scribble; 1857 i = *(unsigned int *)SCarg->host_scribble;
1860 scmd_printk(KERN_WARNING, SCarg, 1858 scmd_printk(KERN_WARNING, SCarg, "abort, mbox %d.\n", i);
1861 "abort, mbox %d, pid %ld.\n", i, SCarg->serial_number);
1862 1859
1863 if (i >= shost->can_queue) 1860 if (i >= shost->can_queue)
1864 panic("%s: abort, invalid SCarg->host_scribble.\n", ha->board_name); 1861 panic("%s: abort, invalid SCarg->host_scribble.\n", ha->board_name);
@@ -1902,8 +1899,8 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
1902 SCarg->result = DID_ABORT << 16; 1899 SCarg->result = DID_ABORT << 16;
1903 SCarg->host_scribble = NULL; 1900 SCarg->host_scribble = NULL;
1904 ha->cp_stat[i] = FREE; 1901 ha->cp_stat[i] = FREE;
1905 printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n", 1902 printk("%s, abort, mbox %d ready, DID_ABORT, done.\n",
1906 ha->board_name, i, SCarg->serial_number); 1903 ha->board_name, i);
1907 SCarg->scsi_done(SCarg); 1904 SCarg->scsi_done(SCarg);
1908 return SUCCESS; 1905 return SUCCESS;
1909 } 1906 }
@@ -1919,13 +1916,12 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
1919 struct Scsi_Host *shost = SCarg->device->host; 1916 struct Scsi_Host *shost = SCarg->device->host;
1920 struct hostdata *ha = (struct hostdata *)shost->hostdata; 1917 struct hostdata *ha = (struct hostdata *)shost->hostdata;
1921 1918
1922 scmd_printk(KERN_INFO, SCarg, 1919 scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");
1923 "reset, enter, pid %ld.\n", SCarg->serial_number);
1924 1920
1925 spin_lock_irq(shost->host_lock); 1921 spin_lock_irq(shost->host_lock);
1926 1922
1927 if (SCarg->host_scribble == NULL) 1923 if (SCarg->host_scribble == NULL)
1928 printk("%s: reset, pid %ld inactive.\n", ha->board_name, SCarg->serial_number); 1924 printk("%s: reset, inactive.\n", ha->board_name);
1929 1925
1930 if (ha->in_reset) { 1926 if (ha->in_reset) {
1931 printk("%s: reset, exit, already in reset.\n", ha->board_name); 1927 printk("%s: reset, exit, already in reset.\n", ha->board_name);
@@ -1964,14 +1960,14 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
1964 1960
1965 if (ha->cp_stat[i] == READY || ha->cp_stat[i] == ABORTING) { 1961 if (ha->cp_stat[i] == READY || ha->cp_stat[i] == ABORTING) {
1966 ha->cp_stat[i] = ABORTING; 1962 ha->cp_stat[i] = ABORTING;
1967 printk("%s: reset, mbox %d aborting, pid %ld.\n", 1963 printk("%s: reset, mbox %d aborting.\n",
1968 ha->board_name, i, SCpnt->serial_number); 1964 ha->board_name, i);
1969 } 1965 }
1970 1966
1971 else { 1967 else {
1972 ha->cp_stat[i] = IN_RESET; 1968 ha->cp_stat[i] = IN_RESET;
1973 printk("%s: reset, mbox %d in reset, pid %ld.\n", 1969 printk("%s: reset, mbox %d in reset.\n",
1974 ha->board_name, i, SCpnt->serial_number); 1970 ha->board_name, i);
1975 } 1971 }
1976 1972
1977 if (SCpnt->host_scribble == NULL) 1973 if (SCpnt->host_scribble == NULL)
@@ -2025,8 +2021,8 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
2025 ha->cp_stat[i] = LOCKED; 2021 ha->cp_stat[i] = LOCKED;
2026 2022
2027 printk 2023 printk
2028 ("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", 2024 ("%s, reset, mbox %d locked, DID_RESET, done.\n",
2029 ha->board_name, i, SCpnt->serial_number); 2025 ha->board_name, i);
2030 } 2026 }
2031 2027
2032 else if (ha->cp_stat[i] == ABORTING) { 2028 else if (ha->cp_stat[i] == ABORTING) {
@@ -2039,8 +2035,8 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
2039 ha->cp_stat[i] = FREE; 2035 ha->cp_stat[i] = FREE;
2040 2036
2041 printk 2037 printk
2042 ("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", 2038 ("%s, reset, mbox %d aborting, DID_RESET, done.\n",
2043 ha->board_name, i, SCpnt->serial_number); 2039 ha->board_name, i);
2044 } 2040 }
2045 2041
2046 else 2042 else
@@ -2054,7 +2050,7 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
2054 do_trace = 0; 2050 do_trace = 0;
2055 2051
2056 if (arg_done) 2052 if (arg_done)
2057 printk("%s: reset, exit, pid %ld done.\n", ha->board_name, SCarg->serial_number); 2053 printk("%s: reset, exit, done.\n", ha->board_name);
2058 else 2054 else
2059 printk("%s: reset, exit.\n", ha->board_name); 2055 printk("%s: reset, exit.\n", ha->board_name);
2060 2056
@@ -2238,10 +2234,10 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2238 cpp = &ha->cp[k]; 2234 cpp = &ha->cp[k];
2239 SCpnt = cpp->SCpnt; 2235 SCpnt = cpp->SCpnt;
2240 scmd_printk(KERN_INFO, SCpnt, 2236 scmd_printk(KERN_INFO, SCpnt,
2241 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u" 2237 "%s mb %d fc %d nr %d sec %ld ns %u"
2242 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2238 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2243 (ihdlr ? "ihdlr" : "qcomm"), 2239 (ihdlr ? "ihdlr" : "qcomm"),
2244 SCpnt->serial_number, k, flushcount, 2240 k, flushcount,
2245 n_ready, blk_rq_pos(SCpnt->request), 2241 n_ready, blk_rq_pos(SCpnt->request),
2246 blk_rq_sectors(SCpnt->request), cursec, YESNO(s), 2242 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2247 YESNO(r), YESNO(rev), YESNO(input_only), 2243 YESNO(r), YESNO(rev), YESNO(input_only),
@@ -2285,10 +2281,10 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec,
2285 2281
2286 if (do_dma(dev->host->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) { 2282 if (do_dma(dev->host->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) {
2287 scmd_printk(KERN_INFO, SCpnt, 2283 scmd_printk(KERN_INFO, SCpnt,
2288 "%s, pid %ld, mbox %d, adapter" 2284 "%s, mbox %d, adapter"
2289 " busy, will abort.\n", 2285 " busy, will abort.\n",
2290 (ihdlr ? "ihdlr" : "qcomm"), 2286 (ihdlr ? "ihdlr" : "qcomm"),
2291 SCpnt->serial_number, k); 2287 k);
2292 ha->cp_stat[k] = ABORTING; 2288 ha->cp_stat[k] = ABORTING;
2293 continue; 2289 continue;
2294 } 2290 }
@@ -2398,12 +2394,12 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2398 panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", ha->board_name, i); 2394 panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", ha->board_name, i);
2399 2395
2400 if (SCpnt->host_scribble == NULL) 2396 if (SCpnt->host_scribble == NULL)
2401 panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", ha->board_name, 2397 panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", ha->board_name,
2402 i, SCpnt->serial_number, SCpnt); 2398 i, SCpnt);
2403 2399
2404 if (*(unsigned int *)SCpnt->host_scribble != i) 2400 if (*(unsigned int *)SCpnt->host_scribble != i)
2405 panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", 2401 panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
2406 ha->board_name, i, SCpnt->serial_number, 2402 ha->board_name, i,
2407 *(unsigned int *)SCpnt->host_scribble); 2403 *(unsigned int *)SCpnt->host_scribble);
2408 2404
2409 sync_dma(i, ha); 2405 sync_dma(i, ha);
@@ -2449,11 +2445,11 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2449 if (spp->target_status && SCpnt->device->type == TYPE_DISK && 2445 if (spp->target_status && SCpnt->device->type == TYPE_DISK &&
2450 (!(tstatus == CHECK_CONDITION && ha->iocount <= 1000 && 2446 (!(tstatus == CHECK_CONDITION && ha->iocount <= 1000 &&
2451 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) 2447 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
2452 printk("%s: ihdlr, target %d.%d:%d, pid %ld, " 2448 printk("%s: ihdlr, target %d.%d:%d, "
2453 "target_status 0x%x, sense key 0x%x.\n", 2449 "target_status 0x%x, sense key 0x%x.\n",
2454 ha->board_name, 2450 ha->board_name,
2455 SCpnt->device->channel, SCpnt->device->id, 2451 SCpnt->device->channel, SCpnt->device->id,
2456 SCpnt->device->lun, SCpnt->serial_number, 2452 SCpnt->device->lun,
2457 spp->target_status, SCpnt->sense_buffer[2]); 2453 spp->target_status, SCpnt->sense_buffer[2]);
2458 2454
2459 ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0; 2455 ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0;
@@ -2522,9 +2518,9 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2522 do_trace || msg_byte(spp->target_status)) 2518 do_trace || msg_byte(spp->target_status))
2523#endif 2519#endif
2524 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x," 2520 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"
2525 " pid %ld, reg 0x%x, count %d.\n", 2521 " reg 0x%x, count %d.\n",
2526 i, spp->adapter_status, spp->target_status, 2522 i, spp->adapter_status, spp->target_status,
2527 SCpnt->serial_number, reg, ha->iocount); 2523 reg, ha->iocount);
2528 2524
2529 unmap_dma(i, ha); 2525 unmap_dma(i, ha);
2530 2526
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 4a9641e69f54..d5f8362335d3 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -372,8 +372,7 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
372 cp->status = USED; /* claim free slot */ 372 cp->status = USED; /* claim free slot */
373 373
374 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 374 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
375 "eata_pio_queue pid %ld, y %d\n", 375 "eata_pio_queue 0x%p, y %d\n", cmd, y));
376 cmd->serial_number, y));
377 376
378 cmd->scsi_done = (void *) done; 377 cmd->scsi_done = (void *) done;
379 378
@@ -417,8 +416,8 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
417 if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) { 416 if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) {
418 cmd->result = DID_BUS_BUSY << 16; 417 cmd->result = DID_BUS_BUSY << 16;
419 scmd_printk(KERN_NOTICE, cmd, 418 scmd_printk(KERN_NOTICE, cmd,
 420 "eata_pio_queue pid %ld, HBA busy, " 419 "eata_pio_queue 0x%p, HBA busy, "
421 "returning DID_BUS_BUSY, done.\n", cmd->serial_number); 420 "returning DID_BUS_BUSY, done.\n", cmd);
422 done(cmd); 421 done(cmd);
423 cp->status = FREE; 422 cp->status = FREE;
424 return 0; 423 return 0;
@@ -432,8 +431,8 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
432 outw(0, base + HA_RDATA); 431 outw(0, base + HA_RDATA);
433 432
434 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 433 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
435 "Queued base %#.4lx pid: %ld " 434 "Queued base %#.4lx cmd: 0x%p "
436 "slot %d irq %d\n", sh->base, cmd->serial_number, y, sh->irq)); 435 "slot %d irq %d\n", sh->base, cmd, y, sh->irq));
437 436
438 return 0; 437 return 0;
439} 438}
@@ -445,8 +444,7 @@ static int eata_pio_abort(struct scsi_cmnd *cmd)
445 unsigned int loop = 100; 444 unsigned int loop = 100;
446 445
447 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, 446 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
448 "eata_pio_abort called pid: %ld\n", 447 "eata_pio_abort called pid: 0x%p\n", cmd));
449 cmd->serial_number));
450 448
451 while (inb(cmd->device->host->base + HA_RAUXSTAT) & HA_ABUSY) 449 while (inb(cmd->device->host->base + HA_RAUXSTAT) & HA_ABUSY)
452 if (--loop == 0) { 450 if (--loop == 0) {
@@ -481,8 +479,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
481 struct Scsi_Host *host = cmd->device->host; 479 struct Scsi_Host *host = cmd->device->host;
482 480
483 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, 481 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
484 "eata_pio_reset called pid:%ld\n", 482 "eata_pio_reset called\n"));
485 cmd->serial_number));
486 483
487 spin_lock_irq(host->host_lock); 484 spin_lock_irq(host->host_lock);
488 485
@@ -501,7 +498,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
501 498
502 sp = HD(cmd)->ccb[x].cmd; 499 sp = HD(cmd)->ccb[x].cmd;
503 HD(cmd)->ccb[x].status = RESET; 500 HD(cmd)->ccb[x].status = RESET;
504 printk(KERN_WARNING "eata_pio_reset: slot %d in reset, pid %ld.\n", x, sp->serial_number); 501 printk(KERN_WARNING "eata_pio_reset: slot %d in reset.\n", x);
505 502
506 if (sp == NULL) 503 if (sp == NULL)
507 panic("eata_pio_reset: slot %d, sp==NULL.\n", x); 504 panic("eata_pio_reset: slot %d, sp==NULL.\n", x);
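The eata and eata_pio hunks above drop the scsi_cmnd serial_number from their debug and error messages and print the command pointer with %p instead, as the serial_number field is on its way out of the SCSI midlayer. A trivial userspace illustration of tagging log lines with a request's address rather than a serial counter (all names here are invented for the sketch):

    #include <stdio.h>

    struct request { int opcode; };

    static void log_request(const char *what, const struct request *rq)
    {
            /* the pointer identifies the in-flight request; no serial field needed */
            printf("%s: request %p, opcode 0x%02x\n", what, (const void *)rq, rq->opcode);
    }

    int main(void)
    {
            struct request rq = { .opcode = 0x2a };

            log_request("queue", &rq);
            log_request("abort", &rq);
            return 0;
    }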
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 57558523c1b8..9a1af1d6071a 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -708,8 +708,7 @@ static void esp_maybe_execute_command(struct esp *esp)
708 tp = &esp->target[tgt]; 708 tp = &esp->target[tgt];
709 lp = dev->hostdata; 709 lp = dev->hostdata;
710 710
711 list_del(&ent->list); 711 list_move(&ent->list, &esp->active_cmds);
712 list_add(&ent->list, &esp->active_cmds);
713 712
714 esp->active_cmd = ent; 713 esp->active_cmd = ent;
715 714
@@ -1244,8 +1243,7 @@ static int esp_finish_select(struct esp *esp)
1244 /* Now that the state is unwound properly, put back onto 1243 /* Now that the state is unwound properly, put back onto
1245 * the issue queue. This command is no longer active. 1244 * the issue queue. This command is no longer active.
1246 */ 1245 */
1247 list_del(&ent->list); 1246 list_move(&ent->list, &esp->queued_cmds);
1248 list_add(&ent->list, &esp->queued_cmds);
1249 esp->active_cmd = NULL; 1247 esp->active_cmd = NULL;
1250 1248
1251 /* Return value ignored by caller, it directly invokes 1249 /* Return value ignored by caller, it directly invokes
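Both esp_scsi hunks replace an open-coded list_del()/list_add() pair with list_move(), which unlinks an entry and re-links it at the head of another list in one call. A minimal userspace sketch of that equivalence, using simplified stand-ins for the kernel's <linux/list.h> helpers rather than the real implementation:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *new, struct list_head *head)
    {
            new->next = head->next;
            new->prev = head;
            head->next->prev = new;
            head->next = new;
    }

    static void list_del(struct list_head *entry)
    {
            entry->prev->next = entry->next;
            entry->next->prev = entry->prev;
    }

    /* list_move(): unlink from the current list, add at the head of another */
    static void list_move(struct list_head *entry, struct list_head *head)
    {
            list_del(entry);
            list_add(entry, head);
    }

    struct cmd { int id; struct list_head list; };

    int main(void)
    {
            struct list_head queued, active;
            struct cmd c = { .id = 42 };

            INIT_LIST_HEAD(&queued);
            INIT_LIST_HEAD(&active);
            list_add(&c.list, &queued);

            /* equivalent to list_del(&c.list); list_add(&c.list, &active); */
            list_move(&c.list, &active);

            printf("active list %s empty\n", active.next == &active ? "is" : "is not");
            return 0;
    }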
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index bde6ee5333eb..5d3700dc6f8c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -381,6 +381,42 @@ out:
381} 381}
382 382
383/** 383/**
384 * fcoe_interface_release() - fcoe_port kref release function
385 * @kref: Embedded reference count in an fcoe_interface struct
386 */
387static void fcoe_interface_release(struct kref *kref)
388{
389 struct fcoe_interface *fcoe;
390 struct net_device *netdev;
391
392 fcoe = container_of(kref, struct fcoe_interface, kref);
393 netdev = fcoe->netdev;
394 /* tear-down the FCoE controller */
395 fcoe_ctlr_destroy(&fcoe->ctlr);
396 kfree(fcoe);
397 dev_put(netdev);
398 module_put(THIS_MODULE);
399}
400
401/**
402 * fcoe_interface_get() - Get a reference to a FCoE interface
403 * @fcoe: The FCoE interface to be held
404 */
405static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
406{
407 kref_get(&fcoe->kref);
408}
409
410/**
411 * fcoe_interface_put() - Put a reference to a FCoE interface
412 * @fcoe: The FCoE interface to be released
413 */
414static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
415{
416 kref_put(&fcoe->kref, fcoe_interface_release);
417}
418
419/**
384 * fcoe_interface_cleanup() - Clean up a FCoE interface 420 * fcoe_interface_cleanup() - Clean up a FCoE interface
385 * @fcoe: The FCoE interface to be cleaned up 421 * @fcoe: The FCoE interface to be cleaned up
386 * 422 *
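The helpers moved up here, fcoe_interface_get()/fcoe_interface_put(), wrap the interface's embedded kref so that fcoe_interface_release() runs only when the last reference is dropped and can then free the object and release the netdev and module references. A minimal userspace sketch of the same release-callback pattern using C11 atomics; this is an illustration of the idea, not the kernel's kref implementation:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct kref_like { atomic_int refcount; };

    static void kref_like_init(struct kref_like *k) { atomic_init(&k->refcount, 1); }
    static void kref_like_get(struct kref_like *k)  { atomic_fetch_add(&k->refcount, 1); }

    /* Drop a reference; run release() only when the count hits zero. */
    static void kref_like_put(struct kref_like *k, void (*release)(struct kref_like *))
    {
            if (atomic_fetch_sub(&k->refcount, 1) == 1)
                    release(k);
    }

    struct iface {
            struct kref_like kref;  /* first member, so the cast below is valid */
            char name[16];
    };

    static void iface_release(struct kref_like *k)
    {
            struct iface *i = (struct iface *)k;    /* poor man's container_of() */

            printf("releasing %s\n", i->name);
            free(i);
    }

    int main(void)
    {
            struct iface *i = malloc(sizeof(*i));

            if (!i)
                    return 1;
            kref_like_init(&i->kref);
            snprintf(i->name, sizeof(i->name), "fcoe0");

            kref_like_get(&i->kref);                /* a second user takes a reference */
            kref_like_put(&i->kref, iface_release); /* first user done: 2 -> 1         */
            kref_like_put(&i->kref, iface_release); /* last user done: release runs    */
            return 0;
    }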
@@ -392,6 +428,21 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
392 struct fcoe_ctlr *fip = &fcoe->ctlr; 428 struct fcoe_ctlr *fip = &fcoe->ctlr;
393 u8 flogi_maddr[ETH_ALEN]; 429 u8 flogi_maddr[ETH_ALEN];
394 const struct net_device_ops *ops; 430 const struct net_device_ops *ops;
431 struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
432
433 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
434
435 /* Logout of the fabric */
436 fc_fabric_logoff(fcoe->ctlr.lp);
437
438 /* Cleanup the fc_lport */
439 fc_lport_destroy(fcoe->ctlr.lp);
440
441 /* Stop the transmit retry timer */
442 del_timer_sync(&port->timer);
443
444 /* Free existing transmit skbs */
445 fcoe_clean_pending_queue(fcoe->ctlr.lp);
395 446
396 /* 447 /*
397 * Don't listen for Ethernet packets anymore. 448 * Don't listen for Ethernet packets anymore.
@@ -414,6 +465,9 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
414 } else 465 } else
415 dev_mc_del(netdev, FIP_ALL_ENODE_MACS); 466 dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
416 467
468 if (!is_zero_ether_addr(port->data_src_addr))
469 dev_uc_del(netdev, port->data_src_addr);
470
417 /* Tell the LLD we are done w/ FCoE */ 471 /* Tell the LLD we are done w/ FCoE */
418 ops = netdev->netdev_ops; 472 ops = netdev->netdev_ops;
419 if (ops->ndo_fcoe_disable) { 473 if (ops->ndo_fcoe_disable) {
@@ -421,42 +475,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
421 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" 475 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
422 " specific feature for LLD.\n"); 476 " specific feature for LLD.\n");
423 } 477 }
424} 478 fcoe_interface_put(fcoe);
425
426/**
427 * fcoe_interface_release() - fcoe_port kref release function
428 * @kref: Embedded reference count in an fcoe_interface struct
429 */
430static void fcoe_interface_release(struct kref *kref)
431{
432 struct fcoe_interface *fcoe;
433 struct net_device *netdev;
434
435 fcoe = container_of(kref, struct fcoe_interface, kref);
436 netdev = fcoe->netdev;
437 /* tear-down the FCoE controller */
438 fcoe_ctlr_destroy(&fcoe->ctlr);
439 kfree(fcoe);
440 dev_put(netdev);
441 module_put(THIS_MODULE);
442}
443
444/**
445 * fcoe_interface_get() - Get a reference to a FCoE interface
446 * @fcoe: The FCoE interface to be held
447 */
448static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
449{
450 kref_get(&fcoe->kref);
451}
452
453/**
454 * fcoe_interface_put() - Put a reference to a FCoE interface
455 * @fcoe: The FCoE interface to be released
456 */
457static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
458{
459 kref_put(&fcoe->kref, fcoe_interface_release);
460} 479}
461 480
462/** 481/**
@@ -821,39 +840,9 @@ skip_oem:
821 * fcoe_if_destroy() - Tear down a SW FCoE instance 840 * fcoe_if_destroy() - Tear down a SW FCoE instance
822 * @lport: The local port to be destroyed 841 * @lport: The local port to be destroyed
823 * 842 *
824 * Locking: must be called with the RTNL mutex held and RTNL mutex
825 * needed to be dropped by this function since not dropping RTNL
826 * would cause circular locking warning on synchronous fip worker
827 * cancelling thru fcoe_interface_put invoked by this function.
828 *
829 */ 843 */
830static void fcoe_if_destroy(struct fc_lport *lport) 844static void fcoe_if_destroy(struct fc_lport *lport)
831{ 845{
832 struct fcoe_port *port = lport_priv(lport);
833 struct fcoe_interface *fcoe = port->priv;
834 struct net_device *netdev = fcoe->netdev;
835
836 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
837
838 /* Logout of the fabric */
839 fc_fabric_logoff(lport);
840
841 /* Cleanup the fc_lport */
842 fc_lport_destroy(lport);
843
844 /* Stop the transmit retry timer */
845 del_timer_sync(&port->timer);
846
847 /* Free existing transmit skbs */
848 fcoe_clean_pending_queue(lport);
849
850 if (!is_zero_ether_addr(port->data_src_addr))
851 dev_uc_del(netdev, port->data_src_addr);
852 rtnl_unlock();
853
854 /* receives may not be stopped until after this */
855 fcoe_interface_put(fcoe);
856
857 /* Free queued packets for the per-CPU receive threads */ 846 /* Free queued packets for the per-CPU receive threads */
858 fcoe_percpu_clean(lport); 847 fcoe_percpu_clean(lport);
859 848
@@ -1783,23 +1772,8 @@ static int fcoe_disable(struct net_device *netdev)
1783 int rc = 0; 1772 int rc = 0;
1784 1773
1785 mutex_lock(&fcoe_config_mutex); 1774 mutex_lock(&fcoe_config_mutex);
1786#ifdef CONFIG_FCOE_MODULE
1787 /*
1788 * Make sure the module has been initialized, and is not about to be
1789 * removed. Module paramter sysfs files are writable before the
1790 * module_init function is called and after module_exit.
1791 */
1792 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1793 rc = -ENODEV;
1794 goto out_nodev;
1795 }
1796#endif
1797
1798 if (!rtnl_trylock()) {
1799 mutex_unlock(&fcoe_config_mutex);
1800 return -ERESTARTSYS;
1801 }
1802 1775
1776 rtnl_lock();
1803 fcoe = fcoe_hostlist_lookup_port(netdev); 1777 fcoe = fcoe_hostlist_lookup_port(netdev);
1804 rtnl_unlock(); 1778 rtnl_unlock();
1805 1779
@@ -1809,7 +1783,6 @@ static int fcoe_disable(struct net_device *netdev)
1809 } else 1783 } else
1810 rc = -ENODEV; 1784 rc = -ENODEV;
1811 1785
1812out_nodev:
1813 mutex_unlock(&fcoe_config_mutex); 1786 mutex_unlock(&fcoe_config_mutex);
1814 return rc; 1787 return rc;
1815} 1788}
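fcoe_disable() now takes the RTNL with a plain rtnl_lock() instead of the rtnl_trylock()/-ERESTARTSYS retry, presumably because the rest of the series removes the lock-ordering hazard that forced the trylock. The general rule that makes plain blocking locks safe is to acquire the two locks in a fixed order on every path; a small pthread sketch of that rule (illustrative only, hypothetical names):

    #include <pthread.h>
    #include <stdio.h>

    /* stand-ins for fcoe_config_mutex and the RTNL */
    static pthread_mutex_t config_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t rtnl_mutex   = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Every path takes config_mutex first and rtnl_mutex second, so a plain
     * blocking lock cannot deadlock and no trylock/restart workaround is needed.
     */
    static void disable_path(void)
    {
            pthread_mutex_lock(&config_mutex);

            pthread_mutex_lock(&rtnl_mutex);
            printf("look up the port under both locks\n");
            pthread_mutex_unlock(&rtnl_mutex);

            /* ...remaining work under config_mutex only... */
            pthread_mutex_unlock(&config_mutex);
    }

    int main(void)
    {
            disable_path();
            return 0;
    }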
@@ -1828,22 +1801,7 @@ static int fcoe_enable(struct net_device *netdev)
1828 int rc = 0; 1801 int rc = 0;
1829 1802
1830 mutex_lock(&fcoe_config_mutex); 1803 mutex_lock(&fcoe_config_mutex);
1831#ifdef CONFIG_FCOE_MODULE 1804 rtnl_lock();
1832 /*
1833 * Make sure the module has been initialized, and is not about to be
1834 * removed. Module paramter sysfs files are writable before the
1835 * module_init function is called and after module_exit.
1836 */
1837 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1838 rc = -ENODEV;
1839 goto out_nodev;
1840 }
1841#endif
1842 if (!rtnl_trylock()) {
1843 mutex_unlock(&fcoe_config_mutex);
1844 return -ERESTARTSYS;
1845 }
1846
1847 fcoe = fcoe_hostlist_lookup_port(netdev); 1805 fcoe = fcoe_hostlist_lookup_port(netdev);
1848 rtnl_unlock(); 1806 rtnl_unlock();
1849 1807
@@ -1852,7 +1810,6 @@ static int fcoe_enable(struct net_device *netdev)
1852 else if (!fcoe_link_ok(fcoe->ctlr.lp)) 1810 else if (!fcoe_link_ok(fcoe->ctlr.lp))
1853 fcoe_ctlr_link_up(&fcoe->ctlr); 1811 fcoe_ctlr_link_up(&fcoe->ctlr);
1854 1812
1855out_nodev:
1856 mutex_unlock(&fcoe_config_mutex); 1813 mutex_unlock(&fcoe_config_mutex);
1857 return rc; 1814 return rc;
1858} 1815}
@@ -1868,35 +1825,22 @@ out_nodev:
1868static int fcoe_destroy(struct net_device *netdev) 1825static int fcoe_destroy(struct net_device *netdev)
1869{ 1826{
1870 struct fcoe_interface *fcoe; 1827 struct fcoe_interface *fcoe;
1828 struct fc_lport *lport;
1871 int rc = 0; 1829 int rc = 0;
1872 1830
1873 mutex_lock(&fcoe_config_mutex); 1831 mutex_lock(&fcoe_config_mutex);
1874#ifdef CONFIG_FCOE_MODULE 1832 rtnl_lock();
1875 /*
1876 * Make sure the module has been initialized, and is not about to be
1877 * removed. Module paramter sysfs files are writable before the
1878 * module_init function is called and after module_exit.
1879 */
1880 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1881 rc = -ENODEV;
1882 goto out_nodev;
1883 }
1884#endif
1885 if (!rtnl_trylock()) {
1886 mutex_unlock(&fcoe_config_mutex);
1887 return -ERESTARTSYS;
1888 }
1889
1890 fcoe = fcoe_hostlist_lookup_port(netdev); 1833 fcoe = fcoe_hostlist_lookup_port(netdev);
1891 if (!fcoe) { 1834 if (!fcoe) {
1892 rtnl_unlock(); 1835 rtnl_unlock();
1893 rc = -ENODEV; 1836 rc = -ENODEV;
1894 goto out_nodev; 1837 goto out_nodev;
1895 } 1838 }
1896 fcoe_interface_cleanup(fcoe); 1839 lport = fcoe->ctlr.lp;
1897 list_del(&fcoe->list); 1840 list_del(&fcoe->list);
1898 /* RTNL mutex is dropped by fcoe_if_destroy */ 1841 fcoe_interface_cleanup(fcoe);
1899 fcoe_if_destroy(fcoe->ctlr.lp); 1842 rtnl_unlock();
1843 fcoe_if_destroy(lport);
1900out_nodev: 1844out_nodev:
1901 mutex_unlock(&fcoe_config_mutex); 1845 mutex_unlock(&fcoe_config_mutex);
1902 return rc; 1846 return rc;
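The reworked fcoe_destroy() unlinks the interface from the host list and runs fcoe_interface_cleanup() while the RTNL is held, drops the lock, and only then calls fcoe_if_destroy() on the saved lport, so the heavier teardown no longer executes under RTNL. A minimal userspace sketch of the underlying "unlink under the lock, destroy outside it" pattern (simplified, hypothetical names):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int id; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static void destroy_node(struct node *n)
    {
            /* potentially slow teardown; runs without list_lock held */
            printf("destroying node %d\n", n->id);
            free(n);
    }

    static void remove_node(int id)
    {
            struct node **pp, *victim = NULL;

            pthread_mutex_lock(&list_lock);
            for (pp = &head; *pp; pp = &(*pp)->next) {
                    if ((*pp)->id == id) {
                            victim = *pp;
                            *pp = victim->next;     /* unlink while holding the lock */
                            break;
                    }
            }
            pthread_mutex_unlock(&list_lock);

            if (victim)
                    destroy_node(victim);           /* slow work after unlocking */
    }

    int main(void)
    {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                    return 1;
            n->id = 1;
            n->next = NULL;
            head = n;
            remove_node(1);
            return 0;
    }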
@@ -1912,8 +1856,6 @@ static void fcoe_destroy_work(struct work_struct *work)
1912 1856
1913 port = container_of(work, struct fcoe_port, destroy_work); 1857 port = container_of(work, struct fcoe_port, destroy_work);
1914 mutex_lock(&fcoe_config_mutex); 1858 mutex_lock(&fcoe_config_mutex);
1915 rtnl_lock();
1916 /* RTNL mutex is dropped by fcoe_if_destroy */
1917 fcoe_if_destroy(port->lport); 1859 fcoe_if_destroy(port->lport);
1918 mutex_unlock(&fcoe_config_mutex); 1860 mutex_unlock(&fcoe_config_mutex);
1919} 1861}
@@ -1948,23 +1890,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1948 struct fc_lport *lport; 1890 struct fc_lport *lport;
1949 1891
1950 mutex_lock(&fcoe_config_mutex); 1892 mutex_lock(&fcoe_config_mutex);
1951 1893 rtnl_lock();
1952 if (!rtnl_trylock()) {
1953 mutex_unlock(&fcoe_config_mutex);
1954 return -ERESTARTSYS;
1955 }
1956
1957#ifdef CONFIG_FCOE_MODULE
1958 /*
1959 * Make sure the module has been initialized, and is not about to be
1960 * removed. Module paramter sysfs files are writable before the
1961 * module_init function is called and after module_exit.
1962 */
1963 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1964 rc = -ENODEV;
1965 goto out_nodev;
1966 }
1967#endif
1968 1894
1969 /* look for existing lport */ 1895 /* look for existing lport */
1970 if (fcoe_hostlist_lookup(netdev)) { 1896 if (fcoe_hostlist_lookup(netdev)) {
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 9d38be2a41f9..229e4af5508a 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -978,10 +978,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
978 * the FCF that answers multicast solicitations, not the others that 978 * the FCF that answers multicast solicitations, not the others that
979 * are sending periodic multicast advertisements. 979 * are sending periodic multicast advertisements.
980 */ 980 */
981 if (mtu_valid) { 981 if (mtu_valid)
982 list_del(&fcf->list); 982 list_move(&fcf->list, &fip->fcfs);
983 list_add(&fcf->list, &fip->fcfs);
984 }
985 983
986 /* 984 /*
987 * If this is the first validated FCF, note the time and 985 * If this is the first validated FCF, note the time and
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 258684101bfd..f81f77c8569e 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -335,7 +335,7 @@ out_attach:
335EXPORT_SYMBOL(fcoe_transport_attach); 335EXPORT_SYMBOL(fcoe_transport_attach);
336 336
337/** 337/**
338 * fcoe_transport_attach - Detaches an FCoE transport 338 * fcoe_transport_detach - Detaches an FCoE transport
339 * @ft: The fcoe transport to be attached 339 * @ft: The fcoe transport to be attached
340 * 340 *
341 * Returns : 0 for success 341 * Returns : 0 for success
@@ -343,6 +343,7 @@ EXPORT_SYMBOL(fcoe_transport_attach);
343int fcoe_transport_detach(struct fcoe_transport *ft) 343int fcoe_transport_detach(struct fcoe_transport *ft)
344{ 344{
345 int rc = 0; 345 int rc = 0;
346 struct fcoe_netdev_mapping *nm = NULL, *tmp;
346 347
347 mutex_lock(&ft_mutex); 348 mutex_lock(&ft_mutex);
348 if (!ft->attached) { 349 if (!ft->attached) {
@@ -352,6 +353,19 @@ int fcoe_transport_detach(struct fcoe_transport *ft)
352 goto out_attach; 353 goto out_attach;
353 } 354 }
354 355
356 /* remove netdev mapping for this transport as it is going away */
357 mutex_lock(&fn_mutex);
358 list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
359 if (nm->ft == ft) {
360 LIBFCOE_TRANSPORT_DBG("transport %s going away, "
361 "remove its netdev mapping for %s\n",
362 ft->name, nm->netdev->name);
363 list_del(&nm->list);
364 kfree(nm);
365 }
366 }
367 mutex_unlock(&fn_mutex);
368
355 list_del(&ft->list); 369 list_del(&ft->list);
356 ft->attached = false; 370 ft->attached = false;
357 LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); 371 LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
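fcoe_transport_detach() now walks fcoe_netdevs with list_for_each_entry_safe() so that mappings owned by the departing transport can be deleted during the walk; the _safe variant caches the next pointer before the current entry is freed. A userspace sketch of the same idiom on a plain singly linked list (the kernel macro operates on struct list_head, so this only shows the shape of the idea):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct mapping { struct mapping *next; char owner[8]; };

    static struct mapping *head;

    static void add_mapping(const char *owner)
    {
            struct mapping *m = calloc(1, sizeof(*m));

            if (!m)
                    return;
            snprintf(m->owner, sizeof(m->owner), "%s", owner);
            m->next = head;
            head = m;
    }

    /* Delete every mapping owned by "who"; the next pointer is saved before a
     * possible free, which is exactly what list_for_each_entry_safe() does. */
    static void remove_mappings(const char *who)
    {
            struct mapping **pp = &head, *cur, *next;

            for (cur = head; cur; cur = next) {
                    next = cur->next;               /* cache before freeing */
                    if (strcmp(cur->owner, who) == 0) {
                            *pp = next;             /* unlink */
                            free(cur);
                    } else {
                            pp = &cur->next;
                    }
            }
    }

    int main(void)
    {
            add_mapping("fcoe");
            add_mapping("bnx2fc");
            add_mapping("fcoe");
            remove_mappings("fcoe");
            printf("remaining: %s\n", head ? head->owner : "(none)");
            return 0;
    }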
@@ -371,9 +385,9 @@ static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
371 i = j = sprintf(buffer, "Attached FCoE transports:"); 385 i = j = sprintf(buffer, "Attached FCoE transports:");
372 mutex_lock(&ft_mutex); 386 mutex_lock(&ft_mutex);
373 list_for_each_entry(ft, &fcoe_transports, list) { 387 list_for_each_entry(ft, &fcoe_transports, list) {
374 i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); 388 if (i >= PAGE_SIZE - IFNAMSIZ)
375 if (i >= PAGE_SIZE)
376 break; 389 break;
390 i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
377 } 391 }
378 mutex_unlock(&ft_mutex); 392 mutex_unlock(&ft_mutex);
379 if (i == j) 393 if (i == j)
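The fcoe_transport_show() hunk moves the space check ahead of the snprintf() and compares against PAGE_SIZE - IFNAMSIZ, so a transport name is appended only when a worst-case entry still fits, instead of detecting the overflow after it has happened. A small userspace version of the same pre-check pattern with a fixed buffer (the sizes and names are placeholders):

    #include <stdio.h>

    #define BUF_SIZE  64    /* stands in for PAGE_SIZE */
    #define NAME_SIZE 16    /* stands in for IFNAMSIZ  */

    int main(void)
    {
            const char *names[] = { "fcoe", "bnx2fc", "example0", "example1" };
            char buf[BUF_SIZE];
            int i = snprintf(buf, sizeof(buf), "Attached FCoE transports:");

            for (size_t n = 0; n < sizeof(names) / sizeof(names[0]); n++) {
                    if (i >= BUF_SIZE - NAME_SIZE)  /* check before writing */
                            break;
                    i += snprintf(&buf[i], NAME_SIZE, "%s ", names[n]);
            }
            printf("%s\n", buf);
            return 0;
    }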
@@ -530,9 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
530 struct fcoe_transport *ft = NULL; 544 struct fcoe_transport *ft = NULL;
531 enum fip_state fip_mode = (enum fip_state)(long)kp->arg; 545 enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
532 546
533 if (!mutex_trylock(&ft_mutex))
534 return restart_syscall();
535
536#ifdef CONFIG_LIBFCOE_MODULE 547#ifdef CONFIG_LIBFCOE_MODULE
537 /* 548 /*
538 * Make sure the module has been initialized, and is not about to be 549 * Make sure the module has been initialized, and is not about to be
@@ -543,6 +554,8 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
543 goto out_nodev; 554 goto out_nodev;
544#endif 555#endif
545 556
557 mutex_lock(&ft_mutex);
558
546 netdev = fcoe_if_to_netdev(buffer); 559 netdev = fcoe_if_to_netdev(buffer);
547 if (!netdev) { 560 if (!netdev) {
548 LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); 561 LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
@@ -586,10 +599,7 @@ out_putdev:
586 dev_put(netdev); 599 dev_put(netdev);
587out_nodev: 600out_nodev:
588 mutex_unlock(&ft_mutex); 601 mutex_unlock(&ft_mutex);
589 if (rc == -ERESTARTSYS) 602 return rc;
590 return restart_syscall();
591 else
592 return rc;
593} 603}
594 604
595/** 605/**
@@ -608,9 +618,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
608 struct net_device *netdev = NULL; 618 struct net_device *netdev = NULL;
609 struct fcoe_transport *ft = NULL; 619 struct fcoe_transport *ft = NULL;
610 620
611 if (!mutex_trylock(&ft_mutex))
612 return restart_syscall();
613
614#ifdef CONFIG_LIBFCOE_MODULE 621#ifdef CONFIG_LIBFCOE_MODULE
615 /* 622 /*
616 * Make sure the module has been initialized, and is not about to be 623 * Make sure the module has been initialized, and is not about to be
@@ -621,6 +628,8 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
621 goto out_nodev; 628 goto out_nodev;
622#endif 629#endif
623 630
631 mutex_lock(&ft_mutex);
632
624 netdev = fcoe_if_to_netdev(buffer); 633 netdev = fcoe_if_to_netdev(buffer);
625 if (!netdev) { 634 if (!netdev) {
626 LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); 635 LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
@@ -645,11 +654,7 @@ out_putdev:
645 dev_put(netdev); 654 dev_put(netdev);
646out_nodev: 655out_nodev:
647 mutex_unlock(&ft_mutex); 656 mutex_unlock(&ft_mutex);
648 657 return rc;
649 if (rc == -ERESTARTSYS)
650 return restart_syscall();
651 else
652 return rc;
653} 658}
654 659
655/** 660/**
@@ -667,9 +672,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
667 struct net_device *netdev = NULL; 672 struct net_device *netdev = NULL;
668 struct fcoe_transport *ft = NULL; 673 struct fcoe_transport *ft = NULL;
669 674
670 if (!mutex_trylock(&ft_mutex))
671 return restart_syscall();
672
673#ifdef CONFIG_LIBFCOE_MODULE 675#ifdef CONFIG_LIBFCOE_MODULE
674 /* 676 /*
675 * Make sure the module has been initialized, and is not about to be 677 * Make sure the module has been initialized, and is not about to be
@@ -680,6 +682,8 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
680 goto out_nodev; 682 goto out_nodev;
681#endif 683#endif
682 684
685 mutex_lock(&ft_mutex);
686
683 netdev = fcoe_if_to_netdev(buffer); 687 netdev = fcoe_if_to_netdev(buffer);
684 if (!netdev) 688 if (!netdev)
685 goto out_nodev; 689 goto out_nodev;
@@ -716,9 +720,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
716 struct net_device *netdev = NULL; 720 struct net_device *netdev = NULL;
717 struct fcoe_transport *ft = NULL; 721 struct fcoe_transport *ft = NULL;
718 722
719 if (!mutex_trylock(&ft_mutex))
720 return restart_syscall();
721
722#ifdef CONFIG_LIBFCOE_MODULE 723#ifdef CONFIG_LIBFCOE_MODULE
723 /* 724 /*
724 * Make sure the module has been initialized, and is not about to be 725 * Make sure the module has been initialized, and is not about to be
@@ -729,6 +730,8 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
729 goto out_nodev; 730 goto out_nodev;
730#endif 731#endif
731 732
733 mutex_lock(&ft_mutex);
734
732 netdev = fcoe_if_to_netdev(buffer); 735 netdev = fcoe_if_to_netdev(buffer);
733 if (!netdev) 736 if (!netdev)
734 goto out_nodev; 737 goto out_nodev;
@@ -743,10 +746,7 @@ out_putdev:
743 dev_put(netdev); 746 dev_put(netdev);
744out_nodev: 747out_nodev:
745 mutex_unlock(&ft_mutex); 748 mutex_unlock(&ft_mutex);
746 if (rc == -ERESTARTSYS) 749 return rc;
747 return restart_syscall();
748 else
749 return rc;
750} 750}
751 751
752/** 752/**
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 415ad4fb50d4..c6c0434d8034 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -273,7 +273,7 @@ static ssize_t host_show_transport_mode(struct device *dev,
273 "performant" : "simple"); 273 "performant" : "simple");
274} 274}
275 275
276/* List of controllers which cannot be reset on kexec with reset_devices */ 276/* List of controllers which cannot be hard reset on kexec with reset_devices */
277static u32 unresettable_controller[] = { 277static u32 unresettable_controller[] = {
278 0x324a103C, /* Smart Array P712m */ 278 0x324a103C, /* Smart Array P712m */
279 0x324b103C, /* SmartArray P711m */ 279 0x324b103C, /* SmartArray P711m */
@@ -291,16 +291,45 @@ static u32 unresettable_controller[] = {
291 0x409D0E11, /* Smart Array 6400 EM */ 291 0x409D0E11, /* Smart Array 6400 EM */
292}; 292};
293 293
294static int ctlr_is_resettable(struct ctlr_info *h) 294/* List of controllers which cannot even be soft reset */
295static u32 soft_unresettable_controller[] = {
296 /* Exclude 640x boards. These are two pci devices in one slot
297 * which share a battery backed cache module. One controls the
298 * cache, the other accesses the cache through the one that controls
299 * it. If we reset the one controlling the cache, the other will
300 * likely not be happy. Just forbid resetting this conjoined mess.
301 * The 640x isn't really supported by hpsa anyway.
302 */
303 0x409C0E11, /* Smart Array 6400 */
304 0x409D0E11, /* Smart Array 6400 EM */
305};
306
307static int ctlr_is_hard_resettable(u32 board_id)
295{ 308{
296 int i; 309 int i;
297 310
298 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) 311 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
299 if (unresettable_controller[i] == h->board_id) 312 if (unresettable_controller[i] == board_id)
313 return 0;
314 return 1;
315}
316
317static int ctlr_is_soft_resettable(u32 board_id)
318{
319 int i;
320
321 for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
322 if (soft_unresettable_controller[i] == board_id)
300 return 0; 323 return 0;
301 return 1; 324 return 1;
302} 325}
303 326
327static int ctlr_is_resettable(u32 board_id)
328{
329 return ctlr_is_hard_resettable(board_id) ||
330 ctlr_is_soft_resettable(board_id);
331}
332
304static ssize_t host_show_resettable(struct device *dev, 333static ssize_t host_show_resettable(struct device *dev,
305 struct device_attribute *attr, char *buf) 334 struct device_attribute *attr, char *buf)
306{ 335{
@@ -308,7 +337,7 @@ static ssize_t host_show_resettable(struct device *dev,
308 struct Scsi_Host *shost = class_to_shost(dev); 337 struct Scsi_Host *shost = class_to_shost(dev);
309 338
310 h = shost_to_hba(shost); 339 h = shost_to_hba(shost);
311 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); 340 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
312} 341}
313 342
314static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) 343static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
@@ -929,13 +958,6 @@ static void hpsa_slave_destroy(struct scsi_device *sdev)
929 /* nothing to do. */ 958 /* nothing to do. */
930} 959}
931 960
932static void hpsa_scsi_setup(struct ctlr_info *h)
933{
934 h->ndevices = 0;
935 h->scsi_host = NULL;
936 spin_lock_init(&h->devlock);
937}
938
939static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 961static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
940{ 962{
941 int i; 963 int i;
@@ -1006,8 +1028,7 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1006 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); 1028 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1007} 1029}
1008 1030
1009static void complete_scsi_command(struct CommandList *cp, 1031static void complete_scsi_command(struct CommandList *cp)
1010 int timeout, u32 tag)
1011{ 1032{
1012 struct scsi_cmnd *cmd; 1033 struct scsi_cmnd *cmd;
1013 struct ctlr_info *h; 1034 struct ctlr_info *h;
@@ -1308,7 +1329,7 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1308 int retry_count = 0; 1329 int retry_count = 0;
1309 1330
1310 do { 1331 do {
1311 memset(c->err_info, 0, sizeof(c->err_info)); 1332 memset(c->err_info, 0, sizeof(*c->err_info));
1312 hpsa_scsi_do_simple_cmd_core(h, c); 1333 hpsa_scsi_do_simple_cmd_core(h, c);
1313 retry_count++; 1334 retry_count++;
1314 } while (check_for_unit_attention(h, c) && retry_count <= 3); 1335 } while (check_for_unit_attention(h, c) && retry_count <= 3);
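The one-line hpsa change above fixes a classic C mistake: c->err_info is a pointer, so sizeof(c->err_info) is the size of the pointer (4 or 8 bytes), while sizeof(*c->err_info) is the size of the ErrorInfo structure it points to; only the latter clears the whole object. A standalone illustration of the difference:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct err_info { unsigned char sense[32]; unsigned int status; };

    int main(void)
    {
            struct err_info *ei = malloc(sizeof(*ei));

            if (!ei)
                    return 1;
            memset(ei, 0xff, sizeof(*ei));          /* dirty the whole struct     */

            memset(ei, 0, sizeof(ei));              /* BUG: clears only 4/8 bytes */
            printf("after sizeof(ptr):  status = 0x%x\n", ei->status);

            memset(ei, 0, sizeof(*ei));             /* correct: clears the object */
            printf("after sizeof(*ptr): status = 0x%x\n", ei->status);

            free(ei);
            return 0;
    }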
@@ -1570,6 +1591,7 @@ static unsigned char *msa2xxx_model[] = {
1570 "MSA2024", 1591 "MSA2024",
1571 "MSA2312", 1592 "MSA2312",
1572 "MSA2324", 1593 "MSA2324",
1594 "P2000 G3 SAS",
1573 NULL, 1595 NULL,
1574}; 1596};
1575 1597
@@ -2751,6 +2773,26 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2751 } 2773 }
2752} 2774}
2753 2775
2776static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
2777 unsigned char *scsi3addr, u8 reset_type)
2778{
2779 struct CommandList *c;
2780
2781 c = cmd_alloc(h);
2782 if (!c)
2783 return -ENOMEM;
2784 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2785 RAID_CTLR_LUNID, TYPE_MSG);
2786 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2787 c->waiting = NULL;
2788 enqueue_cmd_and_start_io(h, c);
2789 /* Don't wait for completion, the reset won't complete. Don't free
2790 * the command either. This is the last command we will send before
2791 * re-initializing everything, so it doesn't matter and won't leak.
2792 */
2793 return 0;
2794}
2795
2754static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 2796static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2755 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, 2797 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2756 int cmd_type) 2798 int cmd_type)
@@ -2828,7 +2870,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2828 c->Request.Type.Attribute = ATTR_SIMPLE; 2870 c->Request.Type.Attribute = ATTR_SIMPLE;
2829 c->Request.Type.Direction = XFER_NONE; 2871 c->Request.Type.Direction = XFER_NONE;
2830 c->Request.Timeout = 0; /* Don't time out */ 2872 c->Request.Timeout = 0; /* Don't time out */
2831 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */ 2873 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2874 c->Request.CDB[0] = cmd;
2832 c->Request.CDB[1] = 0x03; /* Reset target above */ 2875 c->Request.CDB[1] = 0x03; /* Reset target above */
2833 /* If bytes 4-7 are zero, it means reset the */ 2876 /* If bytes 4-7 are zero, it means reset the */
2834 /* LunID device */ 2877 /* LunID device */
@@ -2936,7 +2979,7 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2936{ 2979{
2937 removeQ(c); 2980 removeQ(c);
2938 if (likely(c->cmd_type == CMD_SCSI)) 2981 if (likely(c->cmd_type == CMD_SCSI))
2939 complete_scsi_command(c, 0, raw_tag); 2982 complete_scsi_command(c);
2940 else if (c->cmd_type == CMD_IOCTL_PEND) 2983 else if (c->cmd_type == CMD_IOCTL_PEND)
2941 complete(c->waiting); 2984 complete(c->waiting);
2942} 2985}
@@ -2994,6 +3037,63 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2994 return next_command(h); 3037 return next_command(h);
2995} 3038}
2996 3039
3040/* Some controllers, like p400, will give us one interrupt
3041 * after a soft reset, even if we turned interrupts off.
3042 * Only need to check for this in the hpsa_xxx_discard_completions
3043 * functions.
3044 */
3045static int ignore_bogus_interrupt(struct ctlr_info *h)
3046{
3047 if (likely(!reset_devices))
3048 return 0;
3049
3050 if (likely(h->interrupts_enabled))
3051 return 0;
3052
3053 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3054 "(known firmware bug.) Ignoring.\n");
3055
3056 return 1;
3057}
3058
3059static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
3060{
3061 struct ctlr_info *h = dev_id;
3062 unsigned long flags;
3063 u32 raw_tag;
3064
3065 if (ignore_bogus_interrupt(h))
3066 return IRQ_NONE;
3067
3068 if (interrupt_not_for_us(h))
3069 return IRQ_NONE;
3070 spin_lock_irqsave(&h->lock, flags);
3071 while (interrupt_pending(h)) {
3072 raw_tag = get_next_completion(h);
3073 while (raw_tag != FIFO_EMPTY)
3074 raw_tag = next_command(h);
3075 }
3076 spin_unlock_irqrestore(&h->lock, flags);
3077 return IRQ_HANDLED;
3078}
3079
3080static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
3081{
3082 struct ctlr_info *h = dev_id;
3083 unsigned long flags;
3084 u32 raw_tag;
3085
3086 if (ignore_bogus_interrupt(h))
3087 return IRQ_NONE;
3088
3089 spin_lock_irqsave(&h->lock, flags);
3090 raw_tag = get_next_completion(h);
3091 while (raw_tag != FIFO_EMPTY)
3092 raw_tag = next_command(h);
3093 spin_unlock_irqrestore(&h->lock, flags);
3094 return IRQ_HANDLED;
3095}
3096
2997static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) 3097static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
2998{ 3098{
2999 struct ctlr_info *h = dev_id; 3099 struct ctlr_info *h = dev_id;
@@ -3132,11 +3232,10 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3132 return 0; 3232 return 0;
3133} 3233}
3134 3234
3135#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3136#define hpsa_noop(p) hpsa_message(p, 3, 0) 3235#define hpsa_noop(p) hpsa_message(p, 3, 0)
3137 3236
3138static int hpsa_controller_hard_reset(struct pci_dev *pdev, 3237static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3139 void * __iomem vaddr, bool use_doorbell) 3238 void * __iomem vaddr, u32 use_doorbell)
3140{ 3239{
3141 u16 pmcsr; 3240 u16 pmcsr;
3142 int pos; 3241 int pos;
@@ -3147,8 +3246,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3147 * other way using the doorbell register. 3246 * other way using the doorbell register.
3148 */ 3247 */
3149 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 3248 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3150 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); 3249 writel(use_doorbell, vaddr + SA5_DOORBELL);
3151 msleep(1000);
3152 } else { /* Try to do it the PCI power state way */ 3250 } else { /* Try to do it the PCI power state way */
3153 3251
3154 /* Quoting from the Open CISS Specification: "The Power 3252 /* Quoting from the Open CISS Specification: "The Power
@@ -3179,12 +3277,63 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3179 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3277 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3180 pmcsr |= PCI_D0; 3278 pmcsr |= PCI_D0;
3181 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3279 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3182
3183 msleep(500);
3184 } 3280 }
3185 return 0; 3281 return 0;
3186} 3282}
3187 3283
3284static __devinit void init_driver_version(char *driver_version, int len)
3285{
3286 memset(driver_version, 0, len);
3287 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
3288}
3289
3290static __devinit int write_driver_ver_to_cfgtable(
3291 struct CfgTable __iomem *cfgtable)
3292{
3293 char *driver_version;
3294 int i, size = sizeof(cfgtable->driver_version);
3295
3296 driver_version = kmalloc(size, GFP_KERNEL);
3297 if (!driver_version)
3298 return -ENOMEM;
3299
3300 init_driver_version(driver_version, size);
3301 for (i = 0; i < size; i++)
3302 writeb(driver_version[i], &cfgtable->driver_version[i]);
3303 kfree(driver_version);
3304 return 0;
3305}
3306
3307static __devinit void read_driver_ver_from_cfgtable(
3308 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3309{
3310 int i;
3311
3312 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3313 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3314}
3315
3316static __devinit int controller_reset_failed(
3317 struct CfgTable __iomem *cfgtable)
3318{
3319
3320 char *driver_ver, *old_driver_ver;
3321 int rc, size = sizeof(cfgtable->driver_version);
3322
3323 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3324 if (!old_driver_ver)
3325 return -ENOMEM;
3326 driver_ver = old_driver_ver + size;
3327
3328 /* After a reset, the 32 bytes of "driver version" in the cfgtable
3329 * should have been changed, otherwise we know the reset failed.
3330 */
3331 init_driver_version(old_driver_ver, size);
3332 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3333 rc = !memcmp(driver_ver, old_driver_ver, size);
3334 kfree(old_driver_ver);
3335 return rc;
3336}
3188/* This does a hard reset of the controller using PCI power management 3337/* This does a hard reset of the controller using PCI power management
3189 * states or the using the doorbell register. 3338 * states or the using the doorbell register.
3190 */ 3339 */
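write_driver_ver_to_cfgtable() and controller_reset_failed() above implement a simple handshake: the driver writes a known version string into the config table before resetting the controller and afterwards checks whether the firmware wiped it; if the bytes are unchanged, the reset evidently did not take. A userspace sketch of the same "write a token, compare after the event" check, with ordinary memory standing in for the memory-mapped config table:

    #include <stdio.h>
    #include <string.h>

    #define VER_LEN 32

    /* plain memory standing in for the MMIO driver_version[] field */
    static unsigned char cfgtable_driver_version[VER_LEN];

    static void write_driver_ver(const char *ver)
    {
            memset(cfgtable_driver_version, 0, VER_LEN);
            strncpy((char *)cfgtable_driver_version, ver, VER_LEN - 1);
    }

    /* Returns 1 if the token survived, i.e. the reset apparently failed. */
    static int reset_failed(const char *ver)
    {
            unsigned char expected[VER_LEN];

            memset(expected, 0, VER_LEN);
            strncpy((char *)expected, ver, VER_LEN - 1);
            return memcmp(cfgtable_driver_version, expected, VER_LEN) == 0;
    }

    int main(void)
    {
            write_driver_ver("hpsa-example-version");

            /* nothing reset the controller: the token is still in place */
            printf("reset failed? %d\n", reset_failed("hpsa-example-version"));

            /* simulate the firmware re-initialising the table during a reset */
            memset(cfgtable_driver_version, 0, VER_LEN);
            printf("reset failed? %d\n", reset_failed("hpsa-example-version"));
            return 0;
    }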
@@ -3195,10 +3344,10 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3195 u64 cfg_base_addr_index; 3344 u64 cfg_base_addr_index;
3196 void __iomem *vaddr; 3345 void __iomem *vaddr;
3197 unsigned long paddr; 3346 unsigned long paddr;
3198 u32 misc_fw_support, active_transport; 3347 u32 misc_fw_support;
3199 int rc; 3348 int rc;
3200 struct CfgTable __iomem *cfgtable; 3349 struct CfgTable __iomem *cfgtable;
3201 bool use_doorbell; 3350 u32 use_doorbell;
3202 u32 board_id; 3351 u32 board_id;
3203 u16 command_register; 3352 u16 command_register;
3204 3353
@@ -3215,20 +3364,15 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3215 * using the doorbell register. 3364 * using the doorbell register.
3216 */ 3365 */
3217 3366
3218 /* Exclude 640x boards. These are two pci devices in one slot
3219 * which share a battery backed cache module. One controls the
3220 * cache, the other accesses the cache through the one that controls
3221 * it. If we reset the one controlling the cache, the other will
3222 * likely not be happy. Just forbid resetting this conjoined mess.
3223 * The 640x isn't really supported by hpsa anyway.
3224 */
3225 rc = hpsa_lookup_board_id(pdev, &board_id); 3367 rc = hpsa_lookup_board_id(pdev, &board_id);
3226 if (rc < 0) { 3368 if (rc < 0 || !ctlr_is_resettable(board_id)) {
3227 dev_warn(&pdev->dev, "Not resetting device.\n"); 3369 dev_warn(&pdev->dev, "Not resetting device.\n");
3228 return -ENODEV; 3370 return -ENODEV;
3229 } 3371 }
3230 if (board_id == 0x409C0E11 || board_id == 0x409D0E11) 3372
3231 return -ENOTSUPP; 3373 /* if controller is soft- but not hard resettable... */
3374 if (!ctlr_is_hard_resettable(board_id))
3375 return -ENOTSUPP; /* try soft reset later. */
3232 3376
3233 /* Save the PCI command register */ 3377 /* Save the PCI command register */
3234 pci_read_config_word(pdev, 4, &command_register); 3378 pci_read_config_word(pdev, 4, &command_register);
@@ -3257,10 +3401,28 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3257 rc = -ENOMEM; 3401 rc = -ENOMEM;
3258 goto unmap_vaddr; 3402 goto unmap_vaddr;
3259 } 3403 }
3404 rc = write_driver_ver_to_cfgtable(cfgtable);
3405 if (rc)
3406 goto unmap_vaddr;
3260 3407
3261 /* If reset via doorbell register is supported, use that. */ 3408 /* If reset via doorbell register is supported, use that.
3409 * There are two such methods. Favor the newest method.
3410 */
3262 misc_fw_support = readl(&cfgtable->misc_fw_support); 3411 misc_fw_support = readl(&cfgtable->misc_fw_support);
3263 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3412 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3413 if (use_doorbell) {
3414 use_doorbell = DOORBELL_CTLR_RESET2;
3415 } else {
3416 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3417 if (use_doorbell) {
3418 dev_warn(&pdev->dev, "Controller claims that "
3419 "'Bit 2 doorbell reset' is "
3420 "supported, but not 'bit 5 doorbell reset'. "
3421 "Firmware update is recommended.\n");
3422 rc = -ENOTSUPP; /* try soft reset */
3423 goto unmap_cfgtable;
3424 }
3425 }
3264 3426
3265 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3427 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3266 if (rc) 3428 if (rc)
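The doorbell-selection hunk prefers the newer reset method: MISC_FW_DOORBELL_RESET2 is tested first, and if only the older MISC_FW_DOORBELL_RESET bit is advertised the driver warns and bails out to try a soft reset later; with neither bit set it falls through to the PCI power-state path. A compact sketch of that decision tree (the capability bits are taken from the hpsa_cmd.h hunk later in this diff; the helper itself is illustrative):

    #include <stdio.h>

    /* bit values as defined in the hpsa_cmd.h hunk later in this diff */
    #define MISC_FW_DOORBELL_RESET   0x02
    #define MISC_FW_DOORBELL_RESET2  0x10

    enum reset_method { USE_DOORBELL2, OLD_DOORBELL_ONLY, USE_PCI_PM };

    static enum reset_method pick_reset_method(unsigned int misc_fw_support)
    {
            if (misc_fw_support & MISC_FW_DOORBELL_RESET2)
                    return USE_DOORBELL2;           /* newest method, preferred      */
            if (misc_fw_support & MISC_FW_DOORBELL_RESET)
                    return OLD_DOORBELL_ONLY;       /* warn, fall back to soft reset */
            return USE_PCI_PM;                      /* no doorbell: power-state path */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   pick_reset_method(MISC_FW_DOORBELL_RESET2),
                   pick_reset_method(MISC_FW_DOORBELL_RESET),
                   pick_reset_method(0));
            return 0;
    }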
@@ -3279,30 +3441,32 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3279 msleep(HPSA_POST_RESET_PAUSE_MSECS); 3441 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3280 3442
3281 /* Wait for board to become not ready, then ready. */ 3443 /* Wait for board to become not ready, then ready. */
3282 dev_info(&pdev->dev, "Waiting for board to become ready.\n"); 3444 dev_info(&pdev->dev, "Waiting for board to reset.\n");
3283 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 3445 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3284 if (rc) 3446 if (rc) {
3285 dev_warn(&pdev->dev, 3447 dev_warn(&pdev->dev,
3286 "failed waiting for board to become not ready\n"); 3448 "failed waiting for board to reset."
3449 " Will try soft reset.\n");
3450 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3451 goto unmap_cfgtable;
3452 }
3287 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 3453 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3288 if (rc) { 3454 if (rc) {
3289 dev_warn(&pdev->dev, 3455 dev_warn(&pdev->dev,
3290 "failed waiting for board to become ready\n"); 3456 "failed waiting for board to become ready "
3457 "after hard reset\n");
3291 goto unmap_cfgtable; 3458 goto unmap_cfgtable;
3292 } 3459 }
3293 dev_info(&pdev->dev, "board ready.\n");
3294 3460
3295 /* Controller should be in simple mode at this point. If it's not, 3461 rc = controller_reset_failed(vaddr);
3296 * It means we're on one of those controllers which doesn't support 3462 if (rc < 0)
3297 * the doorbell reset method and on which the PCI power management reset 3463 goto unmap_cfgtable;
3298 * method doesn't work (P800, for example.) 3464 if (rc) {
3299 * In those cases, don't try to proceed, as it generally doesn't work. 3465 dev_warn(&pdev->dev, "Unable to successfully reset "
3300 */ 3466 "controller. Will try soft reset.\n");
3301 active_transport = readl(&cfgtable->TransportActive); 3467 rc = -ENOTSUPP;
3302 if (active_transport & PERFORMANT_MODE) { 3468 } else {
3303 dev_warn(&pdev->dev, "Unable to successfully reset controller," 3469 dev_info(&pdev->dev, "board ready after hard reset.\n");
3304 " Ignoring controller.\n");
3305 rc = -ENODEV;
3306 } 3470 }
3307 3471
3308unmap_cfgtable: 3472unmap_cfgtable:
@@ -3543,6 +3707,9 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3543 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 3707 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3544 if (!h->cfgtable) 3708 if (!h->cfgtable)
3545 return -ENOMEM; 3709 return -ENOMEM;
3710 rc = write_driver_ver_to_cfgtable(h->cfgtable);
3711 if (rc)
3712 return rc;
3546 /* Find performant mode table. */ 3713 /* Find performant mode table. */
3547 trans_offset = readl(&h->cfgtable->TransMethodOffset); 3714 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3548 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 3715 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
@@ -3777,11 +3944,12 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3777 * due to concerns about shared bbwc between 6402/6404 pair. 3944 * due to concerns about shared bbwc between 6402/6404 pair.
3778 */ 3945 */
3779 if (rc == -ENOTSUPP) 3946 if (rc == -ENOTSUPP)
3780 return 0; /* just try to do the kdump anyhow. */ 3947 return rc; /* just try to do the kdump anyhow. */
3781 if (rc) 3948 if (rc)
3782 return -ENODEV; 3949 return -ENODEV;
3783 3950
3784 /* Now try to get the controller to respond to a no-op */ 3951 /* Now try to get the controller to respond to a no-op */
3952 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
3785 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 3953 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3786 if (hpsa_noop(pdev) == 0) 3954 if (hpsa_noop(pdev) == 0)
3787 break; 3955 break;
@@ -3792,18 +3960,133 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3792 return 0; 3960 return 0;
3793} 3961}
3794 3962
3963static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
3964{
3965 h->cmd_pool_bits = kzalloc(
3966 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
3967 sizeof(unsigned long), GFP_KERNEL);
3968 h->cmd_pool = pci_alloc_consistent(h->pdev,
3969 h->nr_cmds * sizeof(*h->cmd_pool),
3970 &(h->cmd_pool_dhandle));
3971 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3972 h->nr_cmds * sizeof(*h->errinfo_pool),
3973 &(h->errinfo_pool_dhandle));
3974 if ((h->cmd_pool_bits == NULL)
3975 || (h->cmd_pool == NULL)
3976 || (h->errinfo_pool == NULL)) {
3977 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
3978 return -ENOMEM;
3979 }
3980 return 0;
3981}
3982
3983static void hpsa_free_cmd_pool(struct ctlr_info *h)
3984{
3985 kfree(h->cmd_pool_bits);
3986 if (h->cmd_pool)
3987 pci_free_consistent(h->pdev,
3988 h->nr_cmds * sizeof(struct CommandList),
3989 h->cmd_pool, h->cmd_pool_dhandle);
3990 if (h->errinfo_pool)
3991 pci_free_consistent(h->pdev,
3992 h->nr_cmds * sizeof(struct ErrorInfo),
3993 h->errinfo_pool,
3994 h->errinfo_pool_dhandle);
3995}
3996
3997static int hpsa_request_irq(struct ctlr_info *h,
3998 irqreturn_t (*msixhandler)(int, void *),
3999 irqreturn_t (*intxhandler)(int, void *))
4000{
4001 int rc;
4002
4003 if (h->msix_vector || h->msi_vector)
4004 rc = request_irq(h->intr[h->intr_mode], msixhandler,
4005 IRQF_DISABLED, h->devname, h);
4006 else
4007 rc = request_irq(h->intr[h->intr_mode], intxhandler,
4008 IRQF_DISABLED, h->devname, h);
4009 if (rc) {
4010 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4011 h->intr[h->intr_mode], h->devname);
4012 return -ENODEV;
4013 }
4014 return 0;
4015}
4016
4017static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
4018{
4019 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
4020 HPSA_RESET_TYPE_CONTROLLER)) {
4021 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4022 return -EIO;
4023 }
4024
4025 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4026 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4027 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4028 return -1;
4029 }
4030
4031 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4032 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4033 dev_warn(&h->pdev->dev, "Board failed to become ready "
4034 "after soft reset.\n");
4035 return -1;
4036 }
4037
4038 return 0;
4039}
4040
4041static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4042{
4043 free_irq(h->intr[h->intr_mode], h);
4044#ifdef CONFIG_PCI_MSI
4045 if (h->msix_vector)
4046 pci_disable_msix(h->pdev);
4047 else if (h->msi_vector)
4048 pci_disable_msi(h->pdev);
4049#endif /* CONFIG_PCI_MSI */
4050 hpsa_free_sg_chain_blocks(h);
4051 hpsa_free_cmd_pool(h);
4052 kfree(h->blockFetchTable);
4053 pci_free_consistent(h->pdev, h->reply_pool_size,
4054 h->reply_pool, h->reply_pool_dhandle);
4055 if (h->vaddr)
4056 iounmap(h->vaddr);
4057 if (h->transtable)
4058 iounmap(h->transtable);
4059 if (h->cfgtable)
4060 iounmap(h->cfgtable);
4061 pci_release_regions(h->pdev);
4062 kfree(h);
4063}
4064
3795static int __devinit hpsa_init_one(struct pci_dev *pdev, 4065static int __devinit hpsa_init_one(struct pci_dev *pdev,
3796 const struct pci_device_id *ent) 4066 const struct pci_device_id *ent)
3797{ 4067{
3798 int dac, rc; 4068 int dac, rc;
3799 struct ctlr_info *h; 4069 struct ctlr_info *h;
4070 int try_soft_reset = 0;
4071 unsigned long flags;
3800 4072
3801 if (number_of_controllers == 0) 4073 if (number_of_controllers == 0)
3802 printk(KERN_INFO DRIVER_NAME "\n"); 4074 printk(KERN_INFO DRIVER_NAME "\n");
3803 4075
3804 rc = hpsa_init_reset_devices(pdev); 4076 rc = hpsa_init_reset_devices(pdev);
3805 if (rc) 4077 if (rc) {
3806 return rc; 4078 if (rc != -ENOTSUPP)
4079 return rc;
4080 /* If the reset fails in a particular way (it has no way to do
4081 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4082 * a soft reset once we get the controller configured up to the
4083 * point that it can accept a command.
4084 */
4085 try_soft_reset = 1;
4086 rc = 0;
4087 }
4088
4089reinit_after_soft_reset:
3807 4090
3808 /* Command structures must be aligned on a 32-byte boundary because 4091 /* Command structures must be aligned on a 32-byte boundary because
3809 * the 5 lower bits of the address are used by the hardware. and by 4092 * the 5 lower bits of the address are used by the hardware. and by
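hpsa_allocate_cmd_pool(), added in the hunk above, sizes the allocation bitmap as DIV_ROUND_UP(nr_cmds, BITS_PER_LONG) longs, one bit per command slot rounded up to whole words, and gets it pre-zeroed from kzalloc() instead of the old kmalloc()-plus-memset(). A small userspace version of the same sizing arithmetic (the helper macros mirror the kernel's but are re-defined here):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int nr_cmds = 1026;    /* arbitrary example */
            size_t nwords = DIV_ROUND_UP(nr_cmds, BITS_PER_LONG);

            /* calloc() zero-fills, like kzalloc() in the driver */
            unsigned long *cmd_pool_bits = calloc(nwords, sizeof(unsigned long));

            if (!cmd_pool_bits)
                    return 1;
            printf("%u commands -> %zu longs (%zu bits)\n",
                   nr_cmds, nwords, nwords * BITS_PER_LONG);

            /* mark command slot 5 busy, then free it again */
            cmd_pool_bits[5 / BITS_PER_LONG] |= 1UL << (5 % BITS_PER_LONG);
            cmd_pool_bits[5 / BITS_PER_LONG] &= ~(1UL << (5 % BITS_PER_LONG));

            free(cmd_pool_bits);
            return 0;
    }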
@@ -3847,54 +4130,82 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3847 /* make sure the board interrupts are off */ 4130 /* make sure the board interrupts are off */
3848 h->access.set_intr_mask(h, HPSA_INTR_OFF); 4131 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3849 4132
3850 if (h->msix_vector || h->msi_vector) 4133 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
3851 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
3852 IRQF_DISABLED, h->devname, h);
3853 else
3854 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
3855 IRQF_DISABLED, h->devname, h);
3856 if (rc) {
3857 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3858 h->intr[h->intr_mode], h->devname);
3859 goto clean2; 4134 goto clean2;
3860 }
3861
3862 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 4135 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3863 h->devname, pdev->device, 4136 h->devname, pdev->device,
3864 h->intr[h->intr_mode], dac ? "" : " not"); 4137 h->intr[h->intr_mode], dac ? "" : " not");
3865 4138 if (hpsa_allocate_cmd_pool(h))
3866 h->cmd_pool_bits =
3867 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3868 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3869 h->cmd_pool = pci_alloc_consistent(h->pdev,
3870 h->nr_cmds * sizeof(*h->cmd_pool),
3871 &(h->cmd_pool_dhandle));
3872 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3873 h->nr_cmds * sizeof(*h->errinfo_pool),
3874 &(h->errinfo_pool_dhandle));
3875 if ((h->cmd_pool_bits == NULL)
3876 || (h->cmd_pool == NULL)
3877 || (h->errinfo_pool == NULL)) {
3878 dev_err(&pdev->dev, "out of memory");
3879 rc = -ENOMEM;
3880 goto clean4; 4139 goto clean4;
3881 }
3882 if (hpsa_allocate_sg_chain_blocks(h)) 4140 if (hpsa_allocate_sg_chain_blocks(h))
3883 goto clean4; 4141 goto clean4;
3884 init_waitqueue_head(&h->scan_wait_queue); 4142 init_waitqueue_head(&h->scan_wait_queue);
3885 h->scan_finished = 1; /* no scan currently in progress */ 4143 h->scan_finished = 1; /* no scan currently in progress */
3886 4144
3887 pci_set_drvdata(pdev, h); 4145 pci_set_drvdata(pdev, h);
3888 memset(h->cmd_pool_bits, 0, 4146 h->ndevices = 0;
3889 ((h->nr_cmds + BITS_PER_LONG - 4147 h->scsi_host = NULL;
3890 1) / BITS_PER_LONG) * sizeof(unsigned long)); 4148 spin_lock_init(&h->devlock);
4149 hpsa_put_ctlr_into_performant_mode(h);
4150
4151 /* At this point, the controller is ready to take commands.
4152 * Now, if reset_devices and the hard reset didn't work, try
4153 * the soft reset and see if that works.
4154 */
4155 if (try_soft_reset) {
4156
4157 /* This is kind of gross. We may or may not get a completion
4158 * from the soft reset command, and if we do, then the value
4159 * from the fifo may or may not be valid. So, we wait 10 secs
4160 * after the reset throwing away any completions we get during
4161 * that time. Unregister the interrupt handler and register
4162 * fake ones to scoop up any residual completions.
4163 */
4164 spin_lock_irqsave(&h->lock, flags);
4165 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4166 spin_unlock_irqrestore(&h->lock, flags);
4167 free_irq(h->intr[h->intr_mode], h);
4168 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
4169 hpsa_intx_discard_completions);
4170 if (rc) {
4171 dev_warn(&h->pdev->dev, "Failed to request_irq after "
4172 "soft reset.\n");
4173 goto clean4;
4174 }
4175
4176 rc = hpsa_kdump_soft_reset(h);
4177 if (rc)
4178 /* Neither hard nor soft reset worked, we're hosed. */
4179 goto clean4;
4180
4181 dev_info(&h->pdev->dev, "Board READY.\n");
4182 dev_info(&h->pdev->dev,
4183 "Waiting for stale completions to drain.\n");
4184 h->access.set_intr_mask(h, HPSA_INTR_ON);
4185 msleep(10000);
4186 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4187
4188 rc = controller_reset_failed(h->cfgtable);
4189 if (rc)
4190 dev_info(&h->pdev->dev,
4191 "Soft reset appears to have failed.\n");
4192
4193 /* since the controller's reset, we have to go back and re-init
4194 * everything. Easiest to just forget what we've done and do it
4195 * all over again.
4196 */
4197 hpsa_undo_allocations_after_kdump_soft_reset(h);
4198 try_soft_reset = 0;
4199 if (rc)
4200 /* don't go to clean4, we already unallocated */
4201 return -ENODEV;
3891 4202
3892 hpsa_scsi_setup(h); 4203 goto reinit_after_soft_reset;
4204 }
3893 4205
3894 /* Turn the interrupts on so we can service requests */ 4206 /* Turn the interrupts on so we can service requests */
3895 h->access.set_intr_mask(h, HPSA_INTR_ON); 4207 h->access.set_intr_mask(h, HPSA_INTR_ON);
3896 4208
3897 hpsa_put_ctlr_into_performant_mode(h);
3898 hpsa_hba_inquiry(h); 4209 hpsa_hba_inquiry(h);
3899 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 4210 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3900 h->busy_initializing = 0; 4211 h->busy_initializing = 0;
@@ -3902,16 +4213,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3902 4213
3903clean4: 4214clean4:
3904 hpsa_free_sg_chain_blocks(h); 4215 hpsa_free_sg_chain_blocks(h);
3905 kfree(h->cmd_pool_bits); 4216 hpsa_free_cmd_pool(h);
3906 if (h->cmd_pool)
3907 pci_free_consistent(h->pdev,
3908 h->nr_cmds * sizeof(struct CommandList),
3909 h->cmd_pool, h->cmd_pool_dhandle);
3910 if (h->errinfo_pool)
3911 pci_free_consistent(h->pdev,
3912 h->nr_cmds * sizeof(struct ErrorInfo),
3913 h->errinfo_pool,
3914 h->errinfo_pool_dhandle);
3915 free_irq(h->intr[h->intr_mode], h); 4217 free_irq(h->intr[h->intr_mode], h);
3916clean2: 4218clean2:
3917clean1: 4219clean1:
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 621a1530054a..6d8dcd4dd06b 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -127,10 +127,12 @@ struct ctlr_info {
127}; 127};
128#define HPSA_ABORT_MSG 0 128#define HPSA_ABORT_MSG 0
129#define HPSA_DEVICE_RESET_MSG 1 129#define HPSA_DEVICE_RESET_MSG 1
130#define HPSA_BUS_RESET_MSG 2 130#define HPSA_RESET_TYPE_CONTROLLER 0x00
131#define HPSA_HOST_RESET_MSG 3 131#define HPSA_RESET_TYPE_BUS 0x01
132#define HPSA_RESET_TYPE_TARGET 0x03
133#define HPSA_RESET_TYPE_LUN 0x04
132#define HPSA_MSG_SEND_RETRY_LIMIT 10 134#define HPSA_MSG_SEND_RETRY_LIMIT 10
133#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000 135#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
134 136
135/* Maximum time in seconds driver will wait for command completions 137/* Maximum time in seconds driver will wait for command completions
136 * when polling before giving up. 138 * when polling before giving up.
@@ -155,7 +157,7 @@ struct ctlr_info {
155 * HPSA_BOARD_READY_ITERATIONS are derived from those. 157 * HPSA_BOARD_READY_ITERATIONS are derived from those.
156 */ 158 */
157#define HPSA_BOARD_READY_WAIT_SECS (120) 159#define HPSA_BOARD_READY_WAIT_SECS (120)
158#define HPSA_BOARD_NOT_READY_WAIT_SECS (10) 160#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
159#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100) 161#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
160#define HPSA_BOARD_READY_POLL_INTERVAL \ 162#define HPSA_BOARD_READY_POLL_INTERVAL \
161 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000) 163 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
@@ -212,6 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
212 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 214 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
213 c->Header.Tag.lower); 215 c->Header.Tag.lower);
214 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 216 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
217 (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
215 h->commands_outstanding++; 218 h->commands_outstanding++;
216 if (h->commands_outstanding > h->max_outstanding) 219 if (h->commands_outstanding > h->max_outstanding)
217 h->max_outstanding = h->commands_outstanding; 220 h->max_outstanding = h->commands_outstanding;
@@ -227,10 +230,12 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
227 if (val) { /* Turn interrupts on */ 230 if (val) { /* Turn interrupts on */
228 h->interrupts_enabled = 1; 231 h->interrupts_enabled = 1;
229 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 232 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
233 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
230 } else { /* Turn them off */ 234 } else { /* Turn them off */
231 h->interrupts_enabled = 0; 235 h->interrupts_enabled = 0;
232 writel(SA5_INTR_OFF, 236 writel(SA5_INTR_OFF,
233 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 237 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
238 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
234 } 239 }
235} 240}
236 241
@@ -239,10 +244,12 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
239 if (val) { /* turn on interrupts */ 244 if (val) { /* turn on interrupts */
240 h->interrupts_enabled = 1; 245 h->interrupts_enabled = 1;
241 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 246 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
247 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
242 } else { 248 } else {
243 h->interrupts_enabled = 0; 249 h->interrupts_enabled = 0;
244 writel(SA5_PERF_INTR_OFF, 250 writel(SA5_PERF_INTR_OFF,
245 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 251 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
252 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
246 } 253 }
247} 254}
248 255
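The hpsa.h hunks add a readl() of the register that was just written in SA5_submit_command() and the interrupt-mask helpers; PCI memory writes are posted, and reading the same BAR back is the usual way to force the write to reach the controller before the driver continues. The sketch below only mimics the shape of that idiom with ordinary volatile accessors, since the real flush semantics come from the PCI bus, not from the C language:

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t fake_bar[16];  /* stand-in for ioremap()ed registers */

    static void write_reg(uint32_t val, volatile uint32_t *addr) { *addr = val; }
    static uint32_t read_reg(volatile uint32_t *addr) { return *addr; }

    #define INTR_MASK_REG 9                 /* arbitrary index for the sketch */

    static void set_intr_mask(uint32_t val)
    {
            write_reg(val, &fake_bar[INTR_MASK_REG]);
            /* On real hardware this read-back flushes the posted PCI write so
             * the mask change has taken effect before we return. */
            (void)read_reg(&fake_bar[INTR_MASK_REG]);
    }

    int main(void)
    {
            set_intr_mask(0);               /* unmask */
            set_intr_mask(0xffffffffu);     /* mask   */
            printf("mask register now 0x%x\n", (unsigned int)fake_bar[INTR_MASK_REG]);
            return 0;
    }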
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 18464900e761..55d741b019db 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -101,6 +101,7 @@
101#define CFGTBL_ChangeReq 0x00000001l 101#define CFGTBL_ChangeReq 0x00000001l
102#define CFGTBL_AccCmds 0x00000001l 102#define CFGTBL_AccCmds 0x00000001l
103#define DOORBELL_CTLR_RESET 0x00000004l 103#define DOORBELL_CTLR_RESET 0x00000004l
104#define DOORBELL_CTLR_RESET2 0x00000020l
104 105
105#define CFGTBL_Trans_Simple 0x00000002l 106#define CFGTBL_Trans_Simple 0x00000002l
106#define CFGTBL_Trans_Performant 0x00000004l 107#define CFGTBL_Trans_Performant 0x00000004l
@@ -256,14 +257,6 @@ struct ErrorInfo {
256#define CMD_IOCTL_PEND 0x01 257#define CMD_IOCTL_PEND 0x01
257#define CMD_SCSI 0x03 258#define CMD_SCSI 0x03
258 259
259/* This structure needs to be divisible by 32 for new
260 * indexing method and performant mode.
261 */
262#define PAD32 32
263#define PAD64DIFF 0
264#define USEEXTRA ((sizeof(void *) - 4)/4)
265#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
266
267#define DIRECT_LOOKUP_SHIFT 5 260#define DIRECT_LOOKUP_SHIFT 5
268#define DIRECT_LOOKUP_BIT 0x10 261#define DIRECT_LOOKUP_BIT 0x10
269#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) 262#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
@@ -345,6 +338,8 @@ struct CfgTable {
345 u8 reserved[0x78 - 0x58]; 338 u8 reserved[0x78 - 0x58];
346 u32 misc_fw_support; /* offset 0x78 */ 339 u32 misc_fw_support; /* offset 0x78 */
347#define MISC_FW_DOORBELL_RESET (0x02) 340#define MISC_FW_DOORBELL_RESET (0x02)
341#define MISC_FW_DOORBELL_RESET2 (0x010)
342 u8 driver_version[32];
348}; 343};
349 344
350#define NUM_BLOCKFETCH_ENTRIES 8 345#define NUM_BLOCKFETCH_ENTRIES 8
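hpsa_cmd.h gains a second doorbell reset value (DOORBELL_CTLR_RESET2), a matching MISC_FW_DOORBELL_RESET2 capability bit in misc_fw_support, and a driver_version[] field in the config table. A hedged sketch of how a driver might pick the reset method from the firmware-support word; the helper is illustrative, not the hpsa code:

#include <linux/types.h>

#define DOORBELL_CTLR_RESET	0x00000004l
#define DOORBELL_CTLR_RESET2	0x00000020l
#define MISC_FW_DOORBELL_RESET	(0x02)
#define MISC_FW_DOORBELL_RESET2	(0x010)

/* Prefer the newer doorbell reset when firmware advertises it. */
static u32 example_pick_doorbell_reset(u32 misc_fw_support)
{
	if (misc_fw_support & MISC_FW_DOORBELL_RESET2)
		return DOORBELL_CTLR_RESET2;
	if (misc_fw_support & MISC_FW_DOORBELL_RESET)
		return DOORBELL_CTLR_RESET;
	return 0;	/* no doorbell reset supported */
}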
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 041958453e2a..3d391dc3f11f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1849,8 +1849,7 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
1849 rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata); 1849 rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
1850 if (!rc) 1850 if (!rc)
1851 rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0); 1851 rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
1852 if (!rc) 1852 vio_enable_interrupts(to_vio_dev(hostdata->dev));
1853 rc = vio_enable_interrupts(to_vio_dev(hostdata->dev));
1854 } else if (hostdata->reenable_crq) { 1853 } else if (hostdata->reenable_crq) {
1855 smp_rmb(); 1854 smp_rmb();
1856 action = "enable"; 1855 action = "enable";
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 6568aab745a0..92109b126391 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -343,7 +343,7 @@ static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
343 instance = cmd->device->host; 343 instance = cmd->device->host;
344 hostdata = (struct IN2000_hostdata *) instance->hostdata; 344 hostdata = (struct IN2000_hostdata *) instance->hostdata;
345 345
346 DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x-%ld(", cmd->cmnd[0], cmd->serial_number)) 346 DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))
347 347
348/* Set up a few fields in the Scsi_Cmnd structure for our own use: 348/* Set up a few fields in the Scsi_Cmnd structure for our own use:
349 * - host_scribble is the pointer to the next cmd in the input queue 349 * - host_scribble is the pointer to the next cmd in the input queue
@@ -427,7 +427,7 @@ static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
427 427
428 in2000_execute(cmd->device->host); 428 in2000_execute(cmd->device->host);
429 429
430 DB(DB_QUEUE_COMMAND, printk(")Q-%ld ", cmd->serial_number)) 430 DB(DB_QUEUE_COMMAND, printk(")Q "))
431 return 0; 431 return 0;
432} 432}
433 433
@@ -705,7 +705,7 @@ static void in2000_execute(struct Scsi_Host *instance)
705 * to search the input_Q again... 705 * to search the input_Q again...
706 */ 706 */
707 707
708 DB(DB_EXECUTE, printk("%s%ld)EX-2 ", (cmd->SCp.phase) ? "d:" : "", cmd->serial_number)) 708 DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
709 709
710} 710}
711 711
@@ -1149,7 +1149,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
1149 case CSR_XFER_DONE | PHS_COMMAND: 1149 case CSR_XFER_DONE | PHS_COMMAND:
1150 case CSR_UNEXP | PHS_COMMAND: 1150 case CSR_UNEXP | PHS_COMMAND:
1151 case CSR_SRV_REQ | PHS_COMMAND: 1151 case CSR_SRV_REQ | PHS_COMMAND:
1152 DB(DB_INTR, printk("CMND-%02x,%ld", cmd->cmnd[0], cmd->serial_number)) 1152 DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
1153 transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata); 1153 transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
1154 hostdata->state = S_CONNECTED; 1154 hostdata->state = S_CONNECTED;
1155 break; 1155 break;
@@ -1191,7 +1191,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
1191 switch (msg) { 1191 switch (msg) {
1192 1192
1193 case COMMAND_COMPLETE: 1193 case COMMAND_COMPLETE:
1194 DB(DB_INTR, printk("CCMP-%ld", cmd->serial_number)) 1194 DB(DB_INTR, printk("CCMP"))
1195 write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); 1195 write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
1196 hostdata->state = S_PRE_CMP_DISC; 1196 hostdata->state = S_PRE_CMP_DISC;
1197 break; 1197 break;
@@ -1329,7 +1329,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
1329 1329
1330 write_3393(hostdata, WD_SOURCE_ID, SRCID_ER); 1330 write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
1331 if (phs == 0x60) { 1331 if (phs == 0x60) {
1332 DB(DB_INTR, printk("SX-DONE-%ld", cmd->serial_number)) 1332 DB(DB_INTR, printk("SX-DONE"))
1333 cmd->SCp.Message = COMMAND_COMPLETE; 1333 cmd->SCp.Message = COMMAND_COMPLETE;
1334 lun = read_3393(hostdata, WD_TARGET_LUN); 1334 lun = read_3393(hostdata, WD_TARGET_LUN);
1335 DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) 1335 DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
@@ -1350,7 +1350,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
1350 1350
1351 in2000_execute(instance); 1351 in2000_execute(instance);
1352 } else { 1352 } else {
1353 printk("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs, cmd->serial_number); 1353 printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
1354 } 1354 }
1355 break; 1355 break;
1356 1356
@@ -1417,7 +1417,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
1417 spin_unlock_irqrestore(instance->host_lock, flags); 1417 spin_unlock_irqrestore(instance->host_lock, flags);
1418 return IRQ_HANDLED; 1418 return IRQ_HANDLED;
1419 } 1419 }
1420 DB(DB_INTR, printk("UNEXP_DISC-%ld", cmd->serial_number)) 1420 DB(DB_INTR, printk("UNEXP_DISC"))
1421 hostdata->connected = NULL; 1421 hostdata->connected = NULL;
1422 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 1422 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
1423 hostdata->state = S_UNCONNECTED; 1423 hostdata->state = S_UNCONNECTED;
@@ -1442,7 +1442,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
1442 */ 1442 */
1443 1443
1444 write_3393(hostdata, WD_SOURCE_ID, SRCID_ER); 1444 write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
1445 DB(DB_INTR, printk("DISC-%ld", cmd->serial_number)) 1445 DB(DB_INTR, printk("DISC"))
1446 if (cmd == NULL) { 1446 if (cmd == NULL) {
1447 printk(" - Already disconnected! "); 1447 printk(" - Already disconnected! ");
1448 hostdata->state = S_UNCONNECTED; 1448 hostdata->state = S_UNCONNECTED;
@@ -1575,7 +1575,6 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
1575 } else 1575 } else
1576 hostdata->state = S_CONNECTED; 1576 hostdata->state = S_CONNECTED;
1577 1577
1578 DB(DB_INTR, printk("-%ld", cmd->serial_number))
1579 break; 1578 break;
1580 1579
1581 default: 1580 default:
@@ -1704,7 +1703,7 @@ static int __in2000_abort(Scsi_Cmnd * cmd)
1704 prev->host_scribble = cmd->host_scribble; 1703 prev->host_scribble = cmd->host_scribble;
1705 cmd->host_scribble = NULL; 1704 cmd->host_scribble = NULL;
1706 cmd->result = DID_ABORT << 16; 1705 cmd->result = DID_ABORT << 16;
1707 printk(KERN_WARNING "scsi%d: Abort - removing command %ld from input_Q. ", instance->host_no, cmd->serial_number); 1706 printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
1708 cmd->scsi_done(cmd); 1707 cmd->scsi_done(cmd);
1709 return SUCCESS; 1708 return SUCCESS;
1710 } 1709 }
@@ -1725,7 +1724,7 @@ static int __in2000_abort(Scsi_Cmnd * cmd)
1725 1724
1726 if (hostdata->connected == cmd) { 1725 if (hostdata->connected == cmd) {
1727 1726
1728 printk(KERN_WARNING "scsi%d: Aborting connected command %ld - ", instance->host_no, cmd->serial_number); 1727 printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
1729 1728
1730 printk("sending wd33c93 ABORT command - "); 1729 printk("sending wd33c93 ABORT command - ");
1731 write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); 1730 write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
@@ -2270,7 +2269,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
2270 strcat(bp, "\nconnected: "); 2269 strcat(bp, "\nconnected: ");
2271 if (hd->connected) { 2270 if (hd->connected) {
2272 cmd = (Scsi_Cmnd *) hd->connected; 2271 cmd = (Scsi_Cmnd *) hd->connected;
2273 sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2272 sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2274 strcat(bp, tbuf); 2273 strcat(bp, tbuf);
2275 } 2274 }
2276 } 2275 }
@@ -2278,7 +2277,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
2278 strcat(bp, "\ninput_Q: "); 2277 strcat(bp, "\ninput_Q: ");
2279 cmd = (Scsi_Cmnd *) hd->input_Q; 2278 cmd = (Scsi_Cmnd *) hd->input_Q;
2280 while (cmd) { 2279 while (cmd) {
2281 sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2280 sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2282 strcat(bp, tbuf); 2281 strcat(bp, tbuf);
2283 cmd = (Scsi_Cmnd *) cmd->host_scribble; 2282 cmd = (Scsi_Cmnd *) cmd->host_scribble;
2284 } 2283 }
@@ -2287,7 +2286,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
2287 strcat(bp, "\ndisconnected_Q:"); 2286 strcat(bp, "\ndisconnected_Q:");
2288 cmd = (Scsi_Cmnd *) hd->disconnected_Q; 2287 cmd = (Scsi_Cmnd *) hd->disconnected_Q;
2289 while (cmd) { 2288 while (cmd) {
2290 sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2289 sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2291 strcat(bp, tbuf); 2290 strcat(bp, tbuf);
2292 cmd = (Scsi_Cmnd *) cmd->host_scribble; 2291 cmd = (Scsi_Cmnd *) cmd->host_scribble;
2293 } 2292 }
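The in2000 hunks strip cmd->serial_number from every debug print, identifying commands by opcode (and target/LUN where the driver already prints them) instead. A minimal sketch of the surviving style, using scmd_printk() as the queuecommand path now does; purely illustrative:

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>

/* Identify a command by its CDB opcode rather than serial_number. */
static void example_trace_cmd(struct scsi_cmnd *cmd)
{
	scmd_printk(KERN_DEBUG, cmd, "Q-%02x\n", cmd->cmnd[0]);
}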
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0621238fac4a..12868ca46110 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -60,6 +60,7 @@
60#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/kernel.h> 61#include <linux/kernel.h>
62#include <linux/slab.h> 62#include <linux/slab.h>
63#include <linux/vmalloc.h>
63#include <linux/ioport.h> 64#include <linux/ioport.h>
64#include <linux/delay.h> 65#include <linux/delay.h>
65#include <linux/pci.h> 66#include <linux/pci.h>
@@ -2717,13 +2718,18 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2717 unsigned long pci_address, u32 length) 2718 unsigned long pci_address, u32 length)
2718{ 2719{
2719 int bytes_copied = 0; 2720 int bytes_copied = 0;
2720 int cur_len, rc, rem_len, rem_page_len; 2721 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2721 __be32 *page; 2722 __be32 *page;
2722 unsigned long lock_flags = 0; 2723 unsigned long lock_flags = 0;
2723 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; 2724 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2724 2725
2726 if (ioa_cfg->sis64)
2727 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2728 else
2729 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2730
2725 while (bytes_copied < length && 2731 while (bytes_copied < length &&
2726 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) { 2732 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2727 if (ioa_dump->page_offset >= PAGE_SIZE || 2733 if (ioa_dump->page_offset >= PAGE_SIZE ||
2728 ioa_dump->page_offset == 0) { 2734 ioa_dump->page_offset == 0) {
2729 page = (__be32 *)__get_free_page(GFP_ATOMIC); 2735 page = (__be32 *)__get_free_page(GFP_ATOMIC);
@@ -2885,8 +2891,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2885 unsigned long lock_flags = 0; 2891 unsigned long lock_flags = 0;
2886 struct ipr_driver_dump *driver_dump = &dump->driver_dump; 2892 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2887 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; 2893 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2888 u32 num_entries, start_off, end_off; 2894 u32 num_entries, max_num_entries, start_off, end_off;
2889 u32 bytes_to_copy, bytes_copied, rc; 2895 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2890 struct ipr_sdt *sdt; 2896 struct ipr_sdt *sdt;
2891 int valid = 1; 2897 int valid = 1;
2892 int i; 2898 int i;
@@ -2947,8 +2953,18 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2947 on entries in this table */ 2953 on entries in this table */
2948 sdt = &ioa_dump->sdt; 2954 sdt = &ioa_dump->sdt;
2949 2955
2956 if (ioa_cfg->sis64) {
2957 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
2958 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2959 } else {
2960 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
2961 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2962 }
2963
2964 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
2965 (max_num_entries * sizeof(struct ipr_sdt_entry));
2950 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, 2966 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2951 sizeof(struct ipr_sdt) / sizeof(__be32)); 2967 bytes_to_copy / sizeof(__be32));
2952 2968
2953 /* Smart Dump table is ready to use and the first entry is valid */ 2969 /* Smart Dump table is ready to use and the first entry is valid */
2954 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 2970 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
@@ -2964,13 +2980,20 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2964 2980
2965 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); 2981 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2966 2982
2967 if (num_entries > IPR_NUM_SDT_ENTRIES) 2983 if (num_entries > max_num_entries)
2968 num_entries = IPR_NUM_SDT_ENTRIES; 2984 num_entries = max_num_entries;
2985
2986 /* Update dump length to the actual data to be copied */
2987 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
2988 if (ioa_cfg->sis64)
2989 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
2990 else
2991 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
2969 2992
2970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2993 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2971 2994
2972 for (i = 0; i < num_entries; i++) { 2995 for (i = 0; i < num_entries; i++) {
2973 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) { 2996 if (ioa_dump->hdr.len > max_dump_size) {
2974 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 2997 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2975 break; 2998 break;
2976 } 2999 }
@@ -2989,7 +3012,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2989 valid = 0; 3012 valid = 0;
2990 } 3013 }
2991 if (valid) { 3014 if (valid) {
2992 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { 3015 if (bytes_to_copy > max_dump_size) {
2993 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 3016 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2994 continue; 3017 continue;
2995 } 3018 }
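With two SDT formats in play, the table fetch is now sized with offsetof(struct ipr_sdt, entry) plus the per-format entry count instead of sizeof(struct ipr_sdt), which is dimensioned for the larger format. A portable, user-space illustration of that sizing arithmetic, with stand-in structure fields and the entry counts from this patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sdt_header { uint32_t state, num_entries_used; };
struct sdt_entry  { uint32_t start_token, end_token, flags; };

#define FMT2_NUM_ENTRIES 511
#define FMT3_NUM_ENTRIES 0xFFF

struct sdt {
	struct sdt_header hdr;
	struct sdt_entry entry[FMT3_NUM_ENTRIES];	/* array sized for the larger format */
};

int main(void)
{
	/* Copy only the header plus the entries the format actually has. */
	size_t fmt2_bytes = offsetof(struct sdt, entry) +
			    FMT2_NUM_ENTRIES * sizeof(struct sdt_entry);
	size_t fmt3_bytes = offsetof(struct sdt, entry) +
			    FMT3_NUM_ENTRIES * sizeof(struct sdt_entry);

	printf("fmt2: %zu bytes, fmt3: %zu bytes, sizeof: %zu bytes\n",
	       fmt2_bytes, fmt3_bytes, sizeof(struct sdt));
	return 0;
}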
@@ -3044,6 +3067,7 @@ static void ipr_release_dump(struct kref *kref)
3044 for (i = 0; i < dump->ioa_dump.next_page_index; i++) 3067 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3045 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); 3068 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3046 3069
3070 vfree(dump->ioa_dump.ioa_data);
3047 kfree(dump); 3071 kfree(dump);
3048 LEAVE; 3072 LEAVE;
3049} 3073}
@@ -3835,7 +3859,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3835 struct ipr_dump *dump; 3859 struct ipr_dump *dump;
3836 unsigned long lock_flags = 0; 3860 unsigned long lock_flags = 0;
3837 char *src; 3861 char *src;
3838 int len; 3862 int len, sdt_end;
3839 size_t rc = count; 3863 size_t rc = count;
3840 3864
3841 if (!capable(CAP_SYS_ADMIN)) 3865 if (!capable(CAP_SYS_ADMIN))
@@ -3875,9 +3899,17 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3875 3899
3876 off -= sizeof(dump->driver_dump); 3900 off -= sizeof(dump->driver_dump);
3877 3901
3878 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) { 3902 if (ioa_cfg->sis64)
3879 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data)) 3903 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3880 len = offsetof(struct ipr_ioa_dump, ioa_data) - off; 3904 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
3905 sizeof(struct ipr_sdt_entry));
3906 else
3907 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3908 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
3909
3910 if (count && off < sdt_end) {
3911 if (off + count > sdt_end)
3912 len = sdt_end - off;
3881 else 3913 else
3882 len = count; 3914 len = count;
3883 src = (u8 *)&dump->ioa_dump + off; 3915 src = (u8 *)&dump->ioa_dump + off;
@@ -3887,7 +3919,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3887 count -= len; 3919 count -= len;
3888 } 3920 }
3889 3921
3890 off -= offsetof(struct ipr_ioa_dump, ioa_data); 3922 off -= sdt_end;
3891 3923
3892 while (count) { 3924 while (count) {
3893 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) 3925 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
@@ -3916,6 +3948,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3916static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) 3948static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3917{ 3949{
3918 struct ipr_dump *dump; 3950 struct ipr_dump *dump;
3951 __be32 **ioa_data;
3919 unsigned long lock_flags = 0; 3952 unsigned long lock_flags = 0;
3920 3953
3921 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); 3954 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
@@ -3925,6 +3958,19 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3925 return -ENOMEM; 3958 return -ENOMEM;
3926 } 3959 }
3927 3960
3961 if (ioa_cfg->sis64)
3962 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3963 else
3964 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3965
3966 if (!ioa_data) {
3967 ipr_err("Dump memory allocation failed\n");
3968 kfree(dump);
3969 return -ENOMEM;
3970 }
3971
3972 dump->ioa_dump.ioa_data = ioa_data;
3973
3928 kref_init(&dump->kref); 3974 kref_init(&dump->kref);
3929 dump->ioa_cfg = ioa_cfg; 3975 dump->ioa_cfg = ioa_cfg;
3930 3976
@@ -3932,6 +3978,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3932 3978
3933 if (INACTIVE != ioa_cfg->sdt_state) { 3979 if (INACTIVE != ioa_cfg->sdt_state) {
3934 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3981 vfree(dump->ioa_dump.ioa_data);
3935 kfree(dump); 3982 kfree(dump);
3936 return 0; 3983 return 0;
3937 } 3984 }
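The ioa_data page-pointer table is no longer a fixed array inside struct ipr_ioa_dump; ipr_alloc_dump() now vmalloc()s it, sized for the SIS64 or SIS32 page limit, and every exit path (the early INACTIVE check above as well as ipr_release_dump()) vfree()s it before freeing the dump. A hedged kernel-style sketch of the allocate/free pairing with simplified names:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct example_dump {
	__be32 **ioa_data;	/* one pointer per dump page */
};

/* max_pages would be the FMT2 or FMT3 limit, chosen by adapter type. */
static struct example_dump *example_alloc_dump(unsigned int max_pages)
{
	struct example_dump *dump = kzalloc(sizeof(*dump), GFP_KERNEL);

	if (!dump)
		return NULL;

	dump->ioa_data = vmalloc(max_pages * sizeof(__be32 *));
	if (!dump->ioa_data) {
		kfree(dump);
		return NULL;
	}
	return dump;
}

static void example_free_dump(struct example_dump *dump)
{
	vfree(dump->ioa_data);	/* vmalloc() memory must be returned with vfree() */
	kfree(dump);
}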
@@ -4953,9 +5000,35 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4953 * IRQ_NONE / IRQ_HANDLED 5000 * IRQ_NONE / IRQ_HANDLED
4954 **/ 5001 **/
4955static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, 5002static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4956 volatile u32 int_reg) 5003 u32 int_reg)
4957{ 5004{
4958 irqreturn_t rc = IRQ_HANDLED; 5005 irqreturn_t rc = IRQ_HANDLED;
5006 u32 int_mask_reg;
5007
5008 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5009 int_reg &= ~int_mask_reg;
5010
5011 /* If an interrupt on the adapter did not occur, ignore it.
5012 * Or in the case of SIS 64, check for a stage change interrupt.
5013 */
5014 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5015 if (ioa_cfg->sis64) {
5016 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5017 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5018 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5019
5020 /* clear stage change */
5021 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5022 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5023 list_del(&ioa_cfg->reset_cmd->queue);
5024 del_timer(&ioa_cfg->reset_cmd->timer);
5025 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5026 return IRQ_HANDLED;
5027 }
5028 }
5029
5030 return IRQ_NONE;
5031 }
4959 5032
4960 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 5033 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4961 /* Mask the interrupt */ 5034 /* Mask the interrupt */
@@ -4968,6 +5041,13 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4968 list_del(&ioa_cfg->reset_cmd->queue); 5041 list_del(&ioa_cfg->reset_cmd->queue);
4969 del_timer(&ioa_cfg->reset_cmd->timer); 5042 del_timer(&ioa_cfg->reset_cmd->timer);
4970 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5043 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5044 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5045 if (ipr_debug && printk_ratelimit())
5046 dev_err(&ioa_cfg->pdev->dev,
5047 "Spurious interrupt detected. 0x%08X\n", int_reg);
5048 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5049 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5050 return IRQ_NONE;
4971 } else { 5051 } else {
4972 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) 5052 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4973 ioa_cfg->ioa_unit_checked = 1; 5053 ioa_cfg->ioa_unit_checked = 1;
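ipr_handle_other_interrupt() now masks the raw status itself and treats an HRRQ-updated-only status as spurious: it logs it (rate-limited, and only when ipr_debug is set), writes the bit back to clear it, re-reads the sense register, and returns IRQ_NONE. A compact sketch of the rate-limited log plus write-to-clear pattern, using placeholder register handles and an illustrative bit value:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/printk.h>

#define EX_HRRQ_UPDATED 0x20000000	/* illustrative bit, not the ipr value */

/* Clear a spurious "HRRQ updated" interrupt without flooding the log. */
static void example_clear_spurious(struct device *dev, u32 int_reg,
				   void __iomem *clr_reg,
				   void __iomem *sense_reg)
{
	if (printk_ratelimit())
		dev_err(dev, "Spurious interrupt detected. 0x%08X\n", int_reg);
	writel(EX_HRRQ_UPDATED, clr_reg);	/* write-one-to-clear */
	(void) readl(sense_reg);		/* pick up the post-clear status */
}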
@@ -5016,10 +5096,11 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5016{ 5096{
5017 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 5097 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5018 unsigned long lock_flags = 0; 5098 unsigned long lock_flags = 0;
5019 volatile u32 int_reg, int_mask_reg; 5099 u32 int_reg = 0;
5020 u32 ioasc; 5100 u32 ioasc;
5021 u16 cmd_index; 5101 u16 cmd_index;
5022 int num_hrrq = 0; 5102 int num_hrrq = 0;
5103 int irq_none = 0;
5023 struct ipr_cmnd *ipr_cmd; 5104 struct ipr_cmnd *ipr_cmd;
5024 irqreturn_t rc = IRQ_NONE; 5105 irqreturn_t rc = IRQ_NONE;
5025 5106
@@ -5031,33 +5112,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5031 return IRQ_NONE; 5112 return IRQ_NONE;
5032 } 5113 }
5033 5114
5034 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5035 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
5036
5037 /* If an interrupt on the adapter did not occur, ignore it.
5038 * Or in the case of SIS 64, check for a stage change interrupt.
5039 */
5040 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
5041 if (ioa_cfg->sis64) {
5042 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5043 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5044 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5045
5046 /* clear stage change */
5047 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5048 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5049 list_del(&ioa_cfg->reset_cmd->queue);
5050 del_timer(&ioa_cfg->reset_cmd->timer);
5051 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5053 return IRQ_HANDLED;
5054 }
5055 }
5056
5057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5058 return IRQ_NONE;
5059 }
5060
5061 while (1) { 5115 while (1) {
5062 ipr_cmd = NULL; 5116 ipr_cmd = NULL;
5063 5117
@@ -5097,7 +5151,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5097 /* Clear the PCI interrupt */ 5151 /* Clear the PCI interrupt */
5098 do { 5152 do {
5099 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5153 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5100 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; 5154 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5101 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5155 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5102 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5156 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5103 5157
@@ -5107,6 +5161,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5107 return IRQ_HANDLED; 5161 return IRQ_HANDLED;
5108 } 5162 }
5109 5163
5164 } else if (rc == IRQ_NONE && irq_none == 0) {
5165 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5166 irq_none++;
5110 } else 5167 } else
5111 break; 5168 break;
5112 } 5169 }
@@ -5143,7 +5200,8 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5143 5200
5144 nseg = scsi_dma_map(scsi_cmd); 5201 nseg = scsi_dma_map(scsi_cmd);
5145 if (nseg < 0) { 5202 if (nseg < 0) {
5146 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 5203 if (printk_ratelimit())
5204 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5147 return -1; 5205 return -1;
5148 } 5206 }
5149 5207
@@ -5773,7 +5831,8 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5773 } 5831 }
5774 5832
5775 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5833 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5776 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 5834 if (ipr_is_gscsi(res))
5835 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5777 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; 5836 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5778 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); 5837 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5779 } 5838 }
@@ -7516,7 +7575,7 @@ static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7516static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) 7575static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7517{ 7576{
7518 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7577 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7519 volatile u32 int_reg; 7578 u32 int_reg;
7520 7579
7521 ENTER; 7580 ENTER;
7522 ioa_cfg->pdev->state_saved = true; 7581 ioa_cfg->pdev->state_saved = true;
@@ -7555,7 +7614,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7555 ipr_cmd->job_step = ipr_reset_enable_ioa; 7614 ipr_cmd->job_step = ipr_reset_enable_ioa;
7556 7615
7557 if (GET_DUMP == ioa_cfg->sdt_state) { 7616 if (GET_DUMP == ioa_cfg->sdt_state) {
7558 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT); 7617 if (ioa_cfg->sis64)
7618 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7619 else
7620 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7559 ipr_cmd->job_step = ipr_reset_wait_for_dump; 7621 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7560 schedule_work(&ioa_cfg->work_q); 7622 schedule_work(&ioa_cfg->work_q);
7561 return IPR_RC_JOB_RETURN; 7623 return IPR_RC_JOB_RETURN;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 13f425fb8851..f93f8637c5a1 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
38/* 38/*
39 * Literals 39 * Literals
40 */ 40 */
41#define IPR_DRIVER_VERSION "2.5.1" 41#define IPR_DRIVER_VERSION "2.5.2"
42#define IPR_DRIVER_DATE "(August 10, 2010)" 42#define IPR_DRIVER_DATE "(April 27, 2011)"
43 43
44/* 44/*
45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -217,7 +217,8 @@
217#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) 217#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
218#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) 218#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
219#define IPR_PCI_RESET_TIMEOUT (HZ / 2) 219#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
220#define IPR_DUMP_TIMEOUT (15 * HZ) 220#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ)
221#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ)
221#define IPR_DUMP_DELAY_SECONDS 4 222#define IPR_DUMP_DELAY_SECONDS 4
222#define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ) 223#define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ)
223 224
@@ -285,9 +286,12 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
285/* 286/*
286 * Dump literals 287 * Dump literals
287 */ 288 */
288#define IPR_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024) 289#define IPR_FMT2_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024)
289#define IPR_NUM_SDT_ENTRIES 511 290#define IPR_FMT3_MAX_IOA_DUMP_SIZE (32 * 1024 * 1024)
290#define IPR_MAX_NUM_DUMP_PAGES ((IPR_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) 291#define IPR_FMT2_NUM_SDT_ENTRIES 511
292#define IPR_FMT3_NUM_SDT_ENTRIES 0xFFF
293#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
294#define IPR_FMT3_MAX_NUM_DUMP_PAGES ((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
291 295
292/* 296/*
293 * Misc literals 297 * Misc literals
@@ -474,7 +478,7 @@ struct ipr_cmd_pkt {
474 478
475 u8 flags_lo; 479 u8 flags_lo;
476#define IPR_FLAGS_LO_ALIGNED_BFR 0x20 480#define IPR_FLAGS_LO_ALIGNED_BFR 0x20
477#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10 481#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10
478#define IPR_FLAGS_LO_UNTAGGED_TASK 0x00 482#define IPR_FLAGS_LO_UNTAGGED_TASK 0x00
479#define IPR_FLAGS_LO_SIMPLE_TASK 0x02 483#define IPR_FLAGS_LO_SIMPLE_TASK 0x02
480#define IPR_FLAGS_LO_ORDERED_TASK 0x04 484#define IPR_FLAGS_LO_ORDERED_TASK 0x04
@@ -1164,7 +1168,7 @@ struct ipr_sdt_header {
1164 1168
1165struct ipr_sdt { 1169struct ipr_sdt {
1166 struct ipr_sdt_header hdr; 1170 struct ipr_sdt_header hdr;
1167 struct ipr_sdt_entry entry[IPR_NUM_SDT_ENTRIES]; 1171 struct ipr_sdt_entry entry[IPR_FMT3_NUM_SDT_ENTRIES];
1168}__attribute__((packed, aligned (4))); 1172}__attribute__((packed, aligned (4)));
1169 1173
1170struct ipr_uc_sdt { 1174struct ipr_uc_sdt {
@@ -1608,7 +1612,7 @@ struct ipr_driver_dump {
1608struct ipr_ioa_dump { 1612struct ipr_ioa_dump {
1609 struct ipr_dump_entry_header hdr; 1613 struct ipr_dump_entry_header hdr;
1610 struct ipr_sdt sdt; 1614 struct ipr_sdt sdt;
1611 __be32 *ioa_data[IPR_MAX_NUM_DUMP_PAGES]; 1615 __be32 **ioa_data;
1612 u32 reserved; 1616 u32 reserved;
1613 u32 next_page_index; 1617 u32 next_page_index;
1614 u32 page_offset; 1618 u32 page_offset;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5b799a37ad09..2a3a4720a771 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -57,9 +57,6 @@ static struct kmem_cache *scsi_pkt_cachep;
57#define FC_SRB_READ (1 << 1) 57#define FC_SRB_READ (1 << 1)
58#define FC_SRB_WRITE (1 << 0) 58#define FC_SRB_WRITE (1 << 0)
59 59
60/* constant added to e_d_tov timeout to get rec_tov value */
61#define REC_TOV_CONST 1
62
63/* 60/*
64 * The SCp.ptr should be tested and set under the scsi_pkt_queue lock 61 * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
65 */ 62 */
@@ -248,7 +245,7 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
248/** 245/**
249 * fc_fcp_timer_set() - Start a timer for a fcp_pkt 246 * fc_fcp_timer_set() - Start a timer for a fcp_pkt
250 * @fsp: The FCP packet to start a timer for 247 * @fsp: The FCP packet to start a timer for
251 * @delay: The timeout period for the timer 248 * @delay: The timeout period in jiffies
252 */ 249 */
253static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) 250static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
254{ 251{
@@ -335,22 +332,23 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
335/** 332/**
336 * fc_fcp_can_queue_ramp_up() - increases can_queue 333 * fc_fcp_can_queue_ramp_up() - increases can_queue
337 * @lport: lport to ramp up can_queue 334 * @lport: lport to ramp up can_queue
338 *
339 * Locking notes: Called with Scsi_Host lock held
340 */ 335 */
341static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) 336static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
342{ 337{
343 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 338 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
339 unsigned long flags;
344 int can_queue; 340 int can_queue;
345 341
342 spin_lock_irqsave(lport->host->host_lock, flags);
343
346 if (si->last_can_queue_ramp_up_time && 344 if (si->last_can_queue_ramp_up_time &&
347 (time_before(jiffies, si->last_can_queue_ramp_up_time + 345 (time_before(jiffies, si->last_can_queue_ramp_up_time +
348 FC_CAN_QUEUE_PERIOD))) 346 FC_CAN_QUEUE_PERIOD)))
349 return; 347 goto unlock;
350 348
351 if (time_before(jiffies, si->last_can_queue_ramp_down_time + 349 if (time_before(jiffies, si->last_can_queue_ramp_down_time +
352 FC_CAN_QUEUE_PERIOD)) 350 FC_CAN_QUEUE_PERIOD))
353 return; 351 goto unlock;
354 352
355 si->last_can_queue_ramp_up_time = jiffies; 353 si->last_can_queue_ramp_up_time = jiffies;
356 354
@@ -362,6 +360,9 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
362 lport->host->can_queue = can_queue; 360 lport->host->can_queue = can_queue;
363 shost_printk(KERN_ERR, lport->host, "libfc: increased " 361 shost_printk(KERN_ERR, lport->host, "libfc: increased "
364 "can_queue to %d.\n", can_queue); 362 "can_queue to %d.\n", can_queue);
363
364unlock:
365 spin_unlock_irqrestore(lport->host->host_lock, flags);
365} 366}
366 367
367/** 368/**
@@ -373,18 +374,19 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
373 * commands complete or timeout, then try again with a reduced 374 * commands complete or timeout, then try again with a reduced
374 * can_queue. Eventually we will hit the point where we run 375 * can_queue. Eventually we will hit the point where we run
375 * on all reserved structs. 376 * on all reserved structs.
376 *
377 * Locking notes: Called with Scsi_Host lock held
378 */ 377 */
379static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) 378static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
380{ 379{
381 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 380 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
381 unsigned long flags;
382 int can_queue; 382 int can_queue;
383 383
384 spin_lock_irqsave(lport->host->host_lock, flags);
385
384 if (si->last_can_queue_ramp_down_time && 386 if (si->last_can_queue_ramp_down_time &&
385 (time_before(jiffies, si->last_can_queue_ramp_down_time + 387 (time_before(jiffies, si->last_can_queue_ramp_down_time +
386 FC_CAN_QUEUE_PERIOD))) 388 FC_CAN_QUEUE_PERIOD)))
387 return; 389 goto unlock;
388 390
389 si->last_can_queue_ramp_down_time = jiffies; 391 si->last_can_queue_ramp_down_time = jiffies;
390 392
@@ -395,6 +397,9 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
395 lport->host->can_queue = can_queue; 397 lport->host->can_queue = can_queue;
396 shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" 398 shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
397 "Reducing can_queue to %d.\n", can_queue); 399 "Reducing can_queue to %d.\n", can_queue);
400
401unlock:
402 spin_unlock_irqrestore(lport->host->host_lock, flags);
398} 403}
399 404
400/* 405/*
@@ -409,16 +414,13 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
409 size_t len) 414 size_t len)
410{ 415{
411 struct fc_frame *fp; 416 struct fc_frame *fp;
412 unsigned long flags;
413 417
414 fp = fc_frame_alloc(lport, len); 418 fp = fc_frame_alloc(lport, len);
415 if (likely(fp)) 419 if (likely(fp))
416 return fp; 420 return fp;
417 421
418 /* error case */ 422 /* error case */
419 spin_lock_irqsave(lport->host->host_lock, flags);
420 fc_fcp_can_queue_ramp_down(lport); 423 fc_fcp_can_queue_ramp_down(lport);
421 spin_unlock_irqrestore(lport->host->host_lock, flags);
422 return NULL; 424 return NULL;
423} 425}
424 426
@@ -1093,16 +1095,14 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1093/** 1095/**
1094 * get_fsp_rec_tov() - Helper function to get REC_TOV 1096 * get_fsp_rec_tov() - Helper function to get REC_TOV
1095 * @fsp: the FCP packet 1097 * @fsp: the FCP packet
1098 *
1099 * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
1096 */ 1100 */
1097static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp) 1101static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
1098{ 1102{
1099 struct fc_rport *rport; 1103 struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
1100 struct fc_rport_libfc_priv *rpriv;
1101
1102 rport = fsp->rport;
1103 rpriv = rport->dd_data;
1104 1104
1105 return rpriv->e_d_tov + REC_TOV_CONST; 1105 return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
1106} 1106}
1107 1107
1108/** 1108/**
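get_fsp_rec_tov() used to return e_d_tov (a millisecond value) plus a bare constant and feed that straight into a jiffies-based timer; it now converts with msecs_to_jiffies() and adds one second of margin (HZ), so REC_TOV_CONST can go away. A hedged one-liner sketch of the conversion:

#include <linux/jiffies.h>

/* e_d_tov is carried in milliseconds; timer code wants jiffies. */
static inline unsigned long example_rec_tov(unsigned int e_d_tov_ms)
{
	return msecs_to_jiffies(e_d_tov_ms) + HZ;	/* + 1 second of margin */
}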
@@ -1122,7 +1122,6 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1122 struct fc_rport_libfc_priv *rpriv; 1122 struct fc_rport_libfc_priv *rpriv;
1123 const size_t len = sizeof(fsp->cdb_cmd); 1123 const size_t len = sizeof(fsp->cdb_cmd);
1124 int rc = 0; 1124 int rc = 0;
1125 unsigned int rec_tov;
1126 1125
1127 if (fc_fcp_lock_pkt(fsp)) 1126 if (fc_fcp_lock_pkt(fsp))
1128 return 0; 1127 return 0;
@@ -1153,12 +1152,9 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1153 fsp->seq_ptr = seq; 1152 fsp->seq_ptr = seq;
1154 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1153 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
1155 1154
1156 rec_tov = get_fsp_rec_tov(fsp);
1157
1158 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); 1155 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
1159
1160 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) 1156 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
1161 fc_fcp_timer_set(fsp, rec_tov); 1157 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
1162 1158
1163unlock: 1159unlock:
1164 fc_fcp_unlock_pkt(fsp); 1160 fc_fcp_unlock_pkt(fsp);
@@ -1235,16 +1231,14 @@ static void fc_lun_reset_send(unsigned long data)
1235{ 1231{
1236 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1232 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1237 struct fc_lport *lport = fsp->lp; 1233 struct fc_lport *lport = fsp->lp;
1238 unsigned int rec_tov;
1239 1234
1240 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { 1235 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
1241 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1236 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1242 return; 1237 return;
1243 if (fc_fcp_lock_pkt(fsp)) 1238 if (fc_fcp_lock_pkt(fsp))
1244 return; 1239 return;
1245 rec_tov = get_fsp_rec_tov(fsp);
1246 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); 1240 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
1247 fc_fcp_timer_set(fsp, rec_tov); 1241 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
1248 fc_fcp_unlock_pkt(fsp); 1242 fc_fcp_unlock_pkt(fsp);
1249 } 1243 }
1250} 1244}
@@ -1536,12 +1530,11 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1536 } 1530 }
1537 fc_fcp_srr(fsp, r_ctl, offset); 1531 fc_fcp_srr(fsp, r_ctl, offset);
1538 } else if (e_stat & ESB_ST_SEQ_INIT) { 1532 } else if (e_stat & ESB_ST_SEQ_INIT) {
1539 unsigned int rec_tov = get_fsp_rec_tov(fsp);
1540 /* 1533 /*
1541 * The remote port has the initiative, so just 1534 * The remote port has the initiative, so just
1542 * keep waiting for it to complete. 1535 * keep waiting for it to complete.
1543 */ 1536 */
1544 fc_fcp_timer_set(fsp, rec_tov); 1537 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
1545 } else { 1538 } else {
1546 1539
1547 /* 1540 /*
@@ -1705,7 +1698,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1705{ 1698{
1706 struct fc_fcp_pkt *fsp = arg; 1699 struct fc_fcp_pkt *fsp = arg;
1707 struct fc_frame_header *fh; 1700 struct fc_frame_header *fh;
1708 unsigned int rec_tov;
1709 1701
1710 if (IS_ERR(fp)) { 1702 if (IS_ERR(fp)) {
1711 fc_fcp_srr_error(fsp, fp); 1703 fc_fcp_srr_error(fsp, fp);
@@ -1732,8 +1724,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1732 switch (fc_frame_payload_op(fp)) { 1724 switch (fc_frame_payload_op(fp)) {
1733 case ELS_LS_ACC: 1725 case ELS_LS_ACC:
1734 fsp->recov_retry = 0; 1726 fsp->recov_retry = 0;
1735 rec_tov = get_fsp_rec_tov(fsp); 1727 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
1736 fc_fcp_timer_set(fsp, rec_tov);
1737 break; 1728 break;
1738 case ELS_LS_RJT: 1729 case ELS_LS_RJT:
1739 default: 1730 default:
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 906bbcad0e2d..389ab80aef0a 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1590,7 +1590,6 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
1590 */ 1590 */
1591int fc_lport_config(struct fc_lport *lport) 1591int fc_lport_config(struct fc_lport *lport)
1592{ 1592{
1593 INIT_LIST_HEAD(&lport->ema_list);
1594 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); 1593 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1595 mutex_init(&lport->lp_mutex); 1594 mutex_init(&lport->lp_mutex);
1596 1595
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 60e98a62f308..02d53d89534f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -805,6 +805,8 @@ struct lpfc_hba {
805 struct dentry *idiag_root; 805 struct dentry *idiag_root;
806 struct dentry *idiag_pci_cfg; 806 struct dentry *idiag_pci_cfg;
807 struct dentry *idiag_que_info; 807 struct dentry *idiag_que_info;
808 struct dentry *idiag_que_acc;
809 struct dentry *idiag_drb_acc;
808#endif 810#endif
809 811
810 /* Used for deferred freeing of ELS data buffers */ 812 /* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 77b2871d96b7..37e2a1272f86 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2426,6 +2426,7 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2426{ 2426{
2427 struct bsg_job_data *dd_data; 2427 struct bsg_job_data *dd_data;
2428 struct fc_bsg_job *job; 2428 struct fc_bsg_job *job;
2429 struct lpfc_mbx_nembed_cmd *nembed_sge;
2429 uint32_t size; 2430 uint32_t size;
2430 unsigned long flags; 2431 unsigned long flags;
2431 uint8_t *to; 2432 uint8_t *to;
@@ -2469,9 +2470,8 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2469 memcpy(to, from, size); 2470 memcpy(to, from, size);
2470 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 2471 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2471 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) { 2472 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
2472 struct lpfc_mbx_nembed_cmd *nembed_sge = 2473 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2473 (struct lpfc_mbx_nembed_cmd *) 2474 &pmboxq->u.mb.un.varWords[0];
2474 &pmboxq->u.mb.un.varWords[0];
2475 2475
2476 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. 2476 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2477 virt; 2477 virt;
@@ -2496,16 +2496,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2496 job->reply_payload.sg_cnt, 2496 job->reply_payload.sg_cnt,
2497 from, size); 2497 from, size);
2498 job->reply->result = 0; 2498 job->reply->result = 0;
2499 2499 /* need to hold the lock until we set job->dd_data to NULL
2500 * to hold off the timeout handler returning to the mid-layer
2501 * while we are still processing the job.
2502 */
2500 job->dd_data = NULL; 2503 job->dd_data = NULL;
2504 dd_data->context_un.mbox.set_job = NULL;
2505 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2501 job->job_done(job); 2506 job->job_done(job);
2507 } else {
2508 dd_data->context_un.mbox.set_job = NULL;
2509 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2502 } 2510 }
2503 dd_data->context_un.mbox.set_job = NULL;
2504 /* need to hold the lock until we call job done to hold off
2505 * the timeout handler returning to the midlayer while
2506 * we are stillprocessing the job
2507 */
2508 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2509 2511
2510 kfree(dd_data->context_un.mbox.mb); 2512 kfree(dd_data->context_un.mbox.mb);
2511 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 2513 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
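lpfc_bsg_wake_mbox_wait() now clears job->dd_data and the saved job pointer while still holding phba->ct_ev_lock, and only then drops the lock and calls job_done(); a racing timeout handler can therefore never see a half-torn-down job. A generic sketch of the claim-under-lock, complete-outside-lock pattern, not the lpfc data structures:

#include <linux/spinlock.h>

struct example_ctx {
	spinlock_t lock;
	void *job;		/* NULL once the job has been claimed */
};

typedef void (*example_done_fn)(void *job);

/* Claim the job under the lock, then complete it with the lock dropped. */
static void example_complete(struct example_ctx *ctx, example_done_fn done)
{
	unsigned long flags;
	void *job;

	spin_lock_irqsave(&ctx->lock, flags);
	job = ctx->job;
	ctx->job = NULL;	/* a racing timeout now sees "already completed" */
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (job)
		done(job);	/* call out without holding the lock */
}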
@@ -2644,6 +2646,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2644 struct ulp_bde64 *rxbpl = NULL; 2646 struct ulp_bde64 *rxbpl = NULL;
2645 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *) 2647 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
2646 job->request->rqst_data.h_vendor.vendor_cmd; 2648 job->request->rqst_data.h_vendor.vendor_cmd;
2649 struct READ_EVENT_LOG_VAR *rdEventLog;
2650 uint32_t transmit_length, receive_length, mode;
2651 struct lpfc_mbx_nembed_cmd *nembed_sge;
2652 struct mbox_header *header;
2653 struct ulp_bde64 *bde;
2647 uint8_t *ext = NULL; 2654 uint8_t *ext = NULL;
2648 int rc = 0; 2655 int rc = 0;
2649 uint8_t *from; 2656 uint8_t *from;
@@ -2651,9 +2658,16 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2651 /* in case no data is transferred */ 2658 /* in case no data is transferred */
2652 job->reply->reply_payload_rcv_len = 0; 2659 job->reply->reply_payload_rcv_len = 0;
2653 2660
2661 /* sanity check to protect driver */
2662 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
2663 job->request_payload.payload_len > BSG_MBOX_SIZE) {
2664 rc = -ERANGE;
2665 goto job_done;
2666 }
2667
2654 /* check if requested extended data lengths are valid */ 2668 /* check if requested extended data lengths are valid */
2655 if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) || 2669 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
2656 (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) { 2670 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
2657 rc = -ERANGE; 2671 rc = -ERANGE;
2658 goto job_done; 2672 goto job_done;
2659 } 2673 }
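Before touching the mailbox, lpfc_bsg_issue_mbox() now rejects BSG requests whose request or reply payload exceeds BSG_MBOX_SIZE, and the extended-word limits are checked against the same size expressed in 32-bit words. A minimal sketch of the bounds check with a stand-in size constant:

#include <linux/errno.h>
#include <linux/types.h>

#define EX_MBOX_SIZE 4096	/* stand-in for BSG_MBOX_SIZE */

/* Reject oversized pass-through payloads up front. */
static int example_check_lengths(u32 request_len, u32 reply_len,
				 u32 in_ext_words, u32 out_ext_words)
{
	if (request_len > EX_MBOX_SIZE || reply_len > EX_MBOX_SIZE)
		return -ERANGE;
	if (in_ext_words > EX_MBOX_SIZE / sizeof(u32) ||
	    out_ext_words > EX_MBOX_SIZE / sizeof(u32))
		return -ERANGE;
	return 0;
}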
@@ -2744,8 +2758,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2744 * use ours 2758 * use ours
2745 */ 2759 */
2746 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { 2760 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
2747 uint32_t transmit_length = pmb->un.varWords[1]; 2761 transmit_length = pmb->un.varWords[1];
2748 uint32_t receive_length = pmb->un.varWords[4]; 2762 receive_length = pmb->un.varWords[4];
2749 /* transmit length cannot be greater than receive length or 2763 /* transmit length cannot be greater than receive length or
2750 * mailbox extension size 2764 * mailbox extension size
2751 */ 2765 */
@@ -2795,10 +2809,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2795 from += sizeof(MAILBOX_t); 2809 from += sizeof(MAILBOX_t);
2796 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length); 2810 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
2797 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { 2811 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
2798 struct READ_EVENT_LOG_VAR *rdEventLog = 2812 rdEventLog = &pmb->un.varRdEventLog;
2799 &pmb->un.varRdEventLog ; 2813 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
2800 uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; 2814 mode = bf_get(lpfc_event_log, rdEventLog);
2801 uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
2802 2815
2803 /* receive length cannot be greater than mailbox 2816 /* receive length cannot be greater than mailbox
2804 * extension size 2817 * extension size
@@ -2843,7 +2856,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2843 /* rebuild the command for sli4 using our own buffers 2856 /* rebuild the command for sli4 using our own buffers
2844 * like we do for biu diags 2857 * like we do for biu diags
2845 */ 2858 */
2846 uint32_t receive_length = pmb->un.varWords[2]; 2859 receive_length = pmb->un.varWords[2];
2847 /* receive length cannot be greater than mailbox 2860 /* receive length cannot be greater than mailbox
2848 * extension size 2861 * extension size
2849 */ 2862 */
@@ -2879,8 +2892,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2879 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); 2892 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2880 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && 2893 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
2881 pmb->un.varUpdateCfg.co) { 2894 pmb->un.varUpdateCfg.co) {
2882 struct ulp_bde64 *bde = 2895 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
2883 (struct ulp_bde64 *)&pmb->un.varWords[4];
2884 2896
2885 /* bde size cannot be greater than mailbox ext size */ 2897 /* bde size cannot be greater than mailbox ext size */
2886 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { 2898 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
@@ -2921,10 +2933,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2921 memcpy((uint8_t *)dmp->dma.virt, from, 2933 memcpy((uint8_t *)dmp->dma.virt, from,
2922 bde->tus.f.bdeSize); 2934 bde->tus.f.bdeSize);
2923 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 2935 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
2924 struct lpfc_mbx_nembed_cmd *nembed_sge;
2925 struct mbox_header *header;
2926 uint32_t receive_length;
2927
2928 /* rebuild the command for sli4 using our own buffers 2936 /* rebuild the command for sli4 using our own buffers
2929 * like we do for biu diags 2937 * like we do for biu diags
2930 */ 2938 */
@@ -3386,6 +3394,7 @@ no_dd_data:
3386 job->dd_data = NULL; 3394 job->dd_data = NULL;
3387 return rc; 3395 return rc;
3388} 3396}
3397
3389/** 3398/**
3390 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 3399 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
3391 * @job: fc_bsg_job to handle 3400 * @job: fc_bsg_job to handle
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index a2c33e7c9152..b542aca6f5ae 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -109,3 +109,133 @@ struct menlo_response {
109 uint32_t xri; /* return the xri of the iocb exchange */ 109 uint32_t xri; /* return the xri of the iocb exchange */
110}; 110};
111 111
112/*
113 * macros and data structures for handling sli-config mailbox command
114 * pass-through support, this header file is shared between user and
115 * kernel spaces, note the set of macros are duplicates from lpfc_hw4.h,
116 * with macro names prefixed with bsg_, as the macros defined in
117 * lpfc_hw4.h are not accessible from user space.
118 */
119
120/* Macros to deal with bit fields. Each bit field must have 3 #defines
121 * associated with it (_SHIFT, _MASK, and _WORD).
122 * EG. For a bit field that is in the 7th bit of the "field4" field of a
123 * structure and is 2 bits in size the following #defines must exist:
124 * struct temp {
125 * uint32_t field1;
126 * uint32_t field2;
127 * uint32_t field3;
128 * uint32_t field4;
129 * #define example_bit_field_SHIFT 7
130 * #define example_bit_field_MASK 0x03
131 * #define example_bit_field_WORD field4
132 * uint32_t field5;
133 * };
134 * Then the macros below may be used to get or set the value of that field.
135 * EG. To get the value of the bit field from the above example:
136 * struct temp t1;
137 * value = bsg_bf_get(example_bit_field, &t1);
138 * And then to set that bit field:
139 * bsg_bf_set(example_bit_field, &t1, 2);
140 * Or clear that bit field:
141 * bsg_bf_set(example_bit_field, &t1, 0);
142 */
143#define bsg_bf_get_le32(name, ptr) \
144 ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
145#define bsg_bf_get(name, ptr) \
146 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
147#define bsg_bf_set_le32(name, ptr, value) \
148 ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
149 name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
150 ~(name##_MASK << name##_SHIFT)))))
151#define bsg_bf_set(name, ptr, value) \
152 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
153 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
154
155/*
156 * The sli_config structure specified here is based on the following
157 * restriction:
158 *
159 * -- SLI_CONFIG EMB=0, carrying MSEs, will carry subcommands without
160 * carrying HBD.
161 * -- SLI_CONFIG EMB=1, not carrying MSE, will carry subcommands with or
162 * without carrying HBDs.
163 */
164
165struct lpfc_sli_config_mse {
166 uint32_t pa_lo;
167 uint32_t pa_hi;
168 uint32_t buf_len;
169#define lpfc_mbox_sli_config_mse_len_SHIFT 0
170#define lpfc_mbox_sli_config_mse_len_MASK 0xffffff
171#define lpfc_mbox_sli_config_mse_len_WORD buf_len
172};
173
174struct lpfc_sli_config_subcmd_hbd {
175 uint32_t buf_len;
176#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
177#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
178#define lpfc_mbox_sli_config_ecmn_hbd_len_WORD buf_len
179 uint32_t pa_lo;
180 uint32_t pa_hi;
181};
182
183struct lpfc_sli_config_hdr {
184 uint32_t word1;
185#define lpfc_mbox_hdr_emb_SHIFT 0
186#define lpfc_mbox_hdr_emb_MASK 0x00000001
187#define lpfc_mbox_hdr_emb_WORD word1
188#define lpfc_mbox_hdr_mse_cnt_SHIFT 3
189#define lpfc_mbox_hdr_mse_cnt_MASK 0x0000001f
190#define lpfc_mbox_hdr_mse_cnt_WORD word1
191 uint32_t payload_length;
192 uint32_t tag_lo;
193 uint32_t tag_hi;
194 uint32_t reserved5;
195};
196
197struct lpfc_sli_config_generic {
198 struct lpfc_sli_config_hdr sli_config_hdr;
199#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
200 struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
201};
202
203struct lpfc_sli_config_subcmnd {
204 struct lpfc_sli_config_hdr sli_config_hdr;
205 uint32_t word6;
206#define lpfc_subcmnd_opcode_SHIFT 0
207#define lpfc_subcmnd_opcode_MASK 0xff
208#define lpfc_subcmnd_opcode_WORD word6
209#define lpfc_subcmnd_subsys_SHIFT 8
210#define lpfc_subcmnd_subsys_MASK 0xff
211#define lpfc_subcmnd_subsys_WORD word6
212 uint32_t timeout;
213 uint32_t request_length;
214 uint32_t word9;
215#define lpfc_subcmnd_version_SHIFT 0
216#define lpfc_subcmnd_version_MASK 0xff
217#define lpfc_subcmnd_version_WORD word9
218 uint32_t word10;
219#define lpfc_subcmnd_ask_rd_len_SHIFT 0
220#define lpfc_subcmnd_ask_rd_len_MASK 0xffffff
221#define lpfc_subcmnd_ask_rd_len_WORD word10
222 uint32_t rd_offset;
223 uint32_t obj_name[26];
224 uint32_t hbd_count;
225#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
226 struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
227};
228
229struct lpfc_sli_config_mbox {
230 uint32_t word0;
231#define lpfc_mqe_status_SHIFT 16
232#define lpfc_mqe_status_MASK 0x0000FFFF
233#define lpfc_mqe_status_WORD word0
234#define lpfc_mqe_command_SHIFT 8
235#define lpfc_mqe_command_MASK 0x000000FF
236#define lpfc_mqe_command_WORD word0
237 union {
238 struct lpfc_sli_config_generic sli_config_generic;
239 struct lpfc_sli_config_subcmnd sli_config_subcmnd;
240 } un;
241};
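The new lpfc_bsg.h block duplicates the driver's bit-field accessors with a bsg_ prefix so user space can parse SLI_CONFIG mailboxes without lpfc_hw4.h. A small user-space example of how the _SHIFT/_MASK/_WORD triplet drives bsg_bf_get()/bsg_bf_set(), reusing the field layout from the header's own comment:

#include <stdint.h>
#include <stdio.h>

#define bsg_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bsg_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct temp {
	uint32_t field4;
#define example_bit_field_SHIFT 7
#define example_bit_field_MASK  0x03
#define example_bit_field_WORD  field4
};

int main(void)
{
	struct temp t1 = { 0 };

	bsg_bf_set(example_bit_field, &t1, 2);		/* set the 2-bit field */
	printf("field = %u, word = 0x%08x\n",
	       (unsigned int)bsg_bf_get(example_bit_field, &t1),
	       (unsigned int)t1.field4);
	bsg_bf_set(example_bit_field, &t1, 0);		/* clear it again */
	return 0;
}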
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3d967741c708..c93fca058603 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1119,172 +1119,14 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
1119} 1119}
1120 1120
1121/* 1121/*
1122 * ---------------------------------
1122 * iDiag debugfs file access methods 1123 * iDiag debugfs file access methods
1123 */ 1124 * ---------------------------------
1124
1125/*
1126 * iDiag PCI config space register access methods:
1127 *
1128 * The PCI config space register accessees of read, write, read-modify-write
1129 * for set bits, and read-modify-write for clear bits to SLI4 PCI functions
1130 * are provided. In the proper SLI4 PCI function's debugfs iDiag directory,
1131 *
1132 * /sys/kernel/debug/lpfc/fn<#>/iDiag
1133 *
1134 * the access is through the debugfs entry pciCfg:
1135 *
1136 * 1. For PCI config space register read access, there are two read methods:
1137 * A) read a single PCI config space register in the size of a byte
1138 * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through
1139 * the 4K extended PCI config space.
1140 *
1141 * A) Read a single PCI config space register consists of two steps:
1142 *
1143 * Step-1: Set up PCI config space register read command, the command
1144 * syntax is,
1145 *
1146 * echo 1 <where> <count> > pciCfg
1147 *
1148 * where, 1 is the iDiag command for PCI config space read, <where> is the
1149 * offset from the beginning of the device's PCI config space to read from,
1150 * and <count> is the size of PCI config space register data to read back,
1151 * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits
1152 * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes).
1153 *
1154 * Setp-2: Perform the debugfs read operation to execute the idiag command
1155 * set up in Step-1,
1156 *
1157 * cat pciCfg
1158 *
1159 * Examples:
1160 * To read PCI device's vendor-id and device-id from PCI config space,
1161 *
1162 * echo 1 0 4 > pciCfg
1163 * cat pciCfg
1164 *
1165 * To read PCI device's currnt command from config space,
1166 *
1167 * echo 1 4 2 > pciCfg
1168 * cat pciCfg
1169 *
1170 * B) Browse through the entire 4K extended PCI config space also consists
1171 * of two steps:
1172 *
1173 * Step-1: Set up PCI config space register browsing command, the command
1174 * syntax is,
1175 *
1176 * echo 1 0 4096 > pciCfg
1177 *
1178 * where, 1 is the iDiag command for PCI config space read, 0 must be used
1179 * as the offset for PCI config space register browse, and 4096 must be
1180 * used as the count for PCI config space register browse.
1181 *
1182 * Step-2: Repeately issue the debugfs read operation to browse through
1183 * the entire PCI config space registers:
1184 *
1185 * cat pciCfg
1186 * cat pciCfg
1187 * cat pciCfg
1188 * ...
1189 *
1190 * When browsing to the end of the 4K PCI config space, the browse method
1191 * shall wrap around to start reading from beginning again, and again...
1192 *
1193 * 2. For PCI config space register write access, it supports a single PCI
1194 * config space register write in the size of a byte (8 bits), a word
1195 * (16 bits), or a dword (32 bits). The command syntax is,
1196 *
1197 * echo 2 <where> <count> <value> > pciCfg
1198 *
1199 * where, 2 is the iDiag command for PCI config space write, <where> is
1200 * the offset from the beginning of the device's PCI config space to write
1201 * into, <count> is the size of data to write into the PCI config space,
1202 * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits
1203 * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value>
1204 * is the data to be written into the PCI config space register at the
1205 * offset.
1206 *
1207 * Examples:
1208 * To disable PCI device's interrupt assertion,
1209 *
1210 * 1) Read in device's PCI config space register command field <cmd>:
1211 *
1212 * echo 1 4 2 > pciCfg
1213 * cat pciCfg
1214 *
1215 * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>:
1216 *
1217 * <cmd> = <cmd> | (1 << 10)
1218 *
1219 * 3) Write the modified command back:
1220 *
1221 * echo 2 4 2 <cmd> > pciCfg
1222 *
1223 * 3. For PCI config space register set bits access, it supports a single PCI
1224 * config space register set bits in the size of a byte (8 bits), a word
1225 * (16 bits), or a dword (32 bits). The command syntax is,
1226 *
1227 * echo 3 <where> <count> <bitmask> > pciCfg
1228 *
1229 * where, 3 is the iDiag command for PCI config space set bits, <where> is
1230 * the offset from the beginning of the device's PCI config space to set
1231 * bits into, <count> is the size of the bitmask to set into the PCI config
1232 * space, it will be 1 for setting a byte (8 bits), 2 for setting a word
1233 * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and
1234 * <bitmask> is the bitmask, indicating the bits to be set into the PCI
1235 * config space register at the offset. The logic performed to the content
1236 * of the PCI config space register, regval, is,
1237 *
1238 * regval |= <bitmask>
1239 *
1240 * 4. For PCI config space register clear bits access, it supports a single
1241 * PCI config space register clear bits in the size of a byte (8 bits),
1242 * a word (16 bits), or a dword (32 bits). The command syntax is,
1243 *
1244 * echo 4 <where> <count> <bitmask> > pciCfg
1245 *
1246 * where, 4 is the iDiag command for PCI config space clear bits, <where>
1247 * is the offset from the beginning of the device's PCI config space to
1248 * clear bits from, <count> is the size of the bitmask to set into the PCI
1249 * config space, it will be 1 for setting a byte (8 bits), 2 for setting
1250 * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4
1251 * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared
1252 * from the PCI config space register at the offset. the logic performed
1253 * to the content of the PCI config space register, regval, is,
1254 *
1255 * regval &= ~<bitmask>
1256 *
1257 * Note, for all single register read, write, set bits, or clear bits access,
1258 * the offset (<where>) must be aligned with the size of the data:
1259 *
1260 * For data size of byte (8 bits), the offset must be aligned to the byte
1261 * boundary; for data size of word (16 bits), the offset must be aligned
1262 * to the word boundary; while for data size of dword (32 bits), the offset
1263 * must be aligned to the dword boundary. Otherwise, the interface will
1264 * return the error:
1265 * 1125 *
1266 * "-bash: echo: write error: Invalid argument". 1126 * All access methods are through the proper SLI4 PCI function's debugfs
1127 * iDiag directory:
1267 * 1128 *
1268 * For example: 1129 * /sys/kernel/debug/lpfc/fn<#>/iDiag
1269 *
1270 * echo 1 2 4 > pciCfg
1271 * -bash: echo: write error: Invalid argument
1272 *
1273 * Note also, all of the numbers in the command fields for all read, write,
1274 * set bits, and clear bits PCI config space register command fields can be
1275 * either decimal or hex.
1276 *
1277 * For example,
1278 * echo 1 0 4096 > pciCfg
1279 *
1280 * will be the same as
1281 * echo 1 0 0x1000 > pciCfg
1282 *
1283 * And,
1284 * echo 2 155 1 10 > pciCfg
1285 *
1286 * will be
1287 * echo 2 0x9b 1 0xa > pciCfg
1288 */ 1130 */
1289 1131
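The trimmed header comment above now only points at the per-function debugfs directory; the detailed command syntax moves next to each handler below. As a rough user-space sketch of the same echo/cat pciCfg sequence the old comment documented (the fn0 function number and the "1 0 4" vendor/device-id read are assumed example values, not requirements of the driver):

/* Sketch only: drive pciCfg the way "echo 1 0 4 > pciCfg; cat pciCfg" does.
 * The fn0 path component is an assumed example; use the fn<#> entry that
 * matches the HBA under test.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/lpfc/fn0/iDiag/pciCfg";
	const char *cmd = "1 0 4";	/* opcode 1: read 4 bytes at offset 0 */
	char buf[512];
	ssize_t n;
	int fd;

	fd = open(path, O_WRONLY);		/* echo 1 0 4 > pciCfg */
	if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
		return 1;
	close(fd);

	fd = open(path, O_RDONLY);		/* cat pciCfg */
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* one "%03x: %08x" line per the driver */
	}
	close(fd);
	return 0;
}
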
1290/** 1132/**
@@ -1331,10 +1173,10 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
1331 for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { 1173 for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
1332 step_str = strsep(&pbuf, "\t "); 1174 step_str = strsep(&pbuf, "\t ");
1333 if (!step_str) 1175 if (!step_str)
1334 return 0; 1176 return i;
1335 idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); 1177 idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
1336 } 1178 }
1337 return 0; 1179 return i;
1338} 1180}
1339 1181
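The other change in this hunk is that lpfc_idiag_cmd_get() now returns the number of data words it parsed instead of always returning zero; the write handlers below compare that count against LPFC_PCI_CFG_RD_CMD_ARG, LPFC_QUE_ACC_WR_CMD_ARG and friends to reject malformed command lines. A minimal user-space sketch of that convention, with simplified types and names that do not mirror the kernel structures exactly:

/* Sketch of the "return the parsed argument count" convention. */
#define _GNU_SOURCE			/* strsep() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CMD_DATA_SIZE	8		/* mirrors LPFC_IDIAG_CMD_DATA_SIZE */

static int cmd_get(char *line, unsigned long *opcode, unsigned long data[])
{
	char *tok;
	int i;

	tok = strsep(&line, "\t ");
	if (!tok)
		return -1;
	*opcode = strtoul(tok, NULL, 0);	/* decimal or 0x hex, as before */

	for (i = 0; i < CMD_DATA_SIZE; i++) {
		tok = strsep(&line, "\t ");
		if (!tok)
			break;
		data[i] = strtoul(tok, NULL, 0);
	}
	return i;			/* argument count seen on the line */
}

int main(void)
{
	char line[] = "1 0 4";		/* pciCfg read: offset 0, count 4 */
	unsigned long opcode, data[CMD_DATA_SIZE];
	int argc = cmd_get(line, &opcode, data);

	/* a pciCfg read must carry exactly 2 args (LPFC_PCI_CFG_RD_CMD_ARG) */
	printf("opcode %lu, %d args -> %s\n", opcode, argc,
	       argc == 2 ? "accepted" : "rejected");
	return 0;
}
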
1340/** 1182/**
@@ -1403,7 +1245,7 @@ lpfc_idiag_release(struct inode *inode, struct file *file)
1403 * Description: 1245 * Description:
1404 * This routine frees the buffer that was allocated when the debugfs file 1246 * This routine frees the buffer that was allocated when the debugfs file
1405 * was opened. It also resets the fields in the idiag command struct in the 1247 * was opened. It also resets the fields in the idiag command struct in the
1406 * case the command is not continuous browsing of the data structure. 1248 * case of a write operation command.
1407 * 1249 *
1408 * Returns: 1250 * Returns:
1409 * This function returns zero. 1251 * This function returns zero.
@@ -1413,18 +1255,20 @@ lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
1413{ 1255{
1414 struct lpfc_debug *debug = file->private_data; 1256 struct lpfc_debug *debug = file->private_data;
1415 1257
1416 /* Read PCI config register, if not read all, clear command fields */ 1258 if (debug->op == LPFC_IDIAG_OP_WR) {
1417 if ((debug->op == LPFC_IDIAG_OP_RD) && 1259 switch (idiag.cmd.opcode) {
1418 (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD)) 1260 case LPFC_IDIAG_CMD_PCICFG_WR:
1419 if ((idiag.cmd.data[1] == sizeof(uint8_t)) || 1261 case LPFC_IDIAG_CMD_PCICFG_ST:
1420 (idiag.cmd.data[1] == sizeof(uint16_t)) || 1262 case LPFC_IDIAG_CMD_PCICFG_CL:
1421 (idiag.cmd.data[1] == sizeof(uint32_t))) 1263 case LPFC_IDIAG_CMD_QUEACC_WR:
1264 case LPFC_IDIAG_CMD_QUEACC_ST:
1265 case LPFC_IDIAG_CMD_QUEACC_CL:
1422 memset(&idiag, 0, sizeof(idiag)); 1266 memset(&idiag, 0, sizeof(idiag));
1423 1267 break;
1424 /* Write PCI config register, clear command fields */ 1268 default:
1425 if ((debug->op == LPFC_IDIAG_OP_WR) && 1269 break;
1426 (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)) 1270 }
1427 memset(&idiag, 0, sizeof(idiag)); 1271 }
1428 1272
1429 /* Free the buffers to the file operation */ 1273 /* Free the buffers to the file operation */
1430 kfree(debug->buffer); 1274 kfree(debug->buffer);
@@ -1504,7 +1348,7 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
1504 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1348 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
1505 "%03x: %08x\n", where, u32val); 1349 "%03x: %08x\n", where, u32val);
1506 break; 1350 break;
1507 case LPFC_PCI_CFG_SIZE: /* browse all */ 1351 case LPFC_PCI_CFG_BROWSE: /* browse all */
1508 goto pcicfg_browse; 1352 goto pcicfg_browse;
1509 break; 1353 break;
1510 default: 1354 default:
@@ -1586,16 +1430,21 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1586 debug->op = LPFC_IDIAG_OP_WR; 1430 debug->op = LPFC_IDIAG_OP_WR;
1587 1431
1588 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); 1432 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
1589 if (rc) 1433 if (rc < 0)
1590 return rc; 1434 return rc;
1591 1435
1592 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { 1436 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
1437 /* Sanity check on PCI config read command line arguments */
1438 if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
1439 goto error_out;
1593 /* Read command from PCI config space, set up command fields */ 1440 /* Read command from PCI config space, set up command fields */
1594 where = idiag.cmd.data[0]; 1441 where = idiag.cmd.data[0];
1595 count = idiag.cmd.data[1]; 1442 count = idiag.cmd.data[1];
1596 if (count == LPFC_PCI_CFG_SIZE) { 1443 if (count == LPFC_PCI_CFG_BROWSE) {
1597 if (where != 0) 1444 if (where % sizeof(uint32_t))
1598 goto error_out; 1445 goto error_out;
1446 /* Starting offset to browse */
1447 idiag.offset.last_rd = where;
1599 } else if ((count != sizeof(uint8_t)) && 1448 } else if ((count != sizeof(uint8_t)) &&
1600 (count != sizeof(uint16_t)) && 1449 (count != sizeof(uint16_t)) &&
1601 (count != sizeof(uint32_t))) 1450 (count != sizeof(uint32_t)))
@@ -1621,6 +1470,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1621 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || 1470 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
1622 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || 1471 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
1623 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { 1472 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
1473 /* Sanity check on PCI config write command line arguments */
1474 if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
1475 goto error_out;
1624 /* Write command to PCI config space, read-modify-write */ 1476 /* Write command to PCI config space, read-modify-write */
1625 where = idiag.cmd.data[0]; 1477 where = idiag.cmd.data[0];
1626 count = idiag.cmd.data[1]; 1478 count = idiag.cmd.data[1];
@@ -1753,10 +1605,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1753 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1605 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1754 "Slow-path EQ information:\n"); 1606 "Slow-path EQ information:\n");
1755 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1607 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1756 "\tID [%02d], EQE-COUNT [%04d], " 1608 "\tEQID[%02d], "
1757 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1609 "QE-COUNT[%04d], QE-SIZE[%04d], "
1610 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1758 phba->sli4_hba.sp_eq->queue_id, 1611 phba->sli4_hba.sp_eq->queue_id,
1759 phba->sli4_hba.sp_eq->entry_count, 1612 phba->sli4_hba.sp_eq->entry_count,
1613 phba->sli4_hba.sp_eq->entry_size,
1760 phba->sli4_hba.sp_eq->host_index, 1614 phba->sli4_hba.sp_eq->host_index,
1761 phba->sli4_hba.sp_eq->hba_index); 1615 phba->sli4_hba.sp_eq->hba_index);
1762 1616
@@ -1765,10 +1619,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1765 "Fast-path EQ information:\n"); 1619 "Fast-path EQ information:\n");
1766 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 1620 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
1767 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1621 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1768 "\tID [%02d], EQE-COUNT [%04d], " 1622 "\tEQID[%02d], "
1769 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1623 "QE-COUNT[%04d], QE-SIZE[%04d], "
1624 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1770 phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, 1625 phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
1771 phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, 1626 phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
1627 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
1772 phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 1628 phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
1773 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); 1629 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
1774 } 1630 }
@@ -1776,89 +1632,101 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1776 1632
1777 /* Get mailbox complete queue information */ 1633 /* Get mailbox complete queue information */
1778 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1634 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1779 "Mailbox CQ information:\n"); 1635 "Slow-path MBX CQ information:\n");
1780 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1636 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1781 "\t\tAssociated EQ-ID [%02d]:\n", 1637 "Associated EQID[%02d]:\n",
1782 phba->sli4_hba.mbx_cq->assoc_qid); 1638 phba->sli4_hba.mbx_cq->assoc_qid);
1783 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1639 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1784 "\tID [%02d], CQE-COUNT [%04d], " 1640 "\tCQID[%02d], "
1785 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1641 "QE-COUNT[%04d], QE-SIZE[%04d], "
1642 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1786 phba->sli4_hba.mbx_cq->queue_id, 1643 phba->sli4_hba.mbx_cq->queue_id,
1787 phba->sli4_hba.mbx_cq->entry_count, 1644 phba->sli4_hba.mbx_cq->entry_count,
1645 phba->sli4_hba.mbx_cq->entry_size,
1788 phba->sli4_hba.mbx_cq->host_index, 1646 phba->sli4_hba.mbx_cq->host_index,
1789 phba->sli4_hba.mbx_cq->hba_index); 1647 phba->sli4_hba.mbx_cq->hba_index);
1790 1648
1791 /* Get slow-path complete queue information */ 1649 /* Get slow-path complete queue information */
1792 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1650 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1793 "Slow-path CQ information:\n"); 1651 "Slow-path ELS CQ information:\n");
1794 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1652 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1795 "\t\tAssociated EQ-ID [%02d]:\n", 1653 "Associated EQID[%02d]:\n",
1796 phba->sli4_hba.els_cq->assoc_qid); 1654 phba->sli4_hba.els_cq->assoc_qid);
1797 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1655 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1798 "\tID [%02d], CQE-COUNT [%04d], " 1656 "\tCQID [%02d], "
1799 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1657 "QE-COUNT[%04d], QE-SIZE[%04d], "
1658 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1800 phba->sli4_hba.els_cq->queue_id, 1659 phba->sli4_hba.els_cq->queue_id,
1801 phba->sli4_hba.els_cq->entry_count, 1660 phba->sli4_hba.els_cq->entry_count,
1661 phba->sli4_hba.els_cq->entry_size,
1802 phba->sli4_hba.els_cq->host_index, 1662 phba->sli4_hba.els_cq->host_index,
1803 phba->sli4_hba.els_cq->hba_index); 1663 phba->sli4_hba.els_cq->hba_index);
1804 1664
1805 /* Get fast-path complete queue information */ 1665 /* Get fast-path complete queue information */
1806 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1666 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1807 "Fast-path CQ information:\n"); 1667 "Fast-path FCP CQ information:\n");
1808 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 1668 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
1809 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1669 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1810 "\t\tAssociated EQ-ID [%02d]:\n", 1670 "Associated EQID[%02d]:\n",
1811 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); 1671 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
1812 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1672 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1813 "\tID [%02d], EQE-COUNT [%04d], " 1673 "\tCQID[%02d], "
1814 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1674 "QE-COUNT[%04d], QE-SIZE[%04d], "
1815 phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, 1675 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1816 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, 1676 phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
1817 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 1677 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
1818 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 1678 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
1679 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
1680 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
1819 } 1681 }
1820 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 1682 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
1821 1683
1822 /* Get mailbox queue information */ 1684 /* Get mailbox queue information */
1823 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1685 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1824 "Mailbox MQ information:\n"); 1686 "Slow-path MBX MQ information:\n");
1825 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1687 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1826 "\t\tAssociated CQ-ID [%02d]:\n", 1688 "Associated CQID[%02d]:\n",
1827 phba->sli4_hba.mbx_wq->assoc_qid); 1689 phba->sli4_hba.mbx_wq->assoc_qid);
1828 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1690 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1829 "\tID [%02d], MQE-COUNT [%04d], " 1691 "\tWQID[%02d], "
1830 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1692 "QE-COUNT[%04d], QE-SIZE[%04d], "
1693 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1831 phba->sli4_hba.mbx_wq->queue_id, 1694 phba->sli4_hba.mbx_wq->queue_id,
1832 phba->sli4_hba.mbx_wq->entry_count, 1695 phba->sli4_hba.mbx_wq->entry_count,
1696 phba->sli4_hba.mbx_wq->entry_size,
1833 phba->sli4_hba.mbx_wq->host_index, 1697 phba->sli4_hba.mbx_wq->host_index,
1834 phba->sli4_hba.mbx_wq->hba_index); 1698 phba->sli4_hba.mbx_wq->hba_index);
1835 1699
1836 /* Get slow-path work queue information */ 1700 /* Get slow-path work queue information */
1837 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1701 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1838 "Slow-path WQ information:\n"); 1702 "Slow-path ELS WQ information:\n");
1839 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1703 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1840 "\t\tAssociated CQ-ID [%02d]:\n", 1704 "Associated CQID[%02d]:\n",
1841 phba->sli4_hba.els_wq->assoc_qid); 1705 phba->sli4_hba.els_wq->assoc_qid);
1842 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1706 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1843 "\tID [%02d], WQE-COUNT [%04d], " 1707 "\tWQID[%02d], "
1844 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1708 "QE-COUNT[%04d], QE-SIZE[%04d], "
1709 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1845 phba->sli4_hba.els_wq->queue_id, 1710 phba->sli4_hba.els_wq->queue_id,
1846 phba->sli4_hba.els_wq->entry_count, 1711 phba->sli4_hba.els_wq->entry_count,
1712 phba->sli4_hba.els_wq->entry_size,
1847 phba->sli4_hba.els_wq->host_index, 1713 phba->sli4_hba.els_wq->host_index,
1848 phba->sli4_hba.els_wq->hba_index); 1714 phba->sli4_hba.els_wq->hba_index);
1849 1715
1850 /* Get fast-path work queue information */ 1716 /* Get fast-path work queue information */
1851 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1717 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1852 "Fast-path WQ information:\n"); 1718 "Fast-path FCP WQ information:\n");
1853 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { 1719 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
1854 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1720 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1855 "\t\tAssociated CQ-ID [%02d]:\n", 1721 "Associated CQID[%02d]:\n",
1856 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); 1722 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
1857 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1723 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1858 "\tID [%02d], WQE-COUNT [%04d], " 1724 "\tWQID[%02d], "
1859 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1725 "QE-COUNT[%04d], WQE-SIZE[%04d], "
1726 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1860 phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, 1727 phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
1861 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, 1728 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
1729 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
1862 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, 1730 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
1863 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); 1731 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
1864 } 1732 }
@@ -1868,26 +1736,597 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1868 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1736 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1869 "Slow-path RQ information:\n"); 1737 "Slow-path RQ information:\n");
1870 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1738 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1871 "\t\tAssociated CQ-ID [%02d]:\n", 1739 "Associated CQID[%02d]:\n",
1872 phba->sli4_hba.hdr_rq->assoc_qid); 1740 phba->sli4_hba.hdr_rq->assoc_qid);
1873 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1741 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1874 "\tID [%02d], RHQE-COUNT [%04d], " 1742 "\tHQID[%02d], "
1875 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1743 "QE-COUNT[%04d], QE-SIZE[%04d], "
1744 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1876 phba->sli4_hba.hdr_rq->queue_id, 1745 phba->sli4_hba.hdr_rq->queue_id,
1877 phba->sli4_hba.hdr_rq->entry_count, 1746 phba->sli4_hba.hdr_rq->entry_count,
1747 phba->sli4_hba.hdr_rq->entry_size,
1878 phba->sli4_hba.hdr_rq->host_index, 1748 phba->sli4_hba.hdr_rq->host_index,
1879 phba->sli4_hba.hdr_rq->hba_index); 1749 phba->sli4_hba.hdr_rq->hba_index);
1880 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1750 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1881 "\tID [%02d], RDQE-COUNT [%04d], " 1751 "\tDQID[%02d], "
1882 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1752 "QE-COUNT[%04d], QE-SIZE[%04d], "
1753 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1883 phba->sli4_hba.dat_rq->queue_id, 1754 phba->sli4_hba.dat_rq->queue_id,
1884 phba->sli4_hba.dat_rq->entry_count, 1755 phba->sli4_hba.dat_rq->entry_count,
1756 phba->sli4_hba.dat_rq->entry_size,
1885 phba->sli4_hba.dat_rq->host_index, 1757 phba->sli4_hba.dat_rq->host_index,
1886 phba->sli4_hba.dat_rq->hba_index); 1758 phba->sli4_hba.dat_rq->hba_index);
1887 1759
1888 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 1760 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1889} 1761}
1890 1762
1763/**
1764 * lpfc_idiag_que_param_check - queue access command parameter sanity check
1765 * @q: The pointer to queue structure.
1766 * @index: The index into a queue entry.
1767 * @count: The number of queue entries to access.
1768 *
1769 * Description:
1770 * The routine performs a sanity check on device queue access method commands.
1771 *
1772 * Returns:
1773 * This function returns -EINVAL when it fails the sanity check; otherwise, it
1774 * returns 0.
1775 **/
1776static int
1777lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
1778{
1779 /* Only support single entry read or browsing */
1780 if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE))
1781 return -EINVAL;
1782 if (index > q->entry_count - 1)
1783 return -EINVAL;
1784 return 0;
1785}
1786
1787/**
1788 * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
1789 * @pbuffer: The pointer to buffer to copy the read data into.
1790 * @pque: The pointer to the queue to be read.
1791 * @index: The index into the queue entry.
1792 *
1793 * Description:
1794 * This routine reads out a single entry from the given queue's index location
1795 * and copies it into the buffer provided.
1796 *
1797 * Returns:
1798 * This function returns 0 when it fails; otherwise, it returns the length of
1799 * the data read into the buffer provided.
1800 **/
1801static int
1802lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
1803 uint32_t index)
1804{
1805 int offset, esize;
1806 uint32_t *pentry;
1807
1808 if (!pbuffer || !pque)
1809 return 0;
1810
1811 esize = pque->entry_size;
1812 len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
1813 "QE-INDEX[%04d]:\n", index);
1814
1815 offset = 0;
1816 pentry = pque->qe[index].address;
1817 while (esize > 0) {
1818 len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
1819 "%08x ", *pentry);
1820 pentry++;
1821 offset += sizeof(uint32_t);
1822 esize -= sizeof(uint32_t);
1823 if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
1824 len += snprintf(pbuffer+len,
1825 LPFC_QUE_ACC_BUF_SIZE-len, "\n");
1826 }
1827 len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
1828
1829 return len;
1830}
1831
1832/**
1833 * lpfc_idiag_queacc_read - idiag debugfs read port queue
1834 * @file: The file pointer to read from.
1835 * @buf: The buffer to copy the data to.
1836 * @nbytes: The number of bytes to read.
1837 * @ppos: The position in the file to start reading from.
1838 *
1839 * Description:
1840 * This routine reads data from the @phba device queue memory according to the
1841 * idiag command, and copies to user @buf. Depending on the queue dump read
1842 * command setup, it does either a single queue entry read or browsing through
1843 * all entries of the queue.
1844 *
1845 * Returns:
1846 * This function returns the amount of data that was read (this could be less
1847 * than @nbytes if the end of the file was reached) or a negative error value.
1848 **/
1849static ssize_t
1850lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
1851 loff_t *ppos)
1852{
1853 struct lpfc_debug *debug = file->private_data;
1854 uint32_t last_index, index, count;
1855 struct lpfc_queue *pque = NULL;
1856 char *pbuffer;
1857 int len = 0;
1858
1859 /* This is a user read operation */
1860 debug->op = LPFC_IDIAG_OP_RD;
1861
1862 if (!debug->buffer)
1863 debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL);
1864 if (!debug->buffer)
1865 return 0;
1866 pbuffer = debug->buffer;
1867
1868 if (*ppos)
1869 return 0;
1870
1871 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
1872 index = idiag.cmd.data[2];
1873 count = idiag.cmd.data[3];
1874 pque = (struct lpfc_queue *)idiag.ptr_private;
1875 } else
1876 return 0;
1877
1878 /* Browse the queue starting from index */
1879 if (count == LPFC_QUE_ACC_BROWSE)
1880 goto que_browse;
1881
1882 /* Read a single entry from the queue */
1883 len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
1884
1885 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1886
1887que_browse:
1888
1889 /* Browse all entries from the queue */
1890 last_index = idiag.offset.last_rd;
1891 index = last_index;
1892
1893 while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) {
1894 len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
1895 index++;
1896 if (index > pque->entry_count - 1)
1897 break;
1898 }
1899
1900 /* Set up the offset for the next portion of the queue browse read */
1901 if (index > pque->entry_count - 1)
1902 index = 0;
1903 idiag.offset.last_rd = index;
1904
1905 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1906}
1907
1908/**
1909 * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands
1910 * @file: The file pointer to read from.
1911 * @buf: The buffer to copy the user data from.
1912 * @nbytes: The number of bytes to get.
1913 * @ppos: The position in the file to start reading from.
1914 *
1915 * This routine gets the debugfs idiag command struct from user space and then
1916 * performs the syntax check for port queue read (dump) or write (set) command
1917 * accordingly. In the case of a port queue read command, it sets up the command
1918 * in the idiag command struct for the following debugfs read operation. In
1919 * the case of port queue write operation, it executes the write operation
1920 * into the port queue entry accordingly.
1921 *
1922 * It returns the @nbytes passed in from debugfs user space when successful.
1923 * In case of error conditions, it returns proper error code back to the user
1924 * space.
1925 **/
1926static ssize_t
1927lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
1928 size_t nbytes, loff_t *ppos)
1929{
1930 struct lpfc_debug *debug = file->private_data;
1931 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1932 uint32_t qidx, quetp, queid, index, count, offset, value;
1933 uint32_t *pentry;
1934 struct lpfc_queue *pque;
1935 int rc;
1936
1937 /* This is a user write operation */
1938 debug->op = LPFC_IDIAG_OP_WR;
1939
1940 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
1941 if (rc < 0)
1942 return rc;
1943
1944 /* Get and sanity check the command fields */
1945 quetp = idiag.cmd.data[0];
1946 queid = idiag.cmd.data[1];
1947 index = idiag.cmd.data[2];
1948 count = idiag.cmd.data[3];
1949 offset = idiag.cmd.data[4];
1950 value = idiag.cmd.data[5];
1951
1952 /* Sanity check on command line arguments */
1953 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
1954 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
1955 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
1956 if (rc != LPFC_QUE_ACC_WR_CMD_ARG)
1957 goto error_out;
1958 if (count != 1)
1959 goto error_out;
1960 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
1961 if (rc != LPFC_QUE_ACC_RD_CMD_ARG)
1962 goto error_out;
1963 } else
1964 goto error_out;
1965
1966 switch (quetp) {
1967 case LPFC_IDIAG_EQ:
1968 /* Slow-path event queue */
1969 if (phba->sli4_hba.sp_eq->queue_id == queid) {
1970 /* Sanity check */
1971 rc = lpfc_idiag_que_param_check(
1972 phba->sli4_hba.sp_eq, index, count);
1973 if (rc)
1974 goto error_out;
1975 idiag.ptr_private = phba->sli4_hba.sp_eq;
1976 goto pass_check;
1977 }
1978 /* Fast-path event queue */
1979 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
1980 if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
1981 /* Sanity check */
1982 rc = lpfc_idiag_que_param_check(
1983 phba->sli4_hba.fp_eq[qidx],
1984 index, count);
1985 if (rc)
1986 goto error_out;
1987 idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
1988 goto pass_check;
1989 }
1990 }
1991 goto error_out;
1992 break;
1993 case LPFC_IDIAG_CQ:
1994 /* MBX complete queue */
1995 if (phba->sli4_hba.mbx_cq->queue_id == queid) {
1996 /* Sanity check */
1997 rc = lpfc_idiag_que_param_check(
1998 phba->sli4_hba.mbx_cq, index, count);
1999 if (rc)
2000 goto error_out;
2001 idiag.ptr_private = phba->sli4_hba.mbx_cq;
2002 goto pass_check;
2003 }
2004 /* ELS complete queue */
2005 if (phba->sli4_hba.els_cq->queue_id == queid) {
2006 /* Sanity check */
2007 rc = lpfc_idiag_que_param_check(
2008 phba->sli4_hba.els_cq, index, count);
2009 if (rc)
2010 goto error_out;
2011 idiag.ptr_private = phba->sli4_hba.els_cq;
2012 goto pass_check;
2013 }
2014 /* FCP complete queue */
2015 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
2016 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
2017 /* Sanity check */
2018 rc = lpfc_idiag_que_param_check(
2019 phba->sli4_hba.fcp_cq[qidx],
2020 index, count);
2021 if (rc)
2022 goto error_out;
2023 idiag.ptr_private =
2024 phba->sli4_hba.fcp_cq[qidx];
2025 goto pass_check;
2026 }
2027 }
2028 goto error_out;
2029 break;
2030 case LPFC_IDIAG_MQ:
2031 /* MBX work queue */
2032 if (phba->sli4_hba.mbx_wq->queue_id == queid) {
2033 /* Sanity check */
2034 rc = lpfc_idiag_que_param_check(
2035 phba->sli4_hba.mbx_wq, index, count);
2036 if (rc)
2037 goto error_out;
2038 idiag.ptr_private = phba->sli4_hba.mbx_wq;
2039 goto pass_check;
2040 }
2041 break;
2042 case LPFC_IDIAG_WQ:
2043 /* ELS work queue */
2044 if (phba->sli4_hba.els_wq->queue_id == queid) {
2045 /* Sanity check */
2046 rc = lpfc_idiag_que_param_check(
2047 phba->sli4_hba.els_wq, index, count);
2048 if (rc)
2049 goto error_out;
2050 idiag.ptr_private = phba->sli4_hba.els_wq;
2051 goto pass_check;
2052 }
2053 /* FCP work queue */
2054 for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
2055 if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
2056 /* Sanity check */
2057 rc = lpfc_idiag_que_param_check(
2058 phba->sli4_hba.fcp_wq[qidx],
2059 index, count);
2060 if (rc)
2061 goto error_out;
2062 idiag.ptr_private =
2063 phba->sli4_hba.fcp_wq[qidx];
2064 goto pass_check;
2065 }
2066 }
2067 goto error_out;
2068 break;
2069 case LPFC_IDIAG_RQ:
2070 /* HDR queue */
2071 if (phba->sli4_hba.hdr_rq->queue_id == queid) {
2072 /* Sanity check */
2073 rc = lpfc_idiag_que_param_check(
2074 phba->sli4_hba.hdr_rq, index, count);
2075 if (rc)
2076 goto error_out;
2077 idiag.ptr_private = phba->sli4_hba.hdr_rq;
2078 goto pass_check;
2079 }
2080 /* DAT queue */
2081 if (phba->sli4_hba.dat_rq->queue_id == queid) {
2082 /* Sanity check */
2083 rc = lpfc_idiag_que_param_check(
2084 phba->sli4_hba.dat_rq, index, count);
2085 if (rc)
2086 goto error_out;
2087 idiag.ptr_private = phba->sli4_hba.dat_rq;
2088 goto pass_check;
2089 }
2090 goto error_out;
2091 break;
2092 default:
2093 goto error_out;
2094 break;
2095 }
2096
2097pass_check:
2098
2099 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
2100 if (count == LPFC_QUE_ACC_BROWSE)
2101 idiag.offset.last_rd = index;
2102 }
2103
2104 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
2105 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
2106 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
2107 /* Additional sanity checks on write operation */
2108 pque = (struct lpfc_queue *)idiag.ptr_private;
2109 if (offset > pque->entry_size/sizeof(uint32_t) - 1)
2110 goto error_out;
2111 pentry = pque->qe[index].address;
2112 pentry += offset;
2113 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
2114 *pentry = value;
2115 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST)
2116 *pentry |= value;
2117 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL)
2118 *pentry &= ~value;
2119 }
2120 return nbytes;
2121
2122error_out:
2123 /* Clean out command structure on command error out */
2124 memset(&idiag, 0, sizeof(idiag));
2125 return -EINVAL;
2126}
2127
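Reading lpfc_idiag_queacc_write() back, a queAcc command line is an opcode followed by up to six data words. The struct below is purely illustrative (the driver only ever sees the raw data[] array), and the queue id of 6 is an assumed example value:

/* Illustrative breakdown of a queAcc command; only the data[] positions
 * and the opcode/type/browse constants are taken from the driver.
 */
#include <stdio.h>

struct queacc_cmd {
	unsigned int opcode;	/* 0x11 rd, 0x12 wr, 0x13 set, 0x14 clear  */
	unsigned int qtype;	/* data[0]: 1=EQ 2=CQ 3=MQ 4=WQ 5=RQ        */
	unsigned int qid;	/* data[1]: queue id to look up             */
	unsigned int index;	/* data[2]: queue entry index               */
	unsigned int count;	/* data[3]: 1, or 0xffff to browse (rd only)*/
	unsigned int offset;	/* data[4]: word offset in entry (wr/st/cl) */
	unsigned int value;	/* data[5]: value or bitmask (wr/st/cl)     */
};

int main(void)
{
	/* set bit 0 of word 0 in entry 0 of the WQ with (assumed) id 6 */
	struct queacc_cmd c = { 0x13, 4, 6, 0, 1, 0, 0x1 };

	printf("echo 0x%x %u %u %u %u %u 0x%x > queAcc\n",
	       c.opcode, c.qtype, c.qid, c.index, c.count,
	       c.offset, c.value);
	return 0;
}
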
2128/**
2129 * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
2130 * @phba: The pointer to hba structure.
2131 * @pbuffer: The pointer to the buffer to copy the data to.
2132 * @len: The length of data already copied into @pbuffer.
2133 * @drbregid: The id of the doorbell register to read.
2134 *
2135 * Description:
2136 * This routine reads a doorbell register and formats its content into the
2137 * buffer pointed to by @pbuffer.
2138 *
2139 * Returns:
2140 * This function returns the amount of data that was copied into @pbuffer.
2141 **/
2142static int
2143lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
2144 int len, uint32_t drbregid)
2145{
2146
2147 if (!pbuffer)
2148 return 0;
2149
2150 switch (drbregid) {
2151 case LPFC_DRB_EQCQ:
2152 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2153 "EQCQ-DRB-REG: 0x%08x\n",
2154 readl(phba->sli4_hba.EQCQDBregaddr));
2155 break;
2156 case LPFC_DRB_MQ:
2157 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2158 "MQ-DRB-REG: 0x%08x\n",
2159 readl(phba->sli4_hba.MQDBregaddr));
2160 break;
2161 case LPFC_DRB_WQ:
2162 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2163 "WQ-DRB-REG: 0x%08x\n",
2164 readl(phba->sli4_hba.WQDBregaddr));
2165 break;
2166 case LPFC_DRB_RQ:
2167 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2168 "RQ-DRB-REG: 0x%08x\n",
2169 readl(phba->sli4_hba.RQDBregaddr));
2170 break;
2171 default:
2172 break;
2173 }
2174
2175 return len;
2176}
2177
2178/**
2179 * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell
2180 * @file: The file pointer to read from.
2181 * @buf: The buffer to copy the data to.
2182 * @nbytes: The number of bytes to read.
2183 * @ppos: The position in the file to start reading from.
2184 *
2185 * Description:
2186 * This routine reads data from the @phba device doorbell register according
2187 * to the idiag command, and copies to user @buf. Depending on the doorbell
2188 * register read command setup, it does either a single doorbell register
2189 * read or dump all doorbell registers.
2190 *
2191 * Returns:
2192 * This function returns the amount of data that was read (this could be less
2193 * than @nbytes if the end of the file was reached) or a negative error value.
2194 **/
2195static ssize_t
2196lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
2197 loff_t *ppos)
2198{
2199 struct lpfc_debug *debug = file->private_data;
2200 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2201 uint32_t drb_reg_id, i;
2202 char *pbuffer;
2203 int len = 0;
2204
2205 /* This is a user read operation */
2206 debug->op = LPFC_IDIAG_OP_RD;
2207
2208 if (!debug->buffer)
2209 debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL);
2210 if (!debug->buffer)
2211 return 0;
2212 pbuffer = debug->buffer;
2213
2214 if (*ppos)
2215 return 0;
2216
2217 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
2218 drb_reg_id = idiag.cmd.data[0];
2219 else
2220 return 0;
2221
2222 if (drb_reg_id == LPFC_DRB_ACC_ALL)
2223 for (i = 1; i <= LPFC_DRB_MAX; i++)
2224 len = lpfc_idiag_drbacc_read_reg(phba,
2225 pbuffer, len, i);
2226 else
2227 len = lpfc_idiag_drbacc_read_reg(phba,
2228 pbuffer, len, drb_reg_id);
2229
2230 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2231}
2232
2233/**
2234 * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands
2235 * @file: The file pointer to read from.
2236 * @buf: The buffer to copy the user data from.
2237 * @nbytes: The number of bytes to get.
2238 * @ppos: The position in the file to start reading from.
2239 *
2240 * This routine gets the debugfs idiag command struct from user space and then
2241 * performs the syntax check for port doorbell register read (dump) or write
2242 * (set) command accordingly. In the case of a doorbell register read command, it sets
2243 * up the command in the idiag command struct for the following debugfs read
2244 * operation. In the case of port doorbell register write operation, it
2245 * executes the write operation into the port doorbell register accordingly.
2246 *
2247 * It returns the @nbytes passed in from debugfs user space when successful.
2248 * In case of error conditions, it returns proper error code back to the user
2249 * space.
2250 **/
2251static ssize_t
2252lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2253 size_t nbytes, loff_t *ppos)
2254{
2255 struct lpfc_debug *debug = file->private_data;
2256 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2257 uint32_t drb_reg_id, value, reg_val;
2258 void __iomem *drb_reg;
2259 int rc;
2260
2261 /* This is a user write operation */
2262 debug->op = LPFC_IDIAG_OP_WR;
2263
2264 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
2265 if (rc < 0)
2266 return rc;
2267
2268 /* Sanity check on command line arguments */
2269 drb_reg_id = idiag.cmd.data[0];
2270 value = idiag.cmd.data[1];
2271
2272 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
2273 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
2274 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
2275 if (rc != LPFC_DRB_ACC_WR_CMD_ARG)
2276 goto error_out;
2277 if (drb_reg_id > LPFC_DRB_MAX)
2278 goto error_out;
2279 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) {
2280 if (rc != LPFC_DRB_ACC_RD_CMD_ARG)
2281 goto error_out;
2282 if ((drb_reg_id > LPFC_DRB_MAX) &&
2283 (drb_reg_id != LPFC_DRB_ACC_ALL))
2284 goto error_out;
2285 } else
2286 goto error_out;
2287
2288 /* Perform the write access operation */
2289 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
2290 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
2291 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
2292 switch (drb_reg_id) {
2293 case LPFC_DRB_EQCQ:
2294 drb_reg = phba->sli4_hba.EQCQDBregaddr;
2295 break;
2296 case LPFC_DRB_MQ:
2297 drb_reg = phba->sli4_hba.MQDBregaddr;
2298 break;
2299 case LPFC_DRB_WQ:
2300 drb_reg = phba->sli4_hba.WQDBregaddr;
2301 break;
2302 case LPFC_DRB_RQ:
2303 drb_reg = phba->sli4_hba.RQDBregaddr;
2304 break;
2305 default:
2306 goto error_out;
2307 }
2308
2309 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR)
2310 reg_val = value;
2311 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) {
2312 reg_val = readl(drb_reg);
2313 reg_val |= value;
2314 }
2315 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
2316 reg_val = readl(drb_reg);
2317 reg_val &= ~value;
2318 }
2319 writel(reg_val, drb_reg);
2320 readl(drb_reg); /* flush */
2321 }
2322 return nbytes;
2323
2324error_out:
2325 /* Clean out command structure on command error out */
2326 memset(&idiag, 0, sizeof(idiag));
2327 return -EINVAL;
2328}
2329
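drbAcc uses the same convention with fewer arguments: a read takes a single register id (1=EQCQ, 2=MQ, 3=WQ, 4=RQ, or 0xffff for all), while write/set/clear take a register id and a value. The lines printed below are illustrative only; the 0x2 bitmask is a placeholder, not a meaningful doorbell setting:

/* Illustrative drbAcc command lines, printed rather than executed. */
#include <stdio.h>

int main(void)
{
	/* opcodes: 0x21 read, 0x22 write, 0x23 set bits, 0x24 clear bits */
	puts("echo 0x21 0xffff > drbAcc   # queue a dump of all four registers");
	puts("cat drbAcc                  # EQCQ/MQ/WQ/RQ doorbell values");
	puts("echo 0x24 1 0x2 > drbAcc    # clear bit 1 in the EQCQ doorbell");
	return 0;
}
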
1891#undef lpfc_debugfs_op_disc_trc 2330#undef lpfc_debugfs_op_disc_trc
1892static const struct file_operations lpfc_debugfs_op_disc_trc = { 2331static const struct file_operations lpfc_debugfs_op_disc_trc = {
1893 .owner = THIS_MODULE, 2332 .owner = THIS_MODULE,
@@ -1986,6 +2425,26 @@ static const struct file_operations lpfc_idiag_op_queInfo = {
1986 .release = lpfc_idiag_release, 2425 .release = lpfc_idiag_release,
1987}; 2426};
1988 2427
2428#undef lpfc_idiag_op_queacc
2429static const struct file_operations lpfc_idiag_op_queAcc = {
2430 .owner = THIS_MODULE,
2431 .open = lpfc_idiag_open,
2432 .llseek = lpfc_debugfs_lseek,
2433 .read = lpfc_idiag_queacc_read,
2434 .write = lpfc_idiag_queacc_write,
2435 .release = lpfc_idiag_cmd_release,
2436};
2437
2438#undef lpfc_idiag_op_drbacc
2439static const struct file_operations lpfc_idiag_op_drbAcc = {
2440 .owner = THIS_MODULE,
2441 .open = lpfc_idiag_open,
2442 .llseek = lpfc_debugfs_lseek,
2443 .read = lpfc_idiag_drbacc_read,
2444 .write = lpfc_idiag_drbacc_write,
2445 .release = lpfc_idiag_cmd_release,
2446};
2447
1989#endif 2448#endif
1990 2449
1991/** 2450/**
@@ -2261,6 +2720,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2261 } 2720 }
2262 } 2721 }
2263 2722
2723 /* iDiag access PCI function queue */
2724 snprintf(name, sizeof(name), "queAcc");
2725 if (!phba->idiag_que_acc) {
2726 phba->idiag_que_acc =
2727 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
2728 phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
2729 if (!phba->idiag_que_acc) {
2730 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2731 "2926 Can't create idiag debugfs\n");
2732 goto debug_failed;
2733 }
2734 }
2735
2736 /* iDiag access PCI function doorbell registers */
2737 snprintf(name, sizeof(name), "drbAcc");
2738 if (!phba->idiag_drb_acc) {
2739 phba->idiag_drb_acc =
2740 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
2741 phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
2742 if (!phba->idiag_drb_acc) {
2743 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2744 "2927 Can't create idiag debugfs\n");
2745 goto debug_failed;
2746 }
2747 }
2748
2264debug_failed: 2749debug_failed:
2265 return; 2750 return;
2266#endif 2751#endif
@@ -2339,6 +2824,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2339 * iDiag release 2824 * iDiag release
2340 */ 2825 */
2341 if (phba->sli_rev == LPFC_SLI_REV4) { 2826 if (phba->sli_rev == LPFC_SLI_REV4) {
2827 if (phba->idiag_drb_acc) {
2828 /* iDiag drbAcc */
2829 debugfs_remove(phba->idiag_drb_acc);
2830 phba->idiag_drb_acc = NULL;
2831 }
2832 if (phba->idiag_que_acc) {
2833 /* iDiag queAcc */
2834 debugfs_remove(phba->idiag_que_acc);
2835 phba->idiag_que_acc = NULL;
2836 }
2342 if (phba->idiag_que_info) { 2837 if (phba->idiag_que_info) {
2343 /* iDiag queInfo */ 2838 /* iDiag queInfo */
2344 debugfs_remove(phba->idiag_que_info); 2839 debugfs_remove(phba->idiag_que_info);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 91b9a9427cda..6525a5e62d27 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -39,13 +39,42 @@
39/* hbqinfo output buffer size */ 39/* hbqinfo output buffer size */
40#define LPFC_HBQINFO_SIZE 8192 40#define LPFC_HBQINFO_SIZE 8192
41 41
42/* rdPciConf output buffer size */ 42/* pciConf */
43#define LPFC_PCI_CFG_BROWSE 0xffff
44#define LPFC_PCI_CFG_RD_CMD_ARG 2
45#define LPFC_PCI_CFG_WR_CMD_ARG 3
43#define LPFC_PCI_CFG_SIZE 4096 46#define LPFC_PCI_CFG_SIZE 4096
44#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2) 47#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
45#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) 48#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
46 49
47/* queue info output buffer size */ 50/* queue info */
48#define LPFC_QUE_INFO_GET_BUF_SIZE 2048 51#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
52
53/* queue acc */
54#define LPFC_QUE_ACC_BROWSE 0xffff
55#define LPFC_QUE_ACC_RD_CMD_ARG 4
56#define LPFC_QUE_ACC_WR_CMD_ARG 6
57#define LPFC_QUE_ACC_BUF_SIZE 4096
58#define LPFC_QUE_ACC_SIZE (LPFC_QUE_ACC_BUF_SIZE/2)
59
60#define LPFC_IDIAG_EQ 1
61#define LPFC_IDIAG_CQ 2
62#define LPFC_IDIAG_MQ 3
63#define LPFC_IDIAG_WQ 4
64#define LPFC_IDIAG_RQ 5
65
66/* doorbell acc */
67#define LPFC_DRB_ACC_ALL 0xffff
68#define LPFC_DRB_ACC_RD_CMD_ARG 1
69#define LPFC_DRB_ACC_WR_CMD_ARG 2
70#define LPFC_DRB_ACC_BUF_SIZE 256
71
72#define LPFC_DRB_EQCQ 1
73#define LPFC_DRB_MQ 2
74#define LPFC_DRB_WQ 3
75#define LPFC_DRB_RQ 4
76
77#define LPFC_DRB_MAX 4
49 78
50#define SIZE_U8 sizeof(uint8_t) 79#define SIZE_U8 sizeof(uint8_t)
51#define SIZE_U16 sizeof(uint16_t) 80#define SIZE_U16 sizeof(uint16_t)
@@ -73,13 +102,23 @@ struct lpfc_idiag_offset {
73 uint32_t last_rd; 102 uint32_t last_rd;
74}; 103};
75 104
76#define LPFC_IDIAG_CMD_DATA_SIZE 4 105#define LPFC_IDIAG_CMD_DATA_SIZE 8
77struct lpfc_idiag_cmd { 106struct lpfc_idiag_cmd {
78 uint32_t opcode; 107 uint32_t opcode;
79#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001 108#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
80#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002 109#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
81#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 110#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
82#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 111#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
112
113#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
114#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
115#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
116#define LPFC_IDIAG_CMD_QUEACC_CL 0x00000014
117
118#define LPFC_IDIAG_CMD_DRBACC_RD 0x00000021
119#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
120#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
121#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
83 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; 122 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
84}; 123};
85 124
@@ -87,6 +126,7 @@ struct lpfc_idiag {
87 uint32_t active; 126 uint32_t active;
88 struct lpfc_idiag_cmd cmd; 127 struct lpfc_idiag_cmd cmd;
89 struct lpfc_idiag_offset offset; 128 struct lpfc_idiag_offset offset;
129 void *ptr_private;
90}; 130};
91#endif 131#endif
92 132
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d34b69f9cdb1..e2c452467c8b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -670,6 +670,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
670 * Driver needs to re-reg VPI in order for f/w 670 * Driver needs to re-reg VPI in order for f/w
671 * to update the MAC address. 671 * to update the MAC address.
672 */ 672 */
673 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
673 lpfc_register_new_vport(phba, vport, ndlp); 674 lpfc_register_new_vport(phba, vport, ndlp);
674 return 0; 675 return 0;
675 } 676 }
@@ -869,8 +870,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
869 */ 870 */
870 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 871 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
871 (phba->fcf.fcf_flag & FCF_DISCOVERY) && 872 (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
872 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && 873 !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
873 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { 874 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
874 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 875 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
875 "2611 FLOGI failed on FCF (x%x), " 876 "2611 FLOGI failed on FCF (x%x), "
876 "status:x%x/x%x, tmo:x%x, perform " 877 "status:x%x/x%x, tmo:x%x, perform "
@@ -1085,14 +1086,15 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1085 if (sp->cmn.fcphHigh < FC_PH3) 1086 if (sp->cmn.fcphHigh < FC_PH3)
1086 sp->cmn.fcphHigh = FC_PH3; 1087 sp->cmn.fcphHigh = FC_PH3;
1087 1088
1088 if ((phba->sli_rev == LPFC_SLI_REV4) && 1089 if (phba->sli_rev == LPFC_SLI_REV4) {
1089 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1090 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1090 LPFC_SLI_INTF_IF_TYPE_0)) { 1091 LPFC_SLI_INTF_IF_TYPE_0) {
1091 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); 1092 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1092 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); 1093 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1093 /* FLOGI needs to be 3 for WQE FCFI */ 1094 /* FLOGI needs to be 3 for WQE FCFI */
1094 /* Set the fcfi to the fcfi we registered with */ 1095 /* Set the fcfi to the fcfi we registered with */
1095 elsiocb->iocb.ulpContext = phba->fcf.fcfi; 1096 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1097 }
1096 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1098 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1097 sp->cmn.request_multiple_Nport = 1; 1099 sp->cmn.request_multiple_Nport = 1;
1098 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1100 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
@@ -4107,13 +4109,13 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
4107 pcmd += sizeof(uint32_t); 4109 pcmd += sizeof(uint32_t);
4108 rrq = (struct RRQ *)pcmd; 4110 rrq = (struct RRQ *)pcmd;
4109 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4111 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
4110 rxid = be16_to_cpu(bf_get(rrq_rxid, rrq)); 4112 rxid = bf_get(rrq_rxid, rrq);
4111 4113
4112 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4114 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4113 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4115 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4114 " x%x x%x\n", 4116 " x%x x%x\n",
4115 be32_to_cpu(bf_get(rrq_did, rrq)), 4117 be32_to_cpu(bf_get(rrq_did, rrq)),
4116 be16_to_cpu(bf_get(rrq_oxid, rrq)), 4118 bf_get(rrq_oxid, rrq),
4117 rxid, 4119 rxid,
4118 iocb->iotag, iocb->iocb.ulpContext); 4120 iocb->iotag, iocb->iocb.ulpContext);
4119 4121
@@ -4121,7 +4123,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
4121 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4123 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4122 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4124 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
4123 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4125 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
4124 xri = be16_to_cpu(bf_get(rrq_oxid, rrq)); 4126 xri = bf_get(rrq_oxid, rrq);
4125 else 4127 else
4126 xri = rxid; 4128 xri = rxid;
4127 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4129 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
@@ -7290,8 +7292,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7290 struct lpfc_vport *vport = cmdiocb->vport; 7292 struct lpfc_vport *vport = cmdiocb->vport;
7291 IOCB_t *irsp; 7293 IOCB_t *irsp;
7292 struct lpfc_nodelist *ndlp; 7294 struct lpfc_nodelist *ndlp;
7293 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 7295 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7294 7296
7297 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
7295 irsp = &rspiocb->iocb; 7298 irsp = &rspiocb->iocb;
7296 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7299 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7297 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 7300 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
@@ -7302,6 +7305,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7302 7305
7303 /* Trigger the release of the ndlp after logo */ 7306 /* Trigger the release of the ndlp after logo */
7304 lpfc_nlp_put(ndlp); 7307 lpfc_nlp_put(ndlp);
7308
7309 /* NPIV LOGO completes to NPort <nlp_DID> */
7310 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7311 "2928 NPIV LOGO completes to NPort x%x "
7312 "Data: x%x x%x x%x x%x\n",
7313 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
7314 irsp->ulpTimeout, vport->num_disc_nodes);
7315
7316 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
7317 spin_lock_irq(shost->host_lock);
7318 vport->fc_flag &= ~FC_FABRIC;
7319 spin_unlock_irq(shost->host_lock);
7320 }
7305} 7321}
7306 7322
7307/** 7323/**
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 301498301a8f..7a35df5e2038 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3569,6 +3569,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3569 "rport add: did:x%x flg:x%x type x%x", 3569 "rport add: did:x%x flg:x%x type x%x",
3570 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3570 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3571 3571
3572 /* Don't add the remote port if unloading. */
3573 if (vport->load_flag & FC_UNLOADING)
3574 return;
3575
3572 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 3576 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
3573 if (!rport || !get_device(&rport->dev)) { 3577 if (!rport || !get_device(&rport->dev)) {
3574 dev_printk(KERN_WARNING, &phba->pcidev->dev, 3578 dev_printk(KERN_WARNING, &phba->pcidev->dev,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 8433ac0d9fb4..4dff668ebdad 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1059,6 +1059,11 @@ struct rq_context {
1059#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ 1059#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
1060#define lpfc_rq_context_rqe_size_MASK 0x0000000F 1060#define lpfc_rq_context_rqe_size_MASK 0x0000000F
1061#define lpfc_rq_context_rqe_size_WORD word0 1061#define lpfc_rq_context_rqe_size_WORD word0
1062#define LPFC_RQE_SIZE_8 2
1063#define LPFC_RQE_SIZE_16 3
1064#define LPFC_RQE_SIZE_32 4
1065#define LPFC_RQE_SIZE_64 5
1066#define LPFC_RQE_SIZE_128 6
1062#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ 1067#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
1063#define lpfc_rq_context_page_size_MASK 0x000000FF 1068#define lpfc_rq_context_page_size_MASK 0x000000FF
1064#define lpfc_rq_context_page_size_WORD word0 1069#define lpfc_rq_context_page_size_WORD word0
@@ -2108,6 +2113,8 @@ struct lpfc_mbx_pc_sli4_params {
2108#define sgl_pp_align_WORD word12 2113#define sgl_pp_align_WORD word12
2109 uint32_t rsvd_13_63[51]; 2114 uint32_t rsvd_13_63[51];
2110}; 2115};
2116#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
2117 &(~((SLI4_PAGE_SIZE)-1)))
2111 2118
2112struct lpfc_sli4_parameters { 2119struct lpfc_sli4_parameters {
2113 uint32_t word0; 2120 uint32_t word0;
@@ -2491,6 +2498,9 @@ struct wqe_common {
2491#define wqe_reqtag_SHIFT 0 2498#define wqe_reqtag_SHIFT 0
2492#define wqe_reqtag_MASK 0x0000FFFF 2499#define wqe_reqtag_MASK 0x0000FFFF
2493#define wqe_reqtag_WORD word9 2500#define wqe_reqtag_WORD word9
2501#define wqe_temp_rpi_SHIFT 16
2502#define wqe_temp_rpi_MASK 0x0000FFFF
2503#define wqe_temp_rpi_WORD word9
2494#define wqe_rcvoxid_SHIFT 16 2504#define wqe_rcvoxid_SHIFT 16
2495#define wqe_rcvoxid_MASK 0x0000FFFF 2505#define wqe_rcvoxid_MASK 0x0000FFFF
2496#define wqe_rcvoxid_WORD word9 2506#define wqe_rcvoxid_WORD word9
@@ -2524,7 +2534,7 @@ struct wqe_common {
2524#define wqe_wqes_WORD word10 2534#define wqe_wqes_WORD word10
2525/* Note that this field overlaps above fields */ 2535/* Note that this field overlaps above fields */
2526#define wqe_wqid_SHIFT 1 2536#define wqe_wqid_SHIFT 1
2527#define wqe_wqid_MASK 0x0000007f 2537#define wqe_wqid_MASK 0x00007fff
2528#define wqe_wqid_WORD word10 2538#define wqe_wqid_WORD word10
2529#define wqe_pri_SHIFT 16 2539#define wqe_pri_SHIFT 16
2530#define wqe_pri_MASK 0x00000007 2540#define wqe_pri_MASK 0x00000007
@@ -2621,7 +2631,11 @@ struct xmit_els_rsp64_wqe {
2621 uint32_t rsvd4; 2631 uint32_t rsvd4;
2622 struct wqe_did wqe_dest; 2632 struct wqe_did wqe_dest;
2623 struct wqe_common wqe_com; /* words 6-11 */ 2633 struct wqe_common wqe_com; /* words 6-11 */
2624 uint32_t rsvd_12_15[4]; 2634 uint32_t word12;
2635#define wqe_rsp_temp_rpi_SHIFT 0
2636#define wqe_rsp_temp_rpi_MASK 0x0000FFFF
2637#define wqe_rsp_temp_rpi_WORD word12
2638 uint32_t rsvd_13_15[3];
2625}; 2639};
2626 2640
2627struct xmit_bls_rsp64_wqe { 2641struct xmit_bls_rsp64_wqe {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 505f88443b5c..7dda036a1af3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3209,9 +3209,9 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3209 phba->sli4_hba.link_state.logical_speed = 3209 phba->sli4_hba.link_state.logical_speed =
3210 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3210 bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3211 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3211 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3212 "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x " 3212 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3213 "LA Type:x%x Port Type:%d Port Number:%d Logical " 3213 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3214 "speed:%dMbps Fault:%d\n", 3214 "Logical speed:%dMbps Fault:%d\n",
3215 phba->sli4_hba.link_state.speed, 3215 phba->sli4_hba.link_state.speed,
3216 phba->sli4_hba.link_state.topology, 3216 phba->sli4_hba.link_state.topology,
3217 phba->sli4_hba.link_state.status, 3217 phba->sli4_hba.link_state.status,
@@ -4906,6 +4906,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4906 uint16_t rpi_limit, curr_rpi_range; 4906 uint16_t rpi_limit, curr_rpi_range;
4907 struct lpfc_dmabuf *dmabuf; 4907 struct lpfc_dmabuf *dmabuf;
4908 struct lpfc_rpi_hdr *rpi_hdr; 4908 struct lpfc_rpi_hdr *rpi_hdr;
4909 uint32_t rpi_count;
4909 4910
4910 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4911 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4911 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4912 phba->sli4_hba.max_cfg_param.max_rpi - 1;
@@ -4920,7 +4921,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4920 * and to allow the full max_rpi range per port. 4921 * and to allow the full max_rpi range per port.
4921 */ 4922 */
4922 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4923 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4923 return NULL; 4924 rpi_count = rpi_limit - curr_rpi_range;
4925 else
4926 rpi_count = LPFC_RPI_HDR_COUNT;
4924 4927
4925 /* 4928 /*
4926 * First allocate the protocol header region for the port. The 4929 * First allocate the protocol header region for the port. The
@@ -4961,7 +4964,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4961 * The next_rpi stores the next module-64 rpi value to post 4964 * The next_rpi stores the next module-64 rpi value to post
4962 * in any subsequent rpi memory region postings. 4965 * in any subsequent rpi memory region postings.
4963 */ 4966 */
4964 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4967 phba->sli4_hba.next_rpi += rpi_count;
4965 spin_unlock_irq(&phba->hbalock); 4968 spin_unlock_irq(&phba->hbalock);
4966 return rpi_hdr; 4969 return rpi_hdr;
4967 4970
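
Taken together, the three hunks above stop refusing to post an RPI header region when fewer than LPFC_RPI_HDR_COUNT RPIs remain and instead clamp the advance to the configured limit. A minimal standalone sketch of that clamping follows; the constant and names are stand-ins, not the driver's.

#include <stdio.h>

#define EX_RPI_HDR_COUNT 64u   /* stand-in for LPFC_RPI_HDR_COUNT */

/* How many RPIs the next header region may cover without running
 * past the configured limit. */
static unsigned ex_rpi_count(unsigned curr, unsigned limit)
{
        if (curr + (EX_RPI_HDR_COUNT - 1) > limit)
                return limit - curr;        /* partial, final region */
        return EX_RPI_HDR_COUNT;            /* full region */
}

int main(void)
{
        unsigned next_rpi = 200, limit = 230;

        next_rpi += ex_rpi_count(next_rpi, limit);   /* advances by 30, not 64 */
        printf("next_rpi=%u\n", next_rpi);
        return 0;
}
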
@@ -7004,7 +7007,8 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7004 lpfc_sli4_bar0_register_memmap(phba, if_type); 7007 lpfc_sli4_bar0_register_memmap(phba, if_type);
7005 } 7008 }
7006 7009
7007 if (pci_resource_start(pdev, 2)) { 7010 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7011 (pci_resource_start(pdev, 2))) {
7008 /* 7012 /*
7009 * Map SLI4 if type 0 HBA Control Register base to a kernel 7013 * Map SLI4 if type 0 HBA Control Register base to a kernel
7010 * virtual address and setup the registers. 7014 * virtual address and setup the registers.
@@ -7021,7 +7025,8 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7021 lpfc_sli4_bar1_register_memmap(phba); 7025 lpfc_sli4_bar1_register_memmap(phba);
7022 } 7026 }
7023 7027
7024 if (pci_resource_start(pdev, 4)) { 7028 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7029 (pci_resource_start(pdev, 4))) {
7025 /* 7030 /*
7026 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7031 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7027 * virtual address and setup the registers. 7032 * virtual address and setup the registers.
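
Both BAR hunks add an interface-type check so the control and doorbell BARs are only mapped on if_type 0 parts. The sketch below shows the shape of that gating with hypothetical helpers standing in for the PCI and register-map code.

#include <stdbool.h>
#include <stdio.h>

enum ex_if_type { EX_IF_TYPE_0, EX_IF_TYPE_2 };

/* Pretend BAR lookup: nonzero means the BAR exists on this device. */
static unsigned long ex_bar_start(int bar)
{
        return (bar == 2 || bar == 4) ? 0xfe000000UL + (unsigned long)bar : 0;
}

/* Only if_type 0 exposes the control/doorbell BARs, so BAR presence
 * alone is no longer enough to decide whether to map it. */
static bool ex_should_map(enum ex_if_type t, int bar)
{
        return t == EX_IF_TYPE_0 && ex_bar_start(bar) != 0;
}

int main(void)
{
        printf("type0 bar2=%d type2 bar2=%d\n",
               ex_should_map(EX_IF_TYPE_0, 2), ex_should_map(EX_IF_TYPE_2, 2));
        return 0;
}
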
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index fbab9734e9b4..e6ce9033f85e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1736,7 +1736,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1736 } 1736 }
1737 1737
1738 /* Setup for the none-embedded mbox command */ 1738 /* Setup for the none-embedded mbox command */
1739 pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE; 1739 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1740 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 1740 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1741 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; 1741 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1742 /* Allocate record for keeping SGE virtual addresses */ 1742 /* Allocate record for keeping SGE virtual addresses */
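
The non-embedded mailbox path now rounds the request length to the SLI4 page size before dividing, rather than to the kernel page size. A tiny sketch of why that matters on a large-page configuration; the sizes are illustrative.

#include <stdio.h>

#define EX_KERNEL_PAGE 65536u   /* e.g. a 64K-page architecture */
#define EX_SLI4_PAGE   4096u    /* SLI4 hardware page size */

/* Round len up to a multiple of pg. */
#define EX_ALIGN(len, pg) ((((len) + (pg) - 1) / (pg)) * (pg))

int main(void)
{
        unsigned length = 10000;

        /* Aligning to the kernel page and then dividing by the SLI4 page
         * over-counts pages; aligning to the SLI4 page does not. */
        printf("old pcount=%u new pcount=%u\n",
               EX_ALIGN(length, EX_KERNEL_PAGE) / EX_SLI4_PAGE,
               EX_ALIGN(length, EX_SLI4_PAGE) / EX_SLI4_PAGE);
        return 0;
}
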
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fe7cc84e773b..84e4481b2406 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3238,9 +3238,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3238 if (!lpfc_cmd) { 3238 if (!lpfc_cmd) {
3239 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3239 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3240 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 3240 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
3241 "x%x ID %d " 3241 "x%x ID %d LUN %d\n",
3242 "LUN %d snum %#lx\n", ret, cmnd->device->id, 3242 ret, cmnd->device->id, cmnd->device->lun);
3243 cmnd->device->lun, cmnd->serial_number);
3244 return SUCCESS; 3243 return SUCCESS;
3245 } 3244 }
3246 3245
@@ -3318,16 +3317,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3318 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3317 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3319 "0748 abort handler timed out waiting " 3318 "0748 abort handler timed out waiting "
3320 "for abort to complete: ret %#x, ID %d, " 3319 "for abort to complete: ret %#x, ID %d, "
3321 "LUN %d, snum %#lx\n", 3320 "LUN %d\n",
3322 ret, cmnd->device->id, cmnd->device->lun, 3321 ret, cmnd->device->id, cmnd->device->lun);
3323 cmnd->serial_number);
3324 } 3322 }
3325 3323
3326 out: 3324 out:
3327 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3325 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3328 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 3326 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
3329 "LUN %d snum %#lx\n", ret, cmnd->device->id, 3327 "LUN %d\n", ret, cmnd->device->id,
3330 cmnd->device->lun, cmnd->serial_number); 3328 cmnd->device->lun);
3331 return ret; 3329 return ret;
3332} 3330}
3333 3331
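
These hunks, like the megaraid, megaraid_sas, mesh and mpt2sas changes later in the series, drop scsi_cmnd serial_number from log messages and comparisons. A minimal sketch of logging a command by opcode, target and LUN only; the ex_cmd structure is a made-up stand-in for the few scsi_cmnd fields the messages still use.

#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for the scsi_cmnd fields these messages still use. */
struct ex_cmd {
        uint8_t  opcode;
        unsigned id;
        unsigned lun;
};

static void ex_log_abort(int status, const struct ex_cmd *cmd)
{
        /* Identify the command by opcode/ID/LUN; no per-command serial. */
        printf("abort status 0x%x ID %u LUN %u cmd 0x%02x\n",
               status, cmd->id, cmd->lun, (unsigned)cmd->opcode);
}

int main(void)
{
        struct ex_cmd c = { .opcode = 0x2a /* WRITE_10 */, .id = 3, .lun = 0 };

        ex_log_abort(0x2002, &c);
        return 0;
}
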
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dacabbe0a586..837d272cb2d6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4769,8 +4769,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4769 else 4769 else
4770 phba->hba_flag &= ~HBA_FIP_SUPPORT; 4770 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4771 4771
4772 if (phba->sli_rev != LPFC_SLI_REV4 || 4772 if (phba->sli_rev != LPFC_SLI_REV4) {
4773 !(phba->hba_flag & HBA_FCOE_MODE)) {
4774 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4773 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4775 "0376 READ_REV Error. SLI Level %d " 4774 "0376 READ_REV Error. SLI Level %d "
4776 "FCoE enabled %d\n", 4775 "FCoE enabled %d\n",
@@ -5018,10 +5017,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5018 lpfc_reg_fcfi(phba, mboxq); 5017 lpfc_reg_fcfi(phba, mboxq);
5019 mboxq->vport = phba->pport; 5018 mboxq->vport = phba->pport;
5020 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5019 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5021 if (rc == MBX_SUCCESS) 5020 if (rc != MBX_SUCCESS)
5022 rc = 0;
5023 else
5024 goto out_unset_queue; 5021 goto out_unset_queue;
5022 rc = 0;
5023 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
5024 &mboxq->u.mqe.un.reg_fcfi);
5025 } 5025 }
5026 /* 5026 /*
5027 * The port is ready, set the host's link state to LINK_DOWN 5027 * The port is ready, set the host's link state to LINK_DOWN
@@ -6402,6 +6402,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6402 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 6402 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
6403 int numBdes, i; 6403 int numBdes, i;
6404 struct ulp_bde64 bde; 6404 struct ulp_bde64 bde;
6405 struct lpfc_nodelist *ndlp;
6405 6406
6406 fip = phba->hba_flag & HBA_FIP_SUPPORT; 6407 fip = phba->hba_flag & HBA_FIP_SUPPORT;
6407 /* The fcp commands will set command type */ 6408 /* The fcp commands will set command type */
@@ -6447,6 +6448,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6447 6448
6448 switch (iocbq->iocb.ulpCommand) { 6449 switch (iocbq->iocb.ulpCommand) {
6449 case CMD_ELS_REQUEST64_CR: 6450 case CMD_ELS_REQUEST64_CR:
6451 ndlp = (struct lpfc_nodelist *)iocbq->context1;
6450 if (!iocbq->iocb.ulpLe) { 6452 if (!iocbq->iocb.ulpLe) {
6451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6452 "2007 Only Limited Edition cmd Format" 6454 "2007 Only Limited Edition cmd Format"
@@ -6472,6 +6474,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6472 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 6474 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
6473 >> LPFC_FIP_ELS_ID_SHIFT); 6475 >> LPFC_FIP_ELS_ID_SHIFT);
6474 } 6476 }
6477 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
6475 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 6478 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
6476 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 6479 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
6477 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 6480 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6604,6 +6607,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6604 command_type = OTHER_COMMAND; 6607 command_type = OTHER_COMMAND;
6605 break; 6608 break;
6606 case CMD_XMIT_ELS_RSP64_CX: 6609 case CMD_XMIT_ELS_RSP64_CX:
6610 ndlp = (struct lpfc_nodelist *)iocbq->context1;
6607 /* words0-2 BDE memcpy */ 6611 /* words0-2 BDE memcpy */
6608 /* word3 iocb=iotag32 wqe=response_payload_len */ 6612 /* word3 iocb=iotag32 wqe=response_payload_len */
6609 wqe->xmit_els_rsp.response_payload_len = xmit_len; 6613 wqe->xmit_els_rsp.response_payload_len = xmit_len;
@@ -6626,6 +6630,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6626 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 6630 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
6627 LPFC_WQE_LENLOC_WORD3); 6631 LPFC_WQE_LENLOC_WORD3);
6628 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 6632 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6633 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
6629 command_type = OTHER_COMMAND; 6634 command_type = OTHER_COMMAND;
6630 break; 6635 break;
6631 case CMD_CLOSE_XRI_CN: 6636 case CMD_CLOSE_XRI_CN:
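
For ELS request and response WQEs the node's RPI is now packed into the shared word through the wqe_temp_rpi and wqe_rsp_temp_rpi fields defined in lpfc_hw4.h above. A standalone sketch of packing two 16-bit values into one WQE word; the helper name is illustrative and not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* word9 carries the request tag in bits 15:0 and, for ELS requests, a
 * temporary RPI in bits 31:16 (mirroring wqe_reqtag / wqe_temp_rpi). */
static uint32_t ex_pack_word9(uint16_t reqtag, uint16_t temp_rpi)
{
        return ((uint32_t)temp_rpi << 16) | reqtag;
}

int main(void)
{
        uint32_t word9 = ex_pack_word9(0x0042, 0x0123);

        printf("word9=0x%08x reqtag=0x%x rpi=0x%x\n",
               (unsigned)word9, (unsigned)(word9 & 0xFFFF),
               (unsigned)(word9 >> 16));
        return 0;
}
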
@@ -10522,8 +10527,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
10522 bf_set(lpfc_mbox_hdr_version, &shdr->request, 10527 bf_set(lpfc_mbox_hdr_version, &shdr->request,
10523 phba->sli4_hba.pc_sli4_params.cqv); 10528 phba->sli4_hba.pc_sli4_params.cqv);
10524 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 10529 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
10525 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 10530 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
10526 (PAGE_SIZE/SLI4_PAGE_SIZE)); 10531 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
10527 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 10532 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
10528 eq->queue_id); 10533 eq->queue_id);
10529 } else { 10534 } else {
@@ -10967,6 +10972,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10967 &rq_create->u.request.context, 10972 &rq_create->u.request.context,
10968 hrq->entry_count); 10973 hrq->entry_count);
10969 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 10974 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
10975 bf_set(lpfc_rq_context_rqe_size,
10976 &rq_create->u.request.context,
10977 LPFC_RQE_SIZE_8);
10978 bf_set(lpfc_rq_context_page_size,
10979 &rq_create->u.request.context,
10980 (PAGE_SIZE/SLI4_PAGE_SIZE));
10970 } else { 10981 } else {
10971 switch (hrq->entry_count) { 10982 switch (hrq->entry_count) {
10972 default: 10983 default:
@@ -11042,9 +11053,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
11042 phba->sli4_hba.pc_sli4_params.rqv); 11053 phba->sli4_hba.pc_sli4_params.rqv);
11043 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 11054 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
11044 bf_set(lpfc_rq_context_rqe_count_1, 11055 bf_set(lpfc_rq_context_rqe_count_1,
11045 &rq_create->u.request.context, 11056 &rq_create->u.request.context, hrq->entry_count);
11046 hrq->entry_count);
11047 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 11057 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
11058 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
11059 LPFC_RQE_SIZE_8);
11060 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
11061 (PAGE_SIZE/SLI4_PAGE_SIZE));
11048 } else { 11062 } else {
11049 switch (drq->entry_count) { 11063 switch (drq->entry_count) {
11050 default: 11064 default:
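
With version-1 RQ create mailboxes the entry size and page size become explicit context fields (LPFC_RQE_SIZE_8 and PAGE_SIZE/SLI4_PAGE_SIZE above). The sketch below derives the size code from a byte count on the assumption, read off the LPFC_RQE_SIZE_* table in lpfc_hw4.h, that code N corresponds to an entry of 2^(N+1) bytes; the function is illustrative, not driver code.

#include <stdio.h>

/* Map an RQE byte size onto the version-1 context code, assuming the
 * LPFC_RQE_SIZE_* pattern (8->2, 16->3, ... 128->6), i.e.
 * code = log2(size) - 1.  Returns 0 for unsupported sizes. */
static unsigned ex_rqe_size_code(unsigned size)
{
        unsigned log2 = 0;

        if (size < 8 || size > 128 || (size & (size - 1)))
                return 0;
        while (size > 1) {
                size >>= 1;
                log2++;
        }
        return log2 - 1;
}

int main(void)
{
        printf("8->%u 64->%u 128->%u\n",
               ex_rqe_size_code(8), ex_rqe_size_code(64), ex_rqe_size_code(128));
        return 0;
}
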
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2404d1d65563..c03921b1232c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.22" 21#define LPFC_DRIVER_VERSION "8.3.23"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index f2684dd09ed0..5c1776406c96 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1469,8 +1469,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1469 if( scb->state & SCB_ABORT ) { 1469 if( scb->state & SCB_ABORT ) {
1470 1470
1471 printk(KERN_WARNING 1471 printk(KERN_WARNING
1472 "megaraid: aborted cmd %lx[%x] complete.\n", 1472 "megaraid: aborted cmd [%x] complete.\n",
1473 scb->cmd->serial_number, scb->idx); 1473 scb->idx);
1474 1474
1475 scb->cmd->result = (DID_ABORT << 16); 1475 scb->cmd->result = (DID_ABORT << 16);
1476 1476
@@ -1488,8 +1488,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1488 if( scb->state & SCB_RESET ) { 1488 if( scb->state & SCB_RESET ) {
1489 1489
1490 printk(KERN_WARNING 1490 printk(KERN_WARNING
1491 "megaraid: reset cmd %lx[%x] complete.\n", 1491 "megaraid: reset cmd [%x] complete.\n",
1492 scb->cmd->serial_number, scb->idx); 1492 scb->idx);
1493 1493
1494 scb->cmd->result = (DID_RESET << 16); 1494 scb->cmd->result = (DID_RESET << 16);
1495 1495
@@ -1958,8 +1958,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1958 struct list_head *pos, *next; 1958 struct list_head *pos, *next;
1959 scb_t *scb; 1959 scb_t *scb;
1960 1960
1961 printk(KERN_WARNING "megaraid: %s-%lx cmd=%x <c=%d t=%d l=%d>\n", 1961 printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n",
1962 (aor == SCB_ABORT)? "ABORTING":"RESET", cmd->serial_number, 1962 (aor == SCB_ABORT)? "ABORTING":"RESET",
1963 cmd->cmnd[0], cmd->device->channel, 1963 cmd->cmnd[0], cmd->device->channel,
1964 cmd->device->id, cmd->device->lun); 1964 cmd->device->id, cmd->device->lun);
1965 1965
@@ -1983,9 +1983,9 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1983 if( scb->state & SCB_ISSUED ) { 1983 if( scb->state & SCB_ISSUED ) {
1984 1984
1985 printk(KERN_WARNING 1985 printk(KERN_WARNING
1986 "megaraid: %s-%lx[%x], fw owner.\n", 1986 "megaraid: %s[%x], fw owner.\n",
1987 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1987 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1988 cmd->serial_number, scb->idx); 1988 scb->idx);
1989 1989
1990 return FALSE; 1990 return FALSE;
1991 } 1991 }
@@ -1996,9 +1996,9 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1996 * list 1996 * list
1997 */ 1997 */
1998 printk(KERN_WARNING 1998 printk(KERN_WARNING
1999 "megaraid: %s-%lx[%x], driver owner.\n", 1999 "megaraid: %s-[%x], driver owner.\n",
2000 (aor==SCB_ABORT) ? "ABORTING":"RESET", 2000 (aor==SCB_ABORT) ? "ABORTING":"RESET",
2001 cmd->serial_number, scb->idx); 2001 scb->idx);
2002 2002
2003 mega_free_scb(adapter, scb); 2003 mega_free_scb(adapter, scb);
2004 2004
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 1dba32870b4c..2e6619eff3ea 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -2315,8 +2315,8 @@ megaraid_mbox_dpc(unsigned long devp)
2315 // Was an abort issued for this command earlier 2315 // Was an abort issued for this command earlier
2316 if (scb->state & SCB_ABORT) { 2316 if (scb->state & SCB_ABORT) {
2317 con_log(CL_ANN, (KERN_NOTICE 2317 con_log(CL_ANN, (KERN_NOTICE
2318 "megaraid: aborted cmd %lx[%x] completed\n", 2318 "megaraid: aborted cmd [%x] completed\n",
2319 scp->serial_number, scb->sno)); 2319 scb->sno));
2320 } 2320 }
2321 2321
2322 /* 2322 /*
@@ -2472,8 +2472,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2472 raid_dev = ADAP2RAIDDEV(adapter); 2472 raid_dev = ADAP2RAIDDEV(adapter);
2473 2473
2474 con_log(CL_ANN, (KERN_WARNING 2474 con_log(CL_ANN, (KERN_WARNING
2475 "megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n", 2475 "megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
2476 scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp), 2476 scp->cmnd[0], SCP2CHANNEL(scp),
2477 SCP2TARGET(scp), SCP2LUN(scp))); 2477 SCP2TARGET(scp), SCP2LUN(scp)));
2478 2478
2479 // If FW has stopped responding, simply return failure 2479 // If FW has stopped responding, simply return failure
@@ -2496,9 +2496,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2496 list_del_init(&scb->list); // from completed list 2496 list_del_init(&scb->list); // from completed list
2497 2497
2498 con_log(CL_ANN, (KERN_WARNING 2498 con_log(CL_ANN, (KERN_WARNING
2499 "megaraid: %ld:%d[%d:%d], abort from completed list\n", 2499 "megaraid: %d[%d:%d], abort from completed list\n",
2500 scp->serial_number, scb->sno, 2500 scb->sno, scb->dev_channel, scb->dev_target));
2501 scb->dev_channel, scb->dev_target));
2502 2501
2503 scp->result = (DID_ABORT << 16); 2502 scp->result = (DID_ABORT << 16);
2504 scp->scsi_done(scp); 2503 scp->scsi_done(scp);
@@ -2527,9 +2526,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2527 ASSERT(!(scb->state & SCB_ISSUED)); 2526 ASSERT(!(scb->state & SCB_ISSUED));
2528 2527
2529 con_log(CL_ANN, (KERN_WARNING 2528 con_log(CL_ANN, (KERN_WARNING
2530 "megaraid abort: %ld[%d:%d], driver owner\n", 2529 "megaraid abort: [%d:%d], driver owner\n",
2531 scp->serial_number, scb->dev_channel, 2530 scb->dev_channel, scb->dev_target));
2532 scb->dev_target));
2533 2531
2534 scp->result = (DID_ABORT << 16); 2532 scp->result = (DID_ABORT << 16);
2535 scp->scsi_done(scp); 2533 scp->scsi_done(scp);
@@ -2560,25 +2558,21 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2560 2558
2561 if (!(scb->state & SCB_ISSUED)) { 2559 if (!(scb->state & SCB_ISSUED)) {
2562 con_log(CL_ANN, (KERN_WARNING 2560 con_log(CL_ANN, (KERN_WARNING
2563 "megaraid abort: %ld%d[%d:%d], invalid state\n", 2561 "megaraid abort: %d[%d:%d], invalid state\n",
2564 scp->serial_number, scb->sno, scb->dev_channel, 2562 scb->sno, scb->dev_channel, scb->dev_target));
2565 scb->dev_target));
2566 BUG(); 2563 BUG();
2567 } 2564 }
2568 else { 2565 else {
2569 con_log(CL_ANN, (KERN_WARNING 2566 con_log(CL_ANN, (KERN_WARNING
2570 "megaraid abort: %ld:%d[%d:%d], fw owner\n", 2567 "megaraid abort: %d[%d:%d], fw owner\n",
2571 scp->serial_number, scb->sno, scb->dev_channel, 2568 scb->sno, scb->dev_channel, scb->dev_target));
2572 scb->dev_target));
2573 } 2569 }
2574 } 2570 }
2575 } 2571 }
2576 spin_unlock_irq(&adapter->lock); 2572 spin_unlock_irq(&adapter->lock);
2577 2573
2578 if (!found) { 2574 if (!found) {
2579 con_log(CL_ANN, (KERN_WARNING 2575 con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n"));
2580 "megaraid abort: scsi cmd:%ld, do now own\n",
2581 scp->serial_number));
2582 2576
2583 // FIXME: Should there be a callback for this command? 2577 // FIXME: Should there be a callback for this command?
2584 return SUCCESS; 2578 return SUCCESS;
@@ -2649,9 +2643,8 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2649 } else { 2643 } else {
2650 if (scb->scp == scp) { // Found command 2644 if (scb->scp == scp) { // Found command
2651 con_log(CL_ANN, (KERN_WARNING 2645 con_log(CL_ANN, (KERN_WARNING
2652 "megaraid: %ld:%d[%d:%d], reset from pending list\n", 2646 "megaraid: %d[%d:%d], reset from pending list\n",
2653 scp->serial_number, scb->sno, 2647 scb->sno, scb->dev_channel, scb->dev_target));
2654 scb->dev_channel, scb->dev_target));
2655 } else { 2648 } else {
2656 con_log(CL_ANN, (KERN_WARNING 2649 con_log(CL_ANN, (KERN_WARNING
2657 "megaraid: IO packet with %d[%d:%d] being reset\n", 2650 "megaraid: IO packet with %d[%d:%d] being reset\n",
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 66d4cea4df98..89c623ebadbc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1751,10 +1751,9 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1751 list_del_init(&reset_cmd->list); 1751 list_del_init(&reset_cmd->list);
1752 if (reset_cmd->scmd) { 1752 if (reset_cmd->scmd) {
1753 reset_cmd->scmd->result = DID_RESET << 16; 1753 reset_cmd->scmd->result = DID_RESET << 16;
1754 printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n", 1754 printk(KERN_NOTICE "%d:%p reset [%02x]\n",
1755 reset_index, reset_cmd, 1755 reset_index, reset_cmd,
1756 reset_cmd->scmd->cmnd[0], 1756 reset_cmd->scmd->cmnd[0]);
1757 reset_cmd->scmd->serial_number);
1758 1757
1759 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 1758 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
1760 megasas_return_cmd(instance, reset_cmd); 1759 megasas_return_cmd(instance, reset_cmd);
@@ -1879,8 +1878,8 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1879 1878
1880 instance = (struct megasas_instance *)scmd->device->host->hostdata; 1879 instance = (struct megasas_instance *)scmd->device->host->hostdata;
1881 1880
1882 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n", 1881 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
1883 scmd->serial_number, scmd->cmnd[0], scmd->retries); 1882 scmd->cmnd[0], scmd->retries);
1884 1883
1885 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 1884 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
1886 printk(KERN_ERR "megasas: cannot recover from previous reset " 1885 printk(KERN_ERR "megasas: cannot recover from previous reset "
@@ -2349,9 +2348,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
2349 cmd->frame_phys_addr , 2348 cmd->frame_phys_addr ,
2350 0, instance->reg_set); 2349 0, instance->reg_set);
2351 } else if (cmd->scmd) { 2350 } else if (cmd->scmd) {
2352 printk(KERN_NOTICE "megasas: %p scsi cmd [%02x],%#lx" 2351 printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
2353 "detected on the internal queue, issue again.\n", 2352 "detected on the internal queue, issue again.\n",
2354 cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number); 2353 cmd, cmd->scmd->cmnd[0]);
2355 2354
2356 atomic_inc(&instance->fw_outstanding); 2355 atomic_inc(&instance->fw_outstanding);
2357 instance->instancet->fire_cmd(instance, 2356 instance->instancet->fire_cmd(instance,
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 197aa1b3f0f3..494474779532 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -415,8 +415,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
415#if 1 415#if 1
416 if (DEBUG_TARGET(cmd)) { 416 if (DEBUG_TARGET(cmd)) {
417 int i; 417 int i;
418 printk(KERN_DEBUG "mesh_start: %p ser=%lu tgt=%d cmd=", 418 printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id);
419 cmd, cmd->serial_number, id);
420 for (i = 0; i < cmd->cmd_len; ++i) 419 for (i = 0; i < cmd->cmd_len; ++i)
421 printk(" %x", cmd->cmnd[i]); 420 printk(" %x", cmd->cmnd[i]);
422 printk(" use_sg=%d buffer=%p bufflen=%u\n", 421 printk(" use_sg=%d buffer=%p bufflen=%u\n",
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 3346357031e9..efa0255491c2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -522,7 +522,8 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
522 desc = "Device Status Change"; 522 desc = "Device Status Change";
523 break; 523 break;
524 case MPI2_EVENT_IR_OPERATION_STATUS: 524 case MPI2_EVENT_IR_OPERATION_STATUS:
525 desc = "IR Operation Status"; 525 if (!ioc->hide_ir_msg)
526 desc = "IR Operation Status";
526 break; 527 break;
527 case MPI2_EVENT_SAS_DISCOVERY: 528 case MPI2_EVENT_SAS_DISCOVERY:
528 { 529 {
@@ -553,16 +554,20 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
553 desc = "SAS Enclosure Device Status Change"; 554 desc = "SAS Enclosure Device Status Change";
554 break; 555 break;
555 case MPI2_EVENT_IR_VOLUME: 556 case MPI2_EVENT_IR_VOLUME:
556 desc = "IR Volume"; 557 if (!ioc->hide_ir_msg)
558 desc = "IR Volume";
557 break; 559 break;
558 case MPI2_EVENT_IR_PHYSICAL_DISK: 560 case MPI2_EVENT_IR_PHYSICAL_DISK:
559 desc = "IR Physical Disk"; 561 if (!ioc->hide_ir_msg)
562 desc = "IR Physical Disk";
560 break; 563 break;
561 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 564 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
562 desc = "IR Configuration Change List"; 565 if (!ioc->hide_ir_msg)
566 desc = "IR Configuration Change List";
563 break; 567 break;
564 case MPI2_EVENT_LOG_ENTRY_ADDED: 568 case MPI2_EVENT_LOG_ENTRY_ADDED:
565 desc = "Log Entry Added"; 569 if (!ioc->hide_ir_msg)
570 desc = "Log Entry Added";
566 break; 571 break;
567 } 572 }
568 573
@@ -616,7 +621,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
616 originator_str = "PL"; 621 originator_str = "PL";
617 break; 622 break;
618 case 2: 623 case 2:
619 originator_str = "IR"; 624 if (!ioc->hide_ir_msg)
625 originator_str = "IR";
626 else
627 originator_str = "WarpDrive";
620 break; 628 break;
621 } 629 }
622 630
@@ -1508,6 +1516,7 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1508 } 1516 }
1509 ioc->scsi_lookup[i].cb_idx = 0xFF; 1517 ioc->scsi_lookup[i].cb_idx = 0xFF;
1510 ioc->scsi_lookup[i].scmd = NULL; 1518 ioc->scsi_lookup[i].scmd = NULL;
1519 ioc->scsi_lookup[i].direct_io = 0;
1511 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 1520 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1512 &ioc->free_list); 1521 &ioc->free_list);
1513 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1522 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -1844,10 +1853,12 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1844 printk("), "); 1853 printk("), ");
1845 printk("Capabilities=("); 1854 printk("Capabilities=(");
1846 1855
1847 if (ioc->facts.IOCCapabilities & 1856 if (!ioc->hide_ir_msg) {
1848 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { 1857 if (ioc->facts.IOCCapabilities &
1849 printk("Raid"); 1858 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
1850 i++; 1859 printk("Raid");
1860 i++;
1861 }
1851 } 1862 }
1852 1863
1853 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { 1864 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
@@ -3680,6 +3691,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3680 u32 reply_address; 3691 u32 reply_address;
3681 u16 smid; 3692 u16 smid;
3682 struct _tr_list *delayed_tr, *delayed_tr_next; 3693 struct _tr_list *delayed_tr, *delayed_tr_next;
3694 u8 hide_flag;
3683 3695
3684 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3696 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3685 __func__)); 3697 __func__));
@@ -3706,6 +3718,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3706 ioc->scsi_lookup[i].cb_idx = 0xFF; 3718 ioc->scsi_lookup[i].cb_idx = 0xFF;
3707 ioc->scsi_lookup[i].smid = smid; 3719 ioc->scsi_lookup[i].smid = smid;
3708 ioc->scsi_lookup[i].scmd = NULL; 3720 ioc->scsi_lookup[i].scmd = NULL;
3721 ioc->scsi_lookup[i].direct_io = 0;
3709 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 3722 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
3710 &ioc->free_list); 3723 &ioc->free_list);
3711 } 3724 }
@@ -3766,6 +3779,15 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3766 if (sleep_flag == CAN_SLEEP) 3779 if (sleep_flag == CAN_SLEEP)
3767 _base_static_config_pages(ioc); 3780 _base_static_config_pages(ioc);
3768 3781
3782 if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) {
3783 if (ioc->manu_pg10.OEMIdentifier == 0x80) {
3784 hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
3785 MFG_PAGE10_HIDE_SSDS_MASK);
3786 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
3787 ioc->mfg_pg10_hide_flag = hide_flag;
3788 }
3789 }
3790
3769 if (ioc->wait_for_port_enable_to_complete) { 3791 if (ioc->wait_for_port_enable_to_complete) {
3770 if (diag_buffer_enable != 0) 3792 if (diag_buffer_enable != 0)
3771 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); 3793 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
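
The base-driver hunks hide the integrated-RAID event strings behind a new hide_ir_msg flag and, during port enable on WarpDrive hardware, read an OEM hide mode out of Manufacturing Page 10. A small sketch of masking the OEM flags word down to that mode, reusing the MFG_PAGE10_* values that mpt2sas_base.h defines below; the helper itself is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define EX_HIDE_SSDS_MASK        0x00000003
#define EX_HIDE_ALL_DISKS        0x00
#define EX_EXPOSE_ALL_DISKS      0x01
#define EX_HIDE_IF_VOL_PRESENT   0x02

/* Extract the hide mode from OEMSpecificFlags0; a value equal to the
 * mask itself (0x03) is treated as "not configured" and ignored. */
static int ex_hide_mode(uint32_t oem_flags0, uint8_t *mode)
{
        uint8_t flag = (uint8_t)(oem_flags0 & EX_HIDE_SSDS_MASK);

        if (flag == EX_HIDE_SSDS_MASK)
                return -1;                     /* keep the current default */
        *mode = flag;
        return 0;
}

int main(void)
{
        uint8_t mode;

        if (!ex_hide_mode(0x00000002, &mode))
                printf("hide mode %u (%s)\n", (unsigned)mode,
                       mode == EX_HIDE_IF_VOL_PRESENT ? "hide if volume present" :
                       mode == EX_EXPOSE_ALL_DISKS    ? "expose all disks" :
                                                        "hide all disks");
        return 0;
}
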
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 500328245f61..2a3c05f6db8b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "08.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "08.100.00.01"
73#define MPT2SAS_MAJOR_VERSION 08 73#define MPT2SAS_MAJOR_VERSION 08
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 01
77 77
78/* 78/*
79 * Set MPT2SAS_SG_DEPTH value based on user input. 79 * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -189,6 +189,16 @@
189#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 189#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046
190 190
191/* 191/*
192 * WarpDrive Specific Log codes
193 */
194
195#define MPT2_WARPDRIVE_LOGENTRY (0x8002)
196#define MPT2_WARPDRIVE_LC_SSDT (0x41)
197#define MPT2_WARPDRIVE_LC_SSDLW (0x43)
198#define MPT2_WARPDRIVE_LC_SSDLF (0x44)
199#define MPT2_WARPDRIVE_LC_BRMF (0x4D)
200
201/*
192 * per target private data 202 * per target private data
193 */ 203 */
194#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01 204#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
@@ -199,6 +209,7 @@
199 * struct MPT2SAS_TARGET - starget private hostdata 209 * struct MPT2SAS_TARGET - starget private hostdata
200 * @starget: starget object 210 * @starget: starget object
201 * @sas_address: target sas address 211 * @sas_address: target sas address
212 * @raid_device: raid_device pointer to access volume data
202 * @handle: device handle 213 * @handle: device handle
203 * @num_luns: number luns 214 * @num_luns: number luns
204 * @flags: MPT_TARGET_FLAGS_XXX flags 215 * @flags: MPT_TARGET_FLAGS_XXX flags
@@ -208,6 +219,7 @@
208struct MPT2SAS_TARGET { 219struct MPT2SAS_TARGET {
209 struct scsi_target *starget; 220 struct scsi_target *starget;
210 u64 sas_address; 221 u64 sas_address;
222 struct _raid_device *raid_device;
211 u16 handle; 223 u16 handle;
212 int num_luns; 224 int num_luns;
213 u32 flags; 225 u32 flags;
@@ -215,6 +227,7 @@ struct MPT2SAS_TARGET {
215 u8 tm_busy; 227 u8 tm_busy;
216}; 228};
217 229
230
218/* 231/*
219 * per device private data 232 * per device private data
220 */ 233 */
@@ -262,6 +275,12 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
262 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10, 275 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
263 Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t; 276 Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
264 277
278#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
279#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
280#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01)
281#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02)
282
283
265struct MPT2SAS_DEVICE { 284struct MPT2SAS_DEVICE {
266 struct MPT2SAS_TARGET *sas_target; 285 struct MPT2SAS_TARGET *sas_target;
267 unsigned int lun; 286 unsigned int lun;
@@ -341,6 +360,7 @@ struct _sas_device {
341 * @sdev: scsi device struct (volumes are single lun) 360 * @sdev: scsi device struct (volumes are single lun)
342 * @wwid: unique identifier for the volume 361 * @wwid: unique identifier for the volume
343 * @handle: device handle 362 * @handle: device handle
363 * @block_size: Block size of the volume
344 * @id: target id 364 * @id: target id
345 * @channel: target channel 365 * @channel: target channel
346 * @volume_type: the raid level 366 * @volume_type: the raid level
@@ -348,20 +368,33 @@ struct _sas_device {
348 * @num_pds: number of hidden raid components 368 * @num_pds: number of hidden raid components
349 * @responding: used in _scsih_raid_device_mark_responding 369 * @responding: used in _scsih_raid_device_mark_responding
350 * @percent_complete: resync percent complete 370 * @percent_complete: resync percent complete
371 * @direct_io_enabled: Whether direct io to PDs are allowed or not
372 * @stripe_exponent: X where 2powX is the stripe sz in blocks
373 * @max_lba: Maximum number of LBA in the volume
374 * @stripe_sz: Stripe Size of the volume
375 * @device_info: Device info of the volume member disk
376 * @pd_handle: Array of handles of the physical drives for direct I/O in le16
351 */ 377 */
378#define MPT_MAX_WARPDRIVE_PDS 8
352struct _raid_device { 379struct _raid_device {
353 struct list_head list; 380 struct list_head list;
354 struct scsi_target *starget; 381 struct scsi_target *starget;
355 struct scsi_device *sdev; 382 struct scsi_device *sdev;
356 u64 wwid; 383 u64 wwid;
357 u16 handle; 384 u16 handle;
385 u16 block_sz;
358 int id; 386 int id;
359 int channel; 387 int channel;
360 u8 volume_type; 388 u8 volume_type;
361 u32 device_info;
362 u8 num_pds; 389 u8 num_pds;
363 u8 responding; 390 u8 responding;
364 u8 percent_complete; 391 u8 percent_complete;
392 u8 direct_io_enabled;
393 u8 stripe_exponent;
394 u64 max_lba;
395 u32 stripe_sz;
396 u32 device_info;
397 u16 pd_handle[MPT_MAX_WARPDRIVE_PDS];
365}; 398};
366 399
367/** 400/**
@@ -470,6 +503,7 @@ struct chain_tracker {
470 * @smid: system message id 503 * @smid: system message id
471 * @scmd: scsi request pointer 504 * @scmd: scsi request pointer
472 * @cb_idx: callback index 505 * @cb_idx: callback index
506 * @direct_io: To indicate whether I/O is direct (WARPDRIVE)
473 * @chain_list: list of chains associated to this IO 507 * @chain_list: list of chains associated to this IO
474 * @tracker_list: list of free request (ioc->free_list) 508 * @tracker_list: list of free request (ioc->free_list)
475 */ 509 */
@@ -477,14 +511,14 @@ struct scsiio_tracker {
477 u16 smid; 511 u16 smid;
478 struct scsi_cmnd *scmd; 512 struct scsi_cmnd *scmd;
479 u8 cb_idx; 513 u8 cb_idx;
514 u8 direct_io;
480 struct list_head chain_list; 515 struct list_head chain_list;
481 struct list_head tracker_list; 516 struct list_head tracker_list;
482}; 517};
483 518
484/** 519/**
485 * struct request_tracker - misc mf request tracker 520 * struct request_tracker - firmware request tracker
486 * @smid: system message id 521 * @smid: system message id
487 * @scmd: scsi request pointer
488 * @cb_idx: callback index 522 * @cb_idx: callback index
489 * @tracker_list: list of free request (ioc->free_list) 523 * @tracker_list: list of free request (ioc->free_list)
490 */ 524 */
@@ -832,6 +866,11 @@ struct MPT2SAS_ADAPTER {
832 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; 866 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
833 u32 ring_buffer_offset; 867 u32 ring_buffer_offset;
834 u32 ring_buffer_sz; 868 u32 ring_buffer_sz;
869 u8 is_warpdrive;
870 u8 hide_ir_msg;
871 u8 mfg_pg10_hide_flag;
872 u8 hide_drives;
873
835}; 874};
836 875
837typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 876typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index d72f1f2b1392..437c2d94c45a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1041,7 +1041,10 @@ _ctl_getiocinfo(void __user *arg)
1041 __func__)); 1041 __func__));
1042 1042
1043 memset(&karg, 0 , sizeof(karg)); 1043 memset(&karg, 0 , sizeof(karg));
1044 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; 1044 if (ioc->is_warpdrive)
1045 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1046 else
1047 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1045 if (ioc->pfacts) 1048 if (ioc->pfacts)
1046 karg.port_number = ioc->pfacts[0].PortNumber; 1049 karg.port_number = ioc->pfacts[0].PortNumber;
1047 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); 1050 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 69916e46e04f..11ff1d5fb8f0 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -133,6 +133,7 @@ struct mpt2_ioctl_pci_info {
133#define MPT2_IOCTL_INTERFACE_FC_IP (0x02) 133#define MPT2_IOCTL_INTERFACE_FC_IP (0x02)
134#define MPT2_IOCTL_INTERFACE_SAS (0x03) 134#define MPT2_IOCTL_INTERFACE_SAS (0x03)
135#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) 135#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
136#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
136#define MPT2_IOCTL_VERSION_LENGTH (32) 137#define MPT2_IOCTL_VERSION_LENGTH (32)
137 138
138/** 139/**
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index d2064a0533ae..f12e02358d6d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -233,6 +233,9 @@ static struct pci_device_id scsih_pci_table[] = {
233 PCI_ANY_ID, PCI_ANY_ID }, 233 PCI_ANY_ID, PCI_ANY_ID },
234 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, 234 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
235 PCI_ANY_ID, PCI_ANY_ID }, 235 PCI_ANY_ID, PCI_ANY_ID },
236 /* SSS6200 */
237 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
238 PCI_ANY_ID, PCI_ANY_ID },
236 {0} /* Terminating entry */ 239 {0} /* Terminating entry */
237}; 240};
238MODULE_DEVICE_TABLE(pci, scsih_pci_table); 241MODULE_DEVICE_TABLE(pci, scsih_pci_table);
@@ -1256,6 +1259,7 @@ _scsih_target_alloc(struct scsi_target *starget)
1256 sas_target_priv_data->handle = raid_device->handle; 1259 sas_target_priv_data->handle = raid_device->handle;
1257 sas_target_priv_data->sas_address = raid_device->wwid; 1260 sas_target_priv_data->sas_address = raid_device->wwid;
1258 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; 1261 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1262 sas_target_priv_data->raid_device = raid_device;
1259 raid_device->starget = starget; 1263 raid_device->starget = starget;
1260 } 1264 }
1261 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1265 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
@@ -1455,7 +1459,10 @@ static int
1455_scsih_is_raid(struct device *dev) 1459_scsih_is_raid(struct device *dev)
1456{ 1460{
1457 struct scsi_device *sdev = to_scsi_device(dev); 1461 struct scsi_device *sdev = to_scsi_device(dev);
1462 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
1458 1463
1464 if (ioc->is_warpdrive)
1465 return 0;
1459 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 1466 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1460} 1467}
1461 1468
@@ -1480,7 +1487,7 @@ _scsih_get_resync(struct device *dev)
1480 sdev->channel); 1487 sdev->channel);
1481 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1488 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1482 1489
1483 if (!raid_device) 1490 if (!raid_device || ioc->is_warpdrive)
1484 goto out; 1491 goto out;
1485 1492
1486 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1493 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
@@ -1640,6 +1647,212 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1640 1647
1641 kfree(vol_pg0); 1648 kfree(vol_pg0);
1642} 1649}
1650/**
1651 * _scsih_disable_ddio - Disable direct I/O for all the volumes
1652 * @ioc: per adapter object
1653 */
1654static void
1655_scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
1656{
1657 Mpi2RaidVolPage1_t vol_pg1;
1658 Mpi2ConfigReply_t mpi_reply;
1659 struct _raid_device *raid_device;
1660 u16 handle;
1661 u16 ioc_status;
1662
1663 handle = 0xFFFF;
1664 while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
1665 &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1666 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1667 MPI2_IOCSTATUS_MASK;
1668 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1669 break;
1670 handle = le16_to_cpu(vol_pg1.DevHandle);
1671 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
1672 if (raid_device)
1673 raid_device->direct_io_enabled = 0;
1674 }
1675 return;
1676}
1677
1678
1679/**
1680 * _scsih_get_num_volumes - Get number of volumes in the ioc
1681 * @ioc: per adapter object
1682 */
1683static u8
1684_scsih_get_num_volumes(struct MPT2SAS_ADAPTER *ioc)
1685{
1686 Mpi2RaidVolPage1_t vol_pg1;
1687 Mpi2ConfigReply_t mpi_reply;
1688 u16 handle;
1689 u8 vol_cnt = 0;
1690 u16 ioc_status;
1691
1692 handle = 0xFFFF;
1693 while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
1694 &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1695 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1696 MPI2_IOCSTATUS_MASK;
1697 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1698 break;
1699 vol_cnt++;
1700 handle = le16_to_cpu(vol_pg1.DevHandle);
1701 }
1702 return vol_cnt;
1703}
1704
1705
1706/**
1707 * _scsih_init_warpdrive_properties - Set properties for warpdrive direct I/O.
1708 * @ioc: per adapter object
1709 * @raid_device: the raid_device object
1710 */
1711static void
1712_scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
1713 struct _raid_device *raid_device)
1714{
1715 Mpi2RaidVolPage0_t *vol_pg0;
1716 Mpi2RaidPhysDiskPage0_t pd_pg0;
1717 Mpi2ConfigReply_t mpi_reply;
1718 u16 sz;
1719 u8 num_pds, count;
1720 u64 mb = 1024 * 1024;
1721 u64 tb_2 = 2 * mb * mb;
1722 u64 capacity;
1723 u32 stripe_sz;
1724 u8 i, stripe_exp;
1725
1726 if (!ioc->is_warpdrive)
1727 return;
1728
1729 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
1730 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1731 "globally as drives are exposed\n", ioc->name);
1732 return;
1733 }
1734 if (_scsih_get_num_volumes(ioc) > 1) {
1735 _scsih_disable_ddio(ioc);
1736 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1737 "globally as number of drives > 1\n", ioc->name);
1738 return;
1739 }
1740 if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
1741 &num_pds)) || !num_pds) {
1742 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1743 "Failure in computing number of drives\n", ioc->name);
1744 return;
1745 }
1746
1747 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
1748 sizeof(Mpi2RaidVol0PhysDisk_t));
1749 vol_pg0 = kzalloc(sz, GFP_KERNEL);
1750 if (!vol_pg0) {
1751 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1752 "Memory allocation failure for RVPG0\n", ioc->name);
1753 return;
1754 }
1755
1756 if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
1757 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
1758 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1759 "Failure in retrieving RVPG0\n", ioc->name);
1760 kfree(vol_pg0);
1761 return;
1762 }
1763
1764 /*
1765 * WARPDRIVE: If number of physical disks in a volume exceeds the max pds
1766 * assumed for WARPDRIVE, disable direct I/O
1767 */
1768 if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
1769 printk(MPT2SAS_WARN_FMT "WarpDrive : Direct IO is disabled "
1770 "for the drive with handle(0x%04x): num_mem=%d, "
1771 "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
1772 num_pds, MPT_MAX_WARPDRIVE_PDS);
1773 kfree(vol_pg0);
1774 return;
1775 }
1776 for (count = 0; count < num_pds; count++) {
1777 if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
1778 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
1779 vol_pg0->PhysDisk[count].PhysDiskNum) ||
1780 pd_pg0.DevHandle == MPT2SAS_INVALID_DEVICE_HANDLE) {
1781 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
1782 "disabled for the drive with handle(0x%04x) member"
1783 "handle retrieval failed for member number=%d\n",
1784 ioc->name, raid_device->handle,
1785 vol_pg0->PhysDisk[count].PhysDiskNum);
1786 goto out_error;
1787 }
1788 raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
1789 }
1790
1791 /*
1792 * Assumption for WD: Direct I/O is not supported if the volume is
1793 * not RAID0, if the stripe size is not 64KB, if the block size is
1794 * not 512 and if the volume size is >2TB
1795 */
1796 if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 ||
1797 le16_to_cpu(vol_pg0->BlockSize) != 512) {
1798 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1799 "for the drive with handle(0x%04x): type=%d, "
1800 "s_sz=%uK, blk_size=%u\n", ioc->name,
1801 raid_device->handle, raid_device->volume_type,
1802 le32_to_cpu(vol_pg0->StripeSize)/2,
1803 le16_to_cpu(vol_pg0->BlockSize));
1804 goto out_error;
1805 }
1806
1807 capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) *
1808 (le64_to_cpu(vol_pg0->MaxLBA) + 1);
1809
1810 if (capacity > tb_2) {
1811 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1812 "for the drive with handle(0x%04x) since drive sz > 2TB\n",
1813 ioc->name, raid_device->handle);
1814 goto out_error;
1815 }
1816
1817 stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
1818 stripe_exp = 0;
1819 for (i = 0; i < 32; i++) {
1820 if (stripe_sz & 1)
1821 break;
1822 stripe_exp++;
1823 stripe_sz >>= 1;
1824 }
1825 if (i == 32) {
1826 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1827 "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
1828 ioc->name, raid_device->handle,
1829 le32_to_cpu(vol_pg0->StripeSize)/2);
1830 goto out_error;
1831 }
1832 raid_device->stripe_exponent = stripe_exp;
1833 raid_device->direct_io_enabled = 1;
1834
1835 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
1836 " with handle(0x%04x)\n", ioc->name, raid_device->handle);
1837 /*
1838 * WARPDRIVE: Though the following fields are not used for direct IO,
1839 * stored for future purpose:
1840 */
1841 raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA);
1842 raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
1843 raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize);
1844
1845
1846 kfree(vol_pg0);
1847 return;
1848
1849out_error:
1850 raid_device->direct_io_enabled = 0;
1851 for (count = 0; count < num_pds; count++)
1852 raid_device->pd_handle[count] = 0;
1853 kfree(vol_pg0);
1854 return;
1855}
1643 1856
1644/** 1857/**
1645 * _scsih_enable_tlr - setting TLR flags 1858 * _scsih_enable_tlr - setting TLR flags
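
The new _scsih_init_warpdrive_properties() only enables direct I/O for a single RAID0 volume with 512-byte blocks and at most 2TB of capacity, and it caches a stripe exponent for the fast path. The standalone sketch below mirrors those checks; it is slightly stricter than the driver (it insists on a power-of-two stripe rather than counting trailing zero bits) and its field names do not follow the MPI page layouts.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ex_volume {
        unsigned type;          /* 0 = RAID0 in this sketch */
        uint32_t block_size;    /* bytes */
        uint32_t stripe_blocks; /* stripe size in blocks */
        uint64_t max_lba;
};

/* Decide whether direct I/O may be enabled; on success return the stripe
 * exponent (log2 of the stripe size in blocks) through *exp. */
static bool ex_direct_io_ok(const struct ex_volume *v, unsigned *exp)
{
        uint64_t capacity = (uint64_t)v->block_size * (v->max_lba + 1);
        uint32_t s = v->stripe_blocks;
        unsigned e = 0;

        if (v->type != 0 || v->block_size != 512)
                return false;
        if (capacity > 2ULL * 1024 * 1024 * 1024 * 1024)   /* over 2TB */
                return false;
        if (s == 0 || (s & (s - 1)))                       /* not a power of two */
                return false;
        while (s > 1) {
                s >>= 1;
                e++;
        }
        *exp = e;
        return true;
}

int main(void)
{
        /* 64K stripe (128 blocks of 512 bytes), 2TB volume. */
        struct ex_volume v = { 0, 512, 128, (1ULL << 32) - 1 };
        unsigned exp;

        if (ex_direct_io_ok(&v, &exp))
                printf("direct I/O ok, stripe exponent %u\n", exp);   /* 7 */
        return 0;
}
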
@@ -1710,6 +1923,11 @@ _scsih_slave_configure(struct scsi_device *sdev)
1710 1923
1711 _scsih_get_volume_capabilities(ioc, raid_device); 1924 _scsih_get_volume_capabilities(ioc, raid_device);
1712 1925
1926 /*
1927 * WARPDRIVE: Initialize the required data for Direct IO
1928 */
1929 _scsih_init_warpdrive_properties(ioc, raid_device);
1930
1713 /* RAID Queue Depth Support 1931 /* RAID Queue Depth Support
1714 * IS volume = underlying qdepth of drive type, either 1932 * IS volume = underlying qdepth of drive type, either
1715 * MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH 1933 * MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH
@@ -1757,14 +1975,16 @@ _scsih_slave_configure(struct scsi_device *sdev)
1757 break; 1975 break;
1758 } 1976 }
1759 1977
1760 sdev_printk(KERN_INFO, sdev, "%s: " 1978 if (!ioc->hide_ir_msg)
1761 "handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", 1979 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
1762 r_level, raid_device->handle, 1980 "wwid(0x%016llx), pd_count(%d), type(%s)\n",
1763 (unsigned long long)raid_device->wwid, 1981 r_level, raid_device->handle,
1764 raid_device->num_pds, ds); 1982 (unsigned long long)raid_device->wwid,
1983 raid_device->num_pds, ds);
1765 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 1984 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
1766 /* raid transport support */ 1985 /* raid transport support */
1767 _scsih_set_level(sdev, raid_device); 1986 if (!ioc->is_warpdrive)
1987 _scsih_set_level(sdev, raid_device);
1768 return 0; 1988 return 0;
1769 } 1989 }
1770 1990
@@ -2133,8 +2353,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2133 switch (type) { 2353 switch (type) {
2134 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 2354 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2135 scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task); 2355 scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
2136 if (scmd_lookup && (scmd_lookup->serial_number == 2356 if (scmd_lookup)
2137 scmd->serial_number))
2138 rc = FAILED; 2357 rc = FAILED;
2139 else 2358 else
2140 rc = SUCCESS; 2359 rc = SUCCESS;
@@ -2182,16 +2401,20 @@ _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2182 struct MPT2SAS_TARGET *priv_target = starget->hostdata; 2401 struct MPT2SAS_TARGET *priv_target = starget->hostdata;
2183 struct _sas_device *sas_device = NULL; 2402 struct _sas_device *sas_device = NULL;
2184 unsigned long flags; 2403 unsigned long flags;
2404 char *device_str = NULL;
2185 2405
2186 if (!priv_target) 2406 if (!priv_target)
2187 return; 2407 return;
2408 if (ioc->hide_ir_msg)
2409 device_str = "WarpDrive";
2410 else
2411 device_str = "volume";
2188 2412
2189 scsi_print_command(scmd); 2413 scsi_print_command(scmd);
2190 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { 2414 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2191 starget_printk(KERN_INFO, starget, "volume handle(0x%04x), " 2415 starget_printk(KERN_INFO, starget, "%s handle(0x%04x), "
2192 "volume wwid(0x%016llx)\n", 2416 "%s wwid(0x%016llx)\n", device_str, priv_target->handle,
2193 priv_target->handle, 2417 device_str, (unsigned long long)priv_target->sas_address);
2194 (unsigned long long)priv_target->sas_address);
2195 } else { 2418 } else {
2196 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2419 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2197 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 2420 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
@@ -3130,6 +3353,9 @@ _scsih_check_ir_config_unhide_events(struct MPT2SAS_ADAPTER *ioc,
3130 a = 0; 3353 a = 0;
3131 b = 0; 3354 b = 0;
3132 3355
3356 if (ioc->is_warpdrive)
3357 return;
3358
3133 /* Volume Resets for Deleted or Removed */ 3359 /* Volume Resets for Deleted or Removed */
3134 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 3360 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
3135 for (i = 0; i < event_data->NumElements; i++, element++) { 3361 for (i = 0; i < event_data->NumElements; i++, element++) {
@@ -3347,6 +3573,105 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
3347} 3573}
3348 3574
3349/** 3575/**
3576 * _scsih_scsi_direct_io_get - returns direct io flag
3577 * @ioc: per adapter object
3578 * @smid: system request message index
3579 *
3580 * Returns the direct_io flag stored against this smid.
3581 */
3582static inline u8
3583_scsih_scsi_direct_io_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
3584{
3585 return ioc->scsi_lookup[smid - 1].direct_io;
3586}
3587
3588/**
3589 * _scsih_scsi_direct_io_set - sets direct io flag
3590 * @ioc: per adapter object
3591 * @smid: system request message index
3592 * @direct_io: Zero or non-zero value to set in the direct_io flag
3593 *
3594 * Returns Nothing.
3595 */
3596static inline void
3597_scsih_scsi_direct_io_set(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
3598{
3599 ioc->scsi_lookup[smid - 1].direct_io = direct_io;
3600}
3601
3602
3603/**
3604 * _scsih_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
3605 * @ioc: per adapter object
3606 * @scmd: pointer to scsi command object
3607 * @raid_device: pointer to raid device data structure
3608 * @mpi_request: pointer to the SCSI_IO reqest message frame
3609 * @smid: system request message index
3610 *
3611 * Returns nothing
3612 */
3613static void
3614_scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3615 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
3616 u16 smid)
3617{
3618 u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
3619 u32 stripe_sz, stripe_exp;
3620 u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2;
3621 u8 cdb0 = scmd->cmnd[0];
3622
3623 /*
3624 * Try Direct I/O to RAID member disks
3625 */
3626 if (cdb0 == READ_16 || cdb0 == READ_10 ||
3627 cdb0 == WRITE_16 || cdb0 == WRITE_10) {
3628 cdb_ptr = mpi_request->CDB.CDB32;
3629
3630 if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
3631 | cdb_ptr[5])) {
3632 io_size = scsi_bufflen(scmd) >> 9;
3633 /* get virtual lba */
3634 lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] :
3635 &cdb_ptr[6];
3636 tmp_ptr = (u8 *)&v_lba + 3;
3637 *tmp_ptr-- = *lba_ptr1++;
3638 *tmp_ptr-- = *lba_ptr1++;
3639 *tmp_ptr-- = *lba_ptr1++;
3640 *tmp_ptr = *lba_ptr1;
3641
3642 if (((u64)v_lba + (u64)io_size - 1) <=
3643 (u32)raid_device->max_lba) {
3644 stripe_sz = raid_device->stripe_sz;
3645 stripe_exp = raid_device->stripe_exponent;
3646 stripe_off = v_lba & (stripe_sz - 1);
3647
3648 /* Check whether IO falls within a stripe */
3649 if ((stripe_off + io_size) <= stripe_sz) {
3650 num_pds = raid_device->num_pds;
3651 p_lba = v_lba >> stripe_exp;
3652 stripe_unit = p_lba / num_pds;
3653 column = p_lba % num_pds;
3654 p_lba = (stripe_unit << stripe_exp) +
3655 stripe_off;
3656 mpi_request->DevHandle =
3657 cpu_to_le16(raid_device->
3658 pd_handle[column]);
3659 tmp_ptr = (u8 *)&p_lba + 3;
3660 *lba_ptr2++ = *tmp_ptr--;
3661 *lba_ptr2++ = *tmp_ptr--;
3662 *lba_ptr2++ = *tmp_ptr--;
3663 *lba_ptr2 = *tmp_ptr;
3664 /*
3665 * WD: To indicate this I/O is direct I/O
3666 */
3667 _scsih_scsi_direct_io_set(ioc, smid, 1);
3668 }
3669 }
3670 }
3671 }
3672}
3673
3674/**
3350 * _scsih_qcmd - main scsi request entry point 3675 * _scsih_qcmd - main scsi request entry point
3351 * @scmd: pointer to scsi command object 3676 * @scmd: pointer to scsi command object
3352 * @done: function pointer to be invoked on completion 3677 * @done: function pointer to be invoked on completion
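
_scsih_setup_direct_io() above rewrites a READ/WRITE that fits inside one stripe so it is sent straight to the member disk: the virtual LBA is split into stripe number and offset, the stripe number is spread round-robin across the member handles, and the physical LBA replaces the CDB's LBA bytes. The sketch below reproduces that address math in isolation; the types and names are illustrative, not the MPI request layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Translate a virtual LBA to (member index, physical LBA) on a RAID0
 * volume with 2^stripe_exp blocks per stripe.  Returns false when the
 * I/O crosses a stripe boundary and must go through the volume. */
static bool ex_map_direct(uint32_t v_lba, uint32_t io_blocks,
                          unsigned stripe_exp, unsigned num_pds,
                          unsigned *member, uint32_t *p_lba)
{
        uint32_t stripe_sz  = 1u << stripe_exp;
        uint32_t stripe_off = v_lba & (stripe_sz - 1);
        uint32_t stripe_no, unit;

        if (stripe_off + io_blocks > stripe_sz)
                return false;                       /* spans two members */

        stripe_no = v_lba >> stripe_exp;
        unit      = stripe_no / num_pds;            /* row on each member */
        *member   = stripe_no % num_pds;            /* which member disk */
        *p_lba    = (unit << stripe_exp) + stripe_off;
        return true;
}

int main(void)
{
        unsigned member;
        uint32_t p_lba;

        /* 128-block (64K) stripes over 4 members: 16 blocks at LBA 1000. */
        if (ex_map_direct(1000, 16, 7, 4, &member, &p_lba))
                printf("member %u physical LBA %u\n", member, (unsigned)p_lba);
        return 0;
}

An I/O that straddles a stripe boundary would need two member handles, which is exactly the case the driver leaves to the normal volume path.
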
@@ -3363,6 +3688,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3363 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3688 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3364 struct MPT2SAS_DEVICE *sas_device_priv_data; 3689 struct MPT2SAS_DEVICE *sas_device_priv_data;
3365 struct MPT2SAS_TARGET *sas_target_priv_data; 3690 struct MPT2SAS_TARGET *sas_target_priv_data;
3691 struct _raid_device *raid_device;
3366 Mpi2SCSIIORequest_t *mpi_request; 3692 Mpi2SCSIIORequest_t *mpi_request;
3367 u32 mpi_control; 3693 u32 mpi_control;
3368 u16 smid; 3694 u16 smid;
@@ -3424,8 +3750,10 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3424 3750
3425 } else 3751 } else
3426 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 3752 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3427 /* Make sure Device is not raid volume */ 3753 /* Make sure Device is not raid volume.
3428 if (!_scsih_is_raid(&scmd->device->sdev_gendev) && 3754 * We do not expose raid functionality to upper layer for warpdrive.
3755 */
3756 if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) &&
3429 sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) 3757 sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
3430 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 3758 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
3431 3759
@@ -3473,9 +3801,14 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3473 } 3801 }
3474 } 3802 }
3475 3803
3804 raid_device = sas_target_priv_data->raid_device;
3805 if (raid_device && raid_device->direct_io_enabled)
3806 _scsih_setup_direct_io(ioc, scmd, raid_device, mpi_request,
3807 smid);
3808
3476 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) 3809 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
3477 mpt2sas_base_put_smid_scsi_io(ioc, smid, 3810 mpt2sas_base_put_smid_scsi_io(ioc, smid,
3478 sas_device_priv_data->sas_target->handle); 3811 le16_to_cpu(mpi_request->DevHandle));
3479 else 3812 else
3480 mpt2sas_base_put_smid_default(ioc, smid); 3813 mpt2sas_base_put_smid_default(ioc, smid);
3481 return 0; 3814 return 0;
@@ -3540,10 +3873,16 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3540 unsigned long flags; 3873 unsigned long flags;
3541 struct scsi_target *starget = scmd->device->sdev_target; 3874 struct scsi_target *starget = scmd->device->sdev_target;
3542 struct MPT2SAS_TARGET *priv_target = starget->hostdata; 3875 struct MPT2SAS_TARGET *priv_target = starget->hostdata;
3876 char *device_str = NULL;
3543 3877
3544 if (!priv_target) 3878 if (!priv_target)
3545 return; 3879 return;
3546 3880
3881 if (ioc->hide_ir_msg)
3882 device_str = "WarpDrive";
3883 else
3884 device_str = "volume";
3885
3547 if (log_info == 0x31170000) 3886 if (log_info == 0x31170000)
3548 return; 3887 return;
3549 3888
@@ -3660,8 +3999,8 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3660 scsi_print_command(scmd); 3999 scsi_print_command(scmd);
3661 4000
3662 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { 4001 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3663 printk(MPT2SAS_WARN_FMT "\tvolume wwid(0x%016llx)\n", ioc->name, 4002 printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
3664 (unsigned long long)priv_target->sas_address); 4003 device_str, (unsigned long long)priv_target->sas_address);
3665 } else { 4004 } else {
3666 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3667 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 4006 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
@@ -3840,6 +4179,20 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3840 scmd->result = DID_NO_CONNECT << 16; 4179 scmd->result = DID_NO_CONNECT << 16;
3841 goto out; 4180 goto out;
3842 } 4181 }
4182 /*
4183 * WARPDRIVE: If direct_io is set then this is a direct I/O;
4184 * a failed direct I/O must be redirected back to the volume
4185 */
4186 if (_scsih_scsi_direct_io_get(ioc, smid)) {
4187 _scsih_scsi_direct_io_set(ioc, smid, 0);
4188 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4189 mpi_request->DevHandle =
4190 cpu_to_le16(sas_device_priv_data->sas_target->handle);
4191 mpt2sas_base_put_smid_scsi_io(ioc, smid,
4192 sas_device_priv_data->sas_target->handle);
4193 return 0;
4194 }
4195
3843 4196
3844 /* turning off TLR */ 4197 /* turning off TLR */
3845 scsi_state = mpi_reply->SCSIState; 4198 scsi_state = mpi_reply->SCSIState;
@@ -3848,7 +4201,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3848 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; 4201 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
3849 if (!sas_device_priv_data->tlr_snoop_check) { 4202 if (!sas_device_priv_data->tlr_snoop_check) {
3850 sas_device_priv_data->tlr_snoop_check++; 4203 sas_device_priv_data->tlr_snoop_check++;
3851 if (!_scsih_is_raid(&scmd->device->sdev_gendev) && 4204 /* Make sure Device is not raid volume.
4205 * We do not expose raid functionality to upper layer for warpdrive.
4206 */
4207 if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) &&
3852 sas_is_tlr_enabled(scmd->device) && 4208 sas_is_tlr_enabled(scmd->device) &&
3853 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { 4209 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
3854 sas_disable_tlr(scmd->device); 4210 sas_disable_tlr(scmd->device);
@@ -4681,8 +5037,10 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
4681 5037
4682 _scsih_ublock_io_device(ioc, sas_device_backup.handle); 5038 _scsih_ublock_io_device(ioc, sas_device_backup.handle);
4683 5039
4684 mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address, 5040 if (!ioc->hide_drives)
4685 sas_device_backup.sas_address_parent); 5041 mpt2sas_transport_port_remove(ioc,
5042 sas_device_backup.sas_address,
5043 sas_device_backup.sas_address_parent);
4686 5044
4687 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" 5045 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
4688 "(0x%016llx)\n", ioc->name, sas_device_backup.handle, 5046 "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
@@ -5413,6 +5771,7 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
5413 &sas_device->volume_wwid); 5771 &sas_device->volume_wwid);
5414 set_bit(handle, ioc->pd_handles); 5772 set_bit(handle, ioc->pd_handles);
5415 _scsih_reprobe_target(sas_device->starget, 1); 5773 _scsih_reprobe_target(sas_device->starget, 1);
5774
5416} 5775}
5417 5776
5418/** 5777/**
@@ -5591,7 +5950,8 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
5591 Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; 5950 Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
5592 5951
5593#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5952#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5594 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 5953 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
5954 && !ioc->hide_ir_msg)
5595 _scsih_sas_ir_config_change_event_debug(ioc, event_data); 5955 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
5596 5956
5597#endif 5957#endif
@@ -5614,16 +5974,20 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
5614 le16_to_cpu(element->VolDevHandle)); 5974 le16_to_cpu(element->VolDevHandle));
5615 break; 5975 break;
5616 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 5976 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
5617 _scsih_sas_pd_hide(ioc, element); 5977 if (!ioc->is_warpdrive)
5978 _scsih_sas_pd_hide(ioc, element);
5618 break; 5979 break;
5619 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 5980 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
5620 _scsih_sas_pd_expose(ioc, element); 5981 if (!ioc->is_warpdrive)
5982 _scsih_sas_pd_expose(ioc, element);
5621 break; 5983 break;
5622 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 5984 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
5623 _scsih_sas_pd_add(ioc, element); 5985 if (!ioc->is_warpdrive)
5986 _scsih_sas_pd_add(ioc, element);
5624 break; 5987 break;
5625 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 5988 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
5626 _scsih_sas_pd_delete(ioc, element); 5989 if (!ioc->is_warpdrive)
5990 _scsih_sas_pd_delete(ioc, element);
5627 break; 5991 break;
5628 } 5992 }
5629 } 5993 }
@@ -5654,9 +6018,10 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
5654 6018
5655 handle = le16_to_cpu(event_data->VolDevHandle); 6019 handle = le16_to_cpu(event_data->VolDevHandle);
5656 state = le32_to_cpu(event_data->NewValue); 6020 state = le32_to_cpu(event_data->NewValue);
5657 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " 6021 if (!ioc->hide_ir_msg)
5658 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, 6022 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
5659 le32_to_cpu(event_data->PreviousValue), state)); 6023 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
6024 le32_to_cpu(event_data->PreviousValue), state));
5660 6025
5661 switch (state) { 6026 switch (state) {
5662 case MPI2_RAID_VOL_STATE_MISSING: 6027 case MPI2_RAID_VOL_STATE_MISSING:
@@ -5736,9 +6101,10 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
5736 handle = le16_to_cpu(event_data->PhysDiskDevHandle); 6101 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
5737 state = le32_to_cpu(event_data->NewValue); 6102 state = le32_to_cpu(event_data->NewValue);
5738 6103
5739 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " 6104 if (!ioc->hide_ir_msg)
5740 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, 6105 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
5741 le32_to_cpu(event_data->PreviousValue), state)); 6106 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
6107 le32_to_cpu(event_data->PreviousValue), state));
5742 6108
5743 switch (state) { 6109 switch (state) {
5744 case MPI2_RAID_PD_STATE_ONLINE: 6110 case MPI2_RAID_PD_STATE_ONLINE:
@@ -5747,7 +6113,8 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
5747 case MPI2_RAID_PD_STATE_OPTIMAL: 6113 case MPI2_RAID_PD_STATE_OPTIMAL:
5748 case MPI2_RAID_PD_STATE_HOT_SPARE: 6114 case MPI2_RAID_PD_STATE_HOT_SPARE:
5749 6115
5750 set_bit(handle, ioc->pd_handles); 6116 if (!ioc->is_warpdrive)
6117 set_bit(handle, ioc->pd_handles);
5751 6118
5752 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6119 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5753 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6120 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
@@ -5851,7 +6218,8 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
5851 u16 handle; 6218 u16 handle;
5852 6219
5853#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 6220#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5854 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 6221 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6222 && !ioc->hide_ir_msg)
5855 _scsih_sas_ir_operation_status_event_debug(ioc, 6223 _scsih_sas_ir_operation_status_event_debug(ioc,
5856 event_data); 6224 event_data);
5857#endif 6225#endif
@@ -5910,7 +6278,7 @@ static void
5910_scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, 6278_scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5911 u16 slot, u16 handle) 6279 u16 slot, u16 handle)
5912{ 6280{
5913 struct MPT2SAS_TARGET *sas_target_priv_data; 6281 struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
5914 struct scsi_target *starget; 6282 struct scsi_target *starget;
5915 struct _sas_device *sas_device; 6283 struct _sas_device *sas_device;
5916 unsigned long flags; 6284 unsigned long flags;
@@ -5918,7 +6286,7 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5918 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6286 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5919 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 6287 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
5920 if (sas_device->sas_address == sas_address && 6288 if (sas_device->sas_address == sas_address &&
5921 sas_device->slot == slot && sas_device->starget) { 6289 sas_device->slot == slot) {
5922 sas_device->responding = 1; 6290 sas_device->responding = 1;
5923 starget = sas_device->starget; 6291 starget = sas_device->starget;
5924 if (starget && starget->hostdata) { 6292 if (starget && starget->hostdata) {
@@ -5927,13 +6295,15 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5927 sas_target_priv_data->deleted = 0; 6295 sas_target_priv_data->deleted = 0;
5928 } else 6296 } else
5929 sas_target_priv_data = NULL; 6297 sas_target_priv_data = NULL;
5930 starget_printk(KERN_INFO, sas_device->starget, 6298 if (starget)
5931 "handle(0x%04x), sas_addr(0x%016llx), enclosure " 6299 starget_printk(KERN_INFO, starget,
5932 "logical id(0x%016llx), slot(%d)\n", handle, 6300 "handle(0x%04x), sas_addr(0x%016llx), "
5933 (unsigned long long)sas_device->sas_address, 6301 "enclosure logical id(0x%016llx), "
5934 (unsigned long long) 6302 "slot(%d)\n", handle,
5935 sas_device->enclosure_logical_id, 6303 (unsigned long long)sas_device->sas_address,
5936 sas_device->slot); 6304 (unsigned long long)
6305 sas_device->enclosure_logical_id,
6306 sas_device->slot);
5937 if (sas_device->handle == handle) 6307 if (sas_device->handle == handle)
5938 goto out; 6308 goto out;
5939 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 6309 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
@@ -6025,6 +6395,12 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
6025 starget_printk(KERN_INFO, raid_device->starget, 6395 starget_printk(KERN_INFO, raid_device->starget,
6026 "handle(0x%04x), wwid(0x%016llx)\n", handle, 6396 "handle(0x%04x), wwid(0x%016llx)\n", handle,
6027 (unsigned long long)raid_device->wwid); 6397 (unsigned long long)raid_device->wwid);
6398 /*
6399 * WARPDRIVE: The handles of the PDs might have changed
6400 * across the host reset so re-initialize the
6401 * required data for Direct IO
6402 */
6403 _scsih_init_warpdrive_properties(ioc, raid_device);
6028 if (raid_device->handle == handle) 6404 if (raid_device->handle == handle)
6029 goto out; 6405 goto out;
6030 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 6406 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
@@ -6086,18 +6462,20 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
6086 } 6462 }
6087 6463
6088 /* refresh the pd_handles */ 6464 /* refresh the pd_handles */
6089 phys_disk_num = 0xFF; 6465 if (!ioc->is_warpdrive) {
6090 memset(ioc->pd_handles, 0, ioc->pd_handles_sz); 6466 phys_disk_num = 0xFF;
6091 while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 6467 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
6092 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 6468 while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
6093 phys_disk_num))) { 6469 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
6094 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6470 phys_disk_num))) {
6095 MPI2_IOCSTATUS_MASK; 6471 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6096 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 6472 MPI2_IOCSTATUS_MASK;
6097 break; 6473 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
6098 phys_disk_num = pd_pg0.PhysDiskNum; 6474 break;
6099 handle = le16_to_cpu(pd_pg0.DevHandle); 6475 phys_disk_num = pd_pg0.PhysDiskNum;
6100 set_bit(handle, ioc->pd_handles); 6476 handle = le16_to_cpu(pd_pg0.DevHandle);
6477 set_bit(handle, ioc->pd_handles);
6478 }
6101 } 6479 }
6102} 6480}
6103 6481
@@ -6243,6 +6621,50 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
6243} 6621}
6244 6622
6245/** 6623/**
6624 * _scsih_hide_unhide_sas_devices - add/remove device to/from OS
6625 * @ioc: per adapter object
6626 *
6627 * Return nothing.
6628 */
6629static void
6630_scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc)
6631{
6632 struct _sas_device *sas_device, *sas_device_next;
6633
6634 if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag !=
6635 MFG_PAGE10_HIDE_IF_VOL_PRESENT)
6636 return;
6637
6638 if (ioc->hide_drives) {
6639 if (_scsih_get_num_volumes(ioc))
6640 return;
6641 ioc->hide_drives = 0;
6642 list_for_each_entry_safe(sas_device, sas_device_next,
6643 &ioc->sas_device_list, list) {
6644 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
6645 sas_device->sas_address_parent)) {
6646 _scsih_sas_device_remove(ioc, sas_device);
6647 } else if (!sas_device->starget) {
6648 mpt2sas_transport_port_remove(ioc,
6649 sas_device->sas_address,
6650 sas_device->sas_address_parent);
6651 _scsih_sas_device_remove(ioc, sas_device);
6652 }
6653 }
6654 } else {
6655 if (!_scsih_get_num_volumes(ioc))
6656 return;
6657 ioc->hide_drives = 1;
6658 list_for_each_entry_safe(sas_device, sas_device_next,
6659 &ioc->sas_device_list, list) {
6660 mpt2sas_transport_port_remove(ioc,
6661 sas_device->sas_address,
6662 sas_device->sas_address_parent);
6663 }
6664 }
6665}
6666
6667/**
6246 * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) 6668 * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
6247 * @ioc: per adapter object 6669 * @ioc: per adapter object
6248 * @reset_phase: phase 6670 * @reset_phase: phase
@@ -6326,6 +6748,7 @@ _firmware_event_work(struct work_struct *work)
6326 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, 6748 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
6327 flags); 6749 flags);
6328 _scsih_remove_unresponding_sas_devices(ioc); 6750 _scsih_remove_unresponding_sas_devices(ioc);
6751 _scsih_hide_unhide_sas_devices(ioc);
6329 return; 6752 return;
6330 } 6753 }
6331 6754
@@ -6425,6 +6848,53 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
6425 (Mpi2EventDataIrVolume_t *) 6848 (Mpi2EventDataIrVolume_t *)
6426 mpi_reply->EventData); 6849 mpi_reply->EventData);
6427 break; 6850 break;
6851 case MPI2_EVENT_LOG_ENTRY_ADDED:
6852 {
6853 Mpi2EventDataLogEntryAdded_t *log_entry;
6854 u32 *log_code;
6855
6856 if (!ioc->is_warpdrive)
6857 break;
6858
6859 log_entry = (Mpi2EventDataLogEntryAdded_t *)
6860 mpi_reply->EventData;
6861 log_code = (u32 *)log_entry->LogData;
6862
6863 if (le16_to_cpu(log_entry->LogEntryQualifier)
6864 != MPT2_WARPDRIVE_LOGENTRY)
6865 break;
6866
6867 switch (le32_to_cpu(*log_code)) {
6868 case MPT2_WARPDRIVE_LC_SSDT:
6869 printk(MPT2SAS_WARN_FMT "WarpDrive Warning: "
6870 "IO Throttling has occurred in the WarpDrive "
6871 "subsystem. Check WarpDrive documentation for "
6872 "additional details.\n", ioc->name);
6873 break;
6874 case MPT2_WARPDRIVE_LC_SSDLW:
6875 printk(MPT2SAS_WARN_FMT "WarpDrive Warning: "
6876 "Program/Erase Cycles for the WarpDrive subsystem "
6877 "in degraded range. Check WarpDrive documentation "
6878 "for additional details.\n", ioc->name);
6879 break;
6880 case MPT2_WARPDRIVE_LC_SSDLF:
6881 printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: "
6882 "There are no Program/Erase Cycles for the "
6883 "WarpDrive subsystem. The storage device will be "
6884 "in read-only mode. Check WarpDrive documentation "
6885 "for additional details.\n", ioc->name);
6886 break;
6887 case MPT2_WARPDRIVE_LC_BRMF:
6888 printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: "
6889 "The Backup Rail Monitor has failed on the "
6890 "WarpDrive subsystem. Check WarpDrive "
6891 "documentation for additional details.\n",
6892 ioc->name);
6893 break;
6894 }
6895
6896 break;
6897 }
6428 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 6898 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
6429 case MPI2_EVENT_IR_OPERATION_STATUS: 6899 case MPI2_EVENT_IR_OPERATION_STATUS:
6430 case MPI2_EVENT_SAS_DISCOVERY: 6900 case MPI2_EVENT_SAS_DISCOVERY:
@@ -6583,7 +7053,8 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
6583 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; 7053 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
6584 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; 7054 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
6585 7055
6586 printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); 7056 if (!ioc->hide_ir_msg)
7057 printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name);
6587 init_completion(&ioc->scsih_cmds.done); 7058 init_completion(&ioc->scsih_cmds.done);
6588 mpt2sas_base_put_smid_default(ioc, smid); 7059 mpt2sas_base_put_smid_default(ioc, smid);
6589 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); 7060 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
@@ -6597,10 +7068,11 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
6597 if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) { 7068 if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) {
6598 mpi_reply = ioc->scsih_cmds.reply; 7069 mpi_reply = ioc->scsih_cmds.reply;
6599 7070
6600 printk(MPT2SAS_INFO_FMT "IR shutdown (complete): " 7071 if (!ioc->hide_ir_msg)
6601 "ioc_status(0x%04x), loginfo(0x%08x)\n", 7072 printk(MPT2SAS_INFO_FMT "IR shutdown (complete): "
6602 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 7073 "ioc_status(0x%04x), loginfo(0x%08x)\n",
6603 le32_to_cpu(mpi_reply->IOCLogInfo)); 7074 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
7075 le32_to_cpu(mpi_reply->IOCLogInfo));
6604 } 7076 }
6605 7077
6606 out: 7078 out:
@@ -6759,6 +7231,9 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
6759 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7231 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6760 list_move_tail(&sas_device->list, &ioc->sas_device_list); 7232 list_move_tail(&sas_device->list, &ioc->sas_device_list);
6761 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7233 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7234
7235 if (ioc->hide_drives)
7236 return;
6762 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 7237 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
6763 sas_device->sas_address_parent)) { 7238 sas_device->sas_address_parent)) {
6764 _scsih_sas_device_remove(ioc, sas_device); 7239 _scsih_sas_device_remove(ioc, sas_device);
@@ -6812,6 +7287,9 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
6812 list_move_tail(&sas_device->list, &ioc->sas_device_list); 7287 list_move_tail(&sas_device->list, &ioc->sas_device_list);
6813 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7288 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6814 7289
7290 if (ioc->hide_drives)
7291 continue;
7292
6815 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 7293 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
6816 sas_device->sas_address_parent)) { 7294 sas_device->sas_address_parent)) {
6817 _scsih_sas_device_remove(ioc, sas_device); 7295 _scsih_sas_device_remove(ioc, sas_device);
@@ -6882,6 +7360,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6882 ioc->id = mpt_ids++; 7360 ioc->id = mpt_ids++;
6883 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id); 7361 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
6884 ioc->pdev = pdev; 7362 ioc->pdev = pdev;
7363 if (id->device == MPI2_MFGPAGE_DEVID_SSS6200) {
7364 ioc->is_warpdrive = 1;
7365 ioc->hide_ir_msg = 1;
7366 } else
7367 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
6885 ioc->scsi_io_cb_idx = scsi_io_cb_idx; 7368 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
6886 ioc->tm_cb_idx = tm_cb_idx; 7369 ioc->tm_cb_idx = tm_cb_idx;
6887 ioc->ctl_cb_idx = ctl_cb_idx; 7370 ioc->ctl_cb_idx = ctl_cb_idx;
@@ -6947,6 +7430,20 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6947 } 7430 }
6948 7431
6949 ioc->wait_for_port_enable_to_complete = 0; 7432 ioc->wait_for_port_enable_to_complete = 0;
7433 if (ioc->is_warpdrive) {
7434 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
7435 ioc->hide_drives = 0;
7436 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
7437 ioc->hide_drives = 1;
7438 else {
7439 if (_scsih_get_num_volumes(ioc))
7440 ioc->hide_drives = 1;
7441 else
7442 ioc->hide_drives = 0;
7443 }
7444 } else
7445 ioc->hide_drives = 0;
7446
6950 _scsih_probe_devices(ioc); 7447 _scsih_probe_devices(ioc);
6951 return 0; 7448 return 0;
6952 7449
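
Taken together, the WarpDrive probe-time additions above reduce to a small decision over the manufacturing page 10 flag and the current volume count. The helper below is a minimal restatement, assuming the flag names used in the hunk; their numeric values and everything else here are illustrative, with the real definitions living in the mpt2sas headers.

#include <stdbool.h>

/* Flag names from the hunk above; values here are illustrative only. */
enum {
	MFG_PAGE10_HIDE_ALL_DISKS,
	MFG_PAGE10_EXPOSE_ALL_DISKS,
	MFG_PAGE10_HIDE_IF_VOL_PRESENT,
};

/* true when member disks should be kept away from the SCSI midlayer */
static bool warpdrive_hide_drives(bool is_warpdrive, int mfg_pg10_hide_flag,
				  int num_volumes)
{
	if (!is_warpdrive)
		return false;		/* plain HBA: expose everything */
	if (mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
		return false;
	if (mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
		return true;
	/* HIDE_IF_VOL_PRESENT: hide only while at least one volume exists,
	 * which is also what _scsih_hide_unhide_sas_devices() re-evaluates
	 * after a host reset. */
	return num_volumes > 0;
}
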
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index 6de7af27e507..c82b012aba37 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
6# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6# 7#
7# This file is licensed under GPLv2. 8# This file is licensed under GPLv2.
8# 9#
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
index ffbf759e46f1..87b231a5bd5e 100644
--- a/drivers/scsi/mvsas/Makefile
+++ b/drivers/scsi/mvsas/Makefile
@@ -3,6 +3,7 @@
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
6# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6# 7#
7# This file is licensed under GPLv2. 8# This file is licensed under GPLv2.
8# 9#
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index afc7f6f3a13e..13c960481391 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
index 42e947d9795e..545889bd9753 100644
--- a/drivers/scsi/mvsas/mv_64xx.h
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index eed4c5c72013..78162c3c36e6 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 23ed9b164669..8835befe2c0e 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index a67e1c4172f9..1753a6fc42d0 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index 1849da1f030d..bc00c940743c 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -34,6 +35,8 @@ enum chip_flavors {
34 chip_6485, 35 chip_6485,
35 chip_9480, 36 chip_9480,
36 chip_9180, 37 chip_9180,
38 chip_9445,
39 chip_9485,
37 chip_1300, 40 chip_1300,
38 chip_1320 41 chip_1320
39}; 42};
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 938d045e4180..90b636611cde 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -25,13 +26,24 @@
25 26
26#include "mv_sas.h" 27#include "mv_sas.h"
27 28
29static int lldd_max_execute_num = 1;
30module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
31MODULE_PARM_DESC(collector, "\n"
32 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
33 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n");
36
28static struct scsi_transport_template *mvs_stt; 37static struct scsi_transport_template *mvs_stt;
38struct kmem_cache *mvs_task_list_cache;
29static const struct mvs_chip_info mvs_chips[] = { 39static const struct mvs_chip_info mvs_chips[] = {
30 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 40 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
31 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 41 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, 42 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 43 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 44 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
45 [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
46 [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
35 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 47 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
36 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 48 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
37}; 49};
@@ -107,7 +119,6 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
107 119
108static void mvs_free(struct mvs_info *mvi) 120static void mvs_free(struct mvs_info *mvi)
109{ 121{
110 int i;
111 struct mvs_wq *mwq; 122 struct mvs_wq *mwq;
112 int slot_nr; 123 int slot_nr;
113 124
@@ -119,12 +130,8 @@ static void mvs_free(struct mvs_info *mvi)
119 else 130 else
120 slot_nr = MVS_SLOTS; 131 slot_nr = MVS_SLOTS;
121 132
122 for (i = 0; i < mvi->tags_num; i++) { 133 if (mvi->dma_pool)
123 struct mvs_slot_info *slot = &mvi->slot_info[i]; 134 pci_pool_destroy(mvi->dma_pool);
124 if (slot->buf)
125 dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
126 slot->buf, slot->buf_dma);
127 }
128 135
129 if (mvi->tx) 136 if (mvi->tx)
130 dma_free_coherent(mvi->dev, 137 dma_free_coherent(mvi->dev,
@@ -213,6 +220,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
213static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) 220static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
214{ 221{
215 int i = 0, slot_nr; 222 int i = 0, slot_nr;
223 char pool_name[32];
216 224
217 if (mvi->flags & MVF_FLAG_SOC) 225 if (mvi->flags & MVF_FLAG_SOC)
218 slot_nr = MVS_SOC_SLOTS; 226 slot_nr = MVS_SOC_SLOTS;
@@ -272,18 +280,14 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
272 if (!mvi->bulk_buffer) 280 if (!mvi->bulk_buffer)
273 goto err_out; 281 goto err_out;
274#endif 282#endif
275 for (i = 0; i < slot_nr; i++) { 283 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
276 struct mvs_slot_info *slot = &mvi->slot_info[i]; 284 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
277 285 if (!mvi->dma_pool) {
278 slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ, 286 printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
279 &slot->buf_dma, GFP_KERNEL);
280 if (!slot->buf) {
281 printk(KERN_DEBUG"failed to allocate slot->buf.\n");
282 goto err_out; 287 goto err_out;
283 }
284 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
285 ++mvi->tags_num;
286 } 288 }
289 mvi->tags_num = slot_nr;
290
287 /* Initialize tags */ 291 /* Initialize tags */
288 mvs_tag_init(mvi); 292 mvs_tag_init(mvi);
289 return 0; 293 return 0;
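
The hunk above trades per-slot dma_alloc_coherent() buffers for one pci_pool per controller, so a slot buffer only exists while its command is in flight. Below is a minimal sketch of that lifecycle using the same pci_pool_* calls (thin wrappers over the dma_pool API); the names and buffer size are stand-ins and error handling is trimmed.

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#define EXAMPLE_SLOT_BUF_SZ	8192	/* stand-in for MVS_SLOT_BUF_SZ */

/* probe time: one pool per controller replaces per-slot coherent buffers */
static struct pci_pool *example_pool_create(struct pci_dev *pdev, int id)
{
	char name[32];

	snprintf(name, sizeof(name), "mvs_dma_pool%d", id);
	return pci_pool_create(name, pdev, EXAMPLE_SLOT_BUF_SZ, 16, 0);
}

/* per command: the slot buffer is allocated from atomic context and only
 * lives while the command is in flight */
static void *example_slot_buf_get(struct pci_pool *pool, dma_addr_t *dma)
{
	void *buf = pci_pool_alloc(pool, GFP_ATOMIC, dma);

	if (buf)
		memset(buf, 0, EXAMPLE_SLOT_BUF_SZ);
	return buf;
}

/* completion or error unwind */
static void example_slot_buf_put(struct pci_pool *pool, void *buf,
				 dma_addr_t dma)
{
	pci_pool_free(pool, buf, dma);
}

/* teardown, mirroring the pci_pool_destroy() added to mvs_free() */
static void example_pool_destroy(struct pci_pool *pool)
{
	if (pool)
		pci_pool_destroy(pool);
}
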
@@ -484,7 +488,7 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
484 488
485 sha->num_phys = nr_core * chip_info->n_phy; 489 sha->num_phys = nr_core * chip_info->n_phy;
486 490
487 sha->lldd_max_execute_num = 1; 491 sha->lldd_max_execute_num = lldd_max_execute_num;
488 492
489 if (mvi->flags & MVF_FLAG_SOC) 493 if (mvi->flags & MVF_FLAG_SOC)
490 can_queue = MVS_SOC_CAN_QUEUE; 494 can_queue = MVS_SOC_CAN_QUEUE;
@@ -670,6 +674,24 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
670 { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, 674 { PCI_VDEVICE(TTI, 0x2740), chip_9480 },
671 { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, 675 { PCI_VDEVICE(TTI, 0x2744), chip_9480 },
672 { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, 676 { PCI_VDEVICE(TTI, 0x2760), chip_9480 },
677 {
678 .vendor = 0x1b4b,
679 .device = 0x9445,
680 .subvendor = PCI_ANY_ID,
681 .subdevice = 0x9480,
682 .class = 0,
683 .class_mask = 0,
684 .driver_data = chip_9445,
685 },
686 {
687 .vendor = 0x1b4b,
688 .device = 0x9485,
689 .subvendor = PCI_ANY_ID,
690 .subdevice = 0x9480,
691 .class = 0,
692 .class_mask = 0,
693 .driver_data = chip_9485,
694 },
673 695
674 { } /* terminate list */ 696 { } /* terminate list */
675}; 697};
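
The new 9445/9485 entries are written out with designated initializers rather than PCI_VDEVICE() because they must match the 0x9480 subsystem device, and PCI_VDEVICE() always leaves subvendor/subdevice as PCI_ANY_ID. A compressed sketch of the difference (the chip_* values are stand-ins for the enum in mv_defs.h):

#include <linux/pci.h>

/* Stand-ins for the chip_flavors enum in mv_defs.h. */
enum { chip_9480, chip_9485 };

static const struct pci_device_id example_ids[] = {
	/* PCI_VDEVICE() fills subvendor/subdevice with PCI_ANY_ID ... */
	{ PCI_VDEVICE(TTI, 0x2740), chip_9480 },
	/* ... so a subsystem-specific match needs the long form. */
	{
		.vendor		= 0x1b4b,	/* Marvell */
		.device		= 0x9485,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= 0x9480,	/* only this subsystem board */
		.driver_data	= chip_9485,
	},
	{ }	/* terminate list */
};
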
@@ -690,6 +712,14 @@ static int __init mvs_init(void)
690 if (!mvs_stt) 712 if (!mvs_stt)
691 return -ENOMEM; 713 return -ENOMEM;
692 714
715 mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list),
716 0, SLAB_HWCACHE_ALIGN, NULL);
717 if (!mvs_task_list_cache) {
718 rc = -ENOMEM;
719 mv_printk("%s: mvs_task_list_cache alloc failed!\n", __func__);
720 goto err_out;
721 }
722
693 rc = pci_register_driver(&mvs_pci_driver); 723 rc = pci_register_driver(&mvs_pci_driver);
694 724
695 if (rc) 725 if (rc)
@@ -706,6 +736,7 @@ static void __exit mvs_exit(void)
706{ 736{
707 pci_unregister_driver(&mvs_pci_driver); 737 pci_unregister_driver(&mvs_pci_driver);
708 sas_release_transport(mvs_stt); 738 sas_release_transport(mvs_stt);
739 kmem_cache_destroy(mvs_task_list_cache);
709} 740}
710 741
711module_init(mvs_init); 742module_init(mvs_init);
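
mvs_task_list nodes now come from a dedicated slab cache created in mvs_init() and, as the hunk above adds, destroyed in mvs_exit(). The sketch below shows that create/zalloc/free/destroy symmetry in isolation; the struct and function names are illustrative stand-ins rather than the driver's own.

#include <linux/slab.h>
#include <linux/list.h>

/* Stand-in mirroring the mvs_task_list added to mv_sas.h below. */
struct example_task_node {
	void *task;
	struct list_head list;
};

static struct kmem_cache *example_cache;

/* module init (mvs_init): one cache shared by every controller */
static int example_cache_init(void)
{
	example_cache = kmem_cache_create("example_task_list",
			sizeof(struct example_task_node), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	return example_cache ? 0 : -ENOMEM;
}

/* per batched task (mvs_task_alloc_list): zeroed node off the cache */
static struct example_task_node *example_node_get(gfp_t gfp)
{
	return kmem_cache_zalloc(example_cache, gfp);
}

/* list teardown (mvs_task_free_list) */
static void example_node_put(struct example_task_node *node)
{
	kmem_cache_free(example_cache, node);
}

/* module exit (mvs_exit): the cache itself must be destroyed as well */
static void example_cache_exit(void)
{
	kmem_cache_destroy(example_cache);
}
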
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index adedaa916ecb..0ef27425c447 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -862,178 +863,286 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
862} 863}
863 864
864#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) 865#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
865static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, 866static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
866 struct completion *completion,int is_tmf, 867 struct mvs_tmf_task *tmf, int *pass)
867 struct mvs_tmf_task *tmf)
868{ 868{
869 struct domain_device *dev = task->dev; 869 struct domain_device *dev = task->dev;
870 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; 870 struct mvs_device *mvi_dev = dev->lldd_dev;
871 struct mvs_info *mvi = mvi_dev->mvi_info;
872 struct mvs_task_exec_info tei; 871 struct mvs_task_exec_info tei;
873 struct sas_task *t = task;
874 struct mvs_slot_info *slot; 872 struct mvs_slot_info *slot;
875 u32 tag = 0xdeadbeef, rc, n_elem = 0; 873 u32 tag = 0xdeadbeef, n_elem = 0;
876 u32 n = num, pass = 0; 874 int rc = 0;
877 unsigned long flags = 0, flags_libsas = 0;
878 875
879 if (!dev->port) { 876 if (!dev->port) {
880 struct task_status_struct *tsm = &t->task_status; 877 struct task_status_struct *tsm = &task->task_status;
881 878
882 tsm->resp = SAS_TASK_UNDELIVERED; 879 tsm->resp = SAS_TASK_UNDELIVERED;
883 tsm->stat = SAS_PHY_DOWN; 880 tsm->stat = SAS_PHY_DOWN;
881 /*
882 * libsas will still use dev->port for SATA, so do
883 * not call task_done for SATA devices here
884 */
884 if (dev->dev_type != SATA_DEV) 885 if (dev->dev_type != SATA_DEV)
885 t->task_done(t); 886 task->task_done(task);
886 return 0; 887 return rc;
887 } 888 }
888 889
889 spin_lock_irqsave(&mvi->lock, flags); 890 if (DEV_IS_GONE(mvi_dev)) {
890 do { 891 if (mvi_dev)
891 dev = t->dev; 892 mv_dprintk("device %d not ready.\n",
892 mvi_dev = dev->lldd_dev; 893 mvi_dev->device_id);
893 if (DEV_IS_GONE(mvi_dev)) { 894 else
894 if (mvi_dev) 895 mv_dprintk("device %016llx not ready.\n",
895 mv_dprintk("device %d not ready.\n", 896 SAS_ADDR(dev->sas_addr));
896 mvi_dev->device_id);
897 else
898 mv_dprintk("device %016llx not ready.\n",
899 SAS_ADDR(dev->sas_addr));
900 897
901 rc = SAS_PHY_DOWN; 898 rc = SAS_PHY_DOWN;
902 goto out_done; 899 return rc;
903 } 900 }
901 tei.port = dev->port->lldd_port;
902 if (tei.port && !tei.port->port_attached && !tmf) {
903 if (sas_protocol_ata(task->task_proto)) {
904 struct task_status_struct *ts = &task->task_status;
905 mv_dprintk("SATA/STP port %d does not attach"
906 "device.\n", dev->port->id);
907 ts->resp = SAS_TASK_COMPLETE;
908 ts->stat = SAS_PHY_DOWN;
904 909
905 if (dev->port->id >= mvi->chip->n_phy) 910 task->task_done(task);
906 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
907 else
908 tei.port = &mvi->port[dev->port->id];
909
910 if (tei.port && !tei.port->port_attached) {
911 if (sas_protocol_ata(t->task_proto)) {
912 struct task_status_struct *ts = &t->task_status;
913
914 mv_dprintk("port %d does not"
915 "attached device.\n", dev->port->id);
916 ts->stat = SAS_PROTO_RESPONSE;
917 ts->stat = SAS_PHY_DOWN;
918 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
919 flags_libsas);
920 spin_unlock_irqrestore(&mvi->lock, flags);
921 t->task_done(t);
922 spin_lock_irqsave(&mvi->lock, flags);
923 spin_lock_irqsave(dev->sata_dev.ap->lock,
924 flags_libsas);
925 if (n > 1)
926 t = list_entry(t->list.next,
927 struct sas_task, list);
928 continue;
929 } else {
930 struct task_status_struct *ts = &t->task_status;
931 ts->resp = SAS_TASK_UNDELIVERED;
932 ts->stat = SAS_PHY_DOWN;
933 t->task_done(t);
934 if (n > 1)
935 t = list_entry(t->list.next,
936 struct sas_task, list);
937 continue;
938 }
939 }
940 911
941 if (!sas_protocol_ata(t->task_proto)) {
942 if (t->num_scatter) {
943 n_elem = dma_map_sg(mvi->dev,
944 t->scatter,
945 t->num_scatter,
946 t->data_dir);
947 if (!n_elem) {
948 rc = -ENOMEM;
949 goto err_out;
950 }
951 }
952 } else { 912 } else {
953 n_elem = t->num_scatter; 913 struct task_status_struct *ts = &task->task_status;
914 mv_dprintk("SAS port %d does not attach"
915 "device.\n", dev->port->id);
916 ts->resp = SAS_TASK_UNDELIVERED;
917 ts->stat = SAS_PHY_DOWN;
918 task->task_done(task);
954 } 919 }
920 return rc;
921 }
955 922
956 rc = mvs_tag_alloc(mvi, &tag); 923 if (!sas_protocol_ata(task->task_proto)) {
957 if (rc) 924 if (task->num_scatter) {
958 goto err_out; 925 n_elem = dma_map_sg(mvi->dev,
926 task->scatter,
927 task->num_scatter,
928 task->data_dir);
929 if (!n_elem) {
930 rc = -ENOMEM;
931 goto prep_out;
932 }
933 }
934 } else {
935 n_elem = task->num_scatter;
936 }
959 937
960 slot = &mvi->slot_info[tag]; 938 rc = mvs_tag_alloc(mvi, &tag);
939 if (rc)
940 goto err_out;
961 941
942 slot = &mvi->slot_info[tag];
962 943
963 t->lldd_task = NULL; 944 task->lldd_task = NULL;
964 slot->n_elem = n_elem; 945 slot->n_elem = n_elem;
965 slot->slot_tag = tag; 946 slot->slot_tag = tag;
966 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 947
948 slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
949 if (!slot->buf)
950 goto err_out_tag;
951 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
952
953 tei.task = task;
954 tei.hdr = &mvi->slot[tag];
955 tei.tag = tag;
956 tei.n_elem = n_elem;
957 switch (task->task_proto) {
958 case SAS_PROTOCOL_SMP:
959 rc = mvs_task_prep_smp(mvi, &tei);
960 break;
961 case SAS_PROTOCOL_SSP:
962 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
963 break;
964 case SAS_PROTOCOL_SATA:
965 case SAS_PROTOCOL_STP:
966 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
967 rc = mvs_task_prep_ata(mvi, &tei);
968 break;
969 default:
970 dev_printk(KERN_ERR, mvi->dev,
971 "unknown sas_task proto: 0x%x\n",
972 task->task_proto);
973 rc = -EINVAL;
974 break;
975 }
967 976
968 tei.task = t; 977 if (rc) {
969 tei.hdr = &mvi->slot[tag]; 978 mv_dprintk("rc is %x\n", rc);
970 tei.tag = tag; 979 goto err_out_slot_buf;
971 tei.n_elem = n_elem; 980 }
972 switch (t->task_proto) { 981 slot->task = task;
973 case SAS_PROTOCOL_SMP: 982 slot->port = tei.port;
974 rc = mvs_task_prep_smp(mvi, &tei); 983 task->lldd_task = slot;
975 break; 984 list_add_tail(&slot->entry, &tei.port->list);
976 case SAS_PROTOCOL_SSP: 985 spin_lock(&task->task_state_lock);
977 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); 986 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
978 break; 987 spin_unlock(&task->task_state_lock);
979 case SAS_PROTOCOL_SATA:
980 case SAS_PROTOCOL_STP:
981 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
982 rc = mvs_task_prep_ata(mvi, &tei);
983 break;
984 default:
985 dev_printk(KERN_ERR, mvi->dev,
986 "unknown sas_task proto: 0x%x\n",
987 t->task_proto);
988 rc = -EINVAL;
989 break;
990 }
991 988
992 if (rc) { 989 mvs_hba_memory_dump(mvi, tag, task->task_proto);
993 mv_dprintk("rc is %x\n", rc); 990 mvi_dev->running_req++;
994 goto err_out_tag; 991 ++(*pass);
995 } 992 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
996 slot->task = t;
997 slot->port = tei.port;
998 t->lldd_task = slot;
999 list_add_tail(&slot->entry, &tei.port->list);
1000 /* TODO: select normal or high priority */
1001 spin_lock(&t->task_state_lock);
1002 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
1003 spin_unlock(&t->task_state_lock);
1004
1005 mvs_hba_memory_dump(mvi, tag, t->task_proto);
1006 mvi_dev->running_req++;
1007 ++pass;
1008 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1009 if (n > 1)
1010 t = list_entry(t->list.next, struct sas_task, list);
1011 if (likely(pass))
1012 MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
1013 (MVS_CHIP_SLOT_SZ - 1));
1014 993
1015 } while (--n); 994 return rc;
1016 rc = 0;
1017 goto out_done;
1018 995
996err_out_slot_buf:
997 pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
1019err_out_tag: 998err_out_tag:
1020 mvs_tag_free(mvi, tag); 999 mvs_tag_free(mvi, tag);
1021err_out: 1000err_out:
1022 1001
1023 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); 1002 dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
1024 if (!sas_protocol_ata(t->task_proto)) 1003 if (!sas_protocol_ata(task->task_proto))
1025 if (n_elem) 1004 if (n_elem)
1026 dma_unmap_sg(mvi->dev, t->scatter, n_elem, 1005 dma_unmap_sg(mvi->dev, task->scatter, n_elem,
1027 t->data_dir); 1006 task->data_dir);
1028out_done: 1007prep_out:
1008 return rc;
1009}
1010
1011static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
1012{
1013 struct mvs_task_list *first = NULL;
1014
1015 for (; *num > 0; --*num) {
1016 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
1017
1018 if (!mvs_list)
1019 break;
1020
1021 INIT_LIST_HEAD(&mvs_list->list);
1022 if (!first)
1023 first = mvs_list;
1024 else
1025 list_add_tail(&mvs_list->list, &first->list);
1026
1027 }
1028
1029 return first;
1030}
1031
1032static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
1033{
1034 LIST_HEAD(list);
1035 struct list_head *pos, *a;
1036 struct mvs_task_list *mlist = NULL;
1037
1038 __list_add(&list, mvs_list->list.prev, &mvs_list->list);
1039
1040 list_for_each_safe(pos, a, &list) {
1041 list_del_init(pos);
1042 mlist = list_entry(pos, struct mvs_task_list, list);
1043 kmem_cache_free(mvs_task_list_cache, mlist);
1044 }
1045}
1046
1047static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
1048 struct completion *completion, int is_tmf,
1049 struct mvs_tmf_task *tmf)
1050{
1051 struct domain_device *dev = task->dev;
1052 struct mvs_info *mvi = NULL;
1053 u32 rc = 0;
1054 u32 pass = 0;
1055 unsigned long flags = 0;
1056
1057 mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
1058
1059 if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
1060 spin_unlock_irq(dev->sata_dev.ap->lock);
1061
1062 spin_lock_irqsave(&mvi->lock, flags);
1063 rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
1064 if (rc)
1065 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1066
1067 if (likely(pass))
1068 MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
1069 (MVS_CHIP_SLOT_SZ - 1));
1029 spin_unlock_irqrestore(&mvi->lock, flags); 1070 spin_unlock_irqrestore(&mvi->lock, flags);
1071
1072 if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
1073 spin_lock_irq(dev->sata_dev.ap->lock);
1074
1075 return rc;
1076}
1077
1078static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
1079 struct completion *completion, int is_tmf,
1080 struct mvs_tmf_task *tmf)
1081{
1082 struct domain_device *dev = task->dev;
1083 struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
1084 struct mvs_info *mvi = NULL;
1085 struct sas_task *t = task;
1086 struct mvs_task_list *mvs_list = NULL, *a;
1087 LIST_HEAD(q);
1088 int pass[2] = {0};
1089 u32 rc = 0;
1090 u32 n = num;
1091 unsigned long flags = 0;
1092
1093 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
1094 if (n) {
1095 printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
1096 rc = -ENOMEM;
1097 goto free_list;
1098 }
1099
1100 __list_add(&q, mvs_list->list.prev, &mvs_list->list);
1101
1102 list_for_each_entry(a, &q, list) {
1103 a->task = t;
1104 t = list_entry(t->list.next, struct sas_task, list);
1105 }
1106
1107 list_for_each_entry(a, &q , list) {
1108
1109 t = a->task;
1110 mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
1111
1112 spin_lock_irqsave(&mvi->lock, flags);
1113 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
1114 if (rc)
1115 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1116 spin_unlock_irqrestore(&mvi->lock, flags);
1117 }
1118
1119 if (likely(pass[0]))
1120 MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
1121 (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1122
1123 if (likely(pass[1]))
1124 MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
1125 (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1126
1127 list_del_init(&q);
1128
1129free_list:
1130 if (mvs_list)
1131 mvs_task_free_list(mvs_list);
1132
1030 return rc; 1133 return rc;
1031} 1134}
1032 1135
1033int mvs_queue_command(struct sas_task *task, const int num, 1136int mvs_queue_command(struct sas_task *task, const int num,
1034 gfp_t gfp_flags) 1137 gfp_t gfp_flags)
1035{ 1138{
1036 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); 1139 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1140 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
1141
1142 if (sas->lldd_max_execute_num < 2)
1143 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1144 else
1145 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1037} 1146}
1038 1147
1039static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 1148static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
@@ -1067,6 +1176,11 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1067 /* do nothing */ 1176 /* do nothing */
1068 break; 1177 break;
1069 } 1178 }
1179
1180 if (slot->buf) {
1181 pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
1182 slot->buf = NULL;
1183 }
1070 list_del_init(&slot->entry); 1184 list_del_init(&slot->entry);
1071 task->lldd_task = NULL; 1185 task->lldd_task = NULL;
1072 slot->task = NULL; 1186 slot->task = NULL;
@@ -1255,6 +1369,7 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1255 spin_lock_irqsave(&mvi->lock, flags); 1369 spin_lock_irqsave(&mvi->lock, flags);
1256 port->port_attached = 1; 1370 port->port_attached = 1;
1257 phy->port = port; 1371 phy->port = port;
1372 sas_port->lldd_port = port;
1258 if (phy->phy_type & PORT_TYPE_SAS) { 1373 if (phy->phy_type & PORT_TYPE_SAS) {
1259 port->wide_port_phymap = sas_port->phy_mask; 1374 port->wide_port_phymap = sas_port->phy_mask;
1260 mv_printk("set wide port phy map %x\n", sas_port->phy_mask); 1375 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 77ddc7c1e5f2..1367d8b9350d 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -67,6 +68,7 @@ extern struct mvs_tgt_initiator mvs_tgt;
67extern struct mvs_info *tgt_mvi; 68extern struct mvs_info *tgt_mvi;
68extern const struct mvs_dispatch mvs_64xx_dispatch; 69extern const struct mvs_dispatch mvs_64xx_dispatch;
69extern const struct mvs_dispatch mvs_94xx_dispatch; 70extern const struct mvs_dispatch mvs_94xx_dispatch;
71extern struct kmem_cache *mvs_task_list_cache;
70 72
71#define DEV_IS_EXPANDER(type) \ 73#define DEV_IS_EXPANDER(type) \
72 ((type == EDGE_DEV) || (type == FANOUT_DEV)) 74 ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -341,6 +343,7 @@ struct mvs_info {
341 dma_addr_t bulk_buffer_dma; 343 dma_addr_t bulk_buffer_dma;
342#define TRASH_BUCKET_SIZE 0x20000 344#define TRASH_BUCKET_SIZE 0x20000
343#endif 345#endif
346 void *dma_pool;
344 struct mvs_slot_info slot_info[0]; 347 struct mvs_slot_info slot_info[0];
345}; 348};
346 349
@@ -367,6 +370,11 @@ struct mvs_task_exec_info {
367 int n_elem; 370 int n_elem;
368}; 371};
369 372
373struct mvs_task_list {
374 struct sas_task *task;
375 struct list_head list;
376};
377
370 378
371/******************** function prototype *********************/ 379/******************** function prototype *********************/
372void mvs_get_sas_addr(void *buf, u32 buflen); 380void mvs_get_sas_addr(void *buf, u32 buflen);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 835d8d66e696..4b3b4755945c 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8147,7 +8147,7 @@ static int ncr53c8xx_abort(struct scsi_cmnd *cmd)
8147 unsigned long flags; 8147 unsigned long flags;
8148 struct scsi_cmnd *done_list; 8148 struct scsi_cmnd *done_list;
8149 8149
8150 printk("ncr53c8xx_abort: command pid %lu\n", cmd->serial_number); 8150 printk("ncr53c8xx_abort\n");
8151 8151
8152 NCR_LOCK_NCB(np, flags); 8152 NCR_LOCK_NCB(np, flags);
8153 8153
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8ba5744c267e..d838205ab169 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4066,7 +4066,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4066 } */ 4066 } */
4067 printk(" tag=%d, transfersize=0x%x \n", 4067 printk(" tag=%d, transfersize=0x%x \n",
4068 cmd->tag, cmd->transfersize); 4068 cmd->tag, cmd->transfersize);
4069 printk(" Pid=%li, SP=0x%p\n", cmd->serial_number, CMD_SP(cmd)); 4069 printk(" SP=0x%p\n", CMD_SP(cmd));
4070 printk(" underflow size = 0x%x, direction=0x%x\n", 4070 printk(" underflow size = 0x%x, direction=0x%x\n",
4071 cmd->underflow, cmd->sc_data_direction); 4071 cmd->underflow, cmd->sc_data_direction);
4072} 4072}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d3e58d763b43..532313e0725e 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -496,8 +496,8 @@ do_read:
496 offset = 0; 496 offset = 0;
497 } 497 }
498 498
499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset, 499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
500 SFP_BLOCK_SIZE); 500 addr, offset, SFP_BLOCK_SIZE, 0);
501 if (rval != QLA_SUCCESS) { 501 if (rval != QLA_SUCCESS) {
502 qla_printk(KERN_WARNING, ha, 502 qla_printk(KERN_WARNING, ha,
503 "Unable to read SFP data (%x/%x/%x).\n", rval, 503 "Unable to read SFP data (%x/%x/%x).\n", rval,
@@ -628,12 +628,12 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
628 628
629 memcpy(ha->edc_data, &buf[8], len); 629 memcpy(ha->edc_data, &buf[8], len);
630 630
631 rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma, 631 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
632 ha->edc_data, len, opt); 632 dev, adr, len, opt);
633 if (rval != QLA_SUCCESS) { 633 if (rval != QLA_SUCCESS) {
634 DEBUG2(qla_printk(KERN_INFO, ha, 634 DEBUG2(qla_printk(KERN_INFO, ha,
635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", 635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
636 rval, dev, adr, opt, len, *buf)); 636 rval, dev, adr, opt, len, buf[8]));
637 return 0; 637 return 0;
638 } 638 }
639 639
@@ -685,8 +685,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
685 return -EINVAL; 685 return -EINVAL;
686 686
687 memset(ha->edc_data, 0, len); 687 memset(ha->edc_data, 0, len);
688 rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma, 688 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
689 ha->edc_data, len, opt); 689 dev, adr, len, opt);
690 if (rval != QLA_SUCCESS) { 690 if (rval != QLA_SUCCESS) {
691 DEBUG2(qla_printk(KERN_INFO, ha, 691 DEBUG2(qla_printk(KERN_INFO, ha,
692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", 692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
@@ -1568,7 +1568,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1568 1568
1569 /* Now that the rport has been deleted, set the fcport state to 1569 /* Now that the rport has been deleted, set the fcport state to
1570 FCS_DEVICE_DEAD */ 1570 FCS_DEVICE_DEAD */
1571 atomic_set(&fcport->state, FCS_DEVICE_DEAD); 1571 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1572 1572
1573 /* 1573 /*
1574 * Transport has effectively 'deleted' the rport, clear 1574 * Transport has effectively 'deleted' the rport, clear
@@ -1877,14 +1877,15 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1877 1877
1878 scsi_remove_host(vha->host); 1878 scsi_remove_host(vha->host);
1879 1879
1880 /* Allow timer to run to drain queued items, when removing vp */
1881 qla24xx_deallocate_vp_id(vha);
1882
1880 if (vha->timer_active) { 1883 if (vha->timer_active) {
1881 qla2x00_vp_stop_timer(vha); 1884 qla2x00_vp_stop_timer(vha);
1882 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" 1885 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
1883 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); 1886 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
1884 } 1887 }
1885 1888
1886 qla24xx_deallocate_vp_id(vha);
1887
1888 /* No pending activities shall be there on the vha now */ 1889 /* No pending activities shall be there on the vha now */
1889 DEBUG(msleep(random32()%10)); /* Just to see if something falls on 1890 DEBUG(msleep(random32()%10)); /* Just to see if something falls on
1890 * the net we have placed below */ 1891 * the net we have placed below */
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 903b0586ded3..8c10e2c4928e 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 074a999c7017..0f0f54e35f06 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 096141148257..c53719a9a747 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index b74e6b5743dc..930414541ec6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ee20353c8550..cc5a79259d33 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1717,6 +1717,14 @@ typedef struct fc_port {
1717#define FCS_DEVICE_LOST 3 1717#define FCS_DEVICE_LOST 3
1718#define FCS_ONLINE 4 1718#define FCS_ONLINE 4
1719 1719
1720static const char * const port_state_str[] = {
1721 "Unknown",
1722 "UNCONFIGURED",
1723 "DEAD",
1724 "LOST",
1725 "ONLINE"
1726};
1727
1720/* 1728/*
1721 * FC port flags. 1729 * FC port flags.
1722 */ 1730 */
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 6271353e8c51..a5a4e1275bf2 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index f5ba09c8a663..691783abfb69 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -416,8 +416,7 @@ struct cmd_type_6 {
416 uint8_t vp_index; 416 uint8_t vp_index;
417 417
418 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */ 418 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
419 uint16_t fcp_data_dseg_len; /* Data segment length. */ 419 uint32_t fcp_data_dseg_len; /* Data segment length. */
420 uint16_t reserved_1; /* MUST be set to 0. */
421}; 420};
422 421
423#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */ 422#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */
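
Editor's note: dropping the reserved halfword turns the Type-6 data-segment length into a full 32-bit little-endian field, and the IOCB builder (see the qla_nx.c hunk further down) now stores it with cpu_to_le32() alongside the two address words. Below is a minimal standalone sketch of that descriptor layout; put_le32() and the buffer layout are stand-ins for illustration, not the driver's code.

    #include <stdint.h>
    #include <stdio.h>

    /* Store a 32-bit value little-endian, as cpu_to_le32() + assignment
     * does in the driver regardless of host byte order. */
    static void put_le32(uint8_t *dst, uint32_t v)
    {
        dst[0] = (uint8_t)(v);
        dst[1] = (uint8_t)(v >> 8);
        dst[2] = (uint8_t)(v >> 16);
        dst[3] = (uint8_t)(v >> 24);
    }

    int main(void)
    {
        uint8_t dseg[12];                        /* addr_lo | addr_hi | length */
        uint64_t dma_addr = 0x0000001234567890ULL;
        uint32_t len = 0x00012000;               /* 72 KB: would not fit in 16 bits */

        put_le32(dseg + 0, (uint32_t)dma_addr);          /* LSD(address) */
        put_le32(dseg + 4, (uint32_t)(dma_addr >> 32));  /* MSD(address) */
        put_le32(dseg + 8, len);                         /* full 32-bit length */

        for (unsigned i = 0; i < sizeof(dseg); i++)
            printf("%02x%s", dseg[i], i + 1 == sizeof(dseg) ? "\n" : " ");
        return 0;
    }
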
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d48326ee3f61..0b381224ae4b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -39,6 +39,8 @@ extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
39extern int qla2x00_perform_loop_resync(scsi_qla_host_t *); 39extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
40extern int qla2x00_loop_resync(scsi_qla_host_t *); 40extern int qla2x00_loop_resync(scsi_qla_host_t *);
41 41
42extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
43
42extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); 44extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
43extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); 45extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
44 46
@@ -100,6 +102,8 @@ extern int ql2xgffidenable;
100extern int ql2xenabledif; 102extern int ql2xenabledif;
101extern int ql2xenablehba_err_chk; 103extern int ql2xenablehba_err_chk;
102extern int ql2xtargetreset; 104extern int ql2xtargetreset;
105extern int ql2xdontresethba;
106extern unsigned int ql2xmaxlun;
103 107
104extern int qla2x00_loop_reset(scsi_qla_host_t *); 108extern int qla2x00_loop_reset(scsi_qla_host_t *);
105extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 109extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -319,15 +323,12 @@ extern int
319qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); 323qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
320 324
321extern int 325extern int
322qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t); 326qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
323 327 uint16_t, uint16_t, uint16_t, uint16_t);
324extern int
325qla2x00_read_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t,
326 uint8_t *, uint16_t, uint16_t);
327 328
328extern int 329extern int
329qla2x00_write_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t, 330qla2x00_write_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
330 uint8_t *, uint16_t, uint16_t); 331 uint16_t, uint16_t, uint16_t, uint16_t);
331 332
332extern int 333extern int
333qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); 334qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
@@ -549,7 +550,6 @@ extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
549extern int qla82xx_rd_32(struct qla_hw_data *, ulong); 550extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
550extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); 551extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
551extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int); 552extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
552extern void qla82xx_rom_unlock(struct qla_hw_data *);
553 553
554/* ISP 8021 IDC */ 554/* ISP 8021 IDC */
555extern void qla82xx_clear_drv_active(struct qla_hw_data *); 555extern void qla82xx_clear_drv_active(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 74a91b6dfc68..8cd9066ad906 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8575808dbae0..920b76bfbb93 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -35,8 +35,6 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35 35
36static int qla2x00_restart_isp(scsi_qla_host_t *); 36static int qla2x00_restart_isp(scsi_qla_host_t *);
37 37
38static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39
40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); 38static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41static int qla84xx_init_chip(scsi_qla_host_t *); 39static int qla84xx_init_chip(scsi_qla_host_t *);
42static int qla25xx_init_queues(struct qla_hw_data *); 40static int qla25xx_init_queues(struct qla_hw_data *);
@@ -385,8 +383,18 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
385 383
386 switch (data[0]) { 384 switch (data[0]) {
387 case MBS_COMMAND_COMPLETE: 385 case MBS_COMMAND_COMPLETE:
386 /*
387 * Driver must validate login state - If PRLI not complete,
388 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
389 * requests.
390 */
391 rval = qla2x00_get_port_database(vha, fcport, 0);
392 if (rval != QLA_SUCCESS) {
393 qla2x00_post_async_logout_work(vha, fcport, NULL);
394 qla2x00_post_async_login_work(vha, fcport, NULL);
395 break;
396 }
388 if (fcport->flags & FCF_FCP2_DEVICE) { 397 if (fcport->flags & FCF_FCP2_DEVICE) {
389 fcport->flags |= FCF_ASYNC_SENT;
390 qla2x00_post_async_adisc_work(vha, fcport, data); 398 qla2x00_post_async_adisc_work(vha, fcport, data);
391 break; 399 break;
392 } 400 }
@@ -397,7 +405,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
397 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 405 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
398 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 406 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
399 else 407 else
400 qla2x00_mark_device_lost(vha, fcport, 1, 1); 408 qla2x00_mark_device_lost(vha, fcport, 1, 0);
401 break; 409 break;
402 case MBS_PORT_ID_USED: 410 case MBS_PORT_ID_USED:
403 fcport->loop_id = data[1]; 411 fcport->loop_id = data[1];
@@ -409,7 +417,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
409 rval = qla2x00_find_new_loop_id(vha, fcport); 417 rval = qla2x00_find_new_loop_id(vha, fcport);
410 if (rval != QLA_SUCCESS) { 418 if (rval != QLA_SUCCESS) {
411 fcport->flags &= ~FCF_ASYNC_SENT; 419 fcport->flags &= ~FCF_ASYNC_SENT;
412 qla2x00_mark_device_lost(vha, fcport, 1, 1); 420 qla2x00_mark_device_lost(vha, fcport, 1, 0);
413 break; 421 break;
414 } 422 }
415 qla2x00_post_async_login_work(vha, fcport, NULL); 423 qla2x00_post_async_login_work(vha, fcport, NULL);
@@ -441,7 +449,7 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
441 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 449 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
442 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 450 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
443 else 451 else
444 qla2x00_mark_device_lost(vha, fcport, 1, 1); 452 qla2x00_mark_device_lost(vha, fcport, 1, 0);
445 453
446 return; 454 return;
447} 455}
@@ -2536,7 +2544,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2536 fcport->vp_idx = vha->vp_idx; 2544 fcport->vp_idx = vha->vp_idx;
2537 fcport->port_type = FCT_UNKNOWN; 2545 fcport->port_type = FCT_UNKNOWN;
2538 fcport->loop_id = FC_NO_LOOP_ID; 2546 fcport->loop_id = FC_NO_LOOP_ID;
2539 atomic_set(&fcport->state, FCS_UNCONFIGURED); 2547 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2540 fcport->supported_classes = FC_COS_UNSPECIFIED; 2548 fcport->supported_classes = FC_COS_UNSPECIFIED;
2541 2549
2542 return fcport; 2550 return fcport;
@@ -2722,7 +2730,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2722 "loop_id=0x%04x\n", 2730 "loop_id=0x%04x\n",
2723 vha->host_no, fcport->loop_id)); 2731 vha->host_no, fcport->loop_id));
2724 2732
2725 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2733 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2726 } 2734 }
2727 } 2735 }
2728 2736
@@ -2934,7 +2942,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2934 qla2x00_iidma_fcport(vha, fcport); 2942 qla2x00_iidma_fcport(vha, fcport);
2935 qla24xx_update_fcport_fcp_prio(vha, fcport); 2943 qla24xx_update_fcport_fcp_prio(vha, fcport);
2936 qla2x00_reg_remote_port(vha, fcport); 2944 qla2x00_reg_remote_port(vha, fcport);
2937 atomic_set(&fcport->state, FCS_ONLINE); 2945 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
2938} 2946}
2939 2947
2940/* 2948/*
@@ -3391,7 +3399,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3391 * Context: 3399 * Context:
3392 * Kernel context. 3400 * Kernel context.
3393 */ 3401 */
3394static int 3402int
3395qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 3403qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3396{ 3404{
3397 int rval; 3405 int rval;
@@ -5202,7 +5210,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5202 } 5210 }
5203 5211
5204 /* Reset Initialization control block */ 5212 /* Reset Initialization control block */
5205 memset(icb, 0, sizeof(struct init_cb_81xx)); 5213 memset(icb, 0, ha->init_cb_size);
5206 5214
5207 /* Copy 1st segment. */ 5215 /* Copy 1st segment. */
5208 dptr1 = (uint8_t *)icb; 5216 dptr1 = (uint8_t *)icb;
@@ -5427,6 +5435,13 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5427 ha->isp_abort_cnt = 0; 5435 ha->isp_abort_cnt = 0;
5428 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 5436 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5429 5437
5438 /* Update the firmware version */
5439 qla2x00_get_fw_version(vha, &ha->fw_major_version,
5440 &ha->fw_minor_version, &ha->fw_subminor_version,
5441 &ha->fw_attributes, &ha->fw_memory_size,
5442 ha->mpi_version, &ha->mpi_capabilities,
5443 ha->phy_version);
5444
5430 if (ha->fce) { 5445 if (ha->fce) {
5431 ha->flags.fce_enabled = 1; 5446 ha->flags.fce_enabled = 1;
5432 memset(ha->fce, 0, 5447 memset(ha->fce, 0,
@@ -5508,26 +5523,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
5508 * 5523 *
5509 * Return: 5524 * Return:
5510 * non-zero (if found) 5525 * non-zero (if found)
5511 * 0 (if not found) 5526 * -1 (if not found)
5512 * 5527 *
5513 * Context: 5528 * Context:
5514 * Kernel context 5529 * Kernel context
5515 */ 5530 */
5516uint8_t 5531static int
5517qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 5532qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5518{ 5533{
5519 int i, entries; 5534 int i, entries;
5520 uint8_t pid_match, wwn_match; 5535 uint8_t pid_match, wwn_match;
5521 uint8_t priority; 5536 int priority;
5522 uint32_t pid1, pid2; 5537 uint32_t pid1, pid2;
5523 uint64_t wwn1, wwn2; 5538 uint64_t wwn1, wwn2;
5524 struct qla_fcp_prio_entry *pri_entry; 5539 struct qla_fcp_prio_entry *pri_entry;
5525 struct qla_hw_data *ha = vha->hw; 5540 struct qla_hw_data *ha = vha->hw;
5526 5541
5527 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) 5542 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5528 return 0; 5543 return -1;
5529 5544
5530 priority = 0; 5545 priority = -1;
5531 entries = ha->fcp_prio_cfg->num_entries; 5546 entries = ha->fcp_prio_cfg->num_entries;
5532 pri_entry = &ha->fcp_prio_cfg->entry[0]; 5547 pri_entry = &ha->fcp_prio_cfg->entry[0];
5533 5548
@@ -5610,7 +5625,7 @@ int
5610qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 5625qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5611{ 5626{
5612 int ret; 5627 int ret;
5613 uint8_t priority; 5628 int priority;
5614 uint16_t mb[5]; 5629 uint16_t mb[5];
5615 5630
5616 if (fcport->port_type != FCT_TARGET || 5631 if (fcport->port_type != FCT_TARGET ||
@@ -5618,6 +5633,9 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5618 return QLA_FUNCTION_FAILED; 5633 return QLA_FUNCTION_FAILED;
5619 5634
5620 priority = qla24xx_get_fcp_prio(vha, fcport); 5635 priority = qla24xx_get_fcp_prio(vha, fcport);
5636 if (priority < 0)
5637 return QLA_FUNCTION_FAILED;
5638
5621 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); 5639 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
5622 if (ret == QLA_SUCCESS) 5640 if (ret == QLA_SUCCESS)
5623 fcport->fcp_prio = priority; 5641 fcport->fcp_prio = priority;
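
Editor's note: these two hunks change the priority lookup from returning 0 (ambiguous, since "not found" and "priority 0" looked identical) to a signed return with -1 as the not-found sentinel, which the caller now checks before issuing the set-priority mailbox command. A minimal sketch of that pattern follows; the table layout and names are invented for illustration, not the driver's qla_fcp_prio_entry.

    #include <stdio.h>

    struct prio_entry {
        unsigned int port_id;   /* illustrative key, not the driver's layout */
        int priority;
    };

    static const struct prio_entry prio_table[] = {
        { 0x010203, 2 },
        { 0x0a0b0c, 0 },        /* a real priority of 0 is legal */
    };

    /* Return the configured priority, or -1 if the port has no entry.
     * Returning -1 (instead of 0) keeps "no match" distinct from
     * "matched with priority 0". */
    static int lookup_prio(unsigned int port_id)
    {
        for (size_t i = 0; i < sizeof(prio_table) / sizeof(prio_table[0]); i++)
            if (prio_table[i].port_id == port_id)
                return prio_table[i].priority;
        return -1;
    }

    int main(void)
    {
        int prio = lookup_prio(0x0a0b0c);

        if (prio < 0)
            printf("no priority configured, skip the update\n");
        else
            printf("apply priority %d\n", prio);    /* prints 0 here */
        return 0;
    }
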
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 48f97a92e33d..4c8167e11f69 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -83,3 +83,22 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
83 } 83 }
84 INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list); 84 INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
85} 85}
86
87static inline void
88qla2x00_set_fcport_state(fc_port_t *fcport, int state)
89{
90 int old_state;
91
92 old_state = atomic_read(&fcport->state);
93 atomic_set(&fcport->state, state);
94
95 /* Don't print state transitions during initial allocation of fcport */
96 if (old_state && old_state != state) {
97 DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw,
98 "scsi(%ld): FCPort state transitioned from %s to %s - "
99 "portid=%02x%02x%02x.\n", fcport->vha->host_no,
100 port_state_str[old_state], port_state_str[state],
101 fcport->d_id.b.domain, fcport->d_id.b.area,
102 fcport->d_id.b.al_pa));
103 }
104}
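
Editor's note: the new inline above, together with port_state_str[] in qla_def.h, folds every atomic_set(&fcport->state, ...) into one place so state transitions can be logged once. Here is a minimal standalone sketch of the same pattern using C11 atomics and printf in place of the driver's atomic_t, qla_printk() and fc_port_t; all names below are illustrative.

    #include <stdatomic.h>
    #include <stdio.h>

    enum port_state { PS_UNKNOWN, PS_UNCONFIGURED, PS_DEAD, PS_LOST, PS_ONLINE };

    static const char * const state_str[] = {
        "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"
    };

    struct port {
        atomic_int state;   /* stands in for the driver's atomic_t fcport->state */
    };

    /* Set the new state and log the transition, but stay quiet while the
     * port is still in its freshly-allocated (PS_UNKNOWN == 0) state. */
    static void port_set_state(struct port *p, int new_state)
    {
        int old_state = atomic_exchange(&p->state, new_state);

        if (old_state != PS_UNKNOWN && old_state != new_state)
            printf("port state %s -> %s\n",
                   state_str[old_state], state_str[new_state]);
    }

    int main(void)
    {
        struct port p = { .state = PS_UNKNOWN };

        port_set_state(&p, PS_UNCONFIGURED);  /* silent: initial allocation */
        port_set_state(&p, PS_ONLINE);        /* logs UNCONFIGURED -> ONLINE */
        port_set_state(&p, PS_ONLINE);        /* silent: no change */
        port_set_state(&p, PS_LOST);          /* logs ONLINE -> LOST */
        return 0;
    }
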
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d78d5896fc33..7bac3cd109d6 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 712518d05128..9c0f0e3389eb 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -843,7 +843,10 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
843 qla_printk(KERN_WARNING, ha, 843 qla_printk(KERN_WARNING, ha,
844 "Invalid SCSI completion handle %d.\n", index); 844 "Invalid SCSI completion handle %d.\n", index);
845 845
846 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 846 if (IS_QLA82XX(ha))
847 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
848 else
849 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
847 return; 850 return;
848 } 851 }
849 852
@@ -861,7 +864,10 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
861 qla_printk(KERN_WARNING, ha, 864 qla_printk(KERN_WARNING, ha,
862 "Invalid ISP SCSI completion handle\n"); 865 "Invalid ISP SCSI completion handle\n");
863 866
864 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 867 if (IS_QLA82XX(ha))
868 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
869 else
870 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
865 } 871 }
866} 872}
867 873
@@ -878,7 +884,10 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
878 if (index >= MAX_OUTSTANDING_COMMANDS) { 884 if (index >= MAX_OUTSTANDING_COMMANDS) {
879 qla_printk(KERN_WARNING, ha, 885 qla_printk(KERN_WARNING, ha,
880 "%s: Invalid completion handle (%x).\n", func, index); 886 "%s: Invalid completion handle (%x).\n", func, index);
881 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 887 if (IS_QLA82XX(ha))
888 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
889 else
890 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
882 goto done; 891 goto done;
883 } 892 }
884 sp = req->outstanding_cmds[index]; 893 sp = req->outstanding_cmds[index];
@@ -1564,7 +1573,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1564 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, 1573 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
1565 sts->handle); 1574 sts->handle);
1566 1575
1567 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1576 if (IS_QLA82XX(ha))
1577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1578 else
1579 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1568 qla2xxx_wake_dpc(vha); 1580 qla2xxx_wake_dpc(vha);
1569 return; 1581 return;
1570 } 1582 }
@@ -1794,12 +1806,13 @@ out:
1794 if (logit) 1806 if (logit)
1795 DEBUG2(qla_printk(KERN_INFO, ha, 1807 DEBUG2(qla_printk(KERN_INFO, ha,
1796 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1808 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1797 "oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1809 "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1798 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1810 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1799 cp->device->id, cp->device->lun, comp_status, scsi_status, 1811 cp->device->id, cp->device->lun, comp_status, scsi_status,
1800 cp->result, ox_id, cp->cmnd[0], 1812 cp->result, fcport->d_id.b.domain, fcport->d_id.b.area,
1801 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, 1813 fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1],
1802 resid_len, fw_resid_len)); 1814 cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
1815 fw_resid_len));
1803 1816
1804 if (rsp->status_srb == NULL) 1817 if (rsp->status_srb == NULL)
1805 qla2x00_sp_compl(ha, sp); 1818 qla2x00_sp_compl(ha, sp);
@@ -1908,13 +1921,17 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1908 qla2x00_sp_compl(ha, sp); 1921 qla2x00_sp_compl(ha, sp);
1909 1922
1910 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1923 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1911 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { 1924 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1925 || pkt->entry_type == COMMAND_TYPE_6) {
1912 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1926 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1913 vha->host_no)); 1927 vha->host_no));
1914 qla_printk(KERN_WARNING, ha, 1928 qla_printk(KERN_WARNING, ha,
1915 "Error entry - invalid handle\n"); 1929 "Error entry - invalid handle\n");
1916 1930
1917 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1931 if (IS_QLA82XX(ha))
1932 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1933 else
1934 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1918 qla2xxx_wake_dpc(vha); 1935 qla2xxx_wake_dpc(vha);
1919 } 1936 }
1920} 1937}
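
Editor's note: every invalid-handle path in the interrupt code now picks the recovery action by adapter type: an ISP82xx (FCoE) part gets FCOE_CTX_RESET_NEEDED while other adapters keep ISP_ABORT_NEEDED. The same two-line test is open-coded at each site; the sketch below folds it into one helper purely for illustration (my refactor, not something this patch does), with stand-in flag names and a boolean in place of IS_QLA82XX(ha).

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the driver's dpc_flags bits. */
    enum { ISP_ABORT_NEEDED = 1u << 0, FCOE_CTX_RESET_NEEDED = 1u << 1 };

    struct host {
        bool is_82xx;            /* stands in for IS_QLA82XX(ha) */
        unsigned long dpc_flags;
    };

    /* Choose the error-recovery flag once instead of repeating the
     * adapter-type test at every invalid-handle detection site. */
    static void schedule_error_recovery(struct host *h)
    {
        h->dpc_flags |= h->is_82xx ? FCOE_CTX_RESET_NEEDED : ISP_ABORT_NEEDED;
    }

    int main(void)
    {
        struct host fcoe = { .is_82xx = true }, fc = { .is_82xx = false };

        schedule_error_recovery(&fcoe);
        schedule_error_recovery(&fc);
        printf("82xx flags=%#lx, other flags=%#lx\n",
               fcoe.dpc_flags, fc.dpc_flags);
        return 0;
    }
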
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 34893397ac84..c26f0acdfecc 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1261,11 +1261,12 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1261 /* Check for logged in state. */ 1261 /* Check for logged in state. */
1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1263 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1263 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1264 DEBUG2(printk("%s(%ld): Unable to verify " 1264 DEBUG2(qla_printk(KERN_WARNING, ha,
1265 "login-state (%x/%x) for loop_id %x\n", 1265 "scsi(%ld): Unable to verify login-state (%x/%x) "
1266 __func__, vha->host_no, 1266 " - portid=%02x%02x%02x.\n", vha->host_no,
1267 pd24->current_login_state, 1267 pd24->current_login_state, pd24->last_login_state,
1268 pd24->last_login_state, fcport->loop_id)); 1268 fcport->d_id.b.domain, fcport->d_id.b.area,
1269 fcport->d_id.b.al_pa));
1269 rval = QLA_FUNCTION_FAILED; 1270 rval = QLA_FUNCTION_FAILED;
1270 goto gpd_error_out; 1271 goto gpd_error_out;
1271 } 1272 }
@@ -1289,6 +1290,12 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1289 /* Check for logged in state. */ 1290 /* Check for logged in state. */
1290 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1291 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1291 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1292 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1293 DEBUG2(qla_printk(KERN_WARNING, ha,
1294 "scsi(%ld): Unable to verify login-state (%x/%x) "
1295 " - portid=%02x%02x%02x.\n", vha->host_no,
1296 pd->master_state, pd->slave_state,
1297 fcport->d_id.b.domain, fcport->d_id.b.area,
1298 fcport->d_id.b.al_pa));
1292 rval = QLA_FUNCTION_FAILED; 1299 rval = QLA_FUNCTION_FAILED;
1293 goto gpd_error_out; 1300 goto gpd_error_out;
1294 } 1301 }
@@ -1883,7 +1890,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1883 lg->handle = MAKE_HANDLE(req->id, lg->handle); 1890 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1884 lg->nport_handle = cpu_to_le16(loop_id); 1891 lg->nport_handle = cpu_to_le16(loop_id);
1885 lg->control_flags = 1892 lg->control_flags =
1886 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1893 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
1894 LCF_FREE_NPORT);
1887 lg->port_id[0] = al_pa; 1895 lg->port_id[0] = al_pa;
1888 lg->port_id[1] = area; 1896 lg->port_id[1] = area;
1889 lg->port_id[2] = domain; 1897 lg->port_id[2] = domain;
@@ -2362,7 +2370,7 @@ qla24xx_abort_command(srb_t *sp)
2362 abt->entry_count = 1; 2370 abt->entry_count = 1;
2363 abt->handle = MAKE_HANDLE(req->id, abt->handle); 2371 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2364 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2372 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2365 abt->handle_to_abort = handle; 2373 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2366 abt->port_id[0] = fcport->d_id.b.al_pa; 2374 abt->port_id[0] = fcport->d_id.b.al_pa;
2367 abt->port_id[1] = fcport->d_id.b.area; 2375 abt->port_id[1] = fcport->d_id.b.area;
2368 abt->port_id[2] = fcport->d_id.b.domain; 2376 abt->port_id[2] = fcport->d_id.b.domain;
@@ -2779,44 +2787,6 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2779} 2787}
2780 2788
2781int 2789int
2782qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
2783 uint16_t off, uint16_t count)
2784{
2785 int rval;
2786 mbx_cmd_t mc;
2787 mbx_cmd_t *mcp = &mc;
2788
2789 if (!IS_FWI2_CAPABLE(vha->hw))
2790 return QLA_FUNCTION_FAILED;
2791
2792 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2793
2794 mcp->mb[0] = MBC_READ_SFP;
2795 mcp->mb[1] = addr;
2796 mcp->mb[2] = MSW(sfp_dma);
2797 mcp->mb[3] = LSW(sfp_dma);
2798 mcp->mb[6] = MSW(MSD(sfp_dma));
2799 mcp->mb[7] = LSW(MSD(sfp_dma));
2800 mcp->mb[8] = count;
2801 mcp->mb[9] = off;
2802 mcp->mb[10] = 0;
2803 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2804 mcp->in_mb = MBX_0;
2805 mcp->tov = MBX_TOV_SECONDS;
2806 mcp->flags = 0;
2807 rval = qla2x00_mailbox_command(vha, mcp);
2808
2809 if (rval != QLA_SUCCESS) {
2810 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2811 vha->host_no, rval, mcp->mb[0]));
2812 } else {
2813 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2814 }
2815
2816 return rval;
2817}
2818
2819int
2820qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 2790qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2821 uint16_t *port_speed, uint16_t *mb) 2791 uint16_t *port_speed, uint16_t *mb)
2822{ 2792{
@@ -3581,15 +3551,22 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3581} 3551}
3582 3552
3583int 3553int
3584qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, 3554qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3585 dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt) 3555 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3586{ 3556{
3587 int rval; 3557 int rval;
3588 mbx_cmd_t mc; 3558 mbx_cmd_t mc;
3589 mbx_cmd_t *mcp = &mc; 3559 mbx_cmd_t *mcp = &mc;
3560 struct qla_hw_data *ha = vha->hw;
3561
3562 if (!IS_FWI2_CAPABLE(ha))
3563 return QLA_FUNCTION_FAILED;
3590 3564
3591 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3565 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3592 3566
3567 if (len == 1)
3568 opt |= BIT_0;
3569
3593 mcp->mb[0] = MBC_READ_SFP; 3570 mcp->mb[0] = MBC_READ_SFP;
3594 mcp->mb[1] = dev; 3571 mcp->mb[1] = dev;
3595 mcp->mb[2] = MSW(sfp_dma); 3572 mcp->mb[2] = MSW(sfp_dma);
@@ -3597,17 +3574,16 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3597 mcp->mb[6] = MSW(MSD(sfp_dma)); 3574 mcp->mb[6] = MSW(MSD(sfp_dma));
3598 mcp->mb[7] = LSW(MSD(sfp_dma)); 3575 mcp->mb[7] = LSW(MSD(sfp_dma));
3599 mcp->mb[8] = len; 3576 mcp->mb[8] = len;
3600 mcp->mb[9] = adr; 3577 mcp->mb[9] = off;
3601 mcp->mb[10] = opt; 3578 mcp->mb[10] = opt;
3602 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 3579 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3603 mcp->in_mb = MBX_0; 3580 mcp->in_mb = MBX_1|MBX_0;
3604 mcp->tov = MBX_TOV_SECONDS; 3581 mcp->tov = MBX_TOV_SECONDS;
3605 mcp->flags = 0; 3582 mcp->flags = 0;
3606 rval = qla2x00_mailbox_command(vha, mcp); 3583 rval = qla2x00_mailbox_command(vha, mcp);
3607 3584
3608 if (opt & BIT_0) 3585 if (opt & BIT_0)
3609 if (sfp) 3586 *sfp = mcp->mb[1];
3610 *sfp = mcp->mb[8];
3611 3587
3612 if (rval != QLA_SUCCESS) { 3588 if (rval != QLA_SUCCESS) {
3613 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3589 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
@@ -3620,18 +3596,24 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3620} 3596}
3621 3597
3622int 3598int
3623qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, 3599qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3624 dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt) 3600 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3625{ 3601{
3626 int rval; 3602 int rval;
3627 mbx_cmd_t mc; 3603 mbx_cmd_t mc;
3628 mbx_cmd_t *mcp = &mc; 3604 mbx_cmd_t *mcp = &mc;
3605 struct qla_hw_data *ha = vha->hw;
3606
3607 if (!IS_FWI2_CAPABLE(ha))
3608 return QLA_FUNCTION_FAILED;
3629 3609
3630 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3610 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3631 3611
3612 if (len == 1)
3613 opt |= BIT_0;
3614
3632 if (opt & BIT_0) 3615 if (opt & BIT_0)
3633 if (sfp) 3616 len = *sfp;
3634 len = *sfp;
3635 3617
3636 mcp->mb[0] = MBC_WRITE_SFP; 3618 mcp->mb[0] = MBC_WRITE_SFP;
3637 mcp->mb[1] = dev; 3619 mcp->mb[1] = dev;
@@ -3640,10 +3622,10 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3640 mcp->mb[6] = MSW(MSD(sfp_dma)); 3622 mcp->mb[6] = MSW(MSD(sfp_dma));
3641 mcp->mb[7] = LSW(MSD(sfp_dma)); 3623 mcp->mb[7] = LSW(MSD(sfp_dma));
3642 mcp->mb[8] = len; 3624 mcp->mb[8] = len;
3643 mcp->mb[9] = adr; 3625 mcp->mb[9] = off;
3644 mcp->mb[10] = opt; 3626 mcp->mb[10] = opt;
3645 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 3627 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3646 mcp->in_mb = MBX_0; 3628 mcp->in_mb = MBX_1|MBX_0;
3647 mcp->tov = MBX_TOV_SECONDS; 3629 mcp->tov = MBX_TOV_SECONDS;
3648 mcp->flags = 0; 3630 mcp->flags = 0;
3649 rval = qla2x00_mailbox_command(vha, mcp); 3631 rval = qla2x00_mailbox_command(vha, mcp);
@@ -4160,63 +4142,32 @@ int
4160qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) 4142qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4161{ 4143{
4162 int rval; 4144 int rval;
4163 mbx_cmd_t mc; 4145 uint8_t byte;
4164 mbx_cmd_t *mcp = &mc;
4165 struct qla_hw_data *ha = vha->hw; 4146 struct qla_hw_data *ha = vha->hw;
4166 4147
4167 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, ha->host_no)); 4148 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
4168 4149
4169 /* High bits. */ 4150 /* Integer part */
4170 mcp->mb[0] = MBC_READ_SFP; 4151 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4171 mcp->mb[1] = 0x98;
4172 mcp->mb[2] = 0;
4173 mcp->mb[3] = 0;
4174 mcp->mb[6] = 0;
4175 mcp->mb[7] = 0;
4176 mcp->mb[8] = 1;
4177 mcp->mb[9] = 0x01;
4178 mcp->mb[10] = BIT_13|BIT_0;
4179 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4180 mcp->in_mb = MBX_1|MBX_0;
4181 mcp->tov = MBX_TOV_SECONDS;
4182 mcp->flags = 0;
4183 rval = qla2x00_mailbox_command(vha, mcp);
4184 if (rval != QLA_SUCCESS) { 4152 if (rval != QLA_SUCCESS) {
4185 DEBUG2_3_11(printk(KERN_WARNING 4153 DEBUG2_3_11(printk(KERN_WARNING
4186 "%s(%ld): failed=%x (%x).\n", __func__, 4154 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4187 vha->host_no, rval, mcp->mb[0]));
4188 ha->flags.thermal_supported = 0; 4155 ha->flags.thermal_supported = 0;
4189 goto fail; 4156 goto fail;
4190 } 4157 }
4191 *temp = mcp->mb[1] & 0xFF; 4158 *temp = byte;
4192 4159
4193 /* Low bits. */ 4160 /* Fraction part */
4194 mcp->mb[0] = MBC_READ_SFP; 4161 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4195 mcp->mb[1] = 0x98;
4196 mcp->mb[2] = 0;
4197 mcp->mb[3] = 0;
4198 mcp->mb[6] = 0;
4199 mcp->mb[7] = 0;
4200 mcp->mb[8] = 1;
4201 mcp->mb[9] = 0x10;
4202 mcp->mb[10] = BIT_13|BIT_0;
4203 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4204 mcp->in_mb = MBX_1|MBX_0;
4205 mcp->tov = MBX_TOV_SECONDS;
4206 mcp->flags = 0;
4207 rval = qla2x00_mailbox_command(vha, mcp);
4208 if (rval != QLA_SUCCESS) { 4162 if (rval != QLA_SUCCESS) {
4209 DEBUG2_3_11(printk(KERN_WARNING 4163 DEBUG2_3_11(printk(KERN_WARNING
4210 "%s(%ld): failed=%x (%x).\n", __func__, 4164 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4211 vha->host_no, rval, mcp->mb[0]));
4212 ha->flags.thermal_supported = 0; 4165 ha->flags.thermal_supported = 0;
4213 goto fail; 4166 goto fail;
4214 } 4167 }
4215 *frac = ((mcp->mb[1] & 0xFF) >> 6) * 25; 4168 *frac = (byte >> 6) * 25;
4216 4169
4217 if (rval == QLA_SUCCESS) 4170 DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no));
4218 DEBUG11(printk(KERN_INFO
4219 "%s(%ld): done.\n", __func__, ha->host_no));
4220fail: 4171fail:
4221 return rval; 4172 return rval;
4222} 4173}
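
Editor's note: with qla2x00_read_edc()/write_edc() folded into the generic qla2x00_read_sfp()/write_sfp() pair, the thermal code above reads two single bytes from device address 0x98 (the integer part at offset 0x01 and the fraction byte at 0x10) and converts the fraction byte's top two bits into 0/25/50/75, i.e. quarter-degree steps. A standalone sketch of just that conversion, with a stub standing in for the MBC_READ_SFP mailbox call and canned sensor values:

    #include <stdint.h>
    #include <stdio.h>

    /* Stub for the single-byte read; in the driver this is a mailbox
     * command and the data comes back from the adapter. */
    static int read_sensor_byte(uint16_t offset, uint8_t *out)
    {
        if (offset == 0x01) { *out = 0x23; return 0; }  /* 35 C integer part   */
        if (offset == 0x10) { *out = 0x80; return 0; }  /* top bits 0b10 -> .50 */
        return -1;
    }

    int main(void)
    {
        uint8_t byte;
        unsigned int temp, frac;

        if (read_sensor_byte(0x01, &byte))
            return 1;
        temp = byte;                 /* whole degrees */

        if (read_sensor_byte(0x10, &byte))
            return 1;
        frac = (byte >> 6) * 25;     /* two MSBs select 0/25/50/75 */

        printf("temperature: %u.%02u C\n", temp, frac);
        return 0;
    }
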
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 2b69392a71a1..5e343919acad 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -136,7 +136,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
136 vha->host_no, fcport->loop_id, fcport->vp_idx)); 136 vha->host_no, fcport->loop_id, fcport->vp_idx));
137 137
138 qla2x00_mark_device_lost(vha, fcport, 0, 0); 138 qla2x00_mark_device_lost(vha, fcport, 0, 0);
139 atomic_set(&fcport->state, FCS_UNCONFIGURED); 139 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
140 } 140 }
141} 141}
142 142
@@ -456,7 +456,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
456 else 456 else
457 host->max_cmd_len = MAX_CMDSZ; 457 host->max_cmd_len = MAX_CMDSZ;
458 host->max_channel = MAX_BUSES - 1; 458 host->max_channel = MAX_BUSES - 1;
459 host->max_lun = MAX_LUNS; 459 host->max_lun = ql2xmaxlun;
460 host->unique_id = host->host_no; 460 host->unique_id = host->host_no;
461 host->max_id = MAX_TARGETS_2200; 461 host->max_id = MAX_TARGETS_2200;
462 host->transportt = qla2xxx_transport_vport_template; 462 host->transportt = qla2xxx_transport_vport_template;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 455fe134d31d..e1138bcc834c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -844,6 +844,12 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
844 return 0; 844 return 0;
845} 845}
846 846
847static void
848qla82xx_rom_unlock(struct qla_hw_data *ha)
849{
850 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
851}
852
847static int 853static int
848qla82xx_wait_rom_busy(struct qla_hw_data *ha) 854qla82xx_wait_rom_busy(struct qla_hw_data *ha)
849{ 855{
@@ -924,7 +930,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
924 return -1; 930 return -1;
925 } 931 }
926 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 932 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
927 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 933 qla82xx_rom_unlock(ha);
928 return ret; 934 return ret;
929} 935}
930 936
@@ -1056,7 +1062,7 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1056 ret = qla82xx_flash_wait_write_finish(ha); 1062 ret = qla82xx_flash_wait_write_finish(ha);
1057 1063
1058done_write: 1064done_write:
1059 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1065 qla82xx_rom_unlock(ha);
1060 return ret; 1066 return ret;
1061} 1067}
1062 1068
@@ -1081,12 +1087,26 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
 1081 /* Halt all the individual PEGs and other blocks of the ISP */ 1087 /* Halt all the individual PEGs and other blocks of the ISP */

1082 qla82xx_rom_lock(ha); 1088 qla82xx_rom_lock(ha);
1083 1089
1084 /* mask all niu interrupts */ 1090 /* disable all I2Q */
1091 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
1092 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
1093 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
1094 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
1095 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
1096 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
1097
1098 /* disable all niu interrupts */
1085 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); 1099 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1086 /* disable xge rx/tx */ 1100 /* disable xge rx/tx */
1087 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); 1101 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1088 /* disable xg1 rx/tx */ 1102 /* disable xg1 rx/tx */
1089 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); 1103 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1104 /* disable sideband mac */
1105 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
1106 /* disable ap0 mac */
1107 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
1108 /* disable ap1 mac */
1109 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
1090 1110
1091 /* halt sre */ 1111 /* halt sre */
1092 val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); 1112 val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -1101,6 +1121,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1101 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); 1121 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1102 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); 1122 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1103 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); 1123 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1124 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1104 1125
1105 /* halt pegs */ 1126 /* halt pegs */
1106 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); 1127 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -1108,9 +1129,9 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1108 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); 1129 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1109 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); 1130 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1110 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); 1131 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1132 msleep(20);
1111 1133
1112 /* big hammer */ 1134 /* big hammer */
1113 msleep(1000);
1114 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 1135 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1115 /* don't reset CAM block on reset */ 1136 /* don't reset CAM block on reset */
1116 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1137 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
@@ -1129,7 +1150,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1129 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); 1150 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1130 msleep(20); 1151 msleep(20);
1131 1152
1132 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1153 qla82xx_rom_unlock(ha);
1133 1154
1134 /* Read the signature value from the flash. 1155 /* Read the signature value from the flash.
1135 * Offset 0: Contain signature (0xcafecafe) 1156 * Offset 0: Contain signature (0xcafecafe)
@@ -2395,9 +2416,13 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2395 2416
2396 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2417 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2397 qla_printk(KERN_ERR, ha, 2418 qla_printk(KERN_ERR, ha,
2398 "Firmware loaded successfully from flash\n"); 2419 "Firmware loaded successfully from flash\n");
2399 return QLA_SUCCESS; 2420 return QLA_SUCCESS;
2421 } else {
2422 qla_printk(KERN_ERR, ha,
2423 "Firmware load from flash failed\n");
2400 } 2424 }
2425
2401try_blob_fw: 2426try_blob_fw:
2402 qla_printk(KERN_INFO, ha, 2427 qla_printk(KERN_INFO, ha,
2403 "Attempting to load firmware from blob\n"); 2428 "Attempting to load firmware from blob\n");
@@ -2548,11 +2573,11 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2548 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 2573 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2549 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2574 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2550 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2575 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2551 cmd_pkt->fcp_data_dseg_len = dsd_list_len; 2576 *dsd_seg++ = cpu_to_le32(dsd_list_len);
2552 } else { 2577 } else {
2553 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2578 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2554 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2579 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2555 *cur_dsd++ = dsd_list_len; 2580 *cur_dsd++ = cpu_to_le32(dsd_list_len);
2556 } 2581 }
2557 cur_dsd = (uint32_t *)next_dsd; 2582 cur_dsd = (uint32_t *)next_dsd;
2558 while (avail_dsds) { 2583 while (avail_dsds) {
@@ -2991,7 +3016,7 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
2991 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3016 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2992 3017
2993done_unprotect: 3018done_unprotect:
2994 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3019 qla82xx_rom_unlock(ha);
2995 return ret; 3020 return ret;
2996} 3021}
2997 3022
@@ -3020,7 +3045,7 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3020 if (qla82xx_write_disable_flash(ha) != 0) 3045 if (qla82xx_write_disable_flash(ha) != 0)
3021 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3046 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
3022done_protect: 3047done_protect:
3023 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3048 qla82xx_rom_unlock(ha);
3024 return ret; 3049 return ret;
3025} 3050}
3026 3051
@@ -3048,7 +3073,7 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3048 } 3073 }
3049 ret = qla82xx_flash_wait_write_finish(ha); 3074 ret = qla82xx_flash_wait_write_finish(ha);
3050done: 3075done:
3051 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3076 qla82xx_rom_unlock(ha);
3052 return ret; 3077 return ret;
3053} 3078}
3054 3079
@@ -3228,7 +3253,7 @@ void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3228 * else died while holding it. 3253 * else died while holding it.
3229 * In either case, unlock. 3254 * In either case, unlock.
3230 */ 3255 */
3231 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3256 qla82xx_rom_unlock(ha);
3232} 3257}
3233 3258
3234/* 3259/*
@@ -3528,15 +3553,18 @@ int
3528qla82xx_device_state_handler(scsi_qla_host_t *vha) 3553qla82xx_device_state_handler(scsi_qla_host_t *vha)
3529{ 3554{
3530 uint32_t dev_state; 3555 uint32_t dev_state;
3556 uint32_t old_dev_state;
3531 int rval = QLA_SUCCESS; 3557 int rval = QLA_SUCCESS;
3532 unsigned long dev_init_timeout; 3558 unsigned long dev_init_timeout;
3533 struct qla_hw_data *ha = vha->hw; 3559 struct qla_hw_data *ha = vha->hw;
3560 int loopcount = 0;
3534 3561
3535 qla82xx_idc_lock(ha); 3562 qla82xx_idc_lock(ha);
3536 if (!vha->flags.init_done) 3563 if (!vha->flags.init_done)
3537 qla82xx_set_drv_active(vha); 3564 qla82xx_set_drv_active(vha);
3538 3565
3539 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3566 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3567 old_dev_state = dev_state;
3540 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 3568 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3541 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3542 3570
@@ -3553,10 +3581,16 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3553 break; 3581 break;
3554 } 3582 }
3555 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3583 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3556 qla_printk(KERN_INFO, ha, 3584 if (old_dev_state != dev_state) {
3557 "2:Device state is 0x%x = %s\n", dev_state, 3585 loopcount = 0;
3558 dev_state < MAX_STATES ? 3586 old_dev_state = dev_state;
3559 qdev_state[dev_state] : "Unknown"); 3587 }
3588 if (loopcount < 5) {
3589 qla_printk(KERN_INFO, ha,
3590 "2:Device state is 0x%x = %s\n", dev_state,
3591 dev_state < MAX_STATES ?
3592 qdev_state[dev_state] : "Unknown");
3593 }
3560 3594
3561 switch (dev_state) { 3595 switch (dev_state) {
3562 case QLA82XX_DEV_READY: 3596 case QLA82XX_DEV_READY:
@@ -3570,6 +3604,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3570 qla82xx_idc_lock(ha); 3604 qla82xx_idc_lock(ha);
3571 break; 3605 break;
3572 case QLA82XX_DEV_NEED_RESET: 3606 case QLA82XX_DEV_NEED_RESET:
3607 if (!ql2xdontresethba)
3573 qla82xx_need_reset_handler(vha); 3608 qla82xx_need_reset_handler(vha);
3574 dev_init_timeout = jiffies + 3609 dev_init_timeout = jiffies +
3575 (ha->nx_dev_init_timeout * HZ); 3610 (ha->nx_dev_init_timeout * HZ);
@@ -3604,6 +3639,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3604 msleep(1000); 3639 msleep(1000);
3605 qla82xx_idc_lock(ha); 3640 qla82xx_idc_lock(ha);
3606 } 3641 }
3642 loopcount++;
3607 } 3643 }
3608exit: 3644exit:
3609 qla82xx_idc_unlock(ha); 3645 qla82xx_idc_unlock(ha);
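
Editor's note: the polling loop above now prints the "2:Device state ..." line only for the first five iterations after a state change (loopcount resets whenever dev_state differs from old_dev_state), so a device stuck in one state no longer floods the log once per second. A condensed sketch of that throttle, with a canned state sequence standing in for the CRB DEV_STATE register read:

    #include <stdio.h>

    int main(void)
    {
        /* Canned device states, standing in for reads of the state register. */
        static const int states[] = { 2, 2, 2, 2, 2, 2, 2, 2, 3, 3 };
        int old_state = -1, loopcount = 0;

        for (unsigned i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
            int state = states[i];

            if (state != old_state) {       /* state changed: start logging again */
                loopcount = 0;
                old_state = state;
            }
            if (loopcount < 5)              /* at most 5 messages per state */
                printf("iteration %u: device state is %d\n", i, state);

            loopcount++;
        }
        return 0;
    }
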
@@ -3621,7 +3657,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3621 if (dev_state == QLA82XX_DEV_NEED_RESET && 3657 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3622 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3658 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3623 qla_printk(KERN_WARNING, ha, 3659 qla_printk(KERN_WARNING, ha,
3624 "%s(): Adapter reset needed!\n", __func__); 3660 "scsi(%ld) %s: Adapter reset needed!\n",
3661 vha->host_no, __func__);
3625 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3662 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3626 qla2xxx_wake_dpc(vha); 3663 qla2xxx_wake_dpc(vha);
3627 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3664 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
@@ -3632,10 +3669,27 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3632 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3669 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3633 qla2xxx_wake_dpc(vha); 3670 qla2xxx_wake_dpc(vha);
3634 } else { 3671 } else {
3635 qla82xx_check_fw_alive(vha);
3636 if (qla82xx_check_fw_alive(vha)) { 3672 if (qla82xx_check_fw_alive(vha)) {
3637 halt_status = qla82xx_rd_32(ha, 3673 halt_status = qla82xx_rd_32(ha,
3638 QLA82XX_PEG_HALT_STATUS1); 3674 QLA82XX_PEG_HALT_STATUS1);
3675 qla_printk(KERN_INFO, ha,
3676 "scsi(%ld): %s, Dumping hw/fw registers:\n "
3677 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n "
3678 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n "
3679 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n "
3680 " PEG_NET_4_PC: 0x%x\n",
3681 vha->host_no, __func__, halt_status,
3682 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3683 qla82xx_rd_32(ha,
3684 QLA82XX_CRB_PEG_NET_0 + 0x3c),
3685 qla82xx_rd_32(ha,
3686 QLA82XX_CRB_PEG_NET_1 + 0x3c),
3687 qla82xx_rd_32(ha,
3688 QLA82XX_CRB_PEG_NET_2 + 0x3c),
3689 qla82xx_rd_32(ha,
3690 QLA82XX_CRB_PEG_NET_3 + 0x3c),
3691 qla82xx_rd_32(ha,
3692 QLA82XX_CRB_PEG_NET_4 + 0x3c));
3639 if (halt_status & HALT_STATUS_UNRECOVERABLE) { 3693 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3640 set_bit(ISP_UNRECOVERABLE, 3694 set_bit(ISP_UNRECOVERABLE,
3641 &vha->dpc_flags); 3695 &vha->dpc_flags);
@@ -3651,8 +3705,9 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3651 if (ha->flags.mbox_busy) { 3705 if (ha->flags.mbox_busy) {
3652 ha->flags.mbox_int = 1; 3706 ha->flags.mbox_int = 1;
3653 DEBUG2(qla_printk(KERN_ERR, ha, 3707 DEBUG2(qla_printk(KERN_ERR, ha,
3654 "Due to fw hung, doing premature " 3708 "scsi(%ld) Due to fw hung, doing "
3655 "completion of mbx command\n")); 3709 "premature completion of mbx "
3710 "command\n", vha->host_no));
3656 if (test_bit(MBX_INTR_WAIT, 3711 if (test_bit(MBX_INTR_WAIT,
3657 &ha->mbx_cmd_flags)) 3712 &ha->mbx_cmd_flags))
3658 complete(&ha->mbx_intr_comp); 3713 complete(&ha->mbx_intr_comp);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index ed5883f1778a..8a21832c6693 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index aa7747529165..f461925a9dfc 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -164,6 +164,20 @@ module_param(ql2xasynctmfenable, int, S_IRUGO);
164MODULE_PARM_DESC(ql2xasynctmfenable, 164MODULE_PARM_DESC(ql2xasynctmfenable,
165 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 165 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
166 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 166 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
167
168int ql2xdontresethba;
169module_param(ql2xdontresethba, int, S_IRUGO);
170MODULE_PARM_DESC(ql2xdontresethba,
171 "Option to specify reset behaviour\n"
172 " 0 (Default) -- Reset on failure.\n"
173 " 1 -- Do not reset on failure.\n");
174
175uint ql2xmaxlun = MAX_LUNS;
176module_param(ql2xmaxlun, uint, S_IRUGO);
177MODULE_PARM_DESC(ql2xmaxlun,
178 "Defines the maximum LU number to register with the SCSI "
179 "midlayer. Default is 65535.");
180
167/* 181/*
168 * SCSI host template entry points 182 * SCSI host template entry points
169 */ 183 */
@@ -528,7 +542,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
528static int 542static int
529qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 543qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
530{ 544{
531 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 545 scsi_qla_host_t *vha = shost_priv(host);
532 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 546 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
533 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 547 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
534 struct qla_hw_data *ha = vha->hw; 548 struct qla_hw_data *ha = vha->hw;
@@ -2128,7 +2142,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2128 else 2142 else
2129 host->max_cmd_len = MAX_CMDSZ; 2143 host->max_cmd_len = MAX_CMDSZ;
2130 host->max_channel = MAX_BUSES - 1; 2144 host->max_channel = MAX_BUSES - 1;
2131 host->max_lun = MAX_LUNS; 2145 host->max_lun = ql2xmaxlun;
2132 host->transportt = qla2xxx_transport_template; 2146 host->transportt = qla2xxx_transport_template;
2133 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2147 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2134 2148
@@ -2360,21 +2374,26 @@ qla2x00_remove_one(struct pci_dev *pdev)
2360 base_vha = pci_get_drvdata(pdev); 2374 base_vha = pci_get_drvdata(pdev);
2361 ha = base_vha->hw; 2375 ha = base_vha->hw;
2362 2376
2363 spin_lock_irqsave(&ha->vport_slock, flags); 2377 mutex_lock(&ha->vport_lock);
2364 list_for_each_entry(vha, &ha->vp_list, list) { 2378 while (ha->cur_vport_count) {
2365 atomic_inc(&vha->vref_count); 2379 struct Scsi_Host *scsi_host;
2366 2380
2367 if (vha->fc_vport) { 2381 spin_lock_irqsave(&ha->vport_slock, flags);
2368 spin_unlock_irqrestore(&ha->vport_slock, flags);
2369 2382
2370 fc_vport_terminate(vha->fc_vport); 2383 BUG_ON(base_vha->list.next == &ha->vp_list);
2384 /* This assumes first entry in ha->vp_list is always base vha */
2385 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
2386 scsi_host = scsi_host_get(vha->host);
2371 2387
2372 spin_lock_irqsave(&ha->vport_slock, flags); 2388 spin_unlock_irqrestore(&ha->vport_slock, flags);
2373 } 2389 mutex_unlock(&ha->vport_lock);
2390
2391 fc_vport_terminate(vha->fc_vport);
2392 scsi_host_put(vha->host);
2374 2393
2375 atomic_dec(&vha->vref_count); 2394 mutex_lock(&ha->vport_lock);
2376 } 2395 }
2377 spin_unlock_irqrestore(&ha->vport_slock, flags); 2396 mutex_unlock(&ha->vport_lock);
2378 2397
2379 set_bit(UNLOADING, &base_vha->dpc_flags); 2398 set_bit(UNLOADING, &base_vha->dpc_flags);
2380 2399
@@ -2544,7 +2563,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2544{ 2563{
2545 if (atomic_read(&fcport->state) == FCS_ONLINE && 2564 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2546 vha->vp_idx == fcport->vp_idx) { 2565 vha->vp_idx == fcport->vp_idx) {
2547 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2566 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2548 qla2x00_schedule_rport_del(vha, fcport, defer); 2567 qla2x00_schedule_rport_del(vha, fcport, defer);
2549 } 2568 }
2550 /* 2569 /*
@@ -2552,7 +2571,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2552 * port but do the retries. 2571 * port but do the retries.
2553 */ 2572 */
2554 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 2573 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2555 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2574 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2556 2575
2557 if (!do_login) 2576 if (!do_login)
2558 return; 2577 return;
@@ -2607,7 +2626,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2607 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2626 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2608 continue; 2627 continue;
2609 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2628 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2610 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2629 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2611 if (defer) 2630 if (defer)
2612 qla2x00_schedule_rport_del(vha, fcport, defer); 2631 qla2x00_schedule_rport_del(vha, fcport, defer);
2613 else if (vha->vp_idx == fcport->vp_idx) 2632 else if (vha->vp_idx == fcport->vp_idx)
@@ -3214,6 +3233,17 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3214 fcport->d_id.b.area, 3233 fcport->d_id.b.area,
3215 fcport->d_id.b.al_pa); 3234 fcport->d_id.b.al_pa);
3216 3235
3236 if (fcport->loop_id == FC_NO_LOOP_ID) {
3237 fcport->loop_id = next_loopid =
3238 ha->min_external_loopid;
3239 status = qla2x00_find_new_loop_id(
3240 vha, fcport);
3241 if (status != QLA_SUCCESS) {
3242 /* Ran out of IDs to use */
3243 break;
3244 }
3245 }
3246
3217 if (IS_ALOGIO_CAPABLE(ha)) { 3247 if (IS_ALOGIO_CAPABLE(ha)) {
3218 fcport->flags |= FCF_ASYNC_SENT; 3248 fcport->flags |= FCF_ASYNC_SENT;
3219 data[0] = 0; 3249 data[0] = 0;
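
Editor's note: the relogin path above now notices a port that has lost its loop ID (FC_NO_LOOP_ID) and asks qla2x00_find_new_loop_id(), freshly exported from qla_init.c, for a free one before retrying the login; if the ID space is exhausted it bails out for this pass. A toy sketch of that allocate-or-bail pattern over a small ID bitmap (the bitmap, limits, and names are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define NO_LOOP_ID  0xffff
    #define MAX_LOOP_ID 8            /* tiny ID space for the example */

    static bool id_in_use[MAX_LOOP_ID];

    /* Find a free loop ID, mark it used, or return -1 when none are left. */
    static int find_new_loop_id(void)
    {
        for (int id = 0; id < MAX_LOOP_ID; id++) {
            if (!id_in_use[id]) {
                id_in_use[id] = true;
                return id;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned int port_loop_id = NO_LOOP_ID;

        if (port_loop_id == NO_LOOP_ID) {
            int id = find_new_loop_id();

            if (id < 0) {
                printf("ran out of loop IDs, retry later\n");
                return 1;
            }
            port_loop_id = (unsigned int)id;
        }
        printf("retrying login with loop ID %u\n", port_loop_id);
        return 0;
    }
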
@@ -3604,7 +3634,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3604 if (!pci_channel_offline(ha->pdev)) 3634 if (!pci_channel_offline(ha->pdev))
3605 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 3635 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3606 3636
3607 if (IS_QLA82XX(ha)) { 3637 /* Make sure qla82xx_watchdog is run only for physical port */
3638 if (!vha->vp_idx && IS_QLA82XX(ha)) {
3608 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 3639 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
3609 start_dpc++; 3640 start_dpc++;
3610 qla82xx_watchdog(vha); 3641 qla82xx_watchdog(vha);
@@ -3612,7 +3643,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3612 3643
3613 /* Loop down handler. */ 3644 /* Loop down handler. */
3614 if (atomic_read(&vha->loop_down_timer) > 0 && 3645 if (atomic_read(&vha->loop_down_timer) > 0 &&
3615 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 3646 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
3647 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
3616 && vha->flags.online) { 3648 && vha->flags.online) {
3617 3649
3618 if (atomic_read(&vha->loop_down_timer) == 3650 if (atomic_read(&vha->loop_down_timer) ==
@@ -3648,7 +3680,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
3648 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 3680 if (!(sfcp->flags & FCF_FCP2_DEVICE))
3649 continue; 3681 continue;
3650 3682
3651 set_bit(ISP_ABORT_NEEDED, 3683 if (IS_QLA82XX(ha))
3684 set_bit(FCOE_CTX_RESET_NEEDED,
3685 &vha->dpc_flags);
3686 else
3687 set_bit(ISP_ABORT_NEEDED,
3652 &vha->dpc_flags); 3688 &vha->dpc_flags);
3653 break; 3689 break;
3654 } 3690 }
@@ -3667,7 +3703,12 @@ qla2x00_timer(scsi_qla_host_t *vha)
3667 qla_printk(KERN_WARNING, ha, 3703 qla_printk(KERN_WARNING, ha,
3668 "Loop down - aborting ISP.\n"); 3704 "Loop down - aborting ISP.\n");
3669 3705
3670 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3706 if (IS_QLA82XX(ha))
3707 set_bit(FCOE_CTX_RESET_NEEDED,
3708 &vha->dpc_flags);
3709 else
3710 set_bit(ISP_ABORT_NEEDED,
3711 &vha->dpc_flags);
3671 } 3712 }
3672 } 3713 }
3673 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 3714 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
@@ -3675,8 +3716,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3675 atomic_read(&vha->loop_down_timer))); 3716 atomic_read(&vha->loop_down_timer)));
3676 } 3717 }
3677 3718
3678 /* Check if beacon LED needs to be blinked */ 3719 /* Check if beacon LED needs to be blinked for physical host only */
3679 if (ha->beacon_blink_led == 1) { 3720 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
3680 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 3721 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3681 start_dpc++; 3722 start_dpc++;
3682 } 3723 }
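
The reworked vport teardown earlier in this file shows a common pattern: pin the entry with a reference while the lock is held (scsi_host_get()), drop the lock before calling a routine that may sleep (fc_vport_terminate()), then put the reference and re-take the lock. A minimal sketch of that pattern under stated assumptions, using made-up parent/child types and a kref instead of the qla2xxx structures:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct child {
	struct list_head node;
	struct kref ref;	/* initialised to 1 when added to the list */
};

struct parent {
	struct mutex lock;
	struct list_head children;
};

static void child_release(struct kref *ref)
{
	kfree(container_of(ref, struct child, ref));
}

/* Stands in for the sleeping call (fc_vport_terminate() in the hunk);
 * it unlinks the child and drops the list's reference to it. */
static void child_teardown(struct parent *p, struct child *c)
{
	mutex_lock(&p->lock);
	list_del_init(&c->node);
	mutex_unlock(&p->lock);
	kref_put(&c->ref, child_release);
}

/* Never call a routine that may sleep while holding the lock: pin the
 * entry with a reference, drop the lock, do the work, then re-take it. */
static void parent_remove_children(struct parent *p)
{
	struct child *c;

	mutex_lock(&p->lock);
	while (!list_empty(&p->children)) {
		c = list_first_entry(&p->children, struct child, node);
		kref_get(&c->ref);	/* keep it alive across the unlock */
		mutex_unlock(&p->lock);

		child_teardown(p, c);	/* sleeping call, lock dropped */
		kref_put(&c->ref, child_release);

		mutex_lock(&p->lock);
	}
	mutex_unlock(&p->lock);
}
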
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index f0b2b9986a55..d70f03008981 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 22070621206c..693647661ed1 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3a260c3f055a..062c97bf62f5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,15 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.07.00" 10#define QLA2XXX_VERSION "8.03.07.03-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 7 14#define QLA_DRIVER_PATCH_VER 7
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 230ba097d28c..c22f2a764d9d 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2068,15 +2068,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
2068 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 2068 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
2069 unsigned int id = cmd->device->id; 2069 unsigned int id = cmd->device->id;
2070 unsigned int lun = cmd->device->lun; 2070 unsigned int lun = cmd->device->lun;
2071 unsigned long serial = cmd->serial_number;
2072 unsigned long flags; 2071 unsigned long flags;
2073 struct srb *srb = NULL; 2072 struct srb *srb = NULL;
2074 int ret = SUCCESS; 2073 int ret = SUCCESS;
2075 int wait = 0; 2074 int wait = 0;
2076 2075
2077 ql4_printk(KERN_INFO, ha, 2076 ql4_printk(KERN_INFO, ha,
2078 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n", 2077 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
2079 ha->host_no, id, lun, cmd, serial); 2078 ha->host_no, id, lun, cmd);
2080 2079
2081 spin_lock_irqsave(&ha->hardware_lock, flags); 2080 spin_lock_irqsave(&ha->hardware_lock, flags);
2082 srb = (struct srb *) CMD_SP(cmd); 2081 srb = (struct srb *) CMD_SP(cmd);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 633c2395a92a..abea2cf05c2e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -321,6 +321,12 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
321 "changed. The Linux SCSI layer does not " 321 "changed. The Linux SCSI layer does not "
322 "automatically adjust these parameters.\n"); 322 "automatically adjust these parameters.\n");
323 323
324 if (sshdr.asc == 0x38 && sshdr.ascq == 0x07)
325 scmd_printk(KERN_WARNING, scmd,
326 "Warning! Received an indication that the "
327 "LUN reached a thin provisioning soft "
328 "threshold.\n");
329
324 /* 330 /*
325 * Pass the UA upwards for a determination in the completion 331 * Pass the UA upwards for a determination in the completion
326 * functions. 332 * functions.
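
The added check fires on additional sense code 0x38/0x07, "thin provisioning soft threshold reached", inside the UNIT ATTENTION handling of scsi_check_sense(). For reference, a minimal sketch of decoding a raw sense buffer and testing for that ASC/ASCQ pair; the helper name is invented, while scsi_normalize_sense() and struct scsi_sense_hdr are the existing midlayer interfaces:

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>

/* Return true if the sense buffer carries the unit attention
 * ASC 0x38 / ASCQ 0x07 (thin provisioning soft threshold reached). */
static bool sense_is_thin_prov_threshold(const u8 *sense, int len)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(sense, len, &sshdr))
		return false;		/* no valid sense data */

	return sshdr.sense_key == UNIT_ATTENTION &&
	       sshdr.asc == 0x38 && sshdr.ascq == 0x07;
}
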
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c99da926fdac..f46855cd853d 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -386,13 +386,59 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
386 * @s: output goes here 386 * @s: output goes here
387 * @p: not used 387 * @p: not used
388 */ 388 */
389static int proc_scsi_show(struct seq_file *s, void *p) 389static int always_match(struct device *dev, void *data)
390{ 390{
391 seq_printf(s, "Attached devices:\n"); 391 return 1;
392 bus_for_each_dev(&scsi_bus_type, NULL, s, proc_print_scsidevice); 392}
393 return 0; 393
394static inline struct device *next_scsi_device(struct device *start)
395{
396 struct device *next = bus_find_device(&scsi_bus_type, start, NULL,
397 always_match);
398 put_device(start);
399 return next;
394} 400}
395 401
402static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos)
403{
404 struct device *dev = NULL;
405 loff_t n = *pos;
406
407 while ((dev = next_scsi_device(dev))) {
408 if (!n--)
409 break;
410 sfile->private++;
411 }
412 return dev;
413}
414
415static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
416{
417 (*pos)++;
418 sfile->private++;
419 return next_scsi_device(v);
420}
421
422static void scsi_seq_stop(struct seq_file *sfile, void *v)
423{
424 put_device(v);
425}
426
427static int scsi_seq_show(struct seq_file *sfile, void *dev)
428{
429 if (!sfile->private)
430 seq_puts(sfile, "Attached devices:\n");
431
432 return proc_print_scsidevice(dev, sfile);
433}
434
435static const struct seq_operations scsi_seq_ops = {
436 .start = scsi_seq_start,
437 .next = scsi_seq_next,
438 .stop = scsi_seq_stop,
439 .show = scsi_seq_show
440};
441
396/** 442/**
397 * proc_scsi_open - glue function 443 * proc_scsi_open - glue function
398 * @inode: not used 444 * @inode: not used
@@ -406,7 +452,7 @@ static int proc_scsi_open(struct inode *inode, struct file *file)
406 * We don't really need this for the write case but it doesn't 452 * We don't really need this for the write case but it doesn't
407 * harm either. 453 * harm either.
408 */ 454 */
409 return single_open(file, proc_scsi_show, NULL); 455 return seq_open(file, &scsi_seq_ops);
410} 456}
411 457
412static const struct file_operations proc_scsi_operations = { 458static const struct file_operations proc_scsi_operations = {
@@ -415,7 +461,7 @@ static const struct file_operations proc_scsi_operations = {
415 .read = seq_read, 461 .read = seq_read,
416 .write = proc_scsi_write, 462 .write = proc_scsi_write,
417 .llseek = seq_lseek, 463 .llseek = seq_lseek,
418 .release = single_release, 464 .release = seq_release,
419}; 465};
420 466
421/** 467/**
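
The conversion above drops single_open() in favour of a real seq_file iterator, so /proc/scsi/scsi is generated device by device instead of into one buffer; the private field is used as a position counter so the "Attached devices:" header is printed only before the first record. A minimal, self-contained sketch of the same start/next/stop/show contract over a hypothetical static table (all demo_* names are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static const char * const demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return *pos < ARRAY_SIZE(demo_items) ?
		(void *)&demo_items[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return demo_start(m, pos);	/* next record or NULL at the end */
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to release for a static table */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char * const *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
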
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 8bca8c25ba69..84a1fdf67864 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -275,10 +275,8 @@ void scsi_tgt_free_queue(struct Scsi_Host *shost)
275 275
276 for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) { 276 for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
277 list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i], 277 list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
278 hash_list) { 278 hash_list)
279 list_del(&tcmd->hash_list); 279 list_move(&tcmd->hash_list, &cmds);
280 list_add(&tcmd->hash_list, &cmds);
281 }
282 } 280 }
283 281
284 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); 282 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
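
The scsi_tgt_lib.c hunk is a straight cleanup: list_move() is list_del() of the entry followed by list_add() onto the new head, so behaviour is unchanged. A tiny sketch with an illustrative type:

#include <linux/list.h>

struct demo_cmd {
	struct list_head hash_list;
};

/* The open-coded form:
 *
 *	list_del(&cmd->hash_list);
 *	list_add(&cmd->hash_list, done);
 *
 * collapses to the single helper below; list_move_tail() is the
 * FIFO-preserving variant.
 */
static void demo_move(struct demo_cmd *cmd, struct list_head *done)
{
	list_move(&cmd->hash_list, done);
}
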
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 815069d13f9b..1b214910b714 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -422,8 +422,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
422 422
423 snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), 423 snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
424 "fc_wq_%d", shost->host_no); 424 "fc_wq_%d", shost->host_no);
425 fc_host->work_q = create_singlethread_workqueue( 425 fc_host->work_q = alloc_workqueue(fc_host->work_q_name, 0, 0);
426 fc_host->work_q_name);
427 if (!fc_host->work_q) 426 if (!fc_host->work_q)
428 return -ENOMEM; 427 return -ENOMEM;
429 428
@@ -431,8 +430,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
431 snprintf(fc_host->devloss_work_q_name, 430 snprintf(fc_host->devloss_work_q_name,
432 sizeof(fc_host->devloss_work_q_name), 431 sizeof(fc_host->devloss_work_q_name),
433 "fc_dl_%d", shost->host_no); 432 "fc_dl_%d", shost->host_no);
434 fc_host->devloss_work_q = create_singlethread_workqueue( 433 fc_host->devloss_work_q =
435 fc_host->devloss_work_q_name); 434 alloc_workqueue(fc_host->devloss_work_q_name, 0, 0);
436 if (!fc_host->devloss_work_q) { 435 if (!fc_host->devloss_work_q) {
437 destroy_workqueue(fc_host->work_q); 436 destroy_workqueue(fc_host->work_q);
438 fc_host->work_q = NULL; 437 fc_host->work_q = NULL;
@@ -2489,6 +2488,8 @@ fc_rport_final_delete(struct work_struct *work)
2489 unsigned long flags; 2488 unsigned long flags;
2490 int do_callback = 0; 2489 int do_callback = 0;
2491 2490
2491 fc_terminate_rport_io(rport);
2492
2492 /* 2493 /*
2493 * if a scan is pending, flush the SCSI Host work_q so that 2494 * if a scan is pending, flush the SCSI Host work_q so that
2494 * that we can reclaim the rport scan work element. 2495 * that we can reclaim the rport scan work element.
@@ -2496,8 +2497,6 @@ fc_rport_final_delete(struct work_struct *work)
2496 if (rport->flags & FC_RPORT_SCAN_PENDING) 2497 if (rport->flags & FC_RPORT_SCAN_PENDING)
2497 scsi_flush_work(shost); 2498 scsi_flush_work(shost);
2498 2499
2499 fc_terminate_rport_io(rport);
2500
2501 /* 2500 /*
2502 * Cancel any outstanding timers. These should really exist 2501 * Cancel any outstanding timers. These should really exist
2503 * only when rmmod'ing the LLDD and we're asking for 2502 * only when rmmod'ing the LLDD and we're asking for
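
Two independent changes in scsi_transport_fc.c: the per-host work queues are now created with alloc_workqueue() rather than create_singlethread_workqueue(), and fc_terminate_rport_io() runs before any pending scan work is flushed instead of after. A minimal sketch of the alloc_workqueue() lifecycle, with an invented queue name and work handler; flags 0 and max_active 0 give an ordinary concurrent queue, while the old helper was roughly an unbound queue limited to one work item at a time:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
	/* runs in process context from the workqueue */
}

static int demo_wq_setup(void)
{
	struct workqueue_struct *wq;
	struct work_struct *w;

	wq = alloc_workqueue("demo_wq", 0, 0);	/* default flags/concurrency */
	if (!wq)
		return -ENOMEM;

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w) {
		destroy_workqueue(wq);
		return -ENOMEM;
	}
	INIT_WORK(w, demo_work_fn);
	queue_work(wq, w);

	flush_workqueue(wq);		/* wait for demo_work_fn to finish */
	kfree(w);
	destroy_workqueue(wq);
	return 0;
}
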
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index a124a28f2ccb..a1baccce05f0 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -565,12 +565,12 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
565 pDCB->TagMask |= 1 << tag[1]; 565 pDCB->TagMask |= 1 << tag[1];
566 pSRB->TagNumber = tag[1]; 566 pSRB->TagNumber = tag[1];
567 DC390_write8(ScsiFifo, tag[1]); 567 DC390_write8(ScsiFifo, tag[1]);
568 DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for Cmd %li (SRB %p), block tag %02x\n", scmd->serial_number, pSRB, tag[1])); 568 DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, tag[1]));
569 cmd = SEL_W_ATN3; 569 cmd = SEL_W_ATN3;
570 } else { 570 } else {
571 /* No TagQ */ 571 /* No TagQ */
572//no_tag: 572//no_tag:
573 DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for Cmd %li (SRB %p), No TagQ\n", disc_allowed ? "" : "o", scmd->serial_number, pSRB)); 573 DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? "" : "o", pSRB));
574 } 574 }
575 575
576 pSRB->SRBState = SRB_START_; 576 pSRB->SRBState = SRB_START_;
@@ -620,8 +620,8 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
620 if (DC390_read8 (Scsi_Status) & INTERRUPT) 620 if (DC390_read8 (Scsi_Status) & INTERRUPT)
621 { 621 {
622 dc390_freetag (pDCB, pSRB); 622 dc390_freetag (pDCB, pSRB);
623 DEBUG0(printk ("DC390: Interrupt during Start SCSI (pid %li, target %02i-%02i)\n", 623 DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n",
624 scmd->serial_number, scmd->device->id, scmd->device->lun)); 624 scmd->device->id, scmd->device->lun));
625 pSRB->SRBState = SRB_READY; 625 pSRB->SRBState = SRB_READY;
626 //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); 626 //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
627 pACB->SelLost++; 627 pACB->SelLost++;
@@ -1705,8 +1705,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1705 1705
1706 status = pSRB->TargetStatus; 1706 status = pSRB->TargetStatus;
1707 1707
1708 DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p, pid %li\n", status, pcmd->result,\ 1708 DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB));
1709 pSRB, pcmd->serial_number));
1710 if(pSRB->SRBFlag & AUTO_REQSENSE) 1709 if(pSRB->SRBFlag & AUTO_REQSENSE)
1711 { /* Last command was a Request Sense */ 1710 { /* Last command was a Request Sense */
1712 pSRB->SRBFlag &= ~AUTO_REQSENSE; 1711 pSRB->SRBFlag &= ~AUTO_REQSENSE;
@@ -1727,7 +1726,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1727 } else { 1726 } else {
1728 SET_RES_DRV(pcmd->result, DRIVER_SENSE); 1727 SET_RES_DRV(pcmd->result, DRIVER_SENSE);
1729 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); 1728 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
1730 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1729 DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1731 pSRB->TotalXferredLen = 0; 1730 pSRB->TotalXferredLen = 0;
1732 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1731 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1733 } 1732 }
@@ -1747,7 +1746,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1747 else if (status == SAM_STAT_TASK_SET_FULL) 1746 else if (status == SAM_STAT_TASK_SET_FULL)
1748 { 1747 {
1749 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); 1748 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
1750 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1749 DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1751 pSRB->TotalXferredLen = 0; 1750 pSRB->TotalXferredLen = 0;
1752 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1751 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1753 } 1752 }
@@ -1801,7 +1800,7 @@ cmd_done:
1801 /* Add to free list */ 1800 /* Add to free list */
1802 dc390_Free_insert (pACB, pSRB); 1801 dc390_Free_insert (pACB, pSRB);
1803 1802
1804 DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done pid %li\n", pcmd->serial_number)); 1803 DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n"));
1805 pcmd->scsi_done (pcmd); 1804 pcmd->scsi_done (pcmd);
1806 1805
1807 return; 1806 return;
@@ -1997,8 +1996,7 @@ static int DC390_abort(struct scsi_cmnd *cmd)
1997 struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata; 1996 struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata;
1998 struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata; 1997 struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata;
1999 1998
2000 scmd_printk(KERN_WARNING, cmd, 1999 scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n");
2001 "DC390: Abort command (pid %li)\n", cmd->serial_number);
2002 2000
2003 /* abort() is too stupid for already sent commands at the moment. 2001 /* abort() is too stupid for already sent commands at the moment.
2004 * If it's called we are in trouble anyway, so let's dump some info 2002 * If it's called we are in trouble anyway, so let's dump some info
@@ -2006,7 +2004,7 @@ static int DC390_abort(struct scsi_cmnd *cmd)
2006 dc390_dumpinfo(pACB, pDCB, NULL); 2004 dc390_dumpinfo(pACB, pDCB, NULL);
2007 2005
2008 pDCB->DCBFlag |= ABORT_DEV_; 2006 pDCB->DCBFlag |= ABORT_DEV_;
2009 printk(KERN_INFO "DC390: Aborted pid %li\n", cmd->serial_number); 2007 printk(KERN_INFO "DC390: Aborted.\n");
2010 2008
2011 return FAILED; 2009 return FAILED;
2012} 2010}
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index edfc5da8be4c..90e104d6b558 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1256,8 +1256,8 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
1256 j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number; 1256 j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number;
1257 1257
1258 if (SCpnt->host_scribble) 1258 if (SCpnt->host_scribble)
1259 panic("%s: qcomm, pid %ld, SCpnt %p already active.\n", 1259 panic("%s: qcomm, SCpnt %p already active.\n",
1260 BN(j), SCpnt->serial_number, SCpnt); 1260 BN(j), SCpnt);
1261 1261
1262 /* i is the mailbox number, look for the first free mailbox 1262 /* i is the mailbox number, look for the first free mailbox
1263 starting from last_cp_used */ 1263 starting from last_cp_used */
@@ -1286,9 +1286,9 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
1286 cpp->cpp_index = i; 1286 cpp->cpp_index = i;
1287 SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index; 1287 SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;
1288 1288
1289 if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n", 1289 if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d.\n",
1290 BN(j), i, SCpnt->device->channel, SCpnt->device->id, 1290 BN(j), i, SCpnt->device->channel, SCpnt->device->id,
1291 SCpnt->device->lun, SCpnt->serial_number); 1291 SCpnt->device->lun);
1292 1292
1293 cpp->opcode = OP_SCSI; 1293 cpp->opcode = OP_SCSI;
1294 cpp->channel = SCpnt->device->channel; 1294 cpp->channel = SCpnt->device->channel;
@@ -1315,7 +1315,7 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
1315 unmap_dma(i, j); 1315 unmap_dma(i, j);
1316 SCpnt->host_scribble = NULL; 1316 SCpnt->host_scribble = NULL;
1317 scmd_printk(KERN_INFO, SCpnt, 1317 scmd_printk(KERN_INFO, SCpnt,
1318 "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number); 1318 "qcomm, adapter busy.\n");
1319 return 1; 1319 return 1;
1320 } 1320 }
1321 1321
@@ -1337,14 +1337,12 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
1337 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; 1337 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
1338 1338
1339 if (SCarg->host_scribble == NULL) { 1339 if (SCarg->host_scribble == NULL) {
1340 scmd_printk(KERN_INFO, SCarg, "abort, pid %ld inactive.\n", 1340 scmd_printk(KERN_INFO, SCarg, "abort, command inactive.\n");
1341 SCarg->serial_number);
1342 return SUCCESS; 1341 return SUCCESS;
1343 } 1342 }
1344 1343
1345 i = *(unsigned int *)SCarg->host_scribble; 1344 i = *(unsigned int *)SCarg->host_scribble;
1346 scmd_printk(KERN_INFO, SCarg, "abort, mbox %d, pid %ld.\n", 1345 scmd_printk(KERN_INFO, SCarg, "abort, mbox %d.\n", i);
1347 i, SCarg->serial_number);
1348 1346
1349 if (i >= sh[j]->can_queue) 1347 if (i >= sh[j]->can_queue)
1350 panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j)); 1348 panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
@@ -1387,8 +1385,7 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
1387 SCarg->result = DID_ABORT << 16; 1385 SCarg->result = DID_ABORT << 16;
1388 SCarg->host_scribble = NULL; 1386 SCarg->host_scribble = NULL;
1389 HD(j)->cp_stat[i] = FREE; 1387 HD(j)->cp_stat[i] = FREE;
1390 printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n", 1388 printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", BN(j), i);
1391 BN(j), i, SCarg->serial_number);
1392 SCarg->scsi_done(SCarg); 1389 SCarg->scsi_done(SCarg);
1393 return SUCCESS; 1390 return SUCCESS;
1394 } 1391 }
@@ -1403,12 +1400,12 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1403 struct scsi_cmnd *SCpnt; 1400 struct scsi_cmnd *SCpnt;
1404 1401
1405 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; 1402 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
1406 scmd_printk(KERN_INFO, SCarg, "reset, enter, pid %ld.\n", SCarg->serial_number); 1403 scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");
1407 1404
1408 spin_lock_irq(sh[j]->host_lock); 1405 spin_lock_irq(sh[j]->host_lock);
1409 1406
1410 if (SCarg->host_scribble == NULL) 1407 if (SCarg->host_scribble == NULL)
1411 printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->serial_number); 1408 printk("%s: reset, inactive.\n", BN(j));
1412 1409
1413 if (HD(j)->in_reset) { 1410 if (HD(j)->in_reset) {
1414 printk("%s: reset, exit, already in reset.\n", BN(j)); 1411 printk("%s: reset, exit, already in reset.\n", BN(j));
@@ -1445,14 +1442,12 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1445 1442
1446 if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) { 1443 if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
1447 HD(j)->cp_stat[i] = ABORTING; 1444 HD(j)->cp_stat[i] = ABORTING;
1448 printk("%s: reset, mbox %d aborting, pid %ld.\n", 1445 printk("%s: reset, mbox %d aborting.\n", BN(j), i);
1449 BN(j), i, SCpnt->serial_number);
1450 } 1446 }
1451 1447
1452 else { 1448 else {
1453 HD(j)->cp_stat[i] = IN_RESET; 1449 HD(j)->cp_stat[i] = IN_RESET;
1454 printk("%s: reset, mbox %d in reset, pid %ld.\n", 1450 printk("%s: reset, mbox %d in reset.\n", BN(j), i);
1455 BN(j), i, SCpnt->serial_number);
1456 } 1451 }
1457 1452
1458 if (SCpnt->host_scribble == NULL) 1453 if (SCpnt->host_scribble == NULL)
@@ -1500,8 +1495,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1500 /* This mailbox is still waiting for its interrupt */ 1495 /* This mailbox is still waiting for its interrupt */
1501 HD(j)->cp_stat[i] = LOCKED; 1496 HD(j)->cp_stat[i] = LOCKED;
1502 1497
1503 printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", 1498 printk("%s, reset, mbox %d locked, DID_RESET, done.\n", BN(j), i);
1504 BN(j), i, SCpnt->serial_number);
1505 } 1499 }
1506 1500
1507 else if (HD(j)->cp_stat[i] == ABORTING) { 1501 else if (HD(j)->cp_stat[i] == ABORTING) {
@@ -1513,8 +1507,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1513 /* This mailbox was never queued to the adapter */ 1507 /* This mailbox was never queued to the adapter */
1514 HD(j)->cp_stat[i] = FREE; 1508 HD(j)->cp_stat[i] = FREE;
1515 1509
1516 printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", 1510 printk("%s, reset, mbox %d aborting, DID_RESET, done.\n", BN(j), i);
1517 BN(j), i, SCpnt->serial_number);
1518 } 1511 }
1519 1512
1520 else 1513 else
@@ -1528,7 +1521,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1528 HD(j)->in_reset = FALSE; 1521 HD(j)->in_reset = FALSE;
1529 do_trace = FALSE; 1522 do_trace = FALSE;
1530 1523
1531 if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->serial_number); 1524 if (arg_done) printk("%s: reset, exit, done.\n", BN(j));
1532 else printk("%s: reset, exit.\n", BN(j)); 1525 else printk("%s: reset, exit.\n", BN(j));
1533 1526
1534 spin_unlock_irq(sh[j]->host_lock); 1527 spin_unlock_irq(sh[j]->host_lock);
@@ -1671,10 +1664,10 @@ static int reorder(unsigned int j, unsigned long cursec,
1671 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1664 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1672 for (n = 0; n < n_ready; n++) { 1665 for (n = 0; n < n_ready; n++) {
1673 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1666 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1674 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\ 1667 printk("%s %d.%d:%d mb %d fc %d nr %d sec %ld ns %u"\
1675 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1668 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1676 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1669 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1677 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1670 SCpnt->lun, k, flushcount, n_ready,
1678 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), 1671 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1679 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1672 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1680 YESNO(overlap), cpp->xdir); 1673 YESNO(overlap), cpp->xdir);
@@ -1709,9 +1702,9 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in
1709 1702
1710 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { 1703 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
1711 scmd_printk(KERN_INFO, SCpnt, 1704 scmd_printk(KERN_INFO, SCpnt,
1712 "%s, pid %ld, mbox %d, adapter" 1705 "%s, mbox %d, adapter"
1713 " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"), 1706 " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"),
1714 SCpnt->serial_number, k); 1707 k);
1715 HD(j)->cp_stat[k] = ABORTING; 1708 HD(j)->cp_stat[k] = ABORTING;
1716 continue; 1709 continue;
1717 } 1710 }
@@ -1793,12 +1786,12 @@ static irqreturn_t ihdlr(unsigned int j)
1793 if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i); 1786 if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
1794 1787
1795 if (SCpnt->host_scribble == NULL) 1788 if (SCpnt->host_scribble == NULL)
1796 panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i, 1789 panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", BN(j), i,
1797 SCpnt->serial_number, SCpnt); 1790 SCpnt);
1798 1791
1799 if (*(unsigned int *)SCpnt->host_scribble != i) 1792 if (*(unsigned int *)SCpnt->host_scribble != i)
1800 panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", 1793 panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
1801 BN(j), i, SCpnt->serial_number, *(unsigned int *)SCpnt->host_scribble); 1794 BN(j), i, *(unsigned int *)SCpnt->host_scribble);
1802 1795
1803 sync_dma(i, j); 1796 sync_dma(i, j);
1804 1797
@@ -1841,8 +1834,8 @@ static irqreturn_t ihdlr(unsigned int j)
1841 (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 && 1834 (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 &&
1842 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) 1835 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
1843 scmd_printk(KERN_INFO, SCpnt, 1836 scmd_printk(KERN_INFO, SCpnt,
1844 "ihdlr, pid %ld, target_status 0x%x, sense key 0x%x.\n", 1837 "ihdlr, target_status 0x%x, sense key 0x%x.\n",
1845 SCpnt->serial_number, spp->target_status, 1838 spp->target_status,
1846 SCpnt->sense_buffer[2]); 1839 SCpnt->sense_buffer[2]);
1847 1840
1848 HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0; 1841 HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0;
@@ -1913,8 +1906,8 @@ static irqreturn_t ihdlr(unsigned int j)
1913 do_trace || msg_byte(spp->target_status)) 1906 do_trace || msg_byte(spp->target_status))
1914#endif 1907#endif
1915 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\ 1908 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\
1916 " pid %ld, reg 0x%x, count %d.\n", 1909 " reg 0x%x, count %d.\n",
1917 i, spp->adapter_status, spp->target_status, SCpnt->serial_number, 1910 i, spp->adapter_status, spp->target_status,
1918 reg, HD(j)->iocount); 1911 reg, HD(j)->iocount);
1919 1912
1920 unmap_dma(i, j); 1913 unmap_dma(i, j);
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 4468ae3610f7..97ae716134d0 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -381,7 +381,7 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
381 hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; 381 hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata;
382 382
383 DB(DB_QUEUE_COMMAND, 383 DB(DB_QUEUE_COMMAND,
384 printk("Q-%d-%02x-%ld( ", cmd->device->id, cmd->cmnd[0], cmd->serial_number)) 384 printk("Q-%d-%02x( ", cmd->device->id, cmd->cmnd[0]))
385 385
386/* Set up a few fields in the scsi_cmnd structure for our own use: 386/* Set up a few fields in the scsi_cmnd structure for our own use:
387 * - host_scribble is the pointer to the next cmd in the input queue 387 * - host_scribble is the pointer to the next cmd in the input queue
@@ -462,7 +462,7 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
462 462
463 wd33c93_execute(cmd->device->host); 463 wd33c93_execute(cmd->device->host);
464 464
465 DB(DB_QUEUE_COMMAND, printk(")Q-%ld ", cmd->serial_number)) 465 DB(DB_QUEUE_COMMAND, printk(")Q "))
466 466
467 spin_unlock_irq(&hostdata->lock); 467 spin_unlock_irq(&hostdata->lock);
468 return 0; 468 return 0;
@@ -687,7 +687,7 @@ wd33c93_execute(struct Scsi_Host *instance)
687 */ 687 */
688 688
689 DB(DB_EXECUTE, 689 DB(DB_EXECUTE,
690 printk("%s%ld)EX-2 ", (cmd->SCp.phase) ? "d:" : "", cmd->serial_number)) 690 printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
691} 691}
692 692
693static void 693static void
@@ -963,7 +963,7 @@ wd33c93_intr(struct Scsi_Host *instance)
963 case CSR_XFER_DONE | PHS_COMMAND: 963 case CSR_XFER_DONE | PHS_COMMAND:
964 case CSR_UNEXP | PHS_COMMAND: 964 case CSR_UNEXP | PHS_COMMAND:
965 case CSR_SRV_REQ | PHS_COMMAND: 965 case CSR_SRV_REQ | PHS_COMMAND:
966 DB(DB_INTR, printk("CMND-%02x,%ld", cmd->cmnd[0], cmd->serial_number)) 966 DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
967 transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, 967 transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR,
968 hostdata); 968 hostdata);
969 hostdata->state = S_CONNECTED; 969 hostdata->state = S_CONNECTED;
@@ -1007,7 +1007,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1007 switch (msg) { 1007 switch (msg) {
1008 1008
1009 case COMMAND_COMPLETE: 1009 case COMMAND_COMPLETE:
1010 DB(DB_INTR, printk("CCMP-%ld", cmd->serial_number)) 1010 DB(DB_INTR, printk("CCMP"))
1011 write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); 1011 write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
1012 hostdata->state = S_PRE_CMP_DISC; 1012 hostdata->state = S_PRE_CMP_DISC;
1013 break; 1013 break;
@@ -1174,7 +1174,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1174 1174
1175 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); 1175 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
1176 if (phs == 0x60) { 1176 if (phs == 0x60) {
1177 DB(DB_INTR, printk("SX-DONE-%ld", cmd->serial_number)) 1177 DB(DB_INTR, printk("SX-DONE"))
1178 cmd->SCp.Message = COMMAND_COMPLETE; 1178 cmd->SCp.Message = COMMAND_COMPLETE;
1179 lun = read_wd33c93(regs, WD_TARGET_LUN); 1179 lun = read_wd33c93(regs, WD_TARGET_LUN);
1180 DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) 1180 DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
@@ -1200,8 +1200,8 @@ wd33c93_intr(struct Scsi_Host *instance)
1200 wd33c93_execute(instance); 1200 wd33c93_execute(instance);
1201 } else { 1201 } else {
1202 printk 1202 printk
1203 ("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---", 1203 ("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---",
1204 asr, sr, phs, cmd->serial_number); 1204 asr, sr, phs);
1205 spin_unlock_irqrestore(&hostdata->lock, flags); 1205 spin_unlock_irqrestore(&hostdata->lock, flags);
1206 } 1206 }
1207 break; 1207 break;
@@ -1266,7 +1266,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1266 spin_unlock_irqrestore(&hostdata->lock, flags); 1266 spin_unlock_irqrestore(&hostdata->lock, flags);
1267 return; 1267 return;
1268 } 1268 }
1269 DB(DB_INTR, printk("UNEXP_DISC-%ld", cmd->serial_number)) 1269 DB(DB_INTR, printk("UNEXP_DISC"))
1270 hostdata->connected = NULL; 1270 hostdata->connected = NULL;
1271 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 1271 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
1272 hostdata->state = S_UNCONNECTED; 1272 hostdata->state = S_UNCONNECTED;
@@ -1292,7 +1292,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1292 */ 1292 */
1293 1293
1294 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); 1294 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
1295 DB(DB_INTR, printk("DISC-%ld", cmd->serial_number)) 1295 DB(DB_INTR, printk("DISC"))
1296 if (cmd == NULL) { 1296 if (cmd == NULL) {
1297 printk(" - Already disconnected! "); 1297 printk(" - Already disconnected! ");
1298 hostdata->state = S_UNCONNECTED; 1298 hostdata->state = S_UNCONNECTED;
@@ -1491,7 +1491,6 @@ wd33c93_intr(struct Scsi_Host *instance)
1491 } else 1491 } else
1492 hostdata->state = S_CONNECTED; 1492 hostdata->state = S_CONNECTED;
1493 1493
1494 DB(DB_INTR, printk("-%ld", cmd->serial_number))
1495 spin_unlock_irqrestore(&hostdata->lock, flags); 1494 spin_unlock_irqrestore(&hostdata->lock, flags);
1496 break; 1495 break;
1497 1496
@@ -1637,8 +1636,8 @@ wd33c93_abort(struct scsi_cmnd * cmd)
1637 cmd->host_scribble = NULL; 1636 cmd->host_scribble = NULL;
1638 cmd->result = DID_ABORT << 16; 1637 cmd->result = DID_ABORT << 16;
1639 printk 1638 printk
1640 ("scsi%d: Abort - removing command %ld from input_Q. ", 1639 ("scsi%d: Abort - removing command from input_Q. ",
1641 instance->host_no, cmd->serial_number); 1640 instance->host_no);
1642 enable_irq(cmd->device->host->irq); 1641 enable_irq(cmd->device->host->irq);
1643 cmd->scsi_done(cmd); 1642 cmd->scsi_done(cmd);
1644 return SUCCESS; 1643 return SUCCESS;
@@ -1662,8 +1661,8 @@ wd33c93_abort(struct scsi_cmnd * cmd)
1662 uchar sr, asr; 1661 uchar sr, asr;
1663 unsigned long timeout; 1662 unsigned long timeout;
1664 1663
1665 printk("scsi%d: Aborting connected command %ld - ", 1664 printk("scsi%d: Aborting connected command - ",
1666 instance->host_no, cmd->serial_number); 1665 instance->host_no);
1667 1666
1668 printk("stopping DMA - "); 1667 printk("stopping DMA - ");
1669 if (hostdata->dma == D_DMA_RUNNING) { 1668 if (hostdata->dma == D_DMA_RUNNING) {
@@ -1729,8 +1728,8 @@ wd33c93_abort(struct scsi_cmnd * cmd)
1729 while (tmp) { 1728 while (tmp) {
1730 if (tmp == cmd) { 1729 if (tmp == cmd) {
1731 printk 1730 printk
1732 ("scsi%d: Abort - command %ld found on disconnected_Q - ", 1731 ("scsi%d: Abort - command found on disconnected_Q - ",
1733 instance->host_no, cmd->serial_number); 1732 instance->host_no);
1734 printk("Abort SNOOZE. "); 1733 printk("Abort SNOOZE. ");
1735 enable_irq(cmd->device->host->irq); 1734 enable_irq(cmd->device->host->irq);
1736 return FAILED; 1735 return FAILED;
@@ -2180,8 +2179,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2180 strcat(bp, "\nconnected: "); 2179 strcat(bp, "\nconnected: ");
2181 if (hd->connected) { 2180 if (hd->connected) {
2182 cmd = (struct scsi_cmnd *) hd->connected; 2181 cmd = (struct scsi_cmnd *) hd->connected;
2183 sprintf(tbuf, " %ld-%d:%d(%02x)", 2182 sprintf(tbuf, " %d:%d(%02x)",
2184 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2183 cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2185 strcat(bp, tbuf); 2184 strcat(bp, tbuf);
2186 } 2185 }
2187 } 2186 }
@@ -2189,8 +2188,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2189 strcat(bp, "\ninput_Q: "); 2188 strcat(bp, "\ninput_Q: ");
2190 cmd = (struct scsi_cmnd *) hd->input_Q; 2189 cmd = (struct scsi_cmnd *) hd->input_Q;
2191 while (cmd) { 2190 while (cmd) {
2192 sprintf(tbuf, " %ld-%d:%d(%02x)", 2191 sprintf(tbuf, " %d:%d(%02x)",
2193 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2192 cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2194 strcat(bp, tbuf); 2193 strcat(bp, tbuf);
2195 cmd = (struct scsi_cmnd *) cmd->host_scribble; 2194 cmd = (struct scsi_cmnd *) cmd->host_scribble;
2196 } 2195 }
@@ -2199,8 +2198,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2199 strcat(bp, "\ndisconnected_Q:"); 2198 strcat(bp, "\ndisconnected_Q:");
2200 cmd = (struct scsi_cmnd *) hd->disconnected_Q; 2199 cmd = (struct scsi_cmnd *) hd->disconnected_Q;
2201 while (cmd) { 2200 while (cmd) {
2202 sprintf(tbuf, " %ld-%d:%d(%02x)", 2201 sprintf(tbuf, " %d:%d(%02x)",
2203 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2202 cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2204 strcat(bp, tbuf); 2203 strcat(bp, tbuf);
2205 cmd = (struct scsi_cmnd *) cmd->host_scribble; 2204 cmd = (struct scsi_cmnd *) cmd->host_scribble;
2206 } 2205 }
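
The tmscsim, u14-34f and wd33c93 hunks all drop scsi_cmnd->serial_number from their diagnostics, since the field is being removed; the scmd_printk() prefix (host, channel, id, lun) plus the command pointer or CDB opcode identifies the command just as well. A short sketch of the replacement style, with an invented function name:

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Old style:	printk("... pid %ld ...", cmd->serial_number);
 * New style:	let scmd_printk() prefix the device coordinates and
 *		log the command pointer and opcode instead. */
static void demo_log_abort(struct scsi_cmnd *cmd)
{
	scmd_printk(KERN_WARNING, cmd, "aborting cmd %p, opcode 0x%02x\n",
		    cmd, cmd->cmnd[0]);
}
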
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 9ef2dbbfa62b..5cb0f0ef6af0 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -30,5 +30,6 @@ config TCM_PSCSI
30 passthrough access to Linux/SCSI device 30 passthrough access to Linux/SCSI device
31 31
32source "drivers/target/loopback/Kconfig" 32source "drivers/target/loopback/Kconfig"
33source "drivers/target/tcm_fc/Kconfig"
33 34
34endif 35endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 1178bbfc68fe..21df808a992c 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,3 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
24 24
25# Fabric modules 25# Fabric modules
26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ 26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
27
28obj-$(CONFIG_TCM_FC) += tcm_fc/
diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig
new file mode 100644
index 000000000000..40caf458e89e
--- /dev/null
+++ b/drivers/target/tcm_fc/Kconfig
@@ -0,0 +1,5 @@
1config TCM_FC
2 tristate "TCM_FC fabric Plugin"
3 depends on LIBFC
4 help
5 Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
new file mode 100644
index 000000000000..7a5c2b64cf65
--- /dev/null
+++ b/drivers/target/tcm_fc/Makefile
@@ -0,0 +1,15 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
2 -I$(srctree)/drivers/scsi/ \
3 -I$(srctree)/include/scsi/ \
4 -I$(srctree)/drivers/target/tcm_fc/
5
6tcm_fc-y += tfc_cmd.o \
7 tfc_conf.o \
8 tfc_io.o \
9 tfc_sess.o
10
11obj-$(CONFIG_TCM_FC) += tcm_fc.o
12
13ifdef CONFIGFS_TCM_FC_DEBUG
14EXTRA_CFLAGS += -DTCM_FC_DEBUG
15endif
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
new file mode 100644
index 000000000000..defff32b7880
--- /dev/null
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17#ifndef __TCM_FC_H__
18#define __TCM_FC_H__
19
20#define FT_VERSION "0.3"
21
22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
23#define FT_TPG_NAMELEN 32 /* max length of TPG name */
24#define FT_LUN_NAMELEN 32 /* max length of LUN name */
25
26/*
27 * Debug options.
28 */
29#define FT_DEBUG_CONF 0x01 /* configuration messages */
30#define FT_DEBUG_SESS 0x02 /* session messages */
31#define FT_DEBUG_TM 0x04 /* TM operations */
32#define FT_DEBUG_IO 0x08 /* I/O commands */
33#define FT_DEBUG_DATA 0x10 /* Data transfer */
34
35extern unsigned int ft_debug_logging; /* debug options */
36
37#define FT_DEBUG(mask, fmt, args...) \
38 do { \
39 if (ft_debug_logging & (mask)) \
40 printk(KERN_INFO "tcm_fc: %s: " fmt, \
41 __func__, ##args); \
42 } while (0)
43
44#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
45#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
46#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
47#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
48#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
49
50struct ft_transport_id {
51 __u8 format;
52 __u8 __resvd1[7];
53 __u8 wwpn[8];
54 __u8 __resvd2[8];
55} __attribute__((__packed__));
56
57/*
58 * Session (remote port).
59 */
60struct ft_sess {
61 u32 port_id; /* for hash lookup use only */
62 u32 params;
63 u16 max_frame; /* maximum frame size */
64 u64 port_name; /* port name for transport ID */
65 struct ft_tport *tport;
66 struct se_session *se_sess;
67 struct hlist_node hash; /* linkage in ft_sess_hash table */
68 struct rcu_head rcu;
69 struct kref kref; /* ref for hash and outstanding I/Os */
70};
71
72/*
73 * Hash table of sessions per local port.
74 * Hash lookup by remote port FC_ID.
75 */
76#define FT_SESS_HASH_BITS 6
77#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
78
79/*
80 * Per local port data.
81 * This is created only after a TPG exists that allows target function
82 * for the local port. If the TPG exists, this is allocated when
83 * we're notified that the local port has been created, or when
84 * the first PRLI provider callback is received.
85 */
86struct ft_tport {
87 struct fc_lport *lport;
88 struct ft_tpg *tpg; /* NULL if TPG deleted before tport */
89 u32 sess_count; /* number of sessions in hash */
90 struct rcu_head rcu;
91 struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
92};
93
94/*
95 * Node ID and authentication.
96 */
97struct ft_node_auth {
98 u64 port_name;
99 u64 node_name;
100};
101
102/*
103 * Node ACL for FC remote port session.
104 */
105struct ft_node_acl {
106 struct ft_node_auth node_auth;
107 struct se_node_acl se_node_acl;
108};
109
110struct ft_lun {
111 u32 index;
112 char name[FT_LUN_NAMELEN];
113};
114
115/*
116 * Target portal group (local port).
117 */
118struct ft_tpg {
119 u32 index;
120 struct ft_lport_acl *lport_acl;
121 struct ft_tport *tport; /* active tport or NULL */
122 struct list_head list; /* linkage in ft_lport_acl tpg_list */
123 struct list_head lun_list; /* head of LUNs */
124 struct se_portal_group se_tpg;
125 struct task_struct *thread; /* processing thread */
126 struct se_queue_obj qobj; /* queue for processing thread */
127};
128
129struct ft_lport_acl {
130 u64 wwpn;
131 char name[FT_NAMELEN];
132 struct list_head list;
133 struct list_head tpg_list;
134 struct se_wwn fc_lport_wwn;
135};
136
137enum ft_cmd_state {
138 FC_CMD_ST_NEW = 0,
139 FC_CMD_ST_REJ
140};
141
142/*
143 * Commands
144 */
145struct ft_cmd {
146 enum ft_cmd_state state;
147 u16 lun; /* LUN from request */
148 struct ft_sess *sess; /* session held for cmd */
149 struct fc_seq *seq; /* sequence in exchange mgr */
150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */
151 struct fc_frame *req_frame;
152 unsigned char *cdb; /* pointer to CDB inside frame */
153 u32 write_data_len; /* data received on writes */
154 struct se_queue_req se_req;
155 /* Local sense buffer */
156 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
157 u32 was_ddp_setup:1; /* Set only if DDP is set up */
158 struct scatterlist *sg; /* Set only if DDP is set up */
159 u32 sg_cnt; /* No. of items in the scatterlist */
160};
161
162extern struct list_head ft_lport_list;
163extern struct mutex ft_lport_lock;
164extern struct fc4_prov ft_prov;
165extern struct target_fabric_configfs *ft_configfs;
166
167/*
168 * Fabric methods.
169 */
170
171/*
172 * Session ops.
173 */
174void ft_sess_put(struct ft_sess *);
175int ft_sess_shutdown(struct se_session *);
176void ft_sess_close(struct se_session *);
177void ft_sess_stop(struct se_session *, int, int);
178int ft_sess_logged_in(struct se_session *);
179u32 ft_sess_get_index(struct se_session *);
180u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
181void ft_sess_set_erl0(struct se_session *);
182
183void ft_lport_add(struct fc_lport *, void *);
184void ft_lport_del(struct fc_lport *, void *);
185int ft_lport_notify(struct notifier_block *, unsigned long, void *);
186
187/*
188 * IO methods.
189 */
190void ft_check_stop_free(struct se_cmd *);
191void ft_release_cmd(struct se_cmd *);
192int ft_queue_status(struct se_cmd *);
193int ft_queue_data_in(struct se_cmd *);
194int ft_write_pending(struct se_cmd *);
195int ft_write_pending_status(struct se_cmd *);
196u32 ft_get_task_tag(struct se_cmd *);
197int ft_get_cmd_state(struct se_cmd *);
198void ft_new_cmd_failure(struct se_cmd *);
199int ft_queue_tm_resp(struct se_cmd *);
200int ft_is_state_remove(struct se_cmd *);
201
202/*
203 * other internal functions.
204 */
205int ft_thread(void *);
206void ft_recv_req(struct ft_sess *, struct fc_frame *);
207struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
208struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
209
210void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
211void ft_dump_cmd(struct ft_cmd *, const char *caller);
212
213ssize_t ft_format_wwn(char *, size_t, u64);
214
215#endif /* __TCM_FC_H__ */
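
tcm_fc.h above keys each tport's sessions by remote port FC_ID in a small hlist hash protected by RCU, with a per-session kref for lifetime. A hedged sketch of what such a lookup typically looks like, using demo_* names and the four-argument hlist_for_each_entry_rcu() of this kernel generation; this is an assumption drawn from the fields above, not a quote of tfc_sess.c:

#include <linux/hash.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/types.h>

#define DEMO_SESS_HASH_BITS	6
#define DEMO_SESS_HASH_SIZE	(1 << DEMO_SESS_HASH_BITS)

struct demo_sess {
	u32 port_id;
	struct hlist_node hash;
	struct kref kref;
};

struct demo_tport {
	struct hlist_head hash[DEMO_SESS_HASH_SIZE];
};

/* Look up a session by remote port FC_ID under rcu_read_lock() and
 * return it with an extra reference, or NULL if not found. */
static struct demo_sess *demo_sess_get(struct demo_tport *tport, u32 port_id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct demo_sess *sess;

	head = &tport->hash[hash_32(port_id, DEMO_SESS_HASH_BITS)];

	rcu_read_lock();
	hlist_for_each_entry_rcu(sess, pos, head, hash) {
		if (sess->port_id == port_id) {
			kref_get(&sess->kref);
			rcu_read_unlock();
			return sess;
		}
	}
	rcu_read_unlock();
	return NULL;
}
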
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 000000000000..49e51778f733
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,696 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <asm/unaligned.h>
34#include <scsi/scsi.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/libfc.h>
39#include <scsi/fc_encode.h>
40
41#include <target/target_core_base.h>
42#include <target/target_core_transport.h>
43#include <target/target_core_fabric_ops.h>
44#include <target/target_core_device.h>
45#include <target/target_core_tpg.h>
46#include <target/target_core_configfs.h>
47#include <target/target_core_base.h>
48#include <target/target_core_tmr.h>
49#include <target/configfs_macros.h>
50
51#include "tcm_fc.h"
52
53/*
54 * Dump cmd state for debugging.
55 */
56void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
57{
58 struct fc_exch *ep;
59 struct fc_seq *sp;
60 struct se_cmd *se_cmd;
61 struct se_mem *mem;
62 struct se_transport_task *task;
63
64 if (!(ft_debug_logging & FT_DEBUG_IO))
65 return;
66
67 se_cmd = &cmd->se_cmd;
68 printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
69 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
70 printk(KERN_INFO "%s: cmd %p cdb %p\n",
71 caller, cmd, cmd->cdb);
72 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
73
74 task = T_TASK(se_cmd);
75 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
76 caller, cmd, task, task->t_tasks_se_num,
77 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
78 if (task->t_mem_list)
79 list_for_each_entry(mem, task->t_mem_list, se_list)
80 printk(KERN_INFO "%s: cmd %p mem %p page %p "
81 "len 0x%x off 0x%x\n",
82 caller, cmd, mem,
83 mem->se_page, mem->se_len, mem->se_off);
84 sp = cmd->seq;
85 if (sp) {
86 ep = fc_seq_exch(sp);
87 printk(KERN_INFO "%s: cmd %p sid %x did %x "
88 "ox_id %x rx_id %x seq_id %x e_stat %x\n",
89 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
90 sp->id, ep->esb_stat);
91 }
92 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
93 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
94}
95
96/*
97 * Get LUN from CDB.
98 */
99static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
100{
101 u64 lun;
102
103 lun = lunp[1];
104 switch (lunp[0] >> 6) {
105 case 0:
106 break;
107 case 1:
108 lun |= (lunp[0] & 0x3f) << 8;
109 break;
110 default:
111 return -1;
112 }
113 if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
114 return -1;
115 cmd->lun = lun;
116 return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
117}
118
119static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
120{
121 struct se_queue_obj *qobj;
122 unsigned long flags;
123
124 qobj = &sess->tport->tpg->qobj;
125 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
126 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
127 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
128 atomic_inc(&qobj->queue_cnt);
129 wake_up_interruptible(&qobj->thread_wq);
130}
131
132static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
133{
134 unsigned long flags;
135 struct se_queue_req *qr;
136
137 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
138 if (list_empty(&qobj->qobj_list)) {
139 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
140 return NULL;
141 }
142 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
143 list_del(&qr->qr_list);
144 atomic_dec(&qobj->queue_cnt);
145 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
146 return container_of(qr, struct ft_cmd, se_req);
147}
148
149static void ft_free_cmd(struct ft_cmd *cmd)
150{
151 struct fc_frame *fp;
152 struct fc_lport *lport;
153
154 if (!cmd)
155 return;
156 fp = cmd->req_frame;
157 lport = fr_dev(fp);
158 if (fr_seq(fp))
159 lport->tt.seq_release(fr_seq(fp));
160 fc_frame_free(fp);
161 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
162 kfree(cmd);
163}
164
165void ft_release_cmd(struct se_cmd *se_cmd)
166{
167 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
168
169 ft_free_cmd(cmd);
170}
171
172void ft_check_stop_free(struct se_cmd *se_cmd)
173{
174 transport_generic_free_cmd(se_cmd, 0, 1, 0);
175}
176
177/*
178 * Send response.
179 */
180int ft_queue_status(struct se_cmd *se_cmd)
181{
182 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
183 struct fc_frame *fp;
184 struct fcp_resp_with_ext *fcp;
185 struct fc_lport *lport;
186 struct fc_exch *ep;
187 size_t len;
188
189 ft_dump_cmd(cmd, __func__);
190 ep = fc_seq_exch(cmd->seq);
191 lport = ep->lp;
192 len = sizeof(*fcp) + se_cmd->scsi_sense_length;
193 fp = fc_frame_alloc(lport, len);
194 if (!fp) {
195 /* XXX shouldn't just drop it - requeue and retry? */
196 return 0;
197 }
198 fcp = fc_frame_payload_get(fp, len);
199 memset(fcp, 0, len);
200 fcp->resp.fr_status = se_cmd->scsi_status;
201
202 len = se_cmd->scsi_sense_length;
203 if (len) {
204 fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
205 fcp->ext.fr_sns_len = htonl(len);
206 memcpy((fcp + 1), se_cmd->sense_buffer, len);
207 }
208
209 /*
210 * Test underflow and overflow with one mask. Usually both are off.
211 * Bidirectional commands are not handled yet.
212 */
213 if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
214 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
215 fcp->resp.fr_flags |= FCP_RESID_OVER;
216 else
217 fcp->resp.fr_flags |= FCP_RESID_UNDER;
218 fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
219 }
220
221 /*
222 * Send response.
223 */
224 cmd->seq = lport->tt.seq_start_next(cmd->seq);
225 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
226 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
227
228 lport->tt.seq_send(lport, cmd->seq, fp);
229 lport->tt.exch_done(cmd->seq);
230 return 0;
231}
232
233int ft_write_pending_status(struct se_cmd *se_cmd)
234{
235 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
236
237 return cmd->write_data_len != se_cmd->data_length;
238}
239
240/*
241 * Send TX_RDY (transfer ready).
242 */
243int ft_write_pending(struct se_cmd *se_cmd)
244{
245 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
246 struct fc_frame *fp;
247 struct fcp_txrdy *txrdy;
248 struct fc_lport *lport;
249 struct fc_exch *ep;
250 struct fc_frame_header *fh;
251 u32 f_ctl;
252
253 ft_dump_cmd(cmd, __func__);
254
255 ep = fc_seq_exch(cmd->seq);
256 lport = ep->lp;
257 fp = fc_frame_alloc(lport, sizeof(*txrdy));
258 if (!fp)
259 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
260
261 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
262 memset(txrdy, 0, sizeof(*txrdy));
263 txrdy->ft_burst_len = htonl(se_cmd->data_length);
264
265 cmd->seq = lport->tt.seq_start_next(cmd->seq);
266 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
267 FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
268
269 fh = fc_frame_header_get(fp);
270 f_ctl = ntoh24(fh->fh_f_ctl);
271
272 /* Only if it is 'Exchange Responder' */
273 if (f_ctl & FC_FC_EX_CTX) {
274 /* The target is the 'exchange responder' and is sending
275 * XFER_READY to the 'exchange initiator'
276 */
277 if ((ep->xid <= lport->lro_xid) &&
278 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
279 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
280 /*
281 * Map the se_mem list to a scatterlist so that
282 * DDP can be set up; the DDP setup function requires a
283 * scatterlist, while the se_mem list is internal to the
284 * TCM/LIO target.
285 */
286 transport_do_task_sg_chain(se_cmd);
287 cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
288 cmd->sg_cnt =
289 T_TASK(se_cmd)->t_tasks_sg_chained_no;
290 }
291 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
292 cmd->sg, cmd->sg_cnt))
293 cmd->was_ddp_setup = 1;
294 }
295 }
296 lport->tt.seq_send(lport, cmd->seq, fp);
297 return 0;
298}
299
300u32 ft_get_task_tag(struct se_cmd *se_cmd)
301{
302 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
303
304 return fc_seq_exch(cmd->seq)->rxid;
305}
306
307int ft_get_cmd_state(struct se_cmd *se_cmd)
308{
309 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
310
311 return cmd->state;
312}
313
314int ft_is_state_remove(struct se_cmd *se_cmd)
315{
316 return 0; /* XXX TBD */
317}
318
319void ft_new_cmd_failure(struct se_cmd *se_cmd)
320{
321 /* XXX TBD */
322 printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
323}
324
325/*
326 * FC sequence response handler for follow-on sequences (data) and aborts.
327 */
328static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
329{
330 struct ft_cmd *cmd = arg;
331 struct fc_frame_header *fh;
332
333 if (IS_ERR(fp)) {
334 /* XXX need to find cmd if queued */
335 cmd->se_cmd.t_state = TRANSPORT_REMOVE;
336 cmd->seq = NULL;
337 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
338 return;
339 }
340
341 fh = fc_frame_header_get(fp);
342
343 switch (fh->fh_r_ctl) {
344 case FC_RCTL_DD_SOL_DATA: /* write data */
345 ft_recv_write_data(cmd, fp);
346 break;
347 case FC_RCTL_DD_UNSOL_CTL: /* command */
348 case FC_RCTL_DD_SOL_CTL: /* transfer ready */
349 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
350 default:
351 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
352 __func__, fh->fh_r_ctl);
353 fc_frame_free(fp);
354 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
355 break;
356 }
357}
358
359/*
360 * Send a FCP response including SCSI status and optional FCP rsp_code.
361 * status is SAM_STAT_GOOD (zero) iff code is valid.
362 * This is used in error cases, such as allocation failures.
363 */
364static void ft_send_resp_status(struct fc_lport *lport,
365 const struct fc_frame *rx_fp,
366 u32 status, enum fcp_resp_rsp_codes code)
367{
368 struct fc_frame *fp;
369 struct fc_seq *sp;
370 const struct fc_frame_header *fh;
371 size_t len;
372 struct fcp_resp_with_ext *fcp;
373 struct fcp_resp_rsp_info *info;
374
375 fh = fc_frame_header_get(rx_fp);
376 FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
377 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
378 len = sizeof(*fcp);
379 if (status == SAM_STAT_GOOD)
380 len += sizeof(*info);
381 fp = fc_frame_alloc(lport, len);
382 if (!fp)
383 return;
384 fcp = fc_frame_payload_get(fp, len);
385 memset(fcp, 0, len);
386 fcp->resp.fr_status = status;
387 if (status == SAM_STAT_GOOD) {
388 fcp->ext.fr_rsp_len = htonl(sizeof(*info));
389 fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
390 info = (struct fcp_resp_rsp_info *)(fcp + 1);
391 info->rsp_code = code;
392 }
393
394 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
395 sp = fr_seq(fp);
396 if (sp)
397 lport->tt.seq_send(lport, sp, fp);
398 else
399 lport->tt.frame_send(lport, fp);
400}
401
402/*
403 * Send error or task management response.
404 * Always frees the cmd and associated state.
405 */
406static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
407{
408 ft_send_resp_status(cmd->sess->tport->lport,
409 cmd->req_frame, SAM_STAT_GOOD, code);
410 ft_free_cmd(cmd);
411}
412
413/*
414 * Handle Task Management Request.
415 */
416static void ft_send_tm(struct ft_cmd *cmd)
417{
418 struct se_tmr_req *tmr;
419 struct fcp_cmnd *fcp;
420 u8 tm_func;
421
422 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
423
424 switch (fcp->fc_tm_flags) {
425 case FCP_TMF_LUN_RESET:
426 tm_func = TMR_LUN_RESET;
427 if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
428 ft_dump_cmd(cmd, __func__);
429 transport_send_check_condition_and_sense(&cmd->se_cmd,
430 cmd->se_cmd.scsi_sense_reason, 0);
431 ft_sess_put(cmd->sess);
432 return;
433 }
434 break;
435 case FCP_TMF_TGT_RESET:
436 tm_func = TMR_TARGET_WARM_RESET;
437 break;
438 case FCP_TMF_CLR_TASK_SET:
439 tm_func = TMR_CLEAR_TASK_SET;
440 break;
441 case FCP_TMF_ABT_TASK_SET:
442 tm_func = TMR_ABORT_TASK_SET;
443 break;
444 case FCP_TMF_CLR_ACA:
445 tm_func = TMR_CLEAR_ACA;
446 break;
447 default:
448 /*
449 * FCP4r01 indicates having a combination of
450 * tm_flags set is invalid.
451 */
452 FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
453 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
454 return;
455 }
456
457 FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
458 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
459 if (!tmr) {
460 FT_TM_DBG("alloc failed\n");
461 ft_send_resp_code(cmd, FCP_TMF_FAILED);
462 return;
463 }
464 cmd->se_cmd.se_tmr_req = tmr;
465 transport_generic_handle_tmr(&cmd->se_cmd);
466}
467
468/*
469 * Send status from completed task management request.
470 */
471int ft_queue_tm_resp(struct se_cmd *se_cmd)
472{
473 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
474 struct se_tmr_req *tmr = se_cmd->se_tmr_req;
475 enum fcp_resp_rsp_codes code;
476
477 switch (tmr->response) {
478 case TMR_FUNCTION_COMPLETE:
479 code = FCP_TMF_CMPL;
480 break;
481 case TMR_LUN_DOES_NOT_EXIST:
482 code = FCP_TMF_INVALID_LUN;
483 break;
484 case TMR_FUNCTION_REJECTED:
485 code = FCP_TMF_REJECTED;
486 break;
487 case TMR_TASK_DOES_NOT_EXIST:
488 case TMR_TASK_STILL_ALLEGIANT:
489 case TMR_TASK_FAILOVER_NOT_SUPPORTED:
490 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
491 case TMR_FUNCTION_AUTHORIZATION_FAILED:
492 default:
493 code = FCP_TMF_FAILED;
494 break;
495 }
496 FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
497 tmr->function, tmr->response, code);
498 ft_send_resp_code(cmd, code);
499 return 0;
500}
501
502/*
503 * Handle incoming FCP command.
504 */
505static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
506{
507 struct ft_cmd *cmd;
508 struct fc_lport *lport = sess->tport->lport;
509
510 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
511 if (!cmd)
512 goto busy;
513 cmd->sess = sess;
514 cmd->seq = lport->tt.seq_assign(lport, fp);
515 if (!cmd->seq) {
516 kfree(cmd);
517 goto busy;
518 }
519 cmd->req_frame = fp; /* hold frame during cmd */
520 ft_queue_cmd(sess, cmd);
521 return;
522
523busy:
524 FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
525 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
526 fc_frame_free(fp);
527 ft_sess_put(sess); /* undo get from lookup */
528}
529
530
531/*
532 * Handle incoming FCP frame.
533 * Caller has verified that the frame is type FCP.
534 */
535void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
536{
537 struct fc_frame_header *fh = fc_frame_header_get(fp);
538
539 switch (fh->fh_r_ctl) {
540 case FC_RCTL_DD_UNSOL_CMD: /* command */
541 ft_recv_cmd(sess, fp);
542 break;
543 case FC_RCTL_DD_SOL_DATA: /* write data */
544 case FC_RCTL_DD_UNSOL_CTL:
545 case FC_RCTL_DD_SOL_CTL:
546 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
547 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
548 default:
549 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
550 __func__, fh->fh_r_ctl);
551 fc_frame_free(fp);
552 ft_sess_put(sess); /* undo get from lookup */
553 break;
554 }
555}
556
557/*
558 * Send new command to target.
559 */
560static void ft_send_cmd(struct ft_cmd *cmd)
561{
562 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
563 struct se_cmd *se_cmd;
564 struct fcp_cmnd *fcp;
565 int data_dir;
566 u32 data_len;
567 int task_attr;
568 int ret;
569
570 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
571 if (!fcp)
572 goto err;
573
574 if (fcp->fc_flags & FCP_CFL_LEN_MASK)
575 goto err; /* not handling longer CDBs yet */
576
577 if (fcp->fc_tm_flags) {
578 task_attr = FCP_PTA_SIMPLE;
579 data_dir = DMA_NONE;
580 data_len = 0;
581 } else {
582 switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
583 case 0:
584 data_dir = DMA_NONE;
585 break;
586 case FCP_CFL_RDDATA:
587 data_dir = DMA_FROM_DEVICE;
588 break;
589 case FCP_CFL_WRDATA:
590 data_dir = DMA_TO_DEVICE;
591 break;
592 case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
593 goto err; /* TBD not supported by tcm_fc yet */
594 }
595
596 /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
597 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
598 data_len = ntohl(fcp->fc_dl);
599 cmd->cdb = fcp->fc_cdb;
600 }
601
602 se_cmd = &cmd->se_cmd;
603 /*
604 * Initialize struct se_cmd descriptor from target_core_mod
605 * infrastructure
606 */
607 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
608 data_len, data_dir, task_attr,
609 &cmd->ft_sense_buffer[0]);
610 /*
611 * Check for FCP task management flags
612 */
613 if (fcp->fc_tm_flags) {
614 ft_send_tm(cmd);
615 return;
616 }
617
618 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
619
620 ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
621 if (ret < 0) {
622 ft_dump_cmd(cmd, __func__);
623 transport_send_check_condition_and_sense(&cmd->se_cmd,
624 cmd->se_cmd.scsi_sense_reason, 0);
625 return;
626 }
627
628 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
629
630 FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
631 ft_dump_cmd(cmd, __func__);
632
633 if (ret == -1) {
634 transport_send_check_condition_and_sense(se_cmd,
635 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
636 transport_generic_free_cmd(se_cmd, 0, 1, 0);
637 return;
638 }
639 if (ret == -2) {
640 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
641 ft_queue_status(se_cmd);
642 else
643 transport_send_check_condition_and_sense(se_cmd,
644 se_cmd->scsi_sense_reason, 0);
645 transport_generic_free_cmd(se_cmd, 0, 1, 0);
646 return;
647 }
648 transport_generic_handle_cdb(se_cmd);
649 return;
650
651err:
652 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
653 return;
654}
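
The read/write bits in the FCP_CMND control flags drive the data direction chosen by ft_send_cmd() above, and the bidirectional combination is rejected. The following stand-alone sketch (user-space C; the FCP_CFL_* values are copied from the kernel's fc_fcp.h of this era and should be treated as assumptions here) exercises just that mapping:

#include <stdio.h>

#define FCP_CFL_RDDATA	0x02	/* initiator wants to read (target sends data) */
#define FCP_CFL_WRDATA	0x01	/* initiator wants to write (target receives data) */

enum xfer_dir { XFER_NONE, XFER_FROM_TARGET, XFER_TO_TARGET, XFER_INVALID };

static enum xfer_dir fcp_flags_to_dir(unsigned char fc_flags)
{
	switch (fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		return XFER_NONE;		/* no data transfer */
	case FCP_CFL_RDDATA:
		return XFER_FROM_TARGET;	/* DMA_FROM_DEVICE in the driver */
	case FCP_CFL_WRDATA:
		return XFER_TO_TARGET;		/* DMA_TO_DEVICE in the driver */
	default:
		return XFER_INVALID;		/* bidirectional: rejected by tcm_fc */
	}
}

int main(void)
{
	static const char *names[] = {
		"none", "from-target (read)", "to-target (write)", "invalid/bidi"
	};
	unsigned char flags;

	for (flags = 0; flags <= 3; flags++)
		printf("fc_flags %#x -> %s\n", (unsigned)flags,
		       names[fcp_flags_to_dir(flags)]);
	return 0;
}
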
655
656/*
657 * Handle request in the command thread.
658 */
659static void ft_exec_req(struct ft_cmd *cmd)
660{
661 FT_IO_DBG("cmd state %x\n", cmd->state);
662 switch (cmd->state) {
663 case FC_CMD_ST_NEW:
664 ft_send_cmd(cmd);
665 break;
666 default:
667 break;
668 }
669}
670
671/*
672 * Processing thread.
673 * Currently one thread per tpg.
674 */
675int ft_thread(void *arg)
676{
677 struct ft_tpg *tpg = arg;
678 struct se_queue_obj *qobj = &tpg->qobj;
679 struct ft_cmd *cmd;
680 int ret;
681
682 set_user_nice(current, -20);
683
684 while (!kthread_should_stop()) {
685 ret = wait_event_interruptible(qobj->thread_wq,
686 atomic_read(&qobj->queue_cnt) || kthread_should_stop());
687 if (ret < 0 || kthread_should_stop())
688 goto out;
689 cmd = ft_dequeue_cmd(qobj);
690 if (cmd)
691 ft_exec_req(cmd);
692 }
693
694out:
695 return 0;
696}
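
ft_thread() above is a plain blocking consumer: sleep until the queue count is non-zero or a stop is requested, then dequeue and execute one command. A rough user-space analogue using a mutex and condition variable (all names below are invented for illustration; kthreads and wait queues have no exact user-space equivalent) looks like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the tpg's se_queue_obj and ft_dequeue_cmd(). */
struct cmd_queue {
	pthread_mutex_t lock;
	pthread_cond_t wake;	/* plays the role of qobj->thread_wq */
	int queue_cnt;		/* plays the role of qobj->queue_cnt */
	bool stop;		/* plays the role of kthread_should_stop() */
};

static void exec_one_cmd(void)
{
	printf("executing one queued command\n");
}

static void *cmd_thread(void *arg)
{
	struct cmd_queue *q = arg;

	for (;;) {
		pthread_mutex_lock(&q->lock);
		/* sleep until work arrives or a stop is requested,
		 * like wait_event_interruptible() in ft_thread() */
		while (!q->queue_cnt && !q->stop)
			pthread_cond_wait(&q->wake, &q->lock);
		if (q->queue_cnt) {
			q->queue_cnt--;		/* "dequeue" one command */
			pthread_mutex_unlock(&q->lock);
			exec_one_cmd();
			continue;
		}
		pthread_mutex_unlock(&q->lock);
		break;				/* stop requested, queue drained */
	}
	return NULL;
}

int main(void)
{
	static struct cmd_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
		.queue_cnt = 3,			/* pretend three commands are queued */
	};
	pthread_t t;

	pthread_create(&t, NULL, cmd_thread, &q);

	pthread_mutex_lock(&q.lock);
	q.stop = true;				/* analogue of kthread_stop() */
	pthread_cond_broadcast(&q.wake);
	pthread_mutex_unlock(&q.lock);

	pthread_join(t, NULL);
	return 0;
}
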
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
new file mode 100644
index 000000000000..fcdbbffe88cc
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -0,0 +1,677 @@
1/*******************************************************************************
2 * Filename:  tfc_conf.c
3 *
4 * This file contains the configfs implementation for TCM_fc fabric node.
5 * Based on tcm_loop_configfs.c
6 *
7 * Copyright (c) 2010 Cisco Systems, Inc.
8 * Copyright (c) 2009,2010 Rising Tide, Inc.
9 * Copyright (c) 2009,2010 Linux-iSCSI.org
10 *
11 * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <generated/utsrelease.h>
28#include <linux/utsname.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/kthread.h>
32#include <linux/types.h>
33#include <linux/string.h>
34#include <linux/configfs.h>
35#include <linux/ctype.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_fabric_configfs.h>
47#include <target/target_core_fabric_lib.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tpg.h>
50#include <target/target_core_configfs.h>
51#include <target/target_core_base.h>
52#include <target/configfs_macros.h>
53
54#include "tcm_fc.h"
55
56struct target_fabric_configfs *ft_configfs;
57
58LIST_HEAD(ft_lport_list);
59DEFINE_MUTEX(ft_lport_lock);
60
61unsigned int ft_debug_logging;
62module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
63MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
64
65/*
66 * Parse WWN.
67 * If strict, we require lower-case hex and colon separators to be sure
68 * the name is the same as what would be generated by ft_format_wwn()
69 * so the name and wwn are mapped one-to-one.
70 */
71static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
72{
73 const char *cp;
74 char c;
75 u32 nibble;
76 u32 byte = 0;
77 u32 pos = 0;
78 u32 err;
79
80 *wwn = 0;
81 for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
82 c = *cp;
83 if (c == '\n' && cp[1] == '\0')
84 continue;
85 if (strict && pos++ == 2 && byte++ < 7) {
86 pos = 0;
87 if (c == ':')
88 continue;
89 err = 1;
90 goto fail;
91 }
92 if (c == '\0') {
93 err = 2;
94 if (strict && byte != 8)
95 goto fail;
96 return cp - name;
97 }
98 err = 3;
99 if (isdigit(c))
100 nibble = c - '0';
101 else if (isxdigit(c) && (islower(c) || !strict))
102 nibble = tolower(c) - 'a' + 10;
103 else
104 goto fail;
105 *wwn = (*wwn << 4) | nibble;
106 }
107 err = 4;
108fail:
109 FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
110 err, cp - name, pos, byte);
111 return -1;
112}
113
114ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
115{
116 u8 b[8];
117
118 put_unaligned_be64(wwn, b);
119 return snprintf(buf, len,
120 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
121 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
122}
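
ft_parse_wwn() and ft_format_wwn() above fix the configfs name format for ports: eight bytes of strictly lower-case hex separated by colons, e.g. "20:00:00:11:22:33:44:55", mapping one-to-one onto a 64-bit WWN. A stand-alone sketch of the same round trip (independent of the kernel helpers, so the function names below are illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int hexval(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;		/* strict: upper-case is rejected */
}

/* Format a 64-bit WWN the same way as the "%2.2x:...:%2.2x" format above. */
static void wwn_to_name(uint64_t wwn, char *buf, size_t len)
{
	snprintf(buf, len, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		 (unsigned)(wwn >> 56) & 0xff, (unsigned)(wwn >> 48) & 0xff,
		 (unsigned)(wwn >> 40) & 0xff, (unsigned)(wwn >> 32) & 0xff,
		 (unsigned)(wwn >> 24) & 0xff, (unsigned)(wwn >> 16) & 0xff,
		 (unsigned)(wwn >> 8) & 0xff, (unsigned)wwn & 0xff);
}

/* Strict parse: exactly 8 lower-case hex bytes separated by ':'. */
static int name_to_wwn(const char *name, uint64_t *wwn)
{
	uint64_t val = 0;
	int i;

	if (strlen(name) != 23)		/* "xx:xx:xx:xx:xx:xx:xx:xx" */
		return -1;
	for (i = 0; i < 8; i++) {
		const char *p = name + i * 3;
		int hi = hexval(p[0]), lo = hexval(p[1]);

		if (hi < 0 || lo < 0)
			return -1;
		if (i < 7 && p[2] != ':')
			return -1;
		val = (val << 8) | (uint64_t)((hi << 4) | lo);
	}
	*wwn = val;
	return 0;
}

int main(void)
{
	char buf[32];
	uint64_t wwn;

	wwn_to_name(0x2000001122334455ULL, buf, sizeof(buf));
	printf("formatted: %s\n", buf);
	if (!name_to_wwn(buf, &wwn))
		printf("parsed back: 0x%016llx\n", (unsigned long long)wwn);
	return 0;
}
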
123
124static ssize_t ft_wwn_show(void *arg, char *buf)
125{
126 u64 *wwn = arg;
127 ssize_t len;
128
129 len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
130 buf[len++] = '\n';
131 return len;
132}
133
134static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
135{
136 ssize_t ret;
137 u64 wwn;
138
139 ret = ft_parse_wwn(buf, &wwn, 0);
140 if (ret > 0)
141 *(u64 *)arg = wwn;
142 return ret;
143}
144
145/*
146 * ACL auth ops.
147 */
148
149static ssize_t ft_nacl_show_port_name(
150 struct se_node_acl *se_nacl,
151 char *page)
152{
153 struct ft_node_acl *acl = container_of(se_nacl,
154 struct ft_node_acl, se_node_acl);
155
156 return ft_wwn_show(&acl->node_auth.port_name, page);
157}
158
159static ssize_t ft_nacl_store_port_name(
160 struct se_node_acl *se_nacl,
161 const char *page,
162 size_t count)
163{
164 struct ft_node_acl *acl = container_of(se_nacl,
165 struct ft_node_acl, se_node_acl);
166
167 return ft_wwn_store(&acl->node_auth.port_name, page, count);
168}
169
170TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
171
172static ssize_t ft_nacl_show_node_name(
173 struct se_node_acl *se_nacl,
174 char *page)
175{
176 struct ft_node_acl *acl = container_of(se_nacl,
177 struct ft_node_acl, se_node_acl);
178
179 return ft_wwn_show(&acl->node_auth.node_name, page);
180}
181
182static ssize_t ft_nacl_store_node_name(
183 struct se_node_acl *se_nacl,
184 const char *page,
185 size_t count)
186{
187 struct ft_node_acl *acl = container_of(se_nacl,
188 struct ft_node_acl, se_node_acl);
189
190 return ft_wwn_store(&acl->node_auth.node_name, page, count);
191}
192
193TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
194
195static struct configfs_attribute *ft_nacl_base_attrs[] = {
196 &ft_nacl_port_name.attr,
197 &ft_nacl_node_name.attr,
198 NULL,
199};
200
201/*
202 * ACL ops.
203 */
204
205/*
206 * Add ACL for an initiator. The ACL is named arbitrarily.
207 * The port_name and/or node_name are attributes.
208 */
209static struct se_node_acl *ft_add_acl(
210 struct se_portal_group *se_tpg,
211 struct config_group *group,
212 const char *name)
213{
214 struct ft_node_acl *acl;
215 struct ft_tpg *tpg;
216 u64 wwpn;
217 u32 q_depth;
218
219 FT_CONF_DBG("add acl %s\n", name);
220 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
221
222 if (ft_parse_wwn(name, &wwpn, 1) < 0)
223 return ERR_PTR(-EINVAL);
224
225 acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
226 if (!(acl))
227 return ERR_PTR(-ENOMEM);
228 acl->node_auth.port_name = wwpn;
229
230 q_depth = 32; /* XXX bogus default - get from tpg? */
231 return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
232 &acl->se_node_acl, name, q_depth);
233}
234
235static void ft_del_acl(struct se_node_acl *se_acl)
236{
237 struct se_portal_group *se_tpg = se_acl->se_tpg;
238 struct ft_tpg *tpg;
239 struct ft_node_acl *acl = container_of(se_acl,
240 struct ft_node_acl, se_node_acl);
241
242 FT_CONF_DBG("del acl %s\n",
243 config_item_name(&se_acl->acl_group.cg_item));
244
245 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
246 FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
247 acl, se_acl, tpg, &tpg->se_tpg);
248
249 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
250 kfree(acl);
251}
252
253struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
254{
255 struct ft_node_acl *found = NULL;
256 struct ft_node_acl *acl;
257 struct se_portal_group *se_tpg = &tpg->se_tpg;
258 struct se_node_acl *se_acl;
259
260 spin_lock_bh(&se_tpg->acl_node_lock);
261 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
262 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
263 FT_CONF_DBG("acl %p port_name %llx\n",
264 acl, (unsigned long long)acl->node_auth.port_name);
265 if (acl->node_auth.port_name == rdata->ids.port_name ||
266 acl->node_auth.node_name == rdata->ids.node_name) {
267 FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
268 (unsigned long long)rdata->ids.port_name);
269 found = acl;
270 /* XXX need to hold onto ACL */
271 break;
272 }
273 }
274 spin_unlock_bh(&se_tpg->acl_node_lock);
275 return found;
276}
277
278struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
279{
280 struct ft_node_acl *acl;
281
282 acl = kzalloc(sizeof(*acl), GFP_KERNEL);
283 if (!(acl)) {
284 printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
285 return NULL;
286 }
287 FT_CONF_DBG("acl %p\n", acl);
288 return &acl->se_node_acl;
289}
290
291static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
292 struct se_node_acl *se_acl)
293{
294 struct ft_node_acl *acl = container_of(se_acl,
295 struct ft_node_acl, se_node_acl);
296
297	FT_CONF_DBG("acl %p\n", acl);
298 kfree(acl);
299}
300
301/*
302 * local_port port_group (tpg) ops.
303 */
304static struct se_portal_group *ft_add_tpg(
305 struct se_wwn *wwn,
306 struct config_group *group,
307 const char *name)
308{
309 struct ft_lport_acl *lacl;
310 struct ft_tpg *tpg;
311 unsigned long index;
312 int ret;
313
314 FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
315
316 /*
317 * Name must be "tpgt_" followed by the index.
318 */
319 if (strstr(name, "tpgt_") != name)
320 return NULL;
321 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
322 return NULL;
323
324 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
325 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
326 if (!tpg)
327 return NULL;
328 tpg->index = index;
329 tpg->lport_acl = lacl;
330 INIT_LIST_HEAD(&tpg->lun_list);
331 transport_init_queue_obj(&tpg->qobj);
332
333 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
334 (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
335 if (ret < 0) {
336 kfree(tpg);
337 return NULL;
338 }
339
340 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
341 if (IS_ERR(tpg->thread)) {
342 kfree(tpg);
343 return NULL;
344 }
345
346 mutex_lock(&ft_lport_lock);
347 list_add_tail(&tpg->list, &lacl->tpg_list);
348 mutex_unlock(&ft_lport_lock);
349
350 return &tpg->se_tpg;
351}
352
353static void ft_del_tpg(struct se_portal_group *se_tpg)
354{
355 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
356
357 FT_CONF_DBG("del tpg %s\n",
358 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
359
360 kthread_stop(tpg->thread);
361
362 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
363 synchronize_rcu();
364
365 mutex_lock(&ft_lport_lock);
366 list_del(&tpg->list);
367 if (tpg->tport) {
368 tpg->tport->tpg = NULL;
369 tpg->tport = NULL;
370 }
371 mutex_unlock(&ft_lport_lock);
372
373 core_tpg_deregister(se_tpg);
374 kfree(tpg);
375}
376
377/*
378 * Verify that an lport is configured to use the tcm_fc module, and return
379 * the target port group that should be used.
380 *
381 * The caller holds ft_lport_lock.
382 */
383struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
384{
385 struct ft_lport_acl *lacl;
386 struct ft_tpg *tpg;
387
388 list_for_each_entry(lacl, &ft_lport_list, list) {
389 if (lacl->wwpn == lport->wwpn) {
390 list_for_each_entry(tpg, &lacl->tpg_list, list)
391 return tpg; /* XXX for now return first entry */
392 return NULL;
393 }
394 }
395 return NULL;
396}
397
398/*
399 * target config instance ops.
400 */
401
402/*
403 * Add lport to allowed config.
404 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
405 */
406static struct se_wwn *ft_add_lport(
407 struct target_fabric_configfs *tf,
408 struct config_group *group,
409 const char *name)
410{
411 struct ft_lport_acl *lacl;
412 struct ft_lport_acl *old_lacl;
413 u64 wwpn;
414
415 FT_CONF_DBG("add lport %s\n", name);
416 if (ft_parse_wwn(name, &wwpn, 1) < 0)
417 return NULL;
418 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
419 if (!lacl)
420 return NULL;
421 lacl->wwpn = wwpn;
422 INIT_LIST_HEAD(&lacl->tpg_list);
423
424 mutex_lock(&ft_lport_lock);
425 list_for_each_entry(old_lacl, &ft_lport_list, list) {
426 if (old_lacl->wwpn == wwpn) {
427 mutex_unlock(&ft_lport_lock);
428 kfree(lacl);
429 return NULL;
430 }
431 }
432 list_add_tail(&lacl->list, &ft_lport_list);
433 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
434 mutex_unlock(&ft_lport_lock);
435
436 return &lacl->fc_lport_wwn;
437}
438
439static void ft_del_lport(struct se_wwn *wwn)
440{
441 struct ft_lport_acl *lacl = container_of(wwn,
442 struct ft_lport_acl, fc_lport_wwn);
443
444 FT_CONF_DBG("del lport %s\n",
445 config_item_name(&wwn->wwn_group.cg_item));
446 mutex_lock(&ft_lport_lock);
447 list_del(&lacl->list);
448 mutex_unlock(&ft_lport_lock);
449
450 kfree(lacl);
451}
452
453static ssize_t ft_wwn_show_attr_version(
454 struct target_fabric_configfs *tf,
455 char *page)
456{
457 return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
458 ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
459}
460
461TF_WWN_ATTR_RO(ft, version);
462
463static struct configfs_attribute *ft_wwn_attrs[] = {
464 &ft_wwn_version.attr,
465 NULL,
466};
467
468static char *ft_get_fabric_name(void)
469{
470 return "fc";
471}
472
473static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
474{
475 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
476
477 return tpg->lport_acl->name;
478}
479
480static u16 ft_get_tag(struct se_portal_group *se_tpg)
481{
482 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
483
484 /*
485	 * This tag is used when forming the SCSI Name identifier in the
486	 * EVPD=1 page 0x83 response to represent the SCSI Target Port.
487 */
488 return tpg->index;
489}
490
491static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
492{
493 return 1;
494}
495
496static int ft_check_false(struct se_portal_group *se_tpg)
497{
498 return 0;
499}
500
501static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
502{
503}
504
505static u16 ft_get_fabric_sense_len(void)
506{
507 return 0;
508}
509
510static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
511{
512 return 0;
513}
514
515static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
516{
517 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
518
519 return tpg->index;
520}
521
522static u64 ft_pack_lun(unsigned int index)
523{
524 WARN_ON(index >= 256);
525 /* Caller wants this byte-swapped */
526 return cpu_to_le64((index & 0xff) << 8);
527}
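
ft_pack_lun() above encodes a small LUN index in the SAM single-level (peripheral) addressing format: once the returned value is laid out in little-endian byte order, byte 0 of the 8-byte LUN field is zero and byte 1 carries the index. A quick stand-alone check of that layout (put_le64() below is a hypothetical helper standing in for cpu_to_le64() plus a copy to memory):

#include <stdio.h>
#include <stdint.h>

/* Store a 64-bit value in little-endian byte order, like cpu_to_le64()
 * followed by a memcpy on any host. */
static void put_le64(uint64_t v, uint8_t *b)
{
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (v >> (8 * i)) & 0xff;
}

int main(void)
{
	unsigned int index = 5;		/* example LUN index */
	uint64_t packed = (uint64_t)(index & 0xff) << 8;
	uint8_t lun[8];
	int i;

	put_le64(packed, lun);
	printf("LUN field for index %u:", index);
	for (i = 0; i < 8; i++)
		printf(" %02x", lun[i]);
	printf("\n");	/* expect: 00 05 00 00 00 00 00 00 */
	return 0;
}
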
528
529static struct target_core_fabric_ops ft_fabric_ops = {
530 .get_fabric_name = ft_get_fabric_name,
531 .get_fabric_proto_ident = fc_get_fabric_proto_ident,
532 .tpg_get_wwn = ft_get_fabric_wwn,
533 .tpg_get_tag = ft_get_tag,
534 .tpg_get_default_depth = ft_get_default_depth,
535 .tpg_get_pr_transport_id = fc_get_pr_transport_id,
536 .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
537 .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
538 .tpg_check_demo_mode = ft_check_false,
539 .tpg_check_demo_mode_cache = ft_check_false,
540 .tpg_check_demo_mode_write_protect = ft_check_false,
541 .tpg_check_prod_mode_write_protect = ft_check_false,
542 .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
543 .tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
544 .tpg_get_inst_index = ft_tpg_get_inst_index,
545 .check_stop_free = ft_check_stop_free,
546 .release_cmd_to_pool = ft_release_cmd,
547 .release_cmd_direct = ft_release_cmd,
548 .shutdown_session = ft_sess_shutdown,
549 .close_session = ft_sess_close,
550 .stop_session = ft_sess_stop,
551 .fall_back_to_erl0 = ft_sess_set_erl0,
552 .sess_logged_in = ft_sess_logged_in,
553 .sess_get_index = ft_sess_get_index,
554 .sess_get_initiator_sid = NULL,
555 .write_pending = ft_write_pending,
556 .write_pending_status = ft_write_pending_status,
557 .set_default_node_attributes = ft_set_default_node_attr,
558 .get_task_tag = ft_get_task_tag,
559 .get_cmd_state = ft_get_cmd_state,
560 .new_cmd_failure = ft_new_cmd_failure,
561 .queue_data_in = ft_queue_data_in,
562 .queue_status = ft_queue_status,
563 .queue_tm_rsp = ft_queue_tm_resp,
564 .get_fabric_sense_len = ft_get_fabric_sense_len,
565 .set_fabric_sense_len = ft_set_fabric_sense_len,
566 .is_state_remove = ft_is_state_remove,
567 .pack_lun = ft_pack_lun,
568 /*
569 * Setup function pointers for generic logic in
570 * target_core_fabric_configfs.c
571 */
572 .fabric_make_wwn = &ft_add_lport,
573 .fabric_drop_wwn = &ft_del_lport,
574 .fabric_make_tpg = &ft_add_tpg,
575 .fabric_drop_tpg = &ft_del_tpg,
576 .fabric_post_link = NULL,
577 .fabric_pre_unlink = NULL,
578 .fabric_make_np = NULL,
579 .fabric_drop_np = NULL,
580 .fabric_make_nodeacl = &ft_add_acl,
581 .fabric_drop_nodeacl = &ft_del_acl,
582};
583
584int ft_register_configfs(void)
585{
586 struct target_fabric_configfs *fabric;
587 int ret;
588
589 /*
590 * Register the top level struct config_item_type with TCM core
591 */
592 fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
593 if (!fabric) {
594 printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
595 __func__);
596 return -1;
597 }
598 fabric->tf_ops = ft_fabric_ops;
599
600 /* Allowing support for task_sg_chaining */
601 fabric->tf_ops.task_sg_chaining = 1;
602
603 /*
604 * Setup default attribute lists for various fabric->tf_cit_tmpl
605 */
606 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
607 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
608 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
609 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
610 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
611 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
612 ft_nacl_base_attrs;
613 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
614 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
615 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
616 /*
617 * register the fabric for use within TCM
618 */
619 ret = target_fabric_configfs_register(fabric);
620 if (ret < 0) {
621 FT_CONF_DBG("target_fabric_configfs_register() for"
622 " FC Target failed!\n");
623 printk(KERN_INFO
624 "%s: target_fabric_configfs_register() failed!\n",
625 __func__);
626 target_fabric_configfs_free(fabric);
627 return -1;
628 }
629
630 /*
631 * Setup our local pointer to *fabric.
632 */
633 ft_configfs = fabric;
634 return 0;
635}
636
637void ft_deregister_configfs(void)
638{
639 if (!ft_configfs)
640 return;
641 target_fabric_configfs_deregister(ft_configfs);
642 ft_configfs = NULL;
643}
644
645static struct notifier_block ft_notifier = {
646 .notifier_call = ft_lport_notify
647};
648
649static int __init ft_init(void)
650{
651 if (ft_register_configfs())
652 return -1;
653 if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
654 ft_deregister_configfs();
655 return -1;
656 }
657 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
658 fc_lport_iterate(ft_lport_add, NULL);
659 return 0;
660}
661
662static void __exit ft_exit(void)
663{
664 blocking_notifier_chain_unregister(&fc_lport_notifier_head,
665 &ft_notifier);
666 fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
667 fc_lport_iterate(ft_lport_del, NULL);
668 ft_deregister_configfs();
669 synchronize_rcu();
670}
671
672#ifdef MODULE
673MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
674MODULE_LICENSE("GPL");
675module_init(ft_init);
676module_exit(ft_exit);
677#endif /* MODULE */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
new file mode 100644
index 000000000000..4c3c0efbe13f
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
5 *
6 * Copyright (c) 2007 Intel Corporation. All rights reserved.
7 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
8 * Copyright (c) 2008 Mike Christie
9 * Copyright (c) 2009 Rising Tide, Inc.
10 * Copyright (c) 2009 Linux-iSCSI.org
11 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along with
23 * this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27/* XXX TBD some includes may be extraneous */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/version.h>
32#include <generated/utsrelease.h>
33#include <linux/utsname.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <linux/hash.h>
42#include <asm/unaligned.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
47#include <scsi/libfc.h>
48#include <scsi/fc_encode.h>
49
50#include <target/target_core_base.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_device.h>
54#include <target/target_core_tpg.h>
55#include <target/target_core_configfs.h>
56#include <target/target_core_base.h>
57#include <target/configfs_macros.h>
58
59#include "tcm_fc.h"
60
61/*
62 * Deliver read data back to initiator.
63 * XXX TBD handle resource problems later.
64 */
65int ft_queue_data_in(struct se_cmd *se_cmd)
66{
67 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
68 struct se_transport_task *task;
69 struct fc_frame *fp = NULL;
70 struct fc_exch *ep;
71 struct fc_lport *lport;
72 struct se_mem *mem;
73 size_t remaining;
74 u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
75 u32 mem_off;
76 u32 fh_off = 0;
77 u32 frame_off = 0;
78 size_t frame_len = 0;
79 size_t mem_len;
80 size_t tlen;
81 size_t off_in_page;
82 struct page *page;
83 int use_sg;
84 int error;
85 void *page_addr;
86 void *from;
87 void *to = NULL;
88
89 ep = fc_seq_exch(cmd->seq);
90 lport = ep->lp;
91 cmd->seq = lport->tt.seq_start_next(cmd->seq);
92
93 task = T_TASK(se_cmd);
94 BUG_ON(!task);
95 remaining = se_cmd->data_length;
96
97 /*
98 * Setup to use first mem list entry if any.
99 */
100 if (task->t_tasks_se_num) {
101 mem = list_first_entry(task->t_mem_list,
102 struct se_mem, se_list);
103 mem_len = mem->se_len;
104 mem_off = mem->se_off;
105 page = mem->se_page;
106 } else {
107 mem = NULL;
108 mem_len = remaining;
109 mem_off = 0;
110 page = NULL;
111 }
112
113 /* no scatter/gather in skb for odd word length due to fc_seq_send() */
114 use_sg = !(remaining % 4);
115
116 while (remaining) {
117 if (!mem_len) {
118 BUG_ON(!mem);
119 mem = list_entry(mem->se_list.next,
120 struct se_mem, se_list);
121 mem_len = min((size_t)mem->se_len, remaining);
122 mem_off = mem->se_off;
123 page = mem->se_page;
124 }
125 if (!frame_len) {
126 /*
127			 * If the lport has the Large Send Offload (LSO)
128			 * capability, allow 'frame_len' to be as large as
129			 * 'lso_max'; otherwise use the session's max frame size.
130 */
131 frame_len = (lport->seq_offload) ? lport->lso_max :
132 cmd->sess->max_frame;
133 frame_len = min(frame_len, remaining);
134 fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
135 if (!fp)
136 return -ENOMEM;
137 to = fc_frame_payload_get(fp, 0);
138 fh_off = frame_off;
139 frame_off += frame_len;
140 /*
141			 * Set the frame's max payload, which the base driver
142			 * uses to tell the HW the maximum frame size, so that
143			 * the HW can fragment appropriately based on the
144			 * "gso_max_size" of the underlying netdev.
145 */
146 fr_max_payload(fp) = cmd->sess->max_frame;
147 }
148 tlen = min(mem_len, frame_len);
149
150 if (use_sg) {
151 if (!mem) {
152 BUG_ON(!task->t_task_buf);
153 page_addr = task->t_task_buf + mem_off;
154 /*
155 * In this case, offset is 'offset_in_page' of
156 * (t_task_buf + mem_off) instead of 'mem_off'.
157 */
158 off_in_page = offset_in_page(page_addr);
159 page = virt_to_page(page_addr);
160 tlen = min(tlen, PAGE_SIZE - off_in_page);
161 } else
162 off_in_page = mem_off;
163 BUG_ON(!page);
164 get_page(page);
165 skb_fill_page_desc(fp_skb(fp),
166 skb_shinfo(fp_skb(fp))->nr_frags,
167 page, off_in_page, tlen);
168 fr_len(fp) += tlen;
169 fp_skb(fp)->data_len += tlen;
170 fp_skb(fp)->truesize +=
171 PAGE_SIZE << compound_order(page);
172 } else if (mem) {
173 BUG_ON(!page);
174 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
175 KM_SOFTIRQ0);
176 page_addr = from;
177 from += mem_off & ~PAGE_MASK;
178 tlen = min(tlen, (size_t)(PAGE_SIZE -
179 (mem_off & ~PAGE_MASK)));
180 memcpy(to, from, tlen);
181 kunmap_atomic(page_addr, KM_SOFTIRQ0);
182 to += tlen;
183 } else {
184 from = task->t_task_buf + mem_off;
185 memcpy(to, from, tlen);
186 to += tlen;
187 }
188
189 mem_off += tlen;
190 mem_len -= tlen;
191 frame_len -= tlen;
192 remaining -= tlen;
193
194 if (frame_len &&
195 (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
196 continue;
197 if (!remaining)
198 f_ctl |= FC_FC_END_SEQ;
199 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
200 FC_TYPE_FCP, f_ctl, fh_off);
201 error = lport->tt.seq_send(lport, cmd->seq, fp);
202 if (error) {
203 /* XXX For now, initiator will retry */
204 if (printk_ratelimit())
205 printk(KERN_ERR "%s: Failed to send frame %p, "
206 "xid <0x%x>, remaining <0x%x>, "
207 "lso_max <0x%x>\n",
208 __func__, fp, ep->xid,
209 remaining, lport->lso_max);
210 }
211 }
212 return ft_queue_status(se_cmd);
213}
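
Two of the decisions in ft_queue_data_in() above are easy to exercise in isolation: page fragments are attached to the skb only when the total remaining length is a multiple of four (the fc_seq_send() restriction noted in the comment), and each frame is sized to lso_max when the lport does sequence offload, to the session's max frame otherwise, never exceeding what remains. A stand-alone sketch of just that arithmetic (plan_next_frame() and its parameters are illustrative, not driver code):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct frame_plan {
	size_t frame_len;	/* payload carried by the next frame */
	bool use_sg;		/* attach pages instead of copying */
};

static struct frame_plan plan_next_frame(size_t remaining, bool seq_offload,
					 size_t lso_max, size_t max_frame)
{
	struct frame_plan p;
	size_t limit = seq_offload ? lso_max : max_frame;

	/* no scatter/gather for odd word lengths, as in the driver */
	p.use_sg = (remaining % 4) == 0;
	p.frame_len = remaining < limit ? remaining : limit;
	return p;
}

int main(void)
{
	struct frame_plan p;

	p = plan_next_frame(1 << 20, true, 65536, 2048);
	printf("1 MiB, LSO:    frame_len %zu use_sg %d\n", p.frame_len, p.use_sg);

	p = plan_next_frame(1 << 20, false, 65536, 2048);
	printf("1 MiB, no LSO: frame_len %zu use_sg %d\n", p.frame_len, p.use_sg);

	p = plan_next_frame(510, false, 65536, 2048);
	printf("510 bytes:     frame_len %zu use_sg %d\n", p.frame_len, p.use_sg);
	return 0;
}
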
214
215/*
216 * Receive write data frame.
217 */
218void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
219{
220 struct se_cmd *se_cmd = &cmd->se_cmd;
221 struct fc_seq *seq = cmd->seq;
222 struct fc_exch *ep;
223 struct fc_lport *lport;
224 struct se_transport_task *task;
225 struct fc_frame_header *fh;
226 struct se_mem *mem;
227 u32 mem_off;
228 u32 rel_off;
229 size_t frame_len;
230 size_t mem_len;
231 size_t tlen;
232 struct page *page;
233 void *page_addr;
234 void *from;
235 void *to;
236 u32 f_ctl;
237 void *buf;
238
239 task = T_TASK(se_cmd);
240 BUG_ON(!task);
241
242 fh = fc_frame_header_get(fp);
243 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
244 goto drop;
245
246 /*
247	 * No payload is expected here: with DDP (large receive offload)
248	 * the payload is placed directly into the user buffers by the
249	 * offload hardware, hence the BUG_ON() below if a payload is
250	 * present.
251 */
252 buf = fc_frame_payload_get(fp, 1);
253 if (cmd->was_ddp_setup && buf) {
254		printk(KERN_INFO "%s: unexpected payload in frame while DDP "
255			"is set up; the payload should be placed directly "
256			"into the buffer by DDP rather than arriving via "
257			"the legacy receive queues\n", __func__);
258 BUG_ON(buf);
259 }
260
261 /*
262	 * If DDP was set up for this ft_cmd, only the last frame should
263	 * arrive here, and it must have the TSI bit set. A data frame
264	 * without TSI indicates an error. In both cases release the DDP
265	 * context (ddp_done); in the error case also initiate error
266	 * recovery by aborting the exchange.
267 */
268 ep = fc_seq_exch(seq);
269 if (cmd->was_ddp_setup) {
270 BUG_ON(!ep);
271 lport = ep->lp;
272 BUG_ON(!lport);
273 }
274 if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
275 f_ctl = ntoh24(fh->fh_f_ctl);
276 /*
277		 * If the TSI bit is set in f_ctl, the last write data frame
278		 * was received successfully: the payload was posted directly
279		 * to the user buffer and only this final frame's header
280		 * arrives on the legacy receive queue.
281 */
282 if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
283 cmd->write_data_len = lport->tt.ddp_done(lport,
284 ep->xid);
285 goto last_frame;
286 } else {
287 /*
288			 * Updating write_data_len may be meaningless at this
289			 * point, but keep it current in case it is needed
290			 * later for debugging.
291 */
292 printk(KERN_ERR "%s: Received frame with TSI bit not"
293 " being SET, dropping the frame, "
294 "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
295 __func__, cmd->sg, cmd->sg_cnt);
296 cmd->write_data_len = lport->tt.ddp_done(lport,
297 ep->xid);
298 lport->tt.seq_exch_abort(cmd->seq, 0);
299 goto drop;
300 }
301 }
302
303 rel_off = ntohl(fh->fh_parm_offset);
304 frame_len = fr_len(fp);
305 if (frame_len <= sizeof(*fh))
306 goto drop;
307 frame_len -= sizeof(*fh);
308 from = fc_frame_payload_get(fp, 0);
309 if (rel_off >= se_cmd->data_length)
310 goto drop;
311 if (frame_len + rel_off > se_cmd->data_length)
312 frame_len = se_cmd->data_length - rel_off;
313
314 /*
315 * Setup to use first mem list entry if any.
316 */
317 if (task->t_tasks_se_num) {
318 mem = list_first_entry(task->t_mem_list,
319 struct se_mem, se_list);
320 mem_len = mem->se_len;
321 mem_off = mem->se_off;
322 page = mem->se_page;
323 } else {
324 mem = NULL;
325 page = NULL;
326 mem_off = 0;
327 mem_len = frame_len;
328 }
329
330 while (frame_len) {
331 if (!mem_len) {
332 BUG_ON(!mem);
333 mem = list_entry(mem->se_list.next,
334 struct se_mem, se_list);
335 mem_len = mem->se_len;
336 mem_off = mem->se_off;
337 page = mem->se_page;
338 }
339 if (rel_off >= mem_len) {
340 rel_off -= mem_len;
341 mem_len = 0;
342 continue;
343 }
344 mem_off += rel_off;
345 mem_len -= rel_off;
346 rel_off = 0;
347
348 tlen = min(mem_len, frame_len);
349
350 if (mem) {
351 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
352 KM_SOFTIRQ0);
353 page_addr = to;
354 to += mem_off & ~PAGE_MASK;
355 tlen = min(tlen, (size_t)(PAGE_SIZE -
356 (mem_off & ~PAGE_MASK)));
357 memcpy(to, from, tlen);
358 kunmap_atomic(page_addr, KM_SOFTIRQ0);
359 } else {
360 to = task->t_task_buf + mem_off;
361 memcpy(to, from, tlen);
362 }
363 from += tlen;
364 frame_len -= tlen;
365 mem_off += tlen;
366 mem_len -= tlen;
367 cmd->write_data_len += tlen;
368 }
369last_frame:
370 if (cmd->write_data_len == se_cmd->data_length)
371 transport_generic_handle_data(se_cmd);
372drop:
373 fc_frame_free(fp);
374}
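
When DDP is not active, ft_recv_write_data() above trusts the frame's relative-offset parameter only after clamping it against the command's data length: an offset at or past data_length drops the frame, and a frame that would run past the end is truncated. That clamping can be sanity-checked on its own (clamp_write_frame() below is a hypothetical stand-alone helper, not kernel code):

#include <stdio.h>
#include <stddef.h>

/* Returns the number of payload bytes to copy, or 0 to drop the frame. */
static size_t clamp_write_frame(size_t rel_off, size_t frame_len,
				size_t data_length)
{
	if (rel_off >= data_length)
		return 0;				/* offset past the buffer: drop */
	if (frame_len + rel_off > data_length)
		frame_len = data_length - rel_off;	/* truncate the overrun */
	return frame_len;
}

int main(void)
{
	printf("%zu\n", clamp_write_frame(0,    2048, 4096));	/* 2048 */
	printf("%zu\n", clamp_write_frame(3072, 2048, 4096));	/* 1024 (truncated) */
	printf("%zu\n", clamp_write_frame(4096, 2048, 4096));	/* 0 (dropped) */
	return 0;
}
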
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
new file mode 100644
index 000000000000..a3bd57f2ea32
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -0,0 +1,541 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <linux/rcupdate.h>
34#include <linux/rculist.h>
35#include <linux/kref.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_device.h>
47#include <target/target_core_tpg.h>
48#include <target/target_core_configfs.h>
49#include <target/target_core_base.h>
50#include <target/configfs_macros.h>
51
52#include <scsi/libfc.h>
53#include "tcm_fc.h"
54
55static void ft_sess_delete_all(struct ft_tport *);
56
57/*
58 * Lookup or allocate target local port.
59 * Caller holds ft_lport_lock.
60 */
61static struct ft_tport *ft_tport_create(struct fc_lport *lport)
62{
63 struct ft_tpg *tpg;
64 struct ft_tport *tport;
65 int i;
66
67 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
68 if (tport && tport->tpg)
69 return tport;
70
71 tpg = ft_lport_find_tpg(lport);
72 if (!tpg)
73 return NULL;
74
75 if (tport) {
76 tport->tpg = tpg;
77 return tport;
78 }
79
80 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
81 if (!tport)
82 return NULL;
83
84 tport->lport = lport;
85 tport->tpg = tpg;
86 tpg->tport = tport;
87 for (i = 0; i < FT_SESS_HASH_SIZE; i++)
88 INIT_HLIST_HEAD(&tport->hash[i]);
89
90 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
91 return tport;
92}
93
94/*
95 * Free tport via RCU.
96 */
97static void ft_tport_rcu_free(struct rcu_head *rcu)
98{
99 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
100
101 kfree(tport);
102}
103
104/*
105 * Delete a target local port.
106 * Caller holds ft_lport_lock.
107 */
108static void ft_tport_delete(struct ft_tport *tport)
109{
110 struct fc_lport *lport;
111 struct ft_tpg *tpg;
112
113 ft_sess_delete_all(tport);
114 lport = tport->lport;
115 BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
116 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
117
118 tpg = tport->tpg;
119 if (tpg) {
120 tpg->tport = NULL;
121 tport->tpg = NULL;
122 }
123 call_rcu(&tport->rcu, ft_tport_rcu_free);
124}
125
126/*
127 * Add local port.
128 * Called thru fc_lport_iterate().
129 */
130void ft_lport_add(struct fc_lport *lport, void *arg)
131{
132 mutex_lock(&ft_lport_lock);
133 ft_tport_create(lport);
134 mutex_unlock(&ft_lport_lock);
135}
136
137/*
138 * Delete local port.
139 * Called thru fc_lport_iterate().
140 */
141void ft_lport_del(struct fc_lport *lport, void *arg)
142{
143 struct ft_tport *tport;
144
145 mutex_lock(&ft_lport_lock);
146 tport = lport->prov[FC_TYPE_FCP];
147 if (tport)
148 ft_tport_delete(tport);
149 mutex_unlock(&ft_lport_lock);
150}
151
152/*
153 * Notification of local port change from libfc.
154 * Create or delete local port and associated tport.
155 */
156int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
157{
158 struct fc_lport *lport = arg;
159
160 switch (event) {
161 case FC_LPORT_EV_ADD:
162 ft_lport_add(lport, NULL);
163 break;
164 case FC_LPORT_EV_DEL:
165 ft_lport_del(lport, NULL);
166 break;
167 }
168 return NOTIFY_DONE;
169}
170
171/*
172 * Hash function for FC_IDs.
173 */
174static u32 ft_sess_hash(u32 port_id)
175{
176 return hash_32(port_id, FT_SESS_HASH_BITS);
177}
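
ft_sess_hash() above buckets 24-bit FC port IDs with hash_32(), the kernel's multiplicative hash: multiply by a golden-ratio prime and keep the top FT_SESS_HASH_BITS bits. A user-space approximation follows; the constant mirrors the kernel's GOLDEN_RATIO_PRIME_32 of this era and the 6-bit bucket count matches what tcm_fc.h is expected to define, but both should be read as assumptions here:

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_32	0x9e370001u	/* kernel constant of this era (assumed) */
#define SESS_HASH_BITS		6		/* assumed value of FT_SESS_HASH_BITS */

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* multiplicative hash; the high bits are the best mixed */
	return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

int main(void)
{
	/* print the bucket chosen for a few sample FC_IDs */
	uint32_t ids[] = { 0x010200, 0x010201, 0x010300, 0xef0010 };
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("port_id %06x -> bucket %u\n",
		       (unsigned)ids[i], (unsigned)hash_32(ids[i], SESS_HASH_BITS));
	return 0;
}
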
178
179/*
180 * Find session in local port.
181 * Sessions and hash lists are RCU-protected.
182 * A reference is taken which must be eventually freed.
183 */
184static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
185{
186 struct ft_tport *tport;
187 struct hlist_head *head;
188 struct hlist_node *pos;
189 struct ft_sess *sess;
190
191 rcu_read_lock();
192 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
193 if (!tport)
194 goto out;
195
196 head = &tport->hash[ft_sess_hash(port_id)];
197 hlist_for_each_entry_rcu(sess, pos, head, hash) {
198 if (sess->port_id == port_id) {
199 kref_get(&sess->kref);
200 rcu_read_unlock();
201 FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
202 return sess;
203 }
204 }
205out:
206 rcu_read_unlock();
207 FT_SESS_DBG("port_id %x not found\n", port_id);
208 return NULL;
209}
210
211/*
212 * Allocate session and enter it in the hash for the local port.
213 * Caller holds ft_lport_lock.
214 */
215static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
216 struct ft_node_acl *acl)
217{
218 struct ft_sess *sess;
219 struct hlist_head *head;
220 struct hlist_node *pos;
221
222 head = &tport->hash[ft_sess_hash(port_id)];
223 hlist_for_each_entry_rcu(sess, pos, head, hash)
224 if (sess->port_id == port_id)
225 return sess;
226
227 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
228 if (!sess)
229 return NULL;
230
231 sess->se_sess = transport_init_session();
232 if (!sess->se_sess) {
233 kfree(sess);
234 return NULL;
235 }
236 sess->se_sess->se_node_acl = &acl->se_node_acl;
237 sess->tport = tport;
238 sess->port_id = port_id;
239 kref_init(&sess->kref); /* ref for table entry */
240 hlist_add_head_rcu(&sess->hash, head);
241 tport->sess_count++;
242
243 FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
244
245 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
246 sess->se_sess, sess);
247 return sess;
248}
249
250/*
251 * Unhash the session.
252 * Caller holds ft_lport_lock.
253 */
254static void ft_sess_unhash(struct ft_sess *sess)
255{
256 struct ft_tport *tport = sess->tport;
257
258 hlist_del_rcu(&sess->hash);
259 BUG_ON(!tport->sess_count);
260 tport->sess_count--;
261 sess->port_id = -1;
262 sess->params = 0;
263}
264
265/*
266 * Delete session from hash.
267 * Caller holds ft_lport_lock.
268 */
269static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
270{
271 struct hlist_head *head;
272 struct hlist_node *pos;
273 struct ft_sess *sess;
274
275 head = &tport->hash[ft_sess_hash(port_id)];
276 hlist_for_each_entry_rcu(sess, pos, head, hash) {
277 if (sess->port_id == port_id) {
278 ft_sess_unhash(sess);
279 return sess;
280 }
281 }
282 return NULL;
283}
284
285/*
286 * Delete all sessions from tport.
287 * Caller holds ft_lport_lock.
288 */
289static void ft_sess_delete_all(struct ft_tport *tport)
290{
291 struct hlist_head *head;
292 struct hlist_node *pos;
293 struct ft_sess *sess;
294
295 for (head = tport->hash;
296 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
297 hlist_for_each_entry_rcu(sess, pos, head, hash) {
298 ft_sess_unhash(sess);
299 transport_deregister_session_configfs(sess->se_sess);
300 ft_sess_put(sess); /* release from table */
301 }
302 }
303}
304
305/*
306 * TCM ops for sessions.
307 */
308
309/*
310 * Determine whether session is allowed to be shutdown in the current context.
311 * Returns non-zero if the session should be shutdown.
312 */
313int ft_sess_shutdown(struct se_session *se_sess)
314{
315 struct ft_sess *sess = se_sess->fabric_sess_ptr;
316
317 FT_SESS_DBG("port_id %x\n", sess->port_id);
318 return 1;
319}
320
321/*
322 * Remove session and send PRLO.
323 * This is called when the ACL is being deleted or queue depth is changing.
324 */
325void ft_sess_close(struct se_session *se_sess)
326{
327 struct ft_sess *sess = se_sess->fabric_sess_ptr;
328 struct fc_lport *lport;
329 u32 port_id;
330
331 mutex_lock(&ft_lport_lock);
332 lport = sess->tport->lport;
333 port_id = sess->port_id;
334 if (port_id == -1) {
335		mutex_unlock(&ft_lport_lock);
336 return;
337 }
338 FT_SESS_DBG("port_id %x\n", port_id);
339 ft_sess_unhash(sess);
340 mutex_unlock(&ft_lport_lock);
341 transport_deregister_session_configfs(se_sess);
342 ft_sess_put(sess);
343 /* XXX Send LOGO or PRLO */
344 synchronize_rcu(); /* let transport deregister happen */
345}
346
347void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
348{
349 struct ft_sess *sess = se_sess->fabric_sess_ptr;
350
351 FT_SESS_DBG("port_id %x\n", sess->port_id);
352}
353
354int ft_sess_logged_in(struct se_session *se_sess)
355{
356 struct ft_sess *sess = se_sess->fabric_sess_ptr;
357
358 return sess->port_id != -1;
359}
360
361u32 ft_sess_get_index(struct se_session *se_sess)
362{
363 struct ft_sess *sess = se_sess->fabric_sess_ptr;
364
365 return sess->port_id; /* XXX TBD probably not what is needed */
366}
367
368u32 ft_sess_get_port_name(struct se_session *se_sess,
369 unsigned char *buf, u32 len)
370{
371 struct ft_sess *sess = se_sess->fabric_sess_ptr;
372
373 return ft_format_wwn(buf, len, sess->port_name);
374}
375
376void ft_sess_set_erl0(struct se_session *se_sess)
377{
378 /* XXX TBD called when out of memory */
379}
380
381/*
382 * libfc ops involving sessions.
383 */
384
385static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
386 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
387{
388 struct ft_tport *tport;
389 struct ft_sess *sess;
390 struct ft_node_acl *acl;
391 u32 fcp_parm;
392
393 tport = ft_tport_create(rdata->local_port);
394 if (!tport)
395 return 0; /* not a target for this local port */
396
397 acl = ft_acl_get(tport->tpg, rdata);
398 if (!acl)
399 return 0;
400
401 if (!rspp)
402 goto fill;
403
404 if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
405 return FC_SPP_RESP_NO_PA;
406
407 /*
408 * If both target and initiator bits are off, the SPP is invalid.
409 */
410 fcp_parm = ntohl(rspp->spp_params);
411 if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
412 return FC_SPP_RESP_INVL;
413
414 /*
415 * Create session (image pair) only if requested by
416 * EST_IMG_PAIR flag and if the requestor is an initiator.
417 */
418 if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
419 spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
420 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
421 return FC_SPP_RESP_CONF;
422 sess = ft_sess_create(tport, rdata->ids.port_id, acl);
423 if (!sess)
424 return FC_SPP_RESP_RES;
425 if (!sess->params)
426 rdata->prli_count++;
427 sess->params = fcp_parm;
428 sess->port_name = rdata->ids.port_name;
429 sess->max_frame = rdata->maxframe_size;
430
431 /* XXX TBD - clearing actions. unit attn, see 4.10 */
432 }
433
434 /*
435 * OR in our service parameters with other provider (initiator), if any.
436 * TBD XXX - indicate RETRY capability?
437 */
438fill:
439 fcp_parm = ntohl(spp->spp_params);
440 spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
441 return FC_SPP_RESP_ACK;
442}
443
444/**
445 * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target
446 * @rdata: remote port private
447 * @spp_len: service parameter page length
448 * @rspp: received service parameter page (NULL for outgoing PRLI)
449 * @spp: response service parameter page
450 *
451 * Returns spp response code.
452 */
453static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
454 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
455{
456 int ret;
457
458 mutex_lock(&ft_lport_lock);
459 ret = ft_prli_locked(rdata, spp_len, rspp, spp);
460 mutex_unlock(&ft_lport_lock);
461 FT_SESS_DBG("port_id %x flags %x ret %x\n",
462 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
463 return ret;
464}
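
ft_prli_locked() above accepts an image pair only when the initiator both requested one (FC_SPP_EST_IMG_PAIR) and advertised the FCP initiator function in its service parameters, and it always advertises the target function in the reply. The sketch below reduces each of those bit tests to a boolean and returns a descriptive string instead of the FC-LS response codes, purely to make the decision tree visible; it is not driver code:

#include <stdio.h>
#include <stdbool.h>

static const char *prli_decision(bool have_acl, bool proc_assoc_requested,
				 bool init_fcn, bool targ_fcn,
				 bool est_img_pair)
{
	if (!have_acl)
		return "ignore (no ACL for this initiator)";
	if (proc_assoc_requested)
		return "reject: process associators not supported";
	if (!init_fcn && !targ_fcn)
		return "reject: SPP invalid (neither role set)";
	if (est_img_pair && !init_fcn)
		return "conditional: image pair needs the initiator role";
	if (est_img_pair)
		return "accept: create session (image pair)";
	return "accept: no image pair requested";
}

int main(void)
{
	printf("%s\n", prli_decision(true, false, true, false, true));
	printf("%s\n", prli_decision(true, false, false, true, true));
	printf("%s\n", prli_decision(false, false, true, false, true));
	return 0;
}
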
465
466static void ft_sess_rcu_free(struct rcu_head *rcu)
467{
468 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
469
470 transport_deregister_session(sess->se_sess);
471 kfree(sess);
472}
473
474static void ft_sess_free(struct kref *kref)
475{
476 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
477
478 call_rcu(&sess->rcu, ft_sess_rcu_free);
479}
480
481void ft_sess_put(struct ft_sess *sess)
482{
483 int sess_held = atomic_read(&sess->kref.refcount);
484
485 BUG_ON(!sess_held);
486 kref_put(&sess->kref, ft_sess_free);
487}
488
489static void ft_prlo(struct fc_rport_priv *rdata)
490{
491 struct ft_sess *sess;
492 struct ft_tport *tport;
493
494 mutex_lock(&ft_lport_lock);
495 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
496 if (!tport) {
497 mutex_unlock(&ft_lport_lock);
498 return;
499 }
500 sess = ft_sess_delete(tport, rdata->ids.port_id);
501 if (!sess) {
502 mutex_unlock(&ft_lport_lock);
503 return;
504 }
505 mutex_unlock(&ft_lport_lock);
506 transport_deregister_session_configfs(sess->se_sess);
507 ft_sess_put(sess); /* release from table */
508 rdata->prli_count--;
509 /* XXX TBD - clearing actions. unit attn, see 4.10 */
510}
511
512/*
513 * Handle incoming FCP request.
514 * Caller has verified that the frame is type FCP.
515 */
516static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
517{
518 struct ft_sess *sess;
519 u32 sid = fc_frame_sid(fp);
520
521 FT_SESS_DBG("sid %x\n", sid);
522
523 sess = ft_sess_get(lport, sid);
524 if (!sess) {
525 FT_SESS_DBG("sid %x sess lookup failed\n", sid);
526 /* TBD XXX - if FCP_CMND, send PRLO */
527 fc_frame_free(fp);
528 return;
529 }
530 ft_recv_req(sess, fp); /* must do ft_sess_put() */
531}
532
533/*
534 * Provider ops for libfc.
535 */
536struct fc4_prov ft_prov = {
537 .prli = ft_prli,
538 .prlo = ft_prlo,
539 .recv = ft_recv,
540 .module = THIS_MODULE,
541};
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 09e52ba47ddf..ffc4193e9505 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -499,7 +499,6 @@ static int isd200_action( struct us_data *us, int action,
 499  499	memset(&ata, 0, sizeof(ata));
 500  500	srb->cmnd = info->cmnd;
 501  501	srb->device = &srb_dev;
 502       -	++srb->serial_number;
 503  502	
 504  503	ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
 505  504	ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;