117 files changed, 4635 insertions, 2709 deletions
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx
index 5020b7b5a244..52f0b4359234 100644
--- a/Documentation/scsi/LICENSE.qla2xxx
+++ b/Documentation/scsi/LICENSE.qla2xxx
| @@ -1,4 +1,4 @@ | |||
| 1 | Copyright (c) 2003-2013 QLogic Corporation | 1 | Copyright (c) 2003-2014 QLogic Corporation |
| 2 | QLogic Linux FC-FCoE Driver | 2 | QLogic Linux FC-FCoE Driver |
| 3 | 3 | ||
| 4 | This program includes a device driver for Linux 3.x. | 4 | This program includes a device driver for Linux 3.x. |
diff --git a/MAINTAINERS b/MAINTAINERS
index 77556fc91cdd..a1f4b576628a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -4390,7 +4390,7 @@ S: Supported | |||
| 4390 | F: drivers/crypto/nx/ | 4390 | F: drivers/crypto/nx/ |
| 4391 | 4391 | ||
| 4392 | IBM Power 842 compression accelerator | 4392 | IBM Power 842 compression accelerator |
| 4393 | M: Robert Jennings <rcj@linux.vnet.ibm.com> | 4393 | M: Nathan Fontenot <nfont@linux.vnet.ibm.com> |
| 4394 | S: Supported | 4394 | S: Supported |
| 4395 | F: drivers/crypto/nx/nx-842.c | 4395 | F: drivers/crypto/nx/nx-842.c |
| 4396 | F: include/linux/nx842.h | 4396 | F: include/linux/nx842.h |
| @@ -4406,12 +4406,18 @@ L: netdev@vger.kernel.org | |||
| 4406 | S: Supported | 4406 | S: Supported |
| 4407 | F: drivers/net/ethernet/ibm/ibmveth.* | 4407 | F: drivers/net/ethernet/ibm/ibmveth.* |
| 4408 | 4408 | ||
| 4409 | IBM Power Virtual SCSI/FC Device Drivers | 4409 | IBM Power Virtual SCSI Device Drivers |
| 4410 | M: Robert Jennings <rcj@linux.vnet.ibm.com> | 4410 | M: Nathan Fontenot <nfont@linux.vnet.ibm.com> |
| 4411 | L: linux-scsi@vger.kernel.org | 4411 | L: linux-scsi@vger.kernel.org |
| 4412 | S: Supported | 4412 | S: Supported |
| 4413 | F: drivers/scsi/ibmvscsi/ | 4413 | F: drivers/scsi/ibmvscsi/ibmvscsi* |
| 4414 | X: drivers/scsi/ibmvscsi/ibmvstgt.c | 4414 | F: drivers/scsi/ibmvscsi/viosrp.h |
| 4415 | |||
| 4416 | IBM Power Virtual FC Device Drivers | ||
| 4417 | M: Brian King <brking@linux.vnet.ibm.com> | ||
| 4418 | L: linux-scsi@vger.kernel.org | ||
| 4419 | S: Supported | ||
| 4420 | F: drivers/scsi/ibmvscsi/ibmvfc* | ||
| 4415 | 4421 | ||
| 4416 | IBM ServeRAID RAID DRIVER | 4422 | IBM ServeRAID RAID DRIVER |
| 4417 | P: Jack Hammer | 4423 | P: Jack Hammer |
| @@ -6028,6 +6034,28 @@ M: Petr Vandrovec <petr@vandrovec.name> | |||
| 6028 | S: Odd Fixes | 6034 | S: Odd Fixes |
| 6029 | F: fs/ncpfs/ | 6035 | F: fs/ncpfs/ |
| 6030 | 6036 | ||
| 6037 | NCR 5380 SCSI DRIVERS | ||
| 6038 | M: Finn Thain <fthain@telegraphics.com.au> | ||
| 6039 | M: Michael Schmitz <schmitzmic@gmail.com> | ||
| 6040 | L: linux-scsi@vger.kernel.org | ||
| 6041 | S: Maintained | ||
| 6042 | F: Documentation/scsi/g_NCR5380.txt | ||
| 6043 | F: drivers/scsi/NCR5380.* | ||
| 6044 | F: drivers/scsi/arm/cumana_1.c | ||
| 6045 | F: drivers/scsi/arm/oak.c | ||
| 6046 | F: drivers/scsi/atari_NCR5380.c | ||
| 6047 | F: drivers/scsi/atari_scsi.* | ||
| 6048 | F: drivers/scsi/dmx3191d.c | ||
| 6049 | F: drivers/scsi/dtc.* | ||
| 6050 | F: drivers/scsi/g_NCR5380.* | ||
| 6051 | F: drivers/scsi/g_NCR5380_mmio.c | ||
| 6052 | F: drivers/scsi/mac_scsi.* | ||
| 6053 | F: drivers/scsi/pas16.* | ||
| 6054 | F: drivers/scsi/sun3_NCR5380.c | ||
| 6055 | F: drivers/scsi/sun3_scsi.* | ||
| 6056 | F: drivers/scsi/sun3_scsi_vme.c | ||
| 6057 | F: drivers/scsi/t128.* | ||
| 6058 | |||
| 6031 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) | 6059 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) |
| 6032 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> | 6060 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> |
| 6033 | L: linux-scsi@vger.kernel.org | 6061 | L: linux-scsi@vger.kernel.org |
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 570b18a113ff..ebc0af7d769c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
| @@ -1037,7 +1037,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) | |||
| 1037 | goto out; | 1037 | goto out; |
| 1038 | /* signature to know if this mf is freed */ | 1038 | /* signature to know if this mf is freed */ |
| 1039 | mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf); | 1039 | mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf); |
| 1040 | list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); | 1040 | list_add(&mf->u.frame.linkage.list, &ioc->FreeQ); |
| 1041 | #ifdef MFCNT | 1041 | #ifdef MFCNT |
| 1042 | ioc->mfcnt--; | 1042 | ioc->mfcnt--; |
| 1043 | #endif | 1043 | #endif |
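The mptbase.c change above swaps list_add_tail() for list_add() when returning a message frame to ioc->FreeQ. With the standard <linux/list.h> semantics, the freed frame now goes to the head of the free list instead of the tail, so, assuming frames are handed out from the front of FreeQ, the most recently freed frame is the first one reused. A minimal, userspace-compilable sketch of the two insertion primitives (a simplified re-implementation for illustration, not the kernel header itself):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void __list_insert(struct list_head *new,
                          struct list_head *prev, struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/* list_add(): insert right after the head -- LIFO reuse (the new behaviour). */
static void list_add(struct list_head *new, struct list_head *head)
{
        __list_insert(new, head, head->next);
}

/* list_add_tail(): insert right before the head -- FIFO reuse (the old behaviour). */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_insert(new, head->prev, head);
}

struct frame { struct list_head list; int id; };

int main(void)
{
        struct list_head freeq = LIST_HEAD_INIT(freeq);
        struct frame a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        list_add_tail(&a.list, &freeq);  /* old style: queued at the back */
        list_add_tail(&b.list, &freeq);
        list_add(&c.list, &freeq);       /* new style: jumps to the front */

        struct frame *next_frame = (struct frame *)((char *)freeq.next -
                                                    offsetof(struct frame, list));
        printf("next frame handed out: %d\n", next_frame->id);  /* prints 3 */
        return 0;
}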
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index dcc8385adeb3..8a050e885688 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
| @@ -2432,9 +2432,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) | |||
| 2432 | int rc, cim_rev; | 2432 | int rc, cim_rev; |
| 2433 | ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; | 2433 | ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; |
| 2434 | MPT_FRAME_HDR *mf = NULL; | 2434 | MPT_FRAME_HDR *mf = NULL; |
| 2435 | MPIHeader_t *mpi_hdr; | ||
| 2436 | unsigned long timeleft; | 2435 | unsigned long timeleft; |
| 2437 | int retval; | 2436 | int retval; |
| 2437 | u32 msgcontext; | ||
| 2438 | 2438 | ||
| 2439 | /* Reset long to int. Should affect IA64 and SPARC only | 2439 | /* Reset long to int. Should affect IA64 and SPARC only |
| 2440 | */ | 2440 | */ |
| @@ -2581,11 +2581,11 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) | |||
| 2581 | } | 2581 | } |
| 2582 | 2582 | ||
| 2583 | IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; | 2583 | IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; |
| 2584 | mpi_hdr = (MPIHeader_t *) mf; | 2584 | msgcontext = IstwiRWRequest->MsgContext; |
| 2585 | memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); | 2585 | memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); |
| 2586 | IstwiRWRequest->MsgContext = msgcontext; | ||
| 2586 | IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; | 2587 | IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; |
| 2587 | IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; | 2588 | IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; |
| 2588 | IstwiRWRequest->MsgContext = mpi_hdr->MsgContext; | ||
| 2589 | IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; | 2589 | IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; |
| 2590 | IstwiRWRequest->NumAddressBytes = 0x01; | 2590 | IstwiRWRequest->NumAddressBytes = 0x01; |
| 2591 | IstwiRWRequest->DataLength = cpu_to_le16(0x04); | 2591 | IstwiRWRequest->DataLength = cpu_to_le16(0x04); |
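In the mptctl.c hunk above, the old code zeroed the request frame and only then read MsgContext back through mpi_hdr, a pointer aliasing the frame that had just been memset, so the value written back was always zero. The new code snapshots the field into msgcontext before the memset and restores it afterwards. A small sketch of that save/clear/restore pattern (structure and field names are simplified placeholders, not the driver's real layout):

#include <stdint.h>
#include <string.h>

struct request_frame {
        uint32_t msg_context;   /* handed out by the IOC layer; must survive frame reuse */
        uint8_t  function;
        uint8_t  payload[48];   /* rest of the request, illustrative only */
};

void reinit_frame(struct request_frame *req, uint8_t function)
{
        uint32_t msgcontext = req->msg_context; /* save before wiping the frame */

        memset(req, 0, sizeof(*req));           /* clears msg_context along with everything else */
        req->msg_context = msgcontext;          /* restore the saved value */
        req->function = function;
}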
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index fd75108c355e..02a3eefd6931 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
| @@ -649,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev) | |||
| 649 | } | 649 | } |
| 650 | 650 | ||
| 651 | static int | 651 | static int |
| 652 | mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 652 | mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt) |
| 653 | { | 653 | { |
| 654 | struct mptfc_rport_info *ri; | 654 | struct mptfc_rport_info *ri; |
| 655 | struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); | 655 | struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); |
| @@ -658,14 +658,14 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
| 658 | 658 | ||
| 659 | if (!vdevice || !vdevice->vtarget) { | 659 | if (!vdevice || !vdevice->vtarget) { |
| 660 | SCpnt->result = DID_NO_CONNECT << 16; | 660 | SCpnt->result = DID_NO_CONNECT << 16; |
| 661 | done(SCpnt); | 661 | SCpnt->scsi_done(SCpnt); |
| 662 | return 0; | 662 | return 0; |
| 663 | } | 663 | } |
| 664 | 664 | ||
| 665 | err = fc_remote_port_chkready(rport); | 665 | err = fc_remote_port_chkready(rport); |
| 666 | if (unlikely(err)) { | 666 | if (unlikely(err)) { |
| 667 | SCpnt->result = err; | 667 | SCpnt->result = err; |
| 668 | done(SCpnt); | 668 | SCpnt->scsi_done(SCpnt); |
| 669 | return 0; | 669 | return 0; |
| 670 | } | 670 | } |
| 671 | 671 | ||
| @@ -673,15 +673,13 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
| 673 | ri = *((struct mptfc_rport_info **)rport->dd_data); | 673 | ri = *((struct mptfc_rport_info **)rport->dd_data); |
| 674 | if (unlikely(!ri)) { | 674 | if (unlikely(!ri)) { |
| 675 | SCpnt->result = DID_IMM_RETRY << 16; | 675 | SCpnt->result = DID_IMM_RETRY << 16; |
| 676 | done(SCpnt); | 676 | SCpnt->scsi_done(SCpnt); |
| 677 | return 0; | 677 | return 0; |
| 678 | } | 678 | } |
| 679 | 679 | ||
| 680 | return mptscsih_qcmd(SCpnt,done); | 680 | return mptscsih_qcmd(SCpnt); |
| 681 | } | 681 | } |
| 682 | 682 | ||
| 683 | static DEF_SCSI_QCMD(mptfc_qcmd) | ||
| 684 | |||
| 685 | /* | 683 | /* |
| 686 | * mptfc_display_port_link_speed - displaying link speed | 684 | * mptfc_display_port_link_speed - displaying link speed |
| 687 | * @ioc: Pointer to MPT_ADAPTER structure | 685 | * @ioc: Pointer to MPT_ADAPTER structure |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 00d339c361fc..711fcb5cec87 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
| @@ -1896,7 +1896,7 @@ mptsas_slave_alloc(struct scsi_device *sdev) | |||
| 1896 | } | 1896 | } |
| 1897 | 1897 | ||
| 1898 | static int | 1898 | static int |
| 1899 | mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1899 | mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt) |
| 1900 | { | 1900 | { |
| 1901 | MPT_SCSI_HOST *hd; | 1901 | MPT_SCSI_HOST *hd; |
| 1902 | MPT_ADAPTER *ioc; | 1902 | MPT_ADAPTER *ioc; |
| @@ -1904,11 +1904,11 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
| 1904 | 1904 | ||
| 1905 | if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { | 1905 | if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { |
| 1906 | SCpnt->result = DID_NO_CONNECT << 16; | 1906 | SCpnt->result = DID_NO_CONNECT << 16; |
| 1907 | done(SCpnt); | 1907 | SCpnt->scsi_done(SCpnt); |
| 1908 | return 0; | 1908 | return 0; |
| 1909 | } | 1909 | } |
| 1910 | 1910 | ||
| 1911 | hd = shost_priv(SCpnt->device->host); | 1911 | hd = shost_priv(shost); |
| 1912 | ioc = hd->ioc; | 1912 | ioc = hd->ioc; |
| 1913 | 1913 | ||
| 1914 | if (ioc->sas_discovery_quiesce_io) | 1914 | if (ioc->sas_discovery_quiesce_io) |
| @@ -1917,11 +1917,9 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
| 1917 | if (ioc->debug_level & MPT_DEBUG_SCSI) | 1917 | if (ioc->debug_level & MPT_DEBUG_SCSI) |
| 1918 | scsi_print_command(SCpnt); | 1918 | scsi_print_command(SCpnt); |
| 1919 | 1919 | ||
| 1920 | return mptscsih_qcmd(SCpnt,done); | 1920 | return mptscsih_qcmd(SCpnt); |
| 1921 | } | 1921 | } |
| 1922 | 1922 | ||
| 1923 | static DEF_SCSI_QCMD(mptsas_qcmd) | ||
| 1924 | |||
| 1925 | /** | 1923 | /** |
| 1926 | * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout | 1924 | * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout |
| 1927 | * if the device under question is currently in the | 1925 | * if the device under question is currently in the |
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 727819cc7034..2a1c6f21af27 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
| @@ -1304,7 +1304,6 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
| 1304 | /** | 1304 | /** |
| 1305 | * mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine. | 1305 | * mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine. |
| 1306 | * @SCpnt: Pointer to scsi_cmnd structure | 1306 | * @SCpnt: Pointer to scsi_cmnd structure |
| 1307 | * @done: Pointer SCSI mid-layer IO completion function | ||
| 1308 | * | 1307 | * |
| 1309 | * (linux scsi_host_template.queuecommand routine) | 1308 | * (linux scsi_host_template.queuecommand routine) |
| 1310 | * This is the primary SCSI IO start routine. Create a MPI SCSIIORequest | 1309 | * This is the primary SCSI IO start routine. Create a MPI SCSIIORequest |
| @@ -1313,7 +1312,7 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
| 1313 | * Returns 0. (rtn value discarded by linux scsi mid-layer) | 1312 | * Returns 0. (rtn value discarded by linux scsi mid-layer) |
| 1314 | */ | 1313 | */ |
| 1315 | int | 1314 | int |
| 1316 | mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1315 | mptscsih_qcmd(struct scsi_cmnd *SCpnt) |
| 1317 | { | 1316 | { |
| 1318 | MPT_SCSI_HOST *hd; | 1317 | MPT_SCSI_HOST *hd; |
| 1319 | MPT_FRAME_HDR *mf; | 1318 | MPT_FRAME_HDR *mf; |
| @@ -1329,10 +1328,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
| 1329 | 1328 | ||
| 1330 | hd = shost_priv(SCpnt->device->host); | 1329 | hd = shost_priv(SCpnt->device->host); |
| 1331 | ioc = hd->ioc; | 1330 | ioc = hd->ioc; |
| 1332 | SCpnt->scsi_done = done; | ||
| 1333 | 1331 | ||
| 1334 | dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", | 1332 | dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p\n", |
| 1335 | ioc->name, SCpnt, done)); | 1333 | ioc->name, SCpnt)); |
| 1336 | 1334 | ||
| 1337 | if (ioc->taskmgmt_quiesce_io) | 1335 | if (ioc->taskmgmt_quiesce_io) |
| 1338 | return SCSI_MLQUEUE_HOST_BUSY; | 1336 | return SCSI_MLQUEUE_HOST_BUSY; |
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 83f503162f7a..99e3390807f3 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
| @@ -113,7 +113,7 @@ extern int mptscsih_resume(struct pci_dev *pdev); | |||
| 113 | #endif | 113 | #endif |
| 114 | extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *); | 114 | extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *); |
| 115 | extern const char * mptscsih_info(struct Scsi_Host *SChost); | 115 | extern const char * mptscsih_info(struct Scsi_Host *SChost); |
| 116 | extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); | 116 | extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt); |
| 117 | extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, | 117 | extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, |
| 118 | u8 id, int lun, int ctx2abort, ulong timeout); | 118 | u8 id, int lun, int ctx2abort, ulong timeout); |
| 119 | extern void mptscsih_slave_destroy(struct scsi_device *device); | 119 | extern void mptscsih_slave_destroy(struct scsi_device *device); |
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 5653e505f91f..49d11338294b 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
| @@ -780,33 +780,31 @@ static int mptspi_slave_configure(struct scsi_device *sdev) | |||
| 780 | } | 780 | } |
| 781 | 781 | ||
| 782 | static int | 782 | static int |
| 783 | mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 783 | mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt) |
| 784 | { | 784 | { |
| 785 | struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host); | 785 | struct _MPT_SCSI_HOST *hd = shost_priv(shost); |
| 786 | VirtDevice *vdevice = SCpnt->device->hostdata; | 786 | VirtDevice *vdevice = SCpnt->device->hostdata; |
| 787 | MPT_ADAPTER *ioc = hd->ioc; | 787 | MPT_ADAPTER *ioc = hd->ioc; |
| 788 | 788 | ||
| 789 | if (!vdevice || !vdevice->vtarget) { | 789 | if (!vdevice || !vdevice->vtarget) { |
| 790 | SCpnt->result = DID_NO_CONNECT << 16; | 790 | SCpnt->result = DID_NO_CONNECT << 16; |
| 791 | done(SCpnt); | 791 | SCpnt->scsi_done(SCpnt); |
| 792 | return 0; | 792 | return 0; |
| 793 | } | 793 | } |
| 794 | 794 | ||
| 795 | if (SCpnt->device->channel == 1 && | 795 | if (SCpnt->device->channel == 1 && |
| 796 | mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) { | 796 | mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) { |
| 797 | SCpnt->result = DID_NO_CONNECT << 16; | 797 | SCpnt->result = DID_NO_CONNECT << 16; |
| 798 | done(SCpnt); | 798 | SCpnt->scsi_done(SCpnt); |
| 799 | return 0; | 799 | return 0; |
| 800 | } | 800 | } |
| 801 | 801 | ||
| 802 | if (spi_dv_pending(scsi_target(SCpnt->device))) | 802 | if (spi_dv_pending(scsi_target(SCpnt->device))) |
| 803 | ddvprintk(ioc, scsi_print_command(SCpnt)); | 803 | ddvprintk(ioc, scsi_print_command(SCpnt)); |
| 804 | 804 | ||
| 805 | return mptscsih_qcmd(SCpnt,done); | 805 | return mptscsih_qcmd(SCpnt); |
| 806 | } | 806 | } |
| 807 | 807 | ||
| 808 | static DEF_SCSI_QCMD(mptspi_qcmd) | ||
| 809 | |||
| 810 | static void mptspi_slave_destroy(struct scsi_device *sdev) | 808 | static void mptspi_slave_destroy(struct scsi_device *sdev) |
| 811 | { | 809 | { |
| 812 | struct scsi_target *starget = scsi_target(sdev); | 810 | struct scsi_target *starget = scsi_target(sdev); |
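The mptfc, mptsas and mptspi hunks above all apply the same conversion: the done-callback style ..._qcmd_lck(SCpnt, done) handlers and their DEF_SCSI_QCMD() wrappers are replaced by plain queuecommand routines that take the Scsi_Host and the command, complete early failures through SCpnt->scsi_done(), and fetch per-host data via shost_priv(shost). DEF_SCSI_QCMD() is the wrapper that takes the host lock around a _lck handler, so after this change the routines run without the host lock held; the midlayer sets SCpnt->scsi_done before calling queuecommand, which is why the explicit assignment in mptscsih_qcmd() can also go away. A condensed sketch of the resulting shape (example_host and example_issue are placeholders, not symbols from the driver):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct example_host;    /* placeholder for the driver's per-host private data */
int example_issue(struct example_host *hd, struct scsi_cmnd *cmd);
                        /* stands in for the shared start routine, e.g. mptscsih_qcmd() */

int example_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
        struct example_host *hd = shost_priv(shost);    /* per-host data; no host lock taken */

        if (!SCpnt->device->hostdata) {
                /* Early failure path: set the result and complete the command directly. */
                SCpnt->result = DID_NO_CONNECT << 16;
                SCpnt->scsi_done(SCpnt);
                return 0;
        }

        return example_issue(hd, SCpnt);        /* normal path: hand off to the start routine */
}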
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index bcd223868227..93d13fc9a293 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
| @@ -27,8 +27,6 @@ | |||
| 27 | */ | 27 | */ |
| 28 | 28 | ||
| 29 | /* | 29 | /* |
| 30 | * $Log: NCR5380.c,v $ | ||
| 31 | |||
| 32 | * Revision 1.10 1998/9/2 Alan Cox | 30 | * Revision 1.10 1998/9/2 Alan Cox |
| 33 | * (alan@lxorguk.ukuu.org.uk) | 31 | * (alan@lxorguk.ukuu.org.uk) |
| 34 | * Fixed up the timer lockups reported so far. Things still suck. Looking | 32 | * Fixed up the timer lockups reported so far. Things still suck. Looking |
| @@ -89,13 +87,6 @@ | |||
| 89 | #include <scsi/scsi_dbg.h> | 87 | #include <scsi/scsi_dbg.h> |
| 90 | #include <scsi/scsi_transport_spi.h> | 88 | #include <scsi/scsi_transport_spi.h> |
| 91 | 89 | ||
| 92 | #ifndef NDEBUG | ||
| 93 | #define NDEBUG 0 | ||
| 94 | #endif | ||
| 95 | #ifndef NDEBUG_ABORT | ||
| 96 | #define NDEBUG_ABORT 0 | ||
| 97 | #endif | ||
| 98 | |||
| 99 | #if (NDEBUG & NDEBUG_LISTS) | 90 | #if (NDEBUG & NDEBUG_LISTS) |
| 100 | #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); } | 91 | #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); } |
| 101 | #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } | 92 | #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } |
| @@ -1005,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *) | |||
| 1005 | LIST(cmd, tmp); | 996 | LIST(cmd, tmp); |
| 1006 | tmp->host_scribble = (unsigned char *) cmd; | 997 | tmp->host_scribble = (unsigned char *) cmd; |
| 1007 | } | 998 | } |
| 1008 | dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail")); | 999 | dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); |
| 1009 | 1000 | ||
| 1010 | /* Run the coroutine if it isn't already running. */ | 1001 | /* Run the coroutine if it isn't already running. */ |
| 1011 | /* Kick off command processing */ | 1002 | /* Kick off command processing */ |
| @@ -1040,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1040 | /* Lock held here */ | 1031 | /* Lock held here */ |
| 1041 | done = 1; | 1032 | done = 1; |
| 1042 | if (!hostdata->connected && !hostdata->selecting) { | 1033 | if (!hostdata->connected && !hostdata->selecting) { |
| 1043 | dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no)); | 1034 | dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no); |
| 1044 | /* | 1035 | /* |
| 1045 | * Search through the issue_queue for a command destined | 1036 | * Search through the issue_queue for a command destined |
| 1046 | * for a target that's not busy. | 1037 | * for a target that's not busy. |
| @@ -1048,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1048 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) | 1039 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) |
| 1049 | { | 1040 | { |
| 1050 | if (prev != tmp) | 1041 | if (prev != tmp) |
| 1051 | dprintk(NDEBUG_LISTS, ("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun)); | 1042 | dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); |
| 1052 | /* When we find one, remove it from the issue queue. */ | 1043 | /* When we find one, remove it from the issue queue. */ |
| 1053 | if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) { | 1044 | if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) { |
| 1054 | if (prev) { | 1045 | if (prev) { |
| @@ -1066,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1066 | * On failure, we must add the command back to the | 1057 | * On failure, we must add the command back to the |
| 1067 | * issue queue so we can keep trying. | 1058 | * issue queue so we can keep trying. |
| 1068 | */ | 1059 | */ |
| 1069 | dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun)); | 1060 | dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun); |
| 1070 | 1061 | ||
| 1071 | /* | 1062 | /* |
| 1072 | * A successful selection is defined as one that | 1063 | * A successful selection is defined as one that |
| @@ -1095,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1095 | tmp->host_scribble = (unsigned char *) hostdata->issue_queue; | 1086 | tmp->host_scribble = (unsigned char *) hostdata->issue_queue; |
| 1096 | hostdata->issue_queue = tmp; | 1087 | hostdata->issue_queue = tmp; |
| 1097 | done = 0; | 1088 | done = 0; |
| 1098 | dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no)); | 1089 | dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no); |
| 1099 | } | 1090 | } |
| 1100 | /* lock held here still */ | 1091 | /* lock held here still */ |
| 1101 | } /* if target/lun is not busy */ | 1092 | } /* if target/lun is not busy */ |
| @@ -1125,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1125 | #endif | 1116 | #endif |
| 1126 | && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies)) | 1117 | && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies)) |
| 1127 | ) { | 1118 | ) { |
| 1128 | dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no)); | 1119 | dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no); |
| 1129 | NCR5380_information_transfer(instance); | 1120 | NCR5380_information_transfer(instance); |
| 1130 | dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no)); | 1121 | dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no); |
| 1131 | done = 0; | 1122 | done = 0; |
| 1132 | } else | 1123 | } else |
| 1133 | break; | 1124 | break; |
| @@ -1159,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) | |||
| 1159 | unsigned char basr; | 1150 | unsigned char basr; |
| 1160 | unsigned long flags; | 1151 | unsigned long flags; |
| 1161 | 1152 | ||
| 1162 | dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n", | 1153 | dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n", |
| 1163 | instance->irq)); | 1154 | instance->irq); |
| 1164 | 1155 | ||
| 1165 | do { | 1156 | do { |
| 1166 | done = 1; | 1157 | done = 1; |
| @@ -1173,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) | |||
| 1173 | NCR5380_dprint(NDEBUG_INTR, instance); | 1164 | NCR5380_dprint(NDEBUG_INTR, instance); |
| 1174 | if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { | 1165 | if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { |
| 1175 | done = 0; | 1166 | done = 0; |
| 1176 | dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no)); | 1167 | dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no); |
| 1177 | NCR5380_reselect(instance); | 1168 | NCR5380_reselect(instance); |
| 1178 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1169 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1179 | } else if (basr & BASR_PARITY_ERROR) { | 1170 | } else if (basr & BASR_PARITY_ERROR) { |
| 1180 | dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no)); | 1171 | dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no); |
| 1181 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1172 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1182 | } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { | 1173 | } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { |
| 1183 | dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no)); | 1174 | dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no); |
| 1184 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1175 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1185 | } else { | 1176 | } else { |
| 1186 | #if defined(REAL_DMA) | 1177 | #if defined(REAL_DMA) |
| @@ -1210,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) | |||
| 1210 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1201 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 1211 | } | 1202 | } |
| 1212 | #else | 1203 | #else |
| 1213 | dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG))); | 1204 | dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); |
| 1214 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1205 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1215 | #endif | 1206 | #endif |
| 1216 | } | 1207 | } |
| @@ -1304,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) | |||
| 1304 | hostdata->restart_select = 0; | 1295 | hostdata->restart_select = 0; |
| 1305 | 1296 | ||
| 1306 | NCR5380_dprint(NDEBUG_ARBITRATION, instance); | 1297 | NCR5380_dprint(NDEBUG_ARBITRATION, instance); |
| 1307 | dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id)); | 1298 | dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id); |
| 1308 | 1299 | ||
| 1309 | /* | 1300 | /* |
| 1310 | * Set the phase bits to 0, otherwise the NCR5380 won't drive the | 1301 | * Set the phase bits to 0, otherwise the NCR5380 won't drive the |
| @@ -1333,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) | |||
| 1333 | goto failed; | 1324 | goto failed; |
| 1334 | } | 1325 | } |
| 1335 | 1326 | ||
| 1336 | dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no)); | 1327 | dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no); |
| 1337 | 1328 | ||
| 1338 | /* | 1329 | /* |
| 1339 | * The arbitration delay is 2.2us, but this is a minimum and there is | 1330 | * The arbitration delay is 2.2us, but this is a minimum and there is |
| @@ -1347,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) | |||
| 1347 | /* Check for lost arbitration */ | 1338 | /* Check for lost arbitration */ |
| 1348 | if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { | 1339 | if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { |
| 1349 | NCR5380_write(MODE_REG, MR_BASE); | 1340 | NCR5380_write(MODE_REG, MR_BASE); |
| 1350 | dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no)); | 1341 | dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no); |
| 1351 | goto failed; | 1342 | goto failed; |
| 1352 | } | 1343 | } |
| 1353 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL); | 1344 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL); |
| @@ -1360,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) | |||
| 1360 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { | 1351 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { |
| 1361 | NCR5380_write(MODE_REG, MR_BASE); | 1352 | NCR5380_write(MODE_REG, MR_BASE); |
| 1362 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1353 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 1363 | dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no)); | 1354 | dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no); |
| 1364 | goto failed; | 1355 | goto failed; |
| 1365 | } | 1356 | } |
| 1366 | /* | 1357 | /* |
| @@ -1370,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) | |||
| 1370 | 1361 | ||
| 1371 | udelay(2); | 1362 | udelay(2); |
| 1372 | 1363 | ||
| 1373 | dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no)); | 1364 | dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no); |
| 1374 | 1365 | ||
| 1375 | /* | 1366 | /* |
| 1376 | * Now that we have won arbitration, start Selection process, asserting | 1367 | * Now that we have won arbitration, start Selection process, asserting |
| @@ -1422,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) | |||
| 1422 | 1413 | ||
| 1423 | udelay(1); | 1414 | udelay(1); |
| 1424 | 1415 | ||
| 1425 | dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd))); | 1416 | dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)); |
| 1426 | 1417 | ||
| 1427 | /* | 1418 | /* |
| 1428 | * The SCSI specification calls for a 250 ms timeout for the actual | 1419 | * The SCSI specification calls for a 250 ms timeout for the actual |
| @@ -1487,7 +1478,7 @@ part2: | |||
| 1487 | collect_stats(hostdata, cmd); | 1478 | collect_stats(hostdata, cmd); |
| 1488 | cmd->scsi_done(cmd); | 1479 | cmd->scsi_done(cmd); |
| 1489 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1480 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1490 | dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no)); | 1481 | dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no); |
| 1491 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1482 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1492 | return 0; | 1483 | return 0; |
| 1493 | } | 1484 | } |
| @@ -1520,7 +1511,7 @@ part2: | |||
| 1520 | goto failed; | 1511 | goto failed; |
| 1521 | } | 1512 | } |
| 1522 | 1513 | ||
| 1523 | dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id)); | 1514 | dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id); |
| 1524 | tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun); | 1515 | tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun); |
| 1525 | 1516 | ||
| 1526 | len = 1; | 1517 | len = 1; |
| @@ -1530,7 +1521,7 @@ part2: | |||
| 1530 | data = tmp; | 1521 | data = tmp; |
| 1531 | phase = PHASE_MSGOUT; | 1522 | phase = PHASE_MSGOUT; |
| 1532 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 1523 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 1533 | dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no)); | 1524 | dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no); |
| 1534 | /* XXX need to handle errors here */ | 1525 | /* XXX need to handle errors here */ |
| 1535 | hostdata->connected = cmd; | 1526 | hostdata->connected = cmd; |
| 1536 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 1527 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
| @@ -1583,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase | |||
| 1583 | NCR5380_setup(instance); | 1574 | NCR5380_setup(instance); |
| 1584 | 1575 | ||
| 1585 | if (!(p & SR_IO)) | 1576 | if (!(p & SR_IO)) |
| 1586 | dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c)); | 1577 | dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c); |
| 1587 | else | 1578 | else |
| 1588 | dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c)); | 1579 | dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c); |
| 1589 | 1580 | ||
| 1590 | /* | 1581 | /* |
| 1591 | * The NCR5380 chip will only drive the SCSI bus when the | 1582 | * The NCR5380 chip will only drive the SCSI bus when the |
| @@ -1620,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase | |||
| 1620 | break; | 1611 | break; |
| 1621 | } | 1612 | } |
| 1622 | 1613 | ||
| 1623 | dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no)); | 1614 | dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no); |
| 1624 | 1615 | ||
| 1625 | /* Check for phase mismatch */ | 1616 | /* Check for phase mismatch */ |
| 1626 | if ((tmp & PHASE_MASK) != p) { | 1617 | if ((tmp & PHASE_MASK) != p) { |
| 1627 | dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no)); | 1618 | dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no); |
| 1628 | NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance); | 1619 | NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance); |
| 1629 | break; | 1620 | break; |
| 1630 | } | 1621 | } |
| @@ -1660,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase | |||
| 1660 | 1651 | ||
| 1661 | /* FIXME - if this fails bus reset ?? */ | 1652 | /* FIXME - if this fails bus reset ?? */ |
| 1662 | NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ); | 1653 | NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ); |
| 1663 | dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no)); | 1654 | dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no); |
| 1664 | 1655 | ||
| 1665 | /* | 1656 | /* |
| 1666 | * We have several special cases to consider during REQ/ACK handshaking : | 1657 | * We have several special cases to consider during REQ/ACK handshaking : |
| @@ -1681,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase | |||
| 1681 | } | 1672 | } |
| 1682 | } while (--c); | 1673 | } while (--c); |
| 1683 | 1674 | ||
| 1684 | dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c)); | 1675 | dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c); |
| 1685 | 1676 | ||
| 1686 | *count = c; | 1677 | *count = c; |
| 1687 | *data = d; | 1678 | *data = d; |
| @@ -1828,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 1828 | c -= 2; | 1819 | c -= 2; |
| 1829 | } | 1820 | } |
| 1830 | #endif | 1821 | #endif |
| 1831 | dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d)); | 1822 | dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d); |
| 1832 | hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); | 1823 | hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); |
| 1833 | #endif | 1824 | #endif |
| 1834 | 1825 | ||
| @@ -1857,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 1857 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE); | 1848 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE); |
| 1858 | #endif /* def REAL_DMA */ | 1849 | #endif /* def REAL_DMA */ |
| 1859 | 1850 | ||
| 1860 | dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG))); | 1851 | dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)); |
| 1861 | 1852 | ||
| 1862 | /* | 1853 | /* |
| 1863 | * On the PAS16 at least I/O recovery delays are not needed here. | 1854 | * On the PAS16 at least I/O recovery delays are not needed here. |
| @@ -1934,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 1934 | } | 1925 | } |
| 1935 | } | 1926 | } |
| 1936 | 1927 | ||
| 1937 | dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG))); | 1928 | dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)); |
| 1938 | 1929 | ||
| 1939 | NCR5380_write(MODE_REG, MR_BASE); | 1930 | NCR5380_write(MODE_REG, MR_BASE); |
| 1940 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1931 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| @@ -1948,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 1948 | #ifdef READ_OVERRUNS | 1939 | #ifdef READ_OVERRUNS |
| 1949 | if (*phase == p && (p & SR_IO) && residue == 0) { | 1940 | if (*phase == p && (p & SR_IO) && residue == 0) { |
| 1950 | if (overrun) { | 1941 | if (overrun) { |
| 1951 | dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n")); | 1942 | dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); |
| 1952 | **data = saved_data; | 1943 | **data = saved_data; |
| 1953 | *data += 1; | 1944 | *data += 1; |
| 1954 | *count -= 1; | 1945 | *count -= 1; |
| @@ -1957,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 1957 | printk("No overrun??\n"); | 1948 | printk("No overrun??\n"); |
| 1958 | cnt = toPIO = 2; | 1949 | cnt = toPIO = 2; |
| 1959 | } | 1950 | } |
| 1960 | dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data)); | 1951 | dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data); |
| 1961 | NCR5380_transfer_pio(instance, phase, &cnt, data); | 1952 | NCR5380_transfer_pio(instance, phase, &cnt, data); |
| 1962 | *count -= toPIO - cnt; | 1953 | *count -= toPIO - cnt; |
| 1963 | } | 1954 | } |
| 1964 | #endif | 1955 | #endif |
| 1965 | 1956 | ||
| 1966 | dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count))); | 1957 | dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)); |
| 1967 | return 0; | 1958 | return 0; |
| 1968 | 1959 | ||
| 1969 | #elif defined(REAL_DMA) | 1960 | #elif defined(REAL_DMA) |
| @@ -2013,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 2013 | foo = NCR5380_pwrite(instance, d, c); | 2004 | foo = NCR5380_pwrite(instance, d, c); |
| 2014 | #else | 2005 | #else |
| 2015 | int timeout; | 2006 | int timeout; |
| 2016 | dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c)); | 2007 | dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c); |
| 2017 | if (!(foo = NCR5380_pwrite(instance, d, c))) { | 2008 | if (!(foo = NCR5380_pwrite(instance, d, c))) { |
| 2018 | /* | 2009 | /* |
| 2019 | * Wait for the last byte to be sent. If REQ is being asserted for | 2010 | * Wait for the last byte to be sent. If REQ is being asserted for |
| @@ -2024,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 2024 | while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)); | 2015 | while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)); |
| 2025 | 2016 | ||
| 2026 | if (!timeout) | 2017 | if (!timeout) |
| 2027 | dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no)); | 2018 | dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no); |
| 2028 | 2019 | ||
| 2029 | if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) { | 2020 | if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) { |
| 2030 | hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT; | 2021 | hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT; |
| 2031 | if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) { | 2022 | if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) { |
| 2032 | hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT; | 2023 | hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT; |
| 2033 | dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no)); | 2024 | dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no); |
| 2034 | } | 2025 | } |
| 2035 | } | 2026 | } |
| 2036 | } else { | 2027 | } else { |
| 2037 | dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n")); | 2028 | dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n"); |
| 2038 | while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)); | 2029 | while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)); |
| 2039 | dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n")); | 2030 | dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n"); |
| 2040 | } | 2031 | } |
| 2041 | } | 2032 | } |
| 2042 | #endif | 2033 | #endif |
| @@ -2045,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase | |||
| 2045 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2036 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2046 | 2037 | ||
| 2047 | if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) { | 2038 | if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) { |
| 2048 | dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n")); | 2039 | dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n"); |
| 2049 | if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) { | 2040 | if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) { |
| 2050 | dprintk(NDEBUG_C400_PWRITE, ("53C400w: got it, reading reset interrupt reg\n")); | 2041 | dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n"); |
| 2051 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 2042 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 2052 | } else { | 2043 | } else { |
| 2053 | printk("53C400w: IRQ NOT THERE!\n"); | 2044 | printk("53C400w: IRQ NOT THERE!\n"); |
| @@ -2139,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2139 | --cmd->SCp.buffers_residual; | 2130 | --cmd->SCp.buffers_residual; |
| 2140 | cmd->SCp.this_residual = cmd->SCp.buffer->length; | 2131 | cmd->SCp.this_residual = cmd->SCp.buffer->length; |
| 2141 | cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); | 2132 | cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); |
| 2142 | dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual)); | 2133 | dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual); |
| 2143 | } | 2134 | } |
| 2144 | /* | 2135 | /* |
| 2145 | * The preferred transfer method is going to be | 2136 | * The preferred transfer method is going to be |
| @@ -2219,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2219 | case LINKED_FLG_CMD_COMPLETE: | 2210 | case LINKED_FLG_CMD_COMPLETE: |
| 2220 | /* Accept message by clearing ACK */ | 2211 | /* Accept message by clearing ACK */ |
| 2221 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2212 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2222 | dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun)); | 2213 | dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2223 | /* | 2214 | /* |
| 2224 | * Sanity check : A linked command should only terminate with | 2215 | * Sanity check : A linked command should only terminate with |
| 2225 | * one of these messages if there are more linked commands | 2216 | * one of these messages if there are more linked commands |
| @@ -2235,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2235 | /* The next command is still part of this process */ | 2226 | /* The next command is still part of this process */ |
| 2236 | cmd->next_link->tag = cmd->tag; | 2227 | cmd->next_link->tag = cmd->tag; |
| 2237 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2228 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
| 2238 | dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun)); | 2229 | dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2239 | collect_stats(hostdata, cmd); | 2230 | collect_stats(hostdata, cmd); |
| 2240 | cmd->scsi_done(cmd); | 2231 | cmd->scsi_done(cmd); |
| 2241 | cmd = hostdata->connected; | 2232 | cmd = hostdata->connected; |
| @@ -2247,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2247 | sink = 1; | 2238 | sink = 1; |
| 2248 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2239 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2249 | hostdata->connected = NULL; | 2240 | hostdata->connected = NULL; |
| 2250 | dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun)); | 2241 | dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2251 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 2242 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); |
| 2252 | 2243 | ||
| 2253 | /* | 2244 | /* |
| @@ -2281,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2281 | if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { | 2272 | if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { |
| 2282 | scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); | 2273 | scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); |
| 2283 | 2274 | ||
| 2284 | dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no)); | 2275 | dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no); |
| 2285 | 2276 | ||
| 2286 | LIST(cmd, hostdata->issue_queue); | 2277 | LIST(cmd, hostdata->issue_queue); |
| 2287 | cmd->host_scribble = (unsigned char *) | 2278 | cmd->host_scribble = (unsigned char *) |
| 2288 | hostdata->issue_queue; | 2279 | hostdata->issue_queue; |
| 2289 | hostdata->issue_queue = (Scsi_Cmnd *) cmd; | 2280 | hostdata->issue_queue = (Scsi_Cmnd *) cmd; |
| 2290 | dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no)); | 2281 | dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no); |
| 2291 | } else | 2282 | } else |
| 2292 | #endif /* def AUTOSENSE */ | 2283 | #endif /* def AUTOSENSE */ |
| 2293 | { | 2284 | { |
| @@ -2327,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2327 | hostdata->disconnected_queue; | 2318 | hostdata->disconnected_queue; |
| 2328 | hostdata->connected = NULL; | 2319 | hostdata->connected = NULL; |
| 2329 | hostdata->disconnected_queue = cmd; | 2320 | hostdata->disconnected_queue = cmd; |
| 2330 | dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun)); | 2321 | dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2331 | /* | 2322 | /* |
| 2332 | * Restore phase bits to 0 so an interrupted selection, | 2323 | * Restore phase bits to 0 so an interrupted selection, |
| 2333 | * arbitration can resume. | 2324 | * arbitration can resume. |
| @@ -2373,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2373 | extended_msg[0] = EXTENDED_MESSAGE; | 2364 | extended_msg[0] = EXTENDED_MESSAGE; |
| 2374 | /* Accept first byte by clearing ACK */ | 2365 | /* Accept first byte by clearing ACK */ |
| 2375 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2366 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2376 | dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no)); | 2367 | dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no); |
| 2377 | 2368 | ||
| 2378 | len = 2; | 2369 | len = 2; |
| 2379 | data = extended_msg + 1; | 2370 | data = extended_msg + 1; |
| 2380 | phase = PHASE_MSGIN; | 2371 | phase = PHASE_MSGIN; |
| 2381 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2372 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2382 | 2373 | ||
| 2383 | dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2])); | 2374 | dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]); |
| 2384 | 2375 | ||
| 2385 | if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) { | 2376 | if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) { |
| 2386 | /* Accept third byte by clearing ACK */ | 2377 | /* Accept third byte by clearing ACK */ |
| @@ -2390,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2390 | phase = PHASE_MSGIN; | 2381 | phase = PHASE_MSGIN; |
| 2391 | 2382 | ||
| 2392 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2383 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2393 | dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len)); | 2384 | dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len); |
| 2394 | 2385 | ||
| 2395 | switch (extended_msg[2]) { | 2386 | switch (extended_msg[2]) { |
| 2396 | case EXTENDED_SDTR: | 2387 | case EXTENDED_SDTR: |
| @@ -2456,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2456 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2447 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2457 | if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) { | 2448 | if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) { |
| 2458 | NCR5380_set_timer(hostdata, USLEEP_SLEEP); | 2449 | NCR5380_set_timer(hostdata, USLEEP_SLEEP); |
| 2459 | dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); | 2450 | dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires); |
| 2460 | return; | 2451 | return; |
| 2461 | } | 2452 | } |
| 2462 | break; | 2453 | break; |
| @@ -2468,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2468 | break; | 2459 | break; |
| 2469 | default: | 2460 | default: |
| 2470 | printk("scsi%d : unknown phase\n", instance->host_no); | 2461 | printk("scsi%d : unknown phase\n", instance->host_no); |
| 2471 | NCR5380_dprint(NDEBUG_ALL, instance); | 2462 | NCR5380_dprint(NDEBUG_ANY, instance); |
| 2472 | } /* switch(phase) */ | 2463 | } /* switch(phase) */ |
| 2473 | } /* if (tmp * SR_REQ) */ | 2464 | } /* if (tmp * SR_REQ) */ |
| 2474 | else { | 2465 | else { |
| @@ -2476,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2476 | */ | 2467 | */ |
| 2477 | if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) { | 2468 | if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) { |
| 2478 | NCR5380_set_timer(hostdata, USLEEP_SLEEP); | 2469 | NCR5380_set_timer(hostdata, USLEEP_SLEEP); |
| 2479 | dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); | 2470 | dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires); |
| 2480 | return; | 2471 | return; |
| 2481 | } | 2472 | } |
| 2482 | } | 2473 | } |
| @@ -2517,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { | |||
| 2517 | hostdata->restart_select = 1; | 2508 | hostdata->restart_select = 1; |
| 2518 | 2509 | ||
| 2519 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); | 2510 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); |
| 2520 | dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no)); | 2511 | dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no); |
| 2521 | 2512 | ||
| 2522 | /* | 2513 | /* |
| 2523 | * At this point, we have detected that our SCSI ID is on the bus, | 2514 | * At this point, we have detected that our SCSI ID is on the bus, |
| @@ -2597,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { | |||
| 2597 | do_abort(instance); | 2588 | do_abort(instance); |
| 2598 | } else { | 2589 | } else { |
| 2599 | hostdata->connected = tmp; | 2590 | hostdata->connected = tmp; |
| 2600 | dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag)); | 2591 | dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag); |
| 2601 | } | 2592 | } |
| 2602 | } | 2593 | } |
| 2603 | 2594 | ||
| @@ -2682,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { | |||
| 2682 | 2673 | ||
| 2683 | NCR5380_setup(instance); | 2674 | NCR5380_setup(instance); |
| 2684 | 2675 | ||
| 2685 | dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", instance->host_no)); | 2676 | dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no); |
| 2686 | dprintk(NDEBUG_ABORT, (" basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG))); | 2677 | dprintk(NDEBUG_ABORT, " basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); |
| 2687 | 2678 | ||
| 2688 | #if 0 | 2679 | #if 0 |
| 2689 | /* | 2680 | /* |
| @@ -2693,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { | |||
| 2693 | */ | 2684 | */ |
| 2694 | 2685 | ||
| 2695 | if (hostdata->connected == cmd) { | 2686 | if (hostdata->connected == cmd) { |
| 2696 | dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no)); | 2687 | dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no); |
| 2697 | hostdata->aborted = 1; | 2688 | hostdata->aborted = 1; |
| 2698 | /* | 2689 | /* |
| 2699 | * We should perform BSY checking, and make sure we haven't slipped | 2690 | * We should perform BSY checking, and make sure we haven't slipped |
| @@ -2721,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { | |||
| 2721 | * from the issue queue. | 2712 | * from the issue queue. |
| 2722 | */ | 2713 | */ |
| 2723 | 2714 | ||
| 2724 | dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no)); | 2715 | dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no); |
| 2725 | for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) | 2716 | for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) |
| 2726 | if (cmd == tmp) { | 2717 | if (cmd == tmp) { |
| 2727 | REMOVE(5, *prev, tmp, tmp->host_scribble); | 2718 | REMOVE(5, *prev, tmp, tmp->host_scribble); |
| 2728 | (*prev) = (Scsi_Cmnd *) tmp->host_scribble; | 2719 | (*prev) = (Scsi_Cmnd *) tmp->host_scribble; |
| 2729 | tmp->host_scribble = NULL; | 2720 | tmp->host_scribble = NULL; |
| 2730 | tmp->result = DID_ABORT << 16; | 2721 | tmp->result = DID_ABORT << 16; |
| 2731 | dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no)); | 2722 | dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no); |
| 2732 | tmp->scsi_done(tmp); | 2723 | tmp->scsi_done(tmp); |
| 2733 | return SUCCESS; | 2724 | return SUCCESS; |
| 2734 | } | 2725 | } |
| @@ -2750,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { | |||
| 2750 | */ | 2741 | */ |
| 2751 | 2742 | ||
| 2752 | if (hostdata->connected) { | 2743 | if (hostdata->connected) { |
| 2753 | dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no)); | 2744 | dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no); |
| 2754 | return FAILED; | 2745 | return FAILED; |
| 2755 | } | 2746 | } |
| 2756 | /* | 2747 | /* |
| @@ -2780,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { | |||
| 2780 | 2771 | ||
| 2781 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble) | 2772 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble) |
| 2782 | if (cmd == tmp) { | 2773 | if (cmd == tmp) { |
| 2783 | dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no)); | 2774 | dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no); |
| 2784 | 2775 | ||
| 2785 | if (NCR5380_select(instance, cmd, (int) cmd->tag)) | 2776 | if (NCR5380_select(instance, cmd, (int) cmd->tag)) |
| 2786 | return FAILED; | 2777 | return FAILED; |
| 2787 | dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no)); | 2778 | dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no); |
| 2788 | 2779 | ||
| 2789 | do_abort(instance); | 2780 | do_abort(instance); |
| 2790 | 2781 | ||
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 14964d0a0e9d..c79ddfa6f53c 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h | |||
| @@ -21,10 +21,6 @@ | |||
| 21 | * 1+ (800) 334-5454 | 21 | * 1+ (800) 334-5454 |
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | /* | ||
| 25 | * $Log: NCR5380.h,v $ | ||
| 26 | */ | ||
| 27 | |||
| 28 | #ifndef NCR5380_H | 24 | #ifndef NCR5380_H |
| 29 | #define NCR5380_H | 25 | #define NCR5380_H |
| 30 | 26 | ||
| @@ -60,6 +56,9 @@ | |||
| 60 | #define NDEBUG_C400_PREAD 0x100000 | 56 | #define NDEBUG_C400_PREAD 0x100000 |
| 61 | #define NDEBUG_C400_PWRITE 0x200000 | 57 | #define NDEBUG_C400_PWRITE 0x200000 |
| 62 | #define NDEBUG_LISTS 0x400000 | 58 | #define NDEBUG_LISTS 0x400000 |
| 59 | #define NDEBUG_ABORT 0x800000 | ||
| 60 | #define NDEBUG_TAGS 0x1000000 | ||
| 61 | #define NDEBUG_MERGING 0x2000000 | ||
| 63 | 62 | ||
| 64 | #define NDEBUG_ANY 0xFFFFFFFFUL | 63 | #define NDEBUG_ANY 0xFFFFFFFFUL |
| 65 | 64 | ||
| @@ -292,9 +291,24 @@ struct NCR5380_hostdata { | |||
| 292 | 291 | ||
| 293 | #ifdef __KERNEL__ | 292 | #ifdef __KERNEL__ |
| 294 | 293 | ||
| 295 | #define dprintk(a,b) do {} while(0) | 294 | #ifndef NDEBUG |
| 296 | #define NCR5380_dprint(a,b) do {} while(0) | 295 | #define NDEBUG (0) |
| 297 | #define NCR5380_dprint_phase(a,b) do {} while(0) | 296 | #endif |
| 297 | |||
| 298 | #define dprintk(flg, fmt, ...) \ | ||
| 299 | do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0) | ||
| 300 | |||
| 301 | #if NDEBUG | ||
| 302 | #define NCR5380_dprint(flg, arg) \ | ||
| 303 | do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0) | ||
| 304 | #define NCR5380_dprint_phase(flg, arg) \ | ||
| 305 | do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0) | ||
| 306 | static void NCR5380_print_phase(struct Scsi_Host *instance); | ||
| 307 | static void NCR5380_print(struct Scsi_Host *instance); | ||
| 308 | #else | ||
| 309 | #define NCR5380_dprint(flg, arg) do {} while (0) | ||
| 310 | #define NCR5380_dprint_phase(flg, arg) do {} while (0) | ||
| 311 | #endif | ||
| 298 | 312 | ||
| 299 | #if defined(AUTOPROBE_IRQ) | 313 | #if defined(AUTOPROBE_IRQ) |
| 300 | static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); | 314 | static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); |
| @@ -307,10 +321,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id); | |||
| 307 | #endif | 321 | #endif |
| 308 | static void NCR5380_main(struct work_struct *work); | 322 | static void NCR5380_main(struct work_struct *work); |
| 309 | static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); | 323 | static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); |
| 310 | #ifdef NDEBUG | ||
| 311 | static void NCR5380_print_phase(struct Scsi_Host *instance); | ||
| 312 | static void NCR5380_print(struct Scsi_Host *instance); | ||
| 313 | #endif | ||
| 314 | static int NCR5380_abort(Scsi_Cmnd * cmd); | 324 | static int NCR5380_abort(Scsi_Cmnd * cmd); |
| 315 | static int NCR5380_bus_reset(Scsi_Cmnd * cmd); | 325 | static int NCR5380_bus_reset(Scsi_Cmnd * cmd); |
| 316 | static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); | 326 | static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
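The NCR5380.h hunks above replace the stubbed-out dprintk()/NCR5380_dprint() macros with a variadic dprintk() gated on a compile-time NDEBUG mask, which is why every call site in NCR5380.c above loses its extra set of parentheses and why the atari driver's private TAG_PRINTK/MER_PRINTK-style wrappers further down can become plain dprintk(NDEBUG_*) calls. A minimal standalone sketch of the resulting pattern — the flag values are the ones from the hunk above, the NDEBUG mask chosen here is only an example, and printf() stands in for the kernel's pr_debug() so the sketch compiles outside the kernel:

    #include <stdio.h>

    /* Flag values from the NCR5380.h hunk above; a board driver defines
     * NDEBUG before including the header, and left undefined it defaults
     * to 0 so every dprintk() compiles away. */
    #define NDEBUG_ABORT 0x800000
    #define NDEBUG_TAGS  0x1000000
    #define NDEBUG       (NDEBUG_ABORT | NDEBUG_TAGS)   /* example mask only */

    #define dprintk(flg, fmt, ...) \
            do { if ((NDEBUG) & (flg)) printf(fmt, ## __VA_ARGS__); } while (0)

    int main(void)
    {
            int host_no = 0;
            unsigned long expires = 123456UL;

            /* New-style call sites: no extra parentheses around the argument
             * list (the old two-argument macro forced "(fmt, args...)"), and
             * unsigned long arguments use %lu where the old code had "%ul". */
            dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", host_no);
            dprintk(NDEBUG_TAGS, "scsi%d : next poll at jiffies %lu\n",
                    host_no, expires);

            /* Disabled class (NDEBUG_LISTS, also from the hunk above): the
             * mask test is a compile-time constant, so this statement is
             * elided entirely by the compiler. */
            dprintk(0x400000, "scsi%d : never printed in this build\n", host_no);

            return 0;
    }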
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c index 14b5f8d0e7f4..cc9bd26f5d1a 100644 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c | |||
| @@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd) | |||
| 827 | for (bit = 0; bit < 8; bit++) { | 827 | for (bit = 0; bit < 8; bit++) { |
| 828 | 828 | ||
| 829 | if ((pci_status[i] & (0x1 << bit)) != 0) { | 829 | if ((pci_status[i] & (0x1 << bit)) != 0) { |
| 830 | static const char *s; | 830 | const char *s; |
| 831 | 831 | ||
| 832 | s = pci_status_strings[bit]; | 832 | s = pci_status_strings[bit]; |
| 833 | if (i == 7/*TARG*/ && bit == 3) | 833 | if (i == 7/*TARG*/ && bit == 3) |
| @@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) | |||
| 887 | 887 | ||
| 888 | for (bit = 0; bit < 8; bit++) { | 888 | for (bit = 0; bit < 8; bit++) { |
| 889 | 889 | ||
| 890 | if ((split_status[i] & (0x1 << bit)) != 0) { | 890 | if ((split_status[i] & (0x1 << bit)) != 0) |
| 891 | static const char *s; | 891 | printk(split_status_strings[bit], ahd_name(ahd), |
| 892 | |||
| 893 | s = split_status_strings[bit]; | ||
| 894 | printk(s, ahd_name(ahd), | ||
| 895 | split_status_source[i]); | 892 | split_status_source[i]); |
| 896 | } | ||
| 897 | 893 | ||
| 898 | if (i > 1) | 894 | if (i > 1) |
| 899 | continue; | 895 | continue; |
| 900 | 896 | ||
| 901 | if ((sg_split_status[i] & (0x1 << bit)) != 0) { | 897 | if ((sg_split_status[i] & (0x1 << bit)) != 0) |
| 902 | static const char *s; | 898 | printk(split_status_strings[bit], ahd_name(ahd), "SG"); |
| 903 | |||
| 904 | s = split_status_strings[bit]; | ||
| 905 | printk(s, ahd_name(ahd), "SG"); | ||
| 906 | } | ||
| 907 | } | 899 | } |
| 908 | } | 900 | } |
| 909 | /* | 901 | /* |
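The aic79xx_pci.c hunks above drop the "static const char *s" temporaries used to hold a format string for one printk() call. A small standalone sketch of why that is safe — the table entries here are cut down and purely illustrative, not the driver's real pci_status_strings[]:

    #include <stdio.h>

    /* Illustrative entries only; the real table in aic79xx_pci.c has eight
     * longer strings per status register. */
    static const char *const pci_status_strings[] = {
            "%s: data parity error detected\n",
            "%s: signalled system error\n",
    };

    int main(void)
    {
            int bit;

            for (bit = 0; bit < 2; bit++) {
                    /* The removed code was:
                     *      static const char *s;
                     *      s = pci_status_strings[bit];
                     *      printk(s, ...);
                     * "static" only gave the pointer itself static storage;
                     * it was reassigned on every pass anyway, so the
                     * temporary buys nothing and the table can be indexed
                     * directly in the print call. */
                    printf(pci_status_strings[bit], "ahd0");
            }
            return 0;
    }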
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 059ff477a398..2e797a367608 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c | |||
| @@ -62,13 +62,6 @@ | |||
| 62 | */ | 62 | */ |
| 63 | #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE | 63 | #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE |
| 64 | /* | 64 | /* |
| 65 | * SCSI-II Linked command support. | ||
| 66 | * | ||
| 67 | * The higher level code doesn't support linked commands yet, and so the option | ||
| 68 | * is undef'd here. | ||
| 69 | */ | ||
| 70 | #undef CONFIG_SCSI_ACORNSCSI_LINK | ||
| 71 | /* | ||
| 72 | * SCSI-II Synchronous transfer support. | 65 | * SCSI-II Synchronous transfer support. |
| 73 | * | 66 | * |
| 74 | * Tried and tested... | 67 | * Tried and tested... |
| @@ -160,10 +153,6 @@ | |||
| 160 | #error "Yippee! ABORT TAG is now defined! Remove this error!" | 153 | #error "Yippee! ABORT TAG is now defined! Remove this error!" |
| 161 | #endif | 154 | #endif |
| 162 | 155 | ||
| 163 | #ifdef CONFIG_SCSI_ACORNSCSI_LINK | ||
| 164 | #error SCSI2 LINKed commands not supported (yet)! | ||
| 165 | #endif | ||
| 166 | |||
| 167 | #ifdef USE_DMAC | 156 | #ifdef USE_DMAC |
| 168 | /* | 157 | /* |
| 169 | * DMAC setup parameters | 158 | * DMAC setup parameters |
| @@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host) | |||
| 1668 | } | 1657 | } |
| 1669 | break; | 1658 | break; |
| 1670 | 1659 | ||
| 1671 | #ifdef CONFIG_SCSI_ACORNSCSI_LINK | ||
| 1672 | case LINKED_CMD_COMPLETE: | ||
| 1673 | case LINKED_FLG_CMD_COMPLETE: | ||
| 1674 | /* | ||
| 1675 | * We don't support linked commands yet | ||
| 1676 | */ | ||
| 1677 | if (0) { | ||
| 1678 | #if (DEBUG & DEBUG_LINK) | ||
| 1679 | printk("scsi%d.%c: lun %d tag %d linked command complete\n", | ||
| 1680 | host->host->host_no, acornscsi_target(host), host->SCpnt->tag); | ||
| 1681 | #endif | ||
| 1682 | /* | ||
| 1683 | * A linked command should only terminate with one of these messages | ||
| 1684 | * if there are more linked commands available. | ||
| 1685 | */ | ||
| 1686 | if (!host->SCpnt->next_link) { | ||
| 1687 | printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n", | ||
| 1688 | instance->host_no, acornscsi_target(host), host->SCpnt->tag); | ||
| 1689 | acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); | ||
| 1690 | msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); | ||
| 1691 | } else { | ||
| 1692 | struct scsi_cmnd *SCpnt = host->SCpnt; | ||
| 1693 | |||
| 1694 | acornscsi_dma_cleanup(host); | ||
| 1695 | |||
| 1696 | host->SCpnt = host->SCpnt->next_link; | ||
| 1697 | host->SCpnt->tag = SCpnt->tag; | ||
| 1698 | SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status; | ||
| 1699 | SCpnt->done(SCpnt); | ||
| 1700 | |||
| 1701 | /* initialise host->SCpnt->SCp */ | ||
| 1702 | } | ||
| 1703 | break; | ||
| 1704 | } | ||
| 1705 | #endif | ||
| 1706 | |||
| 1707 | default: /* reject message */ | 1660 | default: /* reject message */ |
| 1708 | printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n", | 1661 | printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n", |
| 1709 | host->host->host_no, acornscsi_target(host), | 1662 | host->host->host_no, acornscsi_target(host), |
| @@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host) | |||
| 2825 | #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE | 2778 | #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE |
| 2826 | " TAG" | 2779 | " TAG" |
| 2827 | #endif | 2780 | #endif |
| 2828 | #ifdef CONFIG_SCSI_ACORNSCSI_LINK | ||
| 2829 | " LINK" | ||
| 2830 | #endif | ||
| 2831 | #if (DEBUG & DEBUG_NO_WRITE) | 2781 | #if (DEBUG & DEBUG_NO_WRITE) |
| 2832 | " NOWRITE (" __stringify(NO_WRITE) ")" | 2782 | " NOWRITE (" __stringify(NO_WRITE) ")" |
| 2833 | #endif | 2783 | #endif |
| @@ -2851,9 +2801,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
| 2851 | #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE | 2801 | #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE |
| 2852 | " TAG" | 2802 | " TAG" |
| 2853 | #endif | 2803 | #endif |
| 2854 | #ifdef CONFIG_SCSI_ACORNSCSI_LINK | ||
| 2855 | " LINK" | ||
| 2856 | #endif | ||
| 2857 | #if (DEBUG & DEBUG_NO_WRITE) | 2804 | #if (DEBUG & DEBUG_NO_WRITE) |
| 2858 | " NOWRITE (" __stringify(NO_WRITE) ")" | 2805 | " NOWRITE (" __stringify(NO_WRITE) ")" |
| 2859 | #endif | 2806 | #endif |
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index f8e060900052..8ef810a4476e 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c | |||
| @@ -36,9 +36,6 @@ | |||
| 36 | void __iomem *base; \ | 36 | void __iomem *base; \ |
| 37 | void __iomem *dma | 37 | void __iomem *dma |
| 38 | 38 | ||
| 39 | #define BOARD_NORMAL 0 | ||
| 40 | #define BOARD_NCR53C400 1 | ||
| 41 | |||
| 42 | #include "../NCR5380.h" | 39 | #include "../NCR5380.h" |
| 43 | 40 | ||
| 44 | void cumanascsi_setup(char *str, int *ints) | 41 | void cumanascsi_setup(char *str, int *ints) |
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 4266eef8aca1..188e734c7ff0 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c | |||
| @@ -37,9 +37,6 @@ | |||
| 37 | #define NCR5380_implementation_fields \ | 37 | #define NCR5380_implementation_fields \ |
| 38 | void __iomem *base | 38 | void __iomem *base |
| 39 | 39 | ||
| 40 | #define BOARD_NORMAL 0 | ||
| 41 | #define BOARD_NCR53C400 1 | ||
| 42 | |||
| 43 | #include "../NCR5380.h" | 40 | #include "../NCR5380.h" |
| 44 | 41 | ||
| 45 | #undef START_DMA_INITIATOR_RECEIVE_REG | 42 | #undef START_DMA_INITIATOR_RECEIVE_REG |
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 0f3cdbc80ba6..1814aa20b724 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c | |||
| @@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) | |||
| 370 | return 0; | 370 | return 0; |
| 371 | if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= | 371 | if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= |
| 372 | TagAlloc[cmd->device->id][cmd->device->lun].queue_size) { | 372 | TagAlloc[cmd->device->id][cmd->device->lun].queue_size) { |
| 373 | TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n", | 373 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", |
| 374 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 374 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
| 375 | return 1; | 375 | return 1; |
| 376 | } | 376 | } |
| @@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) | |||
| 394 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { | 394 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { |
| 395 | cmd->tag = TAG_NONE; | 395 | cmd->tag = TAG_NONE; |
| 396 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 396 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
| 397 | TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged " | 397 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " |
| 398 | "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun); | 398 | "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun); |
| 399 | } else { | 399 | } else { |
| 400 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 400 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
| @@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) | |||
| 402 | cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); | 402 | cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); |
| 403 | set_bit(cmd->tag, ta->allocated); | 403 | set_bit(cmd->tag, ta->allocated); |
| 404 | ta->nr_allocated++; | 404 | ta->nr_allocated++; |
| 405 | TAG_PRINTK("scsi%d: using tag %d for target %d lun %d " | 405 | dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " |
| 406 | "(now %d tags in use)\n", | 406 | "(now %d tags in use)\n", |
| 407 | H_NO(cmd), cmd->tag, cmd->device->id, | 407 | H_NO(cmd), cmd->tag, cmd->device->id, |
| 408 | cmd->device->lun, ta->nr_allocated); | 408 | cmd->device->lun, ta->nr_allocated); |
| @@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) | |||
| 420 | 420 | ||
| 421 | if (cmd->tag == TAG_NONE) { | 421 | if (cmd->tag == TAG_NONE) { |
| 422 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 422 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); |
| 423 | TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n", | 423 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", |
| 424 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 424 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
| 425 | } else if (cmd->tag >= MAX_TAGS) { | 425 | } else if (cmd->tag >= MAX_TAGS) { |
| 426 | printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", | 426 | printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", |
| @@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) | |||
| 429 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 429 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
| 430 | clear_bit(cmd->tag, ta->allocated); | 430 | clear_bit(cmd->tag, ta->allocated); |
| 431 | ta->nr_allocated--; | 431 | ta->nr_allocated--; |
| 432 | TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n", | 432 | dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", |
| 433 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun); | 433 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun); |
| 434 | } | 434 | } |
| 435 | } | 435 | } |
| @@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) | |||
| 478 | for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; | 478 | for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; |
| 479 | cmd->SCp.buffers_residual && | 479 | cmd->SCp.buffers_residual && |
| 480 | virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { | 480 | virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { |
| 481 | MER_PRINTK("VTOP(%p) == %08lx -> merging\n", | 481 | dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n", |
| 482 | page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); | 482 | page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); |
| 483 | #if (NDEBUG & NDEBUG_MERGING) | 483 | #if (NDEBUG & NDEBUG_MERGING) |
| 484 | ++cnt; | 484 | ++cnt; |
| @@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) | |||
| 490 | } | 490 | } |
| 491 | #if (NDEBUG & NDEBUG_MERGING) | 491 | #if (NDEBUG & NDEBUG_MERGING) |
| 492 | if (oldlen != cmd->SCp.this_residual) | 492 | if (oldlen != cmd->SCp.this_residual) |
| 493 | MER_PRINTK("merged %d buffers from %p, new length %08x\n", | 493 | dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", |
| 494 | cnt, cmd->SCp.ptr, cmd->SCp.this_residual); | 494 | cnt, cmd->SCp.ptr, cmd->SCp.this_residual); |
| 495 | #endif | 495 | #endif |
| 496 | } | 496 | } |
| @@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) | |||
| 626 | } | 626 | } |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | #else /* !NDEBUG */ | ||
| 630 | |||
| 631 | /* dummies... */ | ||
| 632 | static inline void NCR5380_print(struct Scsi_Host *instance) | ||
| 633 | { | ||
| 634 | }; | ||
| 635 | static inline void NCR5380_print_phase(struct Scsi_Host *instance) | ||
| 636 | { | ||
| 637 | }; | ||
| 638 | |||
| 639 | #endif | 629 | #endif |
| 640 | 630 | ||
| 641 | /* | 631 | /* |
| @@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void) | |||
| 676 | { | 666 | { |
| 677 | static int done = 0; | 667 | static int done = 0; |
| 678 | if (!done) { | 668 | if (!done) { |
| 679 | INI_PRINTK("scsi : NCR5380_all_init()\n"); | 669 | dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); |
| 680 | done = 1; | 670 | done = 1; |
| 681 | } | 671 | } |
| 682 | } | 672 | } |
| @@ -739,8 +729,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance) | |||
| 739 | Scsi_Cmnd *ptr; | 729 | Scsi_Cmnd *ptr; |
| 740 | unsigned long flags; | 730 | unsigned long flags; |
| 741 | 731 | ||
| 742 | NCR_PRINT(NDEBUG_ANY); | 732 | NCR5380_dprint(NDEBUG_ANY, instance); |
| 743 | NCR_PRINT_PHASE(NDEBUG_ANY); | 733 | NCR5380_dprint_phase(NDEBUG_ANY, instance); |
| 744 | 734 | ||
| 745 | hostdata = (struct NCR5380_hostdata *)instance->hostdata; | 735 | hostdata = (struct NCR5380_hostdata *)instance->hostdata; |
| 746 | 736 | ||
| @@ -984,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | |||
| 984 | } | 974 | } |
| 985 | local_irq_restore(flags); | 975 | local_irq_restore(flags); |
| 986 | 976 | ||
| 987 | QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), | 977 | dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), |
| 988 | (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); | 978 | (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); |
| 989 | 979 | ||
| 990 | /* If queue_command() is called from an interrupt (real one or bottom | 980 | /* If queue_command() is called from an interrupt (real one or bottom |
| @@ -1054,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1054 | done = 1; | 1044 | done = 1; |
| 1055 | 1045 | ||
| 1056 | if (!hostdata->connected) { | 1046 | if (!hostdata->connected) { |
| 1057 | MAIN_PRINTK("scsi%d: not connected\n", HOSTNO); | 1047 | dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO); |
| 1058 | /* | 1048 | /* |
| 1059 | * Search through the issue_queue for a command destined | 1049 | * Search through the issue_queue for a command destined |
| 1060 | * for a target that's not busy. | 1050 | * for a target that's not busy. |
| @@ -1107,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1107 | * On failure, we must add the command back to the | 1097 | * On failure, we must add the command back to the |
| 1108 | * issue queue so we can keep trying. | 1098 | * issue queue so we can keep trying. |
| 1109 | */ | 1099 | */ |
| 1110 | MAIN_PRINTK("scsi%d: main(): command for target %d " | 1100 | dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " |
| 1111 | "lun %d removed from issue_queue\n", | 1101 | "lun %d removed from issue_queue\n", |
| 1112 | HOSTNO, tmp->device->id, tmp->device->lun); | 1102 | HOSTNO, tmp->device->id, tmp->device->lun); |
| 1113 | /* | 1103 | /* |
| @@ -1140,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1140 | #endif | 1130 | #endif |
| 1141 | falcon_dont_release--; | 1131 | falcon_dont_release--; |
| 1142 | local_irq_restore(flags); | 1132 | local_irq_restore(flags); |
| 1143 | MAIN_PRINTK("scsi%d: main(): select() failed, " | 1133 | dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " |
| 1144 | "returned to issue_queue\n", HOSTNO); | 1134 | "returned to issue_queue\n", HOSTNO); |
| 1145 | if (hostdata->connected) | 1135 | if (hostdata->connected) |
| 1146 | break; | 1136 | break; |
| @@ -1155,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1155 | #endif | 1145 | #endif |
| 1156 | ) { | 1146 | ) { |
| 1157 | local_irq_restore(flags); | 1147 | local_irq_restore(flags); |
| 1158 | MAIN_PRINTK("scsi%d: main: performing information transfer\n", | 1148 | dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", |
| 1159 | HOSTNO); | 1149 | HOSTNO); |
| 1160 | NCR5380_information_transfer(instance); | 1150 | NCR5380_information_transfer(instance); |
| 1161 | MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); | 1151 | dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); |
| 1162 | done = 0; | 1152 | done = 0; |
| 1163 | } | 1153 | } |
| 1164 | } while (!done); | 1154 | } while (!done); |
| @@ -1204,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) | |||
| 1204 | (BASR_PHASE_MATCH|BASR_ACK)) { | 1194 | (BASR_PHASE_MATCH|BASR_ACK)) { |
| 1205 | saved_data = NCR5380_read(INPUT_DATA_REG); | 1195 | saved_data = NCR5380_read(INPUT_DATA_REG); |
| 1206 | overrun = 1; | 1196 | overrun = 1; |
| 1207 | DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO); | 1197 | dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO); |
| 1208 | } | 1198 | } |
| 1209 | } | 1199 | } |
| 1210 | } | 1200 | } |
| 1211 | 1201 | ||
| 1212 | DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", | 1202 | dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", |
| 1213 | HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), | 1203 | HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), |
| 1214 | NCR5380_read(STATUS_REG)); | 1204 | NCR5380_read(STATUS_REG)); |
| 1215 | 1205 | ||
| @@ -1229,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) | |||
| 1229 | if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { | 1219 | if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { |
| 1230 | cnt = toPIO = atari_read_overruns; | 1220 | cnt = toPIO = atari_read_overruns; |
| 1231 | if (overrun) { | 1221 | if (overrun) { |
| 1232 | DMA_PRINTK("Got an input overrun, using saved byte\n"); | 1222 | dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); |
| 1233 | *(*data)++ = saved_data; | 1223 | *(*data)++ = saved_data; |
| 1234 | (*count)--; | 1224 | (*count)--; |
| 1235 | cnt--; | 1225 | cnt--; |
| 1236 | toPIO--; | 1226 | toPIO--; |
| 1237 | } | 1227 | } |
| 1238 | DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); | 1228 | dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); |
| 1239 | NCR5380_transfer_pio(instance, &p, &cnt, data); | 1229 | NCR5380_transfer_pio(instance, &p, &cnt, data); |
| 1240 | *count -= toPIO - cnt; | 1230 | *count -= toPIO - cnt; |
| 1241 | } | 1231 | } |
| @@ -1261,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
| 1261 | int done = 1, handled = 0; | 1251 | int done = 1, handled = 0; |
| 1262 | unsigned char basr; | 1252 | unsigned char basr; |
| 1263 | 1253 | ||
| 1264 | INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); | 1254 | dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); |
| 1265 | 1255 | ||
| 1266 | /* Look for pending interrupts */ | 1256 | /* Look for pending interrupts */ |
| 1267 | basr = NCR5380_read(BUS_AND_STATUS_REG); | 1257 | basr = NCR5380_read(BUS_AND_STATUS_REG); |
| 1268 | INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); | 1258 | dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); |
| 1269 | /* dispatch to appropriate routine if found and done=0 */ | 1259 | /* dispatch to appropriate routine if found and done=0 */ |
| 1270 | if (basr & BASR_IRQ) { | 1260 | if (basr & BASR_IRQ) { |
| 1271 | NCR_PRINT(NDEBUG_INTR); | 1261 | NCR5380_dprint(NDEBUG_INTR, instance); |
| 1272 | if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { | 1262 | if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { |
| 1273 | done = 0; | 1263 | done = 0; |
| 1274 | ENABLE_IRQ(); | 1264 | ENABLE_IRQ(); |
| 1275 | INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); | 1265 | dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); |
| 1276 | NCR5380_reselect(instance); | 1266 | NCR5380_reselect(instance); |
| 1277 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1267 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1278 | } else if (basr & BASR_PARITY_ERROR) { | 1268 | } else if (basr & BASR_PARITY_ERROR) { |
| 1279 | INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); | 1269 | dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); |
| 1280 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1270 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1281 | } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { | 1271 | } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { |
| 1282 | INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); | 1272 | dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); |
| 1283 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1273 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1284 | } else { | 1274 | } else { |
| 1285 | /* | 1275 | /* |
| @@ -1298,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
| 1298 | ((basr & BASR_END_DMA_TRANSFER) || | 1288 | ((basr & BASR_END_DMA_TRANSFER) || |
| 1299 | !(basr & BASR_PHASE_MATCH))) { | 1289 | !(basr & BASR_PHASE_MATCH))) { |
| 1300 | 1290 | ||
| 1301 | INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); | 1291 | dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); |
| 1302 | NCR5380_dma_complete( instance ); | 1292 | NCR5380_dma_complete( instance ); |
| 1303 | done = 0; | 1293 | done = 0; |
| 1304 | ENABLE_IRQ(); | 1294 | ENABLE_IRQ(); |
| @@ -1323,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
| 1323 | } | 1313 | } |
| 1324 | 1314 | ||
| 1325 | if (!done) { | 1315 | if (!done) { |
| 1326 | INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); | 1316 | dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); |
| 1327 | /* Put a call to NCR5380_main() on the queue... */ | 1317 | /* Put a call to NCR5380_main() on the queue... */ |
| 1328 | queue_main(); | 1318 | queue_main(); |
| 1329 | } | 1319 | } |
| @@ -1396,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1396 | unsigned long flags; | 1386 | unsigned long flags; |
| 1397 | 1387 | ||
| 1398 | hostdata->restart_select = 0; | 1388 | hostdata->restart_select = 0; |
| 1399 | NCR_PRINT(NDEBUG_ARBITRATION); | 1389 | NCR5380_dprint(NDEBUG_ARBITRATION, instance); |
| 1400 | ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, | 1390 | dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, |
| 1401 | instance->this_id); | 1391 | instance->this_id); |
| 1402 | 1392 | ||
| 1403 | /* | 1393 | /* |
| @@ -1442,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1442 | ; | 1432 | ; |
| 1443 | #endif | 1433 | #endif |
| 1444 | 1434 | ||
| 1445 | ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); | 1435 | dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); |
| 1446 | 1436 | ||
| 1447 | if (hostdata->connected) { | 1437 | if (hostdata->connected) { |
| 1448 | NCR5380_write(MODE_REG, MR_BASE); | 1438 | NCR5380_write(MODE_REG, MR_BASE); |
| @@ -1463,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1463 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | 1453 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || |
| 1464 | hostdata->connected) { | 1454 | hostdata->connected) { |
| 1465 | NCR5380_write(MODE_REG, MR_BASE); | 1455 | NCR5380_write(MODE_REG, MR_BASE); |
| 1466 | ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", | 1456 | dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", |
| 1467 | HOSTNO); | 1457 | HOSTNO); |
| 1468 | return -1; | 1458 | return -1; |
| 1469 | } | 1459 | } |
| @@ -1478,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1478 | hostdata->connected) { | 1468 | hostdata->connected) { |
| 1479 | NCR5380_write(MODE_REG, MR_BASE); | 1469 | NCR5380_write(MODE_REG, MR_BASE); |
| 1480 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1470 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 1481 | ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", | 1471 | dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", |
| 1482 | HOSTNO); | 1472 | HOSTNO); |
| 1483 | return -1; | 1473 | return -1; |
| 1484 | } | 1474 | } |
| @@ -1501,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1501 | return -1; | 1491 | return -1; |
| 1502 | } | 1492 | } |
| 1503 | 1493 | ||
| 1504 | ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); | 1494 | dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); |
| 1505 | 1495 | ||
| 1506 | /* | 1496 | /* |
| 1507 | * Now that we have won arbitration, start Selection process, asserting | 1497 | * Now that we have won arbitration, start Selection process, asserting |
| @@ -1561,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1561 | 1551 | ||
| 1562 | udelay(1); | 1552 | udelay(1); |
| 1563 | 1553 | ||
| 1564 | SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); | 1554 | dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); |
| 1565 | 1555 | ||
| 1566 | /* | 1556 | /* |
| 1567 | * The SCSI specification calls for a 250 ms timeout for the actual | 1557 | * The SCSI specification calls for a 250 ms timeout for the actual |
| @@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1617 | printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); | 1607 | printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); |
| 1618 | if (hostdata->restart_select) | 1608 | if (hostdata->restart_select) |
| 1619 | printk(KERN_NOTICE "\trestart select\n"); | 1609 | printk(KERN_NOTICE "\trestart select\n"); |
| 1620 | NCR_PRINT(NDEBUG_ANY); | 1610 | NCR5380_dprint(NDEBUG_ANY, instance); |
| 1621 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1611 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1622 | return -1; | 1612 | return -1; |
| 1623 | } | 1613 | } |
| @@ -1630,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1630 | #endif | 1620 | #endif |
| 1631 | cmd->scsi_done(cmd); | 1621 | cmd->scsi_done(cmd); |
| 1632 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1622 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1633 | SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); | 1623 | dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); |
| 1634 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1624 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1635 | return 0; | 1625 | return 0; |
| 1636 | } | 1626 | } |
| @@ -1656,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1656 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)) | 1646 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)) |
| 1657 | ; | 1647 | ; |
| 1658 | 1648 | ||
| 1659 | SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", | 1649 | dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", |
| 1660 | HOSTNO, cmd->device->id); | 1650 | HOSTNO, cmd->device->id); |
| 1661 | tmp[0] = IDENTIFY(1, cmd->device->lun); | 1651 | tmp[0] = IDENTIFY(1, cmd->device->lun); |
| 1662 | 1652 | ||
| @@ -1676,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | |||
| 1676 | data = tmp; | 1666 | data = tmp; |
| 1677 | phase = PHASE_MSGOUT; | 1667 | phase = PHASE_MSGOUT; |
| 1678 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 1668 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 1679 | SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); | 1669 | dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); |
| 1680 | /* XXX need to handle errors here */ | 1670 | /* XXX need to handle errors here */ |
| 1681 | hostdata->connected = cmd; | 1671 | hostdata->connected = cmd; |
| 1682 | #ifndef SUPPORT_TAGS | 1672 | #ifndef SUPPORT_TAGS |
| @@ -1737,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, | |||
| 1737 | while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) | 1727 | while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) |
| 1738 | ; | 1728 | ; |
| 1739 | 1729 | ||
| 1740 | HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); | 1730 | dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); |
| 1741 | 1731 | ||
| 1742 | /* Check for phase mismatch */ | 1732 | /* Check for phase mismatch */ |
| 1743 | if ((tmp & PHASE_MASK) != p) { | 1733 | if ((tmp & PHASE_MASK) != p) { |
| 1744 | PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); | 1734 | dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); |
| 1745 | NCR_PRINT_PHASE(NDEBUG_PIO); | 1735 | NCR5380_dprint_phase(NDEBUG_PIO, instance); |
| 1746 | break; | 1736 | break; |
| 1747 | } | 1737 | } |
| 1748 | 1738 | ||
| @@ -1764,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, | |||
| 1764 | if (!(p & SR_IO)) { | 1754 | if (!(p & SR_IO)) { |
| 1765 | if (!((p & SR_MSG) && c > 1)) { | 1755 | if (!((p & SR_MSG) && c > 1)) { |
| 1766 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); | 1756 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); |
| 1767 | NCR_PRINT(NDEBUG_PIO); | 1757 | NCR5380_dprint(NDEBUG_PIO, instance); |
| 1768 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 1758 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| 1769 | ICR_ASSERT_DATA | ICR_ASSERT_ACK); | 1759 | ICR_ASSERT_DATA | ICR_ASSERT_ACK); |
| 1770 | } else { | 1760 | } else { |
| 1771 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 1761 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| 1772 | ICR_ASSERT_DATA | ICR_ASSERT_ATN); | 1762 | ICR_ASSERT_DATA | ICR_ASSERT_ATN); |
| 1773 | NCR_PRINT(NDEBUG_PIO); | 1763 | NCR5380_dprint(NDEBUG_PIO, instance); |
| 1774 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 1764 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| 1775 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); | 1765 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); |
| 1776 | } | 1766 | } |
| 1777 | } else { | 1767 | } else { |
| 1778 | NCR_PRINT(NDEBUG_PIO); | 1768 | NCR5380_dprint(NDEBUG_PIO, instance); |
| 1779 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); | 1769 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); |
| 1780 | } | 1770 | } |
| 1781 | 1771 | ||
| 1782 | while (NCR5380_read(STATUS_REG) & SR_REQ) | 1772 | while (NCR5380_read(STATUS_REG) & SR_REQ) |
| 1783 | ; | 1773 | ; |
| 1784 | 1774 | ||
| 1785 | HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); | 1775 | dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); |
| 1786 | 1776 | ||
| 1787 | /* | 1777 | /* |
| 1788 | * We have several special cases to consider during REQ/ACK handshaking : | 1778 | * We have several special cases to consider during REQ/ACK handshaking : |
| @@ -1803,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, | |||
| 1803 | } | 1793 | } |
| 1804 | } while (--c); | 1794 | } while (--c); |
| 1805 | 1795 | ||
| 1806 | PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); | 1796 | dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); |
| 1807 | 1797 | ||
| 1808 | *count = c; | 1798 | *count = c; |
| 1809 | *data = d; | 1799 | *data = d; |
| @@ -1917,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, | |||
| 1917 | if (atari_read_overruns && (p & SR_IO)) | 1907 | if (atari_read_overruns && (p & SR_IO)) |
| 1918 | c -= atari_read_overruns; | 1908 | c -= atari_read_overruns; |
| 1919 | 1909 | ||
| 1920 | DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", | 1910 | dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", |
| 1921 | HOSTNO, (p & SR_IO) ? "reading" : "writing", | 1911 | HOSTNO, (p & SR_IO) ? "reading" : "writing", |
| 1922 | c, (p & SR_IO) ? "to" : "from", d); | 1912 | c, (p & SR_IO) ? "to" : "from", d); |
| 1923 | 1913 | ||
| @@ -1997,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 1997 | phase = (tmp & PHASE_MASK); | 1987 | phase = (tmp & PHASE_MASK); |
| 1998 | if (phase != old_phase) { | 1988 | if (phase != old_phase) { |
| 1999 | old_phase = phase; | 1989 | old_phase = phase; |
| 2000 | NCR_PRINT_PHASE(NDEBUG_INFORMATION); | 1990 | NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); |
| 2001 | } | 1991 | } |
| 2002 | 1992 | ||
| 2003 | if (sink && (phase != PHASE_MSGOUT)) { | 1993 | if (sink && (phase != PHASE_MSGOUT)) { |
| @@ -2039,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2039 | * they are at contiguous physical addresses. | 2029 | * they are at contiguous physical addresses. |
| 2040 | */ | 2030 | */ |
| 2041 | merge_contiguous_buffers(cmd); | 2031 | merge_contiguous_buffers(cmd); |
| 2042 | INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", | 2032 | dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", |
| 2043 | HOSTNO, cmd->SCp.this_residual, | 2033 | HOSTNO, cmd->SCp.this_residual, |
| 2044 | cmd->SCp.buffers_residual); | 2034 | cmd->SCp.buffers_residual); |
| 2045 | } | 2035 | } |
| @@ -2123,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2123 | /* Accept message by clearing ACK */ | 2113 | /* Accept message by clearing ACK */ |
| 2124 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2114 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2125 | 2115 | ||
| 2126 | LNK_PRINTK("scsi%d: target %d lun %d linked command " | 2116 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " |
| 2127 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2117 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2128 | 2118 | ||
| 2129 | /* Enable reselect interrupts */ | 2119 | /* Enable reselect interrupts */ |
| @@ -2148,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2148 | * and don't free it! */ | 2138 | * and don't free it! */ |
| 2149 | cmd->next_link->tag = cmd->tag; | 2139 | cmd->next_link->tag = cmd->tag; |
| 2150 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2140 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
| 2151 | LNK_PRINTK("scsi%d: target %d lun %d linked request " | 2141 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request " |
| 2152 | "done, calling scsi_done().\n", | 2142 | "done, calling scsi_done().\n", |
| 2153 | HOSTNO, cmd->device->id, cmd->device->lun); | 2143 | HOSTNO, cmd->device->id, cmd->device->lun); |
| 2154 | #ifdef NCR5380_STATS | 2144 | #ifdef NCR5380_STATS |
| @@ -2165,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2165 | /* ++guenther: possible race with Falcon locking */ | 2155 | /* ++guenther: possible race with Falcon locking */ |
| 2166 | falcon_dont_release++; | 2156 | falcon_dont_release++; |
| 2167 | hostdata->connected = NULL; | 2157 | hostdata->connected = NULL; |
| 2168 | QU_PRINTK("scsi%d: command for target %d, lun %d " | 2158 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d " |
| 2169 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2159 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2170 | #ifdef SUPPORT_TAGS | 2160 | #ifdef SUPPORT_TAGS |
| 2171 | cmd_free_tag(cmd); | 2161 | cmd_free_tag(cmd); |
| @@ -2179,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2179 | /* ++Andreas: the mid level code knows about | 2169 | /* ++Andreas: the mid level code knows about |
| 2180 | QUEUE_FULL now. */ | 2170 | QUEUE_FULL now. */ |
| 2181 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 2171 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
| 2182 | TAG_PRINTK("scsi%d: target %d lun %d returned " | 2172 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned " |
| 2183 | "QUEUE_FULL after %d commands\n", | 2173 | "QUEUE_FULL after %d commands\n", |
| 2184 | HOSTNO, cmd->device->id, cmd->device->lun, | 2174 | HOSTNO, cmd->device->id, cmd->device->lun, |
| 2185 | ta->nr_allocated); | 2175 | ta->nr_allocated); |
| @@ -2224,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2224 | (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { | 2214 | (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { |
| 2225 | scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); | 2215 | scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); |
| 2226 | 2216 | ||
| 2227 | ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO); | 2217 | dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); |
| 2228 | 2218 | ||
| 2229 | local_irq_save(flags); | 2219 | local_irq_save(flags); |
| 2230 | LIST(cmd,hostdata->issue_queue); | 2220 | LIST(cmd,hostdata->issue_queue); |
| 2231 | SET_NEXT(cmd, hostdata->issue_queue); | 2221 | SET_NEXT(cmd, hostdata->issue_queue); |
| 2232 | hostdata->issue_queue = (Scsi_Cmnd *) cmd; | 2222 | hostdata->issue_queue = (Scsi_Cmnd *) cmd; |
| 2233 | local_irq_restore(flags); | 2223 | local_irq_restore(flags); |
| 2234 | QU_PRINTK("scsi%d: REQUEST SENSE added to head of " | 2224 | dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " |
| 2235 | "issue queue\n", H_NO(cmd)); | 2225 | "issue queue\n", H_NO(cmd)); |
| 2236 | } else | 2226 | } else |
| 2237 | #endif /* def AUTOSENSE */ | 2227 | #endif /* def AUTOSENSE */ |
| @@ -2277,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2277 | cmd->device->tagged_supported = 0; | 2267 | cmd->device->tagged_supported = 0; |
| 2278 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 2268 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
| 2279 | cmd->tag = TAG_NONE; | 2269 | cmd->tag = TAG_NONE; |
| 2280 | TAG_PRINTK("scsi%d: target %d lun %d rejected " | 2270 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected " |
| 2281 | "QUEUE_TAG message; tagged queuing " | 2271 | "QUEUE_TAG message; tagged queuing " |
| 2282 | "disabled\n", | 2272 | "disabled\n", |
| 2283 | HOSTNO, cmd->device->id, cmd->device->lun); | 2273 | HOSTNO, cmd->device->id, cmd->device->lun); |
| @@ -2294,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2294 | hostdata->connected = NULL; | 2284 | hostdata->connected = NULL; |
| 2295 | hostdata->disconnected_queue = cmd; | 2285 | hostdata->disconnected_queue = cmd; |
| 2296 | local_irq_restore(flags); | 2286 | local_irq_restore(flags); |
| 2297 | QU_PRINTK("scsi%d: command for target %d lun %d was " | 2287 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was " |
| 2298 | "moved from connected to the " | 2288 | "moved from connected to the " |
| 2299 | "disconnected_queue\n", HOSTNO, | 2289 | "disconnected_queue\n", HOSTNO, |
| 2300 | cmd->device->id, cmd->device->lun); | 2290 | cmd->device->id, cmd->device->lun); |
| @@ -2344,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2344 | /* Accept first byte by clearing ACK */ | 2334 | /* Accept first byte by clearing ACK */ |
| 2345 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2335 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2346 | 2336 | ||
| 2347 | EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); | 2337 | dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); |
| 2348 | 2338 | ||
| 2349 | len = 2; | 2339 | len = 2; |
| 2350 | data = extended_msg + 1; | 2340 | data = extended_msg + 1; |
| 2351 | phase = PHASE_MSGIN; | 2341 | phase = PHASE_MSGIN; |
| 2352 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2342 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2353 | EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, | 2343 | dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, |
| 2354 | (int)extended_msg[1], (int)extended_msg[2]); | 2344 | (int)extended_msg[1], (int)extended_msg[2]); |
| 2355 | 2345 | ||
| 2356 | if (!len && extended_msg[1] <= | 2346 | if (!len && extended_msg[1] <= |
| @@ -2362,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2362 | phase = PHASE_MSGIN; | 2352 | phase = PHASE_MSGIN; |
| 2363 | 2353 | ||
| 2364 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2354 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2365 | EXT_PRINTK("scsi%d: message received, residual %d\n", | 2355 | dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", |
| 2366 | HOSTNO, len); | 2356 | HOSTNO, len); |
| 2367 | 2357 | ||
| 2368 | switch (extended_msg[2]) { | 2358 | switch (extended_msg[2]) { |
| @@ -2451,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2451 | break; | 2441 | break; |
| 2452 | default: | 2442 | default: |
| 2453 | printk("scsi%d: unknown phase\n", HOSTNO); | 2443 | printk("scsi%d: unknown phase\n", HOSTNO); |
| 2454 | NCR_PRINT(NDEBUG_ANY); | 2444 | NCR5380_dprint(NDEBUG_ANY, instance); |
| 2455 | } /* switch(phase) */ | 2445 | } /* switch(phase) */ |
| 2456 | } /* if (tmp * SR_REQ) */ | 2446 | } /* if (tmp * SR_REQ) */ |
| 2457 | } /* while (1) */ | 2447 | } /* while (1) */ |
| @@ -2493,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) | |||
| 2493 | 2483 | ||
| 2494 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); | 2484 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); |
| 2495 | 2485 | ||
| 2496 | RSL_PRINTK("scsi%d: reselect\n", HOSTNO); | 2486 | dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); |
| 2497 | 2487 | ||
| 2498 | /* | 2488 | /* |
| 2499 | * At this point, we have detected that our SCSI ID is on the bus, | 2489 | * At this point, we have detected that our SCSI ID is on the bus, |
| @@ -2544,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) | |||
| 2544 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && | 2534 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && |
| 2545 | msg[1] == SIMPLE_QUEUE_TAG) | 2535 | msg[1] == SIMPLE_QUEUE_TAG) |
| 2546 | tag = msg[2]; | 2536 | tag = msg[2]; |
| 2547 | TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " | 2537 | dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " |
| 2548 | "reselection\n", HOSTNO, target_mask, lun, tag); | 2538 | "reselection\n", HOSTNO, target_mask, lun, tag); |
| 2549 | } | 2539 | } |
| 2550 | #endif | 2540 | #endif |
| @@ -2598,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) | |||
| 2598 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2588 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2599 | 2589 | ||
| 2600 | hostdata->connected = tmp; | 2590 | hostdata->connected = tmp; |
| 2601 | RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", | 2591 | dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", |
| 2602 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); | 2592 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); |
| 2603 | falcon_dont_release--; | 2593 | falcon_dont_release--; |
| 2604 | } | 2594 | } |
| @@ -2640,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2640 | printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n", | 2630 | printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n", |
| 2641 | HOSTNO); | 2631 | HOSTNO); |
| 2642 | 2632 | ||
| 2643 | ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, | 2633 | dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, |
| 2644 | NCR5380_read(BUS_AND_STATUS_REG), | 2634 | NCR5380_read(BUS_AND_STATUS_REG), |
| 2645 | NCR5380_read(STATUS_REG)); | 2635 | NCR5380_read(STATUS_REG)); |
| 2646 | 2636 | ||
| @@ -2653,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2653 | 2643 | ||
| 2654 | if (hostdata->connected == cmd) { | 2644 | if (hostdata->connected == cmd) { |
| 2655 | 2645 | ||
| 2656 | ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); | 2646 | dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO); |
| 2657 | /* | 2647 | /* |
| 2658 | * We should perform BSY checking, and make sure we haven't slipped | 2648 | * We should perform BSY checking, and make sure we haven't slipped |
| 2659 | * into BUS FREE. | 2649 | * into BUS FREE. |
| @@ -2683,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2683 | local_irq_restore(flags); | 2673 | local_irq_restore(flags); |
| 2684 | cmd->scsi_done(cmd); | 2674 | cmd->scsi_done(cmd); |
| 2685 | falcon_release_lock_if_possible(hostdata); | 2675 | falcon_release_lock_if_possible(hostdata); |
| 2686 | return SCSI_ABORT_SUCCESS; | 2676 | return SUCCESS; |
| 2687 | } else { | 2677 | } else { |
| 2688 | /* local_irq_restore(flags); */ | 2678 | /* local_irq_restore(flags); */ |
| 2689 | printk("scsi%d: abort of connected command failed!\n", HOSTNO); | 2679 | printk("scsi%d: abort of connected command failed!\n", HOSTNO); |
| 2690 | return SCSI_ABORT_ERROR; | 2680 | return FAILED; |
| 2691 | } | 2681 | } |
| 2692 | } | 2682 | } |
| 2693 | #endif | 2683 | #endif |
| @@ -2705,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2705 | SET_NEXT(tmp, NULL); | 2695 | SET_NEXT(tmp, NULL); |
| 2706 | tmp->result = DID_ABORT << 16; | 2696 | tmp->result = DID_ABORT << 16; |
| 2707 | local_irq_restore(flags); | 2697 | local_irq_restore(flags); |
| 2708 | ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", | 2698 | dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", |
| 2709 | HOSTNO); | 2699 | HOSTNO); |
| 2710 | /* Tagged queuing note: no tag to free here, hasn't been assigned | 2700 | /* Tagged queuing note: no tag to free here, hasn't been assigned |
| 2711 | * yet... */ | 2701 | * yet... */ |
| 2712 | tmp->scsi_done(tmp); | 2702 | tmp->scsi_done(tmp); |
| 2713 | falcon_release_lock_if_possible(hostdata); | 2703 | falcon_release_lock_if_possible(hostdata); |
| 2714 | return SCSI_ABORT_SUCCESS; | 2704 | return SUCCESS; |
| 2715 | } | 2705 | } |
| 2716 | } | 2706 | } |
| 2717 | 2707 | ||
| @@ -2728,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2728 | 2718 | ||
| 2729 | if (hostdata->connected) { | 2719 | if (hostdata->connected) { |
| 2730 | local_irq_restore(flags); | 2720 | local_irq_restore(flags); |
| 2731 | ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); | 2721 | dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); |
| 2732 | return SCSI_ABORT_SNOOZE; | 2722 | return FAILED; |
| 2733 | } | 2723 | } |
| 2734 | 2724 | ||
| 2735 | /* | 2725 | /* |
| @@ -2761,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2761 | tmp = NEXT(tmp)) { | 2751 | tmp = NEXT(tmp)) { |
| 2762 | if (cmd == tmp) { | 2752 | if (cmd == tmp) { |
| 2763 | local_irq_restore(flags); | 2753 | local_irq_restore(flags); |
| 2764 | ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); | 2754 | dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); |
| 2765 | 2755 | ||
| 2766 | if (NCR5380_select(instance, cmd, (int)cmd->tag)) | 2756 | if (NCR5380_select(instance, cmd, (int)cmd->tag)) |
| 2767 | return SCSI_ABORT_BUSY; | 2757 | return FAILED; |
| 2768 | 2758 | ||
| 2769 | ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); | 2759 | dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); |
| 2770 | 2760 | ||
| 2771 | do_abort(instance); | 2761 | do_abort(instance); |
| 2772 | 2762 | ||
| @@ -2791,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2791 | local_irq_restore(flags); | 2781 | local_irq_restore(flags); |
| 2792 | tmp->scsi_done(tmp); | 2782 | tmp->scsi_done(tmp); |
| 2793 | falcon_release_lock_if_possible(hostdata); | 2783 | falcon_release_lock_if_possible(hostdata); |
| 2794 | return SCSI_ABORT_SUCCESS; | 2784 | return SUCCESS; |
| 2795 | } | 2785 | } |
| 2796 | } | 2786 | } |
| 2797 | } | 2787 | } |
| @@ -2816,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2816 | */ | 2806 | */ |
| 2817 | falcon_release_lock_if_possible(hostdata); | 2807 | falcon_release_lock_if_possible(hostdata); |
| 2818 | 2808 | ||
| 2819 | return SCSI_ABORT_NOT_RUNNING; | 2809 | return FAILED; |
| 2820 | } | 2810 | } |
| 2821 | 2811 | ||
| 2822 | 2812 | ||
| @@ -2825,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) | |||
| 2825 | * | 2815 | * |
| 2826 | * Purpose : reset the SCSI bus. | 2816 | * Purpose : reset the SCSI bus. |
| 2827 | * | 2817 | * |
| 2828 | * Returns : SCSI_RESET_WAKEUP | 2818 | * Returns : SUCCESS or FAILURE |
| 2829 | * | 2819 | * |
| 2830 | */ | 2820 | */ |
| 2831 | 2821 | ||
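The abort hunks above, together with the reset-handler returns further down, convert the atari driver from the obsolete SCSI_ABORT_*/SCSI_RESET_* result codes to the two verdicts the current error-handling midlayer accepts, SUCCESS and FAILED. A rough standalone sketch of the mapping the hunks apply — the numeric values below are placeholders, the real constants come from <scsi/scsi.h>:

    #include <stdio.h>

    /* Placeholder values only; SUCCESS/FAILED live in <scsi/scsi.h>, and the
     * SCSI_ABORT_* codes are the old values seen on the left-hand side of
     * the hunks above. */
    #define SUCCESS                 1
    #define FAILED                  0
    #define SCSI_ABORT_SUCCESS      10
    #define SCSI_ABORT_ERROR        11
    #define SCSI_ABORT_BUSY         12
    #define SCSI_ABORT_SNOOZE       13
    #define SCSI_ABORT_NOT_RUNNING  14

    /* Only an abort that actually removed or completed the command reports
     * SUCCESS; error, busy, snooze and not-running all collapse to FAILED,
     * matching the replacements made in the hunks. */
    static int eh_result(int old_code)
    {
            return old_code == SCSI_ABORT_SUCCESS ? SUCCESS : FAILED;
    }

    int main(void)
    {
            printf("SCSI_ABORT_SUCCESS -> %s\n",
                   eh_result(SCSI_ABORT_SUCCESS) == SUCCESS ? "SUCCESS" : "FAILED");
            printf("SCSI_ABORT_SNOOZE  -> %s\n",
                   eh_result(SCSI_ABORT_SNOOZE) == SUCCESS ? "SUCCESS" : "FAILED");
            return 0;
    }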
| @@ -2834,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) | |||
| 2834 | SETUP_HOSTDATA(cmd->device->host); | 2824 | SETUP_HOSTDATA(cmd->device->host); |
| 2835 | int i; | 2825 | int i; |
| 2836 | unsigned long flags; | 2826 | unsigned long flags; |
| 2837 | #if 1 | 2827 | #if defined(RESET_RUN_DONE) |
| 2838 | Scsi_Cmnd *connected, *disconnected_queue; | 2828 | Scsi_Cmnd *connected, *disconnected_queue; |
| 2839 | #endif | 2829 | #endif |
| 2840 | 2830 | ||
| @@ -2859,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) | |||
| 2859 | * through anymore ... */ | 2849 | * through anymore ... */ |
| 2860 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 2850 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 2861 | 2851 | ||
| 2862 | #if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ | 2852 | /* MSch 20140115 - looking at the generic NCR5380 driver, all of this |
| 2853 | * should go. | ||
| 2854 | * Catch-22: if we don't clear all queues, the SCSI driver lock will | ||
| 2855 | * not be reset by atari_scsi_reset()! | ||
| 2856 | */ | ||
| 2857 | |||
| 2858 | #if defined(RESET_RUN_DONE) | ||
| 2859 | /* XXX Should now be done by midlevel code, but it's broken XXX */ | ||
| 2863 | /* XXX see below XXX */ | 2860 | /* XXX see below XXX */ |
| 2864 | 2861 | ||
| 2865 | /* MSch: old-style reset: actually abort all command processing here */ | 2862 | /* MSch: old-style reset: actually abort all command processing here */ |
| @@ -2890,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) | |||
| 2890 | */ | 2887 | */ |
| 2891 | 2888 | ||
| 2892 | if ((cmd = connected)) { | 2889 | if ((cmd = connected)) { |
| 2893 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); | 2890 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); |
| 2894 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); | 2891 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); |
| 2895 | cmd->scsi_done(cmd); | 2892 | cmd->scsi_done(cmd); |
| 2896 | } | 2893 | } |
| @@ -2902,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) | |||
| 2902 | cmd->scsi_done(cmd); | 2899 | cmd->scsi_done(cmd); |
| 2903 | } | 2900 | } |
| 2904 | if (i > 0) | 2901 | if (i > 0) |
| 2905 | ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); | 2902 | dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); |
| 2906 | 2903 | ||
| 2907 | /* The Falcon lock should be released after a reset... | 2904 | /* The Falcon lock should be released after a reset... |
| 2908 | */ | 2905 | */ |
| @@ -2915,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) | |||
| 2915 | * the midlevel code that the reset was SUCCESSFUL, and there is no | 2912 | * the midlevel code that the reset was SUCCESSFUL, and there is no |
| 2916 | * need to 'wake up' the commands by a request_sense | 2913 | * need to 'wake up' the commands by a request_sense |
| 2917 | */ | 2914 | */ |
| 2918 | return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; | 2915 | return SUCCESS; |
| 2919 | #else /* 1 */ | 2916 | #else /* 1 */ |
| 2920 | 2917 | ||
| 2921 | /* MSch: new-style reset handling: let the mid-level do what it can */ | 2918 | /* MSch: new-style reset handling: let the mid-level do what it can */ |
| @@ -2942,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) | |||
| 2942 | */ | 2939 | */ |
| 2943 | 2940 | ||
| 2944 | if (hostdata->issue_queue) | 2941 | if (hostdata->issue_queue) |
| 2945 | ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); | 2942 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); |
| 2946 | if (hostdata->connected) | 2943 | if (hostdata->connected) |
| 2947 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); | 2944 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); |
| 2948 | if (hostdata->disconnected_queue) | 2945 | if (hostdata->disconnected_queue) |
| 2949 | ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); | 2946 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); |
| 2950 | 2947 | ||
| 2951 | local_irq_save(flags); | 2948 | local_irq_save(flags); |
| 2952 | hostdata->issue_queue = NULL; | 2949 | hostdata->issue_queue = NULL; |
| @@ -2963,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) | |||
| 2963 | local_irq_restore(flags); | 2960 | local_irq_restore(flags); |
| 2964 | 2961 | ||
| 2965 | /* we did no complete reset of all commands, so a wakeup is required */ | 2962 | /* we did no complete reset of all commands, so a wakeup is required */ |
| 2966 | return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; | 2963 | return SUCCESS; |
| 2967 | #endif /* 1 */ | 2964 | #endif /* 1 */ |
| 2968 | } | 2965 | } |
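The hunks above complete the switch of this driver's abort and bus-reset handlers from the long-gone SCSI_ABORT_xxx / SCSI_RESET_xxx return codes to the midlayer's SUCCESS/FAILED, and route debug output through the shared dprintk() helper. A hedged sketch of the resulting contract follows; everything except the scsi_cmnd fields is illustrative and not taken from the patch.

#include <scsi/scsi.h>          /* SUCCESS, FAILED, DID_ABORT */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Sketch only: a new-style eh_abort_handler reports plain SUCCESS or
 * FAILED; the midlayer decides whether to escalate to a bus or host reset.
 */
static int example_eh_abort_handler(struct scsi_cmnd *cmd)
{
        bool dequeued;

        /* A real driver would dequeue the command or abort the active
         * nexus here; this placeholder always reports failure.
         */
        dequeued = false;

        if (dequeued) {
                cmd->result = DID_ABORT << 16;
                cmd->scsi_done(cmd);    /* handler still completes the command */
                return SUCCESS;
        }

        return FAILED;                  /* midlayer escalates to a reset */
}

Note that SUCCESS only tells the midlayer the handler dealt with the command; as in the issue-queue branch of the hunk, the command itself is still finished through scsi_done().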
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index a8d721ff19eb..b522134528d6 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c | |||
| @@ -67,12 +67,6 @@ | |||
| 67 | 67 | ||
| 68 | #include <linux/module.h> | 68 | #include <linux/module.h> |
| 69 | 69 | ||
| 70 | #define NDEBUG (0) | ||
| 71 | |||
| 72 | #define NDEBUG_ABORT 0x00100000 | ||
| 73 | #define NDEBUG_TAGS 0x00200000 | ||
| 74 | #define NDEBUG_MERGING 0x00400000 | ||
| 75 | |||
| 76 | #define AUTOSENSE | 70 | #define AUTOSENSE |
| 77 | /* For the Atari version, use only polled IO or REAL_DMA */ | 71 | /* For the Atari version, use only polled IO or REAL_DMA */ |
| 78 | #define REAL_DMA | 72 | #define REAL_DMA |
| @@ -314,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) | |||
| 314 | 308 | ||
| 315 | dma_stat = tt_scsi_dma.dma_ctrl; | 309 | dma_stat = tt_scsi_dma.dma_ctrl; |
| 316 | 310 | ||
| 317 | INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n", | 311 | dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n", |
| 318 | atari_scsi_host->host_no, dma_stat & 0xff); | 312 | atari_scsi_host->host_no, dma_stat & 0xff); |
| 319 | 313 | ||
| 320 | /* Look if it was the DMA that has interrupted: First possibility | 314 | /* Look if it was the DMA that has interrupted: First possibility |
| @@ -340,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) | |||
| 340 | if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { | 334 | if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { |
| 341 | atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); | 335 | atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); |
| 342 | 336 | ||
| 343 | DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", | 337 | dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", |
| 344 | atari_dma_residual); | 338 | atari_dma_residual); |
| 345 | 339 | ||
| 346 | if ((signed int)atari_dma_residual < 0) | 340 | if ((signed int)atari_dma_residual < 0) |
| @@ -371,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) | |||
| 371 | * other command. These shouldn't disconnect anyway. | 365 | * other command. These shouldn't disconnect anyway. |
| 372 | */ | 366 | */ |
| 373 | if (atari_dma_residual & 0x1ff) { | 367 | if (atari_dma_residual & 0x1ff) { |
| 374 | DMA_PRINTK("SCSI DMA: DMA bug corrected, " | 368 | dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, " |
| 375 | "difference %ld bytes\n", | 369 | "difference %ld bytes\n", |
| 376 | 512 - (atari_dma_residual & 0x1ff)); | 370 | 512 - (atari_dma_residual & 0x1ff)); |
| 377 | atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; | 371 | atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; |
| @@ -438,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy) | |||
| 438 | "ST-DMA fifo\n", transferred & 15); | 432 | "ST-DMA fifo\n", transferred & 15); |
| 439 | 433 | ||
| 440 | atari_dma_residual = HOSTDATA_DMALEN - transferred; | 434 | atari_dma_residual = HOSTDATA_DMALEN - transferred; |
| 441 | DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", | 435 | dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", |
| 442 | atari_dma_residual); | 436 | atari_dma_residual); |
| 443 | } else | 437 | } else |
| 444 | atari_dma_residual = 0; | 438 | atari_dma_residual = 0; |
| @@ -474,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void) | |||
| 474 | /* there are 'nr' bytes left for the last long address | 468 | /* there are 'nr' bytes left for the last long address |
| 475 | before the DMA pointer */ | 469 | before the DMA pointer */ |
| 476 | phys_dst ^= nr; | 470 | phys_dst ^= nr; |
| 477 | DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", | 471 | dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", |
| 478 | nr, phys_dst); | 472 | nr, phys_dst); |
| 479 | /* The content of the DMA pointer is a physical address! */ | 473 | /* The content of the DMA pointer is a physical address! */ |
| 480 | dst = phys_to_virt(phys_dst); | 474 | dst = phys_to_virt(phys_dst); |
| 481 | DMA_PRINTK(" = virt addr %p\n", dst); | 475 | dprintk(NDEBUG_DMA, " = virt addr %p\n", dst); |
| 482 | for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr) | 476 | for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr) |
| 483 | *dst++ = *src++; | 477 | *dst++ = *src++; |
| 484 | } | 478 | } |
| @@ -827,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd) | |||
| 827 | } else { | 821 | } else { |
| 828 | atari_turnon_irq(IRQ_MFP_FSCSI); | 822 | atari_turnon_irq(IRQ_MFP_FSCSI); |
| 829 | } | 823 | } |
| 830 | if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) | 824 | if (rv == SUCCESS) |
| 831 | falcon_release_lock_if_possible(hostdata); | 825 | falcon_release_lock_if_possible(hostdata); |
| 832 | 826 | ||
| 833 | return rv; | 827 | return rv; |
| @@ -883,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, | |||
| 883 | { | 877 | { |
| 884 | unsigned long addr = virt_to_phys(data); | 878 | unsigned long addr = virt_to_phys(data); |
| 885 | 879 | ||
| 886 | DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " | 880 | dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " |
| 887 | "dir = %d\n", instance->host_no, data, addr, count, dir); | 881 | "dir = %d\n", instance->host_no, data, addr, count, dir); |
| 888 | 882 | ||
| 889 | if (!IS_A_TT() && !STRAM_ADDR(addr)) { | 883 | if (!IS_A_TT() && !STRAM_ADDR(addr)) { |
| @@ -1063,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, | |||
| 1063 | possible_len = limit; | 1057 | possible_len = limit; |
| 1064 | 1058 | ||
| 1065 | if (possible_len != wanted_len) | 1059 | if (possible_len != wanted_len) |
| 1066 | DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " | 1060 | dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes " |
| 1067 | "instead of %ld\n", possible_len, wanted_len); | 1061 | "instead of %ld\n", possible_len, wanted_len); |
| 1068 | 1062 | ||
| 1069 | return possible_len; | 1063 | return possible_len; |
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h index 11c624bb122d..3299d91d7336 100644 --- a/drivers/scsi/atari_scsi.h +++ b/drivers/scsi/atari_scsi.h | |||
| @@ -54,125 +54,6 @@ | |||
| 54 | #define NCR5380_dma_xfer_len(i,cmd,phase) \ | 54 | #define NCR5380_dma_xfer_len(i,cmd,phase) \ |
| 55 | atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) | 55 | atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) |
| 56 | 56 | ||
| 57 | /* former generic SCSI error handling stuff */ | ||
| 58 | |||
| 59 | #define SCSI_ABORT_SNOOZE 0 | ||
| 60 | #define SCSI_ABORT_SUCCESS 1 | ||
| 61 | #define SCSI_ABORT_PENDING 2 | ||
| 62 | #define SCSI_ABORT_BUSY 3 | ||
| 63 | #define SCSI_ABORT_NOT_RUNNING 4 | ||
| 64 | #define SCSI_ABORT_ERROR 5 | ||
| 65 | |||
| 66 | #define SCSI_RESET_SNOOZE 0 | ||
| 67 | #define SCSI_RESET_PUNT 1 | ||
| 68 | #define SCSI_RESET_SUCCESS 2 | ||
| 69 | #define SCSI_RESET_PENDING 3 | ||
| 70 | #define SCSI_RESET_WAKEUP 4 | ||
| 71 | #define SCSI_RESET_NOT_RUNNING 5 | ||
| 72 | #define SCSI_RESET_ERROR 6 | ||
| 73 | |||
| 74 | #define SCSI_RESET_SYNCHRONOUS 0x01 | ||
| 75 | #define SCSI_RESET_ASYNCHRONOUS 0x02 | ||
| 76 | #define SCSI_RESET_SUGGEST_BUS_RESET 0x04 | ||
| 77 | #define SCSI_RESET_SUGGEST_HOST_RESET 0x08 | ||
| 78 | |||
| 79 | #define SCSI_RESET_BUS_RESET 0x100 | ||
| 80 | #define SCSI_RESET_HOST_RESET 0x200 | ||
| 81 | #define SCSI_RESET_ACTION 0xff | ||
| 82 | |||
| 83 | /* Debugging printk definitions: | ||
| 84 | * | ||
| 85 | * ARB -> arbitration | ||
| 86 | * ASEN -> auto-sense | ||
| 87 | * DMA -> DMA | ||
| 88 | * HSH -> PIO handshake | ||
| 89 | * INF -> information transfer | ||
| 90 | * INI -> initialization | ||
| 91 | * INT -> interrupt | ||
| 92 | * LNK -> linked commands | ||
| 93 | * MAIN -> NCR5380_main() control flow | ||
| 94 | * NDAT -> no data-out phase | ||
| 95 | * NWR -> no write commands | ||
| 96 | * PIO -> PIO transfers | ||
| 97 | * PDMA -> pseudo DMA (unused on Atari) | ||
| 98 | * QU -> queues | ||
| 99 | * RSL -> reselections | ||
| 100 | * SEL -> selections | ||
| 101 | * USL -> usleep cpde (unused on Atari) | ||
| 102 | * LBS -> last byte sent (unused on Atari) | ||
| 103 | * RSS -> restarting of selections | ||
| 104 | * EXT -> extended messages | ||
| 105 | * ABRT -> aborting and resetting | ||
| 106 | * TAG -> queue tag handling | ||
| 107 | * MER -> merging of consec. buffers | ||
| 108 | * | ||
| 109 | */ | ||
| 110 | |||
| 111 | #define dprint(flg, format...) \ | ||
| 112 | ({ \ | ||
| 113 | if (NDEBUG & (flg)) \ | ||
| 114 | printk(KERN_DEBUG format); \ | ||
| 115 | }) | ||
| 116 | |||
| 117 | #define ARB_PRINTK(format, args...) \ | ||
| 118 | dprint(NDEBUG_ARBITRATION, format , ## args) | ||
| 119 | #define ASEN_PRINTK(format, args...) \ | ||
| 120 | dprint(NDEBUG_AUTOSENSE, format , ## args) | ||
| 121 | #define DMA_PRINTK(format, args...) \ | ||
| 122 | dprint(NDEBUG_DMA, format , ## args) | ||
| 123 | #define HSH_PRINTK(format, args...) \ | ||
| 124 | dprint(NDEBUG_HANDSHAKE, format , ## args) | ||
| 125 | #define INF_PRINTK(format, args...) \ | ||
| 126 | dprint(NDEBUG_INFORMATION, format , ## args) | ||
| 127 | #define INI_PRINTK(format, args...) \ | ||
| 128 | dprint(NDEBUG_INIT, format , ## args) | ||
| 129 | #define INT_PRINTK(format, args...) \ | ||
| 130 | dprint(NDEBUG_INTR, format , ## args) | ||
| 131 | #define LNK_PRINTK(format, args...) \ | ||
| 132 | dprint(NDEBUG_LINKED, format , ## args) | ||
| 133 | #define MAIN_PRINTK(format, args...) \ | ||
| 134 | dprint(NDEBUG_MAIN, format , ## args) | ||
| 135 | #define NDAT_PRINTK(format, args...) \ | ||
| 136 | dprint(NDEBUG_NO_DATAOUT, format , ## args) | ||
| 137 | #define NWR_PRINTK(format, args...) \ | ||
| 138 | dprint(NDEBUG_NO_WRITE, format , ## args) | ||
| 139 | #define PIO_PRINTK(format, args...) \ | ||
| 140 | dprint(NDEBUG_PIO, format , ## args) | ||
| 141 | #define PDMA_PRINTK(format, args...) \ | ||
| 142 | dprint(NDEBUG_PSEUDO_DMA, format , ## args) | ||
| 143 | #define QU_PRINTK(format, args...) \ | ||
| 144 | dprint(NDEBUG_QUEUES, format , ## args) | ||
| 145 | #define RSL_PRINTK(format, args...) \ | ||
| 146 | dprint(NDEBUG_RESELECTION, format , ## args) | ||
| 147 | #define SEL_PRINTK(format, args...) \ | ||
| 148 | dprint(NDEBUG_SELECTION, format , ## args) | ||
| 149 | #define USL_PRINTK(format, args...) \ | ||
| 150 | dprint(NDEBUG_USLEEP, format , ## args) | ||
| 151 | #define LBS_PRINTK(format, args...) \ | ||
| 152 | dprint(NDEBUG_LAST_BYTE_SENT, format , ## args) | ||
| 153 | #define RSS_PRINTK(format, args...) \ | ||
| 154 | dprint(NDEBUG_RESTART_SELECT, format , ## args) | ||
| 155 | #define EXT_PRINTK(format, args...) \ | ||
| 156 | dprint(NDEBUG_EXTENDED, format , ## args) | ||
| 157 | #define ABRT_PRINTK(format, args...) \ | ||
| 158 | dprint(NDEBUG_ABORT, format , ## args) | ||
| 159 | #define TAG_PRINTK(format, args...) \ | ||
| 160 | dprint(NDEBUG_TAGS, format , ## args) | ||
| 161 | #define MER_PRINTK(format, args...) \ | ||
| 162 | dprint(NDEBUG_MERGING, format , ## args) | ||
| 163 | |||
| 164 | /* conditional macros for NCR5380_print_{,phase,status} */ | ||
| 165 | |||
| 166 | #define NCR_PRINT(mask) \ | ||
| 167 | ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0) | ||
| 168 | |||
| 169 | #define NCR_PRINT_PHASE(mask) \ | ||
| 170 | ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0) | ||
| 171 | |||
| 172 | #define NCR_PRINT_STATUS(mask) \ | ||
| 173 | ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0) | ||
| 174 | |||
| 175 | |||
| 176 | #endif /* ndef ASM */ | 57 | #endif /* ndef ASM */ |
| 177 | #endif /* ATARI_SCSI_H */ | 58 | #endif /* ATARI_SCSI_H */ |
| 178 | 59 | ||
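This header hunk removes the Atari-local SCSI_ABORT_xxx / SCSI_RESET_xxx constants, the dprint() wrapper and the per-area *_PRINTK macros; callers now use the dprintk() macro shared with the other NCR5380 drivers, selecting areas with the common NDEBUG_* flags. Roughly what that shared helper amounts to (a sketch of the idea, not the verbatim NCR5380.h text):

/* Emit the message only when the matching NDEBUG_* bit is enabled. */
#define dprintk(flg, fmt, ...)                                  \
        do {                                                    \
                if ((NDEBUG) & (flg))                           \
                        printk(KERN_DEBUG fmt, ##__VA_ARGS__);  \
        } while (0)

/* Usage, replacing the old ABRT_PRINTK()/DMA_PRINTK() wrappers: */
dprintk(NDEBUG_ABORT, "scsi%d: abort requested\n", instance->host_no);
dprintk(NDEBUG_DMA, "SCSI DMA: %ld residual bytes\n", residual);

With NDEBUG left at its default of 0, the condition is a compile-time constant and the calls are dropped entirely.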
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index 1bfb0bd01198..860f527d8f26 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h | |||
| @@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q) | |||
| 83 | 83 | ||
| 84 | /*ISCSI */ | 84 | /*ISCSI */ |
| 85 | 85 | ||
| 86 | struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ | ||
| 87 | bool enable; | ||
| 88 | u32 min_eqd; /* in usecs */ | ||
| 89 | u32 max_eqd; /* in usecs */ | ||
| 90 | u32 prev_eqd; /* in usecs */ | ||
| 91 | u32 et_eqd; /* configured val when aic is off */ | ||
| 92 | ulong jiffs; | ||
| 93 | u64 eq_prev; /* Used to calculate eqe */ | ||
| 94 | }; | ||
| 95 | |||
| 86 | struct be_eq_obj { | 96 | struct be_eq_obj { |
| 87 | bool todo_mcc_cq; | 97 | bool todo_mcc_cq; |
| 88 | bool todo_cq; | 98 | bool todo_cq; |
| 99 | u32 cq_count; | ||
| 89 | struct be_queue_info q; | 100 | struct be_queue_info q; |
| 90 | struct beiscsi_hba *phba; | 101 | struct beiscsi_hba *phba; |
| 91 | struct be_queue_info *cq; | 102 | struct be_queue_info *cq; |
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 7cf7f99ee442..cc7405c0eca0 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h | |||
| @@ -71,6 +71,7 @@ struct be_mcc_wrb { | |||
| 71 | #define BEISCSI_FW_MBX_TIMEOUT 100 | 71 | #define BEISCSI_FW_MBX_TIMEOUT 100 |
| 72 | 72 | ||
| 73 | /* MBOX Command VER */ | 73 | /* MBOX Command VER */ |
| 74 | #define MBX_CMD_VER1 0x01 | ||
| 74 | #define MBX_CMD_VER2 0x02 | 75 | #define MBX_CMD_VER2 0x02 |
| 75 | 76 | ||
| 76 | struct be_mcc_compl { | 77 | struct be_mcc_compl { |
| @@ -271,6 +272,12 @@ struct be_cmd_resp_eq_create { | |||
| 271 | u16 rsvd0; /* sword */ | 272 | u16 rsvd0; /* sword */ |
| 272 | } __packed; | 273 | } __packed; |
| 273 | 274 | ||
| 275 | struct be_set_eqd { | ||
| 276 | u32 eq_id; | ||
| 277 | u32 phase; | ||
| 278 | u32 delay_multiplier; | ||
| 279 | } __packed; | ||
| 280 | |||
| 274 | struct mgmt_chap_format { | 281 | struct mgmt_chap_format { |
| 275 | u32 flags; | 282 | u32 flags; |
| 276 | u8 intr_chap_name[256]; | 283 | u8 intr_chap_name[256]; |
| @@ -622,7 +629,7 @@ struct be_cmd_req_modify_eq_delay { | |||
| 622 | u32 eq_id; | 629 | u32 eq_id; |
| 623 | u32 phase; | 630 | u32 phase; |
| 624 | u32 delay_multiplier; | 631 | u32 delay_multiplier; |
| 625 | } delay[8]; | 632 | } delay[MAX_CPUS]; |
| 626 | } __packed; | 633 | } __packed; |
| 627 | 634 | ||
| 628 | /******************** Get MAC ADDR *******************/ | 635 | /******************** Get MAC ADDR *******************/ |
| @@ -708,6 +715,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba); | |||
| 708 | 715 | ||
| 709 | void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); | 716 | void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); |
| 710 | 717 | ||
| 718 | int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, | ||
| 719 | int num); | ||
| 711 | int beiscsi_mccq_compl(struct beiscsi_hba *phba, | 720 | int beiscsi_mccq_compl(struct beiscsi_hba *phba, |
| 712 | uint32_t tag, struct be_mcc_wrb **wrb, | 721 | uint32_t tag, struct be_mcc_wrb **wrb, |
| 713 | struct be_dma_mem *mbx_cmd_mem); | 722 | struct be_dma_mem *mbx_cmd_mem); |
| @@ -1005,6 +1014,26 @@ struct tcp_connect_and_offload_in { | |||
| 1005 | u8 rsvd0[3]; | 1014 | u8 rsvd0[3]; |
| 1006 | } __packed; | 1015 | } __packed; |
| 1007 | 1016 | ||
| 1017 | struct tcp_connect_and_offload_in_v1 { | ||
| 1018 | struct be_cmd_req_hdr hdr; | ||
| 1019 | struct ip_addr_format ip_address; | ||
| 1020 | u16 tcp_port; | ||
| 1021 | u16 cid; | ||
| 1022 | u16 cq_id; | ||
| 1023 | u16 defq_id; | ||
| 1024 | struct phys_addr dataout_template_pa; | ||
| 1025 | u16 hdr_ring_id; | ||
| 1026 | u16 data_ring_id; | ||
| 1027 | u8 do_offload; | ||
| 1028 | u8 ifd_state; | ||
| 1029 | u8 rsvd0[2]; | ||
| 1030 | u16 tcp_window_size; | ||
| 1031 | u8 tcp_window_scale_count; | ||
| 1032 | u8 rsvd1; | ||
| 1033 | u32 tcp_mss:24; | ||
| 1034 | u8 rsvd2; | ||
| 1035 | } __packed; | ||
| 1036 | |||
| 1008 | struct tcp_connect_and_offload_out { | 1037 | struct tcp_connect_and_offload_out { |
| 1009 | struct be_cmd_resp_hdr hdr; | 1038 | struct be_cmd_resp_hdr hdr; |
| 1010 | u32 connection_handle; | 1039 | u32 connection_handle; |
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index a3df43324c98..fd284ff36ecf 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c | |||
| @@ -1106,7 +1106,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, | |||
| 1106 | struct beiscsi_hba *phba = beiscsi_ep->phba; | 1106 | struct beiscsi_hba *phba = beiscsi_ep->phba; |
| 1107 | struct tcp_connect_and_offload_out *ptcpcnct_out; | 1107 | struct tcp_connect_and_offload_out *ptcpcnct_out; |
| 1108 | struct be_dma_mem nonemb_cmd; | 1108 | struct be_dma_mem nonemb_cmd; |
| 1109 | unsigned int tag; | 1109 | unsigned int tag, req_memsize; |
| 1110 | int ret = -ENOMEM; | 1110 | int ret = -ENOMEM; |
| 1111 | 1111 | ||
| 1112 | beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, | 1112 | beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, |
| @@ -1127,8 +1127,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, | |||
| 1127 | (beiscsi_ep->ep_cid)] = ep; | 1127 | (beiscsi_ep->ep_cid)] = ep; |
| 1128 | 1128 | ||
| 1129 | beiscsi_ep->cid_vld = 0; | 1129 | beiscsi_ep->cid_vld = 0; |
| 1130 | |||
| 1131 | if (is_chip_be2_be3r(phba)) | ||
| 1132 | req_memsize = sizeof(struct tcp_connect_and_offload_in); | ||
| 1133 | else | ||
| 1134 | req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); | ||
| 1135 | |||
| 1130 | nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, | 1136 | nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, |
| 1131 | sizeof(struct tcp_connect_and_offload_in), | 1137 | req_memsize, |
| 1132 | &nonemb_cmd.dma); | 1138 | &nonemb_cmd.dma); |
| 1133 | if (nonemb_cmd.va == NULL) { | 1139 | if (nonemb_cmd.va == NULL) { |
| 1134 | 1140 | ||
| @@ -1139,7 +1145,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, | |||
| 1139 | beiscsi_free_ep(beiscsi_ep); | 1145 | beiscsi_free_ep(beiscsi_ep); |
| 1140 | return -ENOMEM; | 1146 | return -ENOMEM; |
| 1141 | } | 1147 | } |
| 1142 | nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); | 1148 | nonemb_cmd.size = req_memsize; |
| 1143 | memset(nonemb_cmd.va, 0, nonemb_cmd.size); | 1149 | memset(nonemb_cmd.va, 0, nonemb_cmd.size); |
| 1144 | tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); | 1150 | tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); |
| 1145 | if (tag <= 0) { | 1151 | if (tag <= 0) { |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 0d822297aa80..554349029628 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
| @@ -599,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) | |||
| 599 | pci_set_drvdata(pcidev, phba); | 599 | pci_set_drvdata(pcidev, phba); |
| 600 | phba->interface_handle = 0xFFFFFFFF; | 600 | phba->interface_handle = 0xFFFFFFFF; |
| 601 | 601 | ||
| 602 | if (iscsi_host_add(shost, &phba->pcidev->dev)) | ||
| 603 | goto free_devices; | ||
| 604 | |||
| 605 | return phba; | 602 | return phba; |
| 606 | |||
| 607 | free_devices: | ||
| 608 | pci_dev_put(phba->pcidev); | ||
| 609 | iscsi_host_free(phba->shost); | ||
| 610 | return NULL; | ||
| 611 | } | 603 | } |
| 612 | 604 | ||
| 613 | static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) | 605 | static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) |
| @@ -2279,6 +2271,7 @@ static int be_iopoll(struct blk_iopoll *iop, int budget) | |||
| 2279 | 2271 | ||
| 2280 | pbe_eq = container_of(iop, struct be_eq_obj, iopoll); | 2272 | pbe_eq = container_of(iop, struct be_eq_obj, iopoll); |
| 2281 | ret = beiscsi_process_cq(pbe_eq); | 2273 | ret = beiscsi_process_cq(pbe_eq); |
| 2274 | pbe_eq->cq_count += ret; | ||
| 2282 | if (ret < budget) { | 2275 | if (ret < budget) { |
| 2283 | phba = pbe_eq->phba; | 2276 | phba = pbe_eq->phba; |
| 2284 | blk_iopoll_complete(iop); | 2277 | blk_iopoll_complete(iop); |
| @@ -3692,7 +3685,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) | |||
| 3692 | struct hwi_controller *phwi_ctrlr; | 3685 | struct hwi_controller *phwi_ctrlr; |
| 3693 | struct hwi_context_memory *phwi_context; | 3686 | struct hwi_context_memory *phwi_context; |
| 3694 | struct hwi_async_pdu_context *pasync_ctx; | 3687 | struct hwi_async_pdu_context *pasync_ctx; |
| 3695 | int i, eq_num, ulp_num; | 3688 | int i, eq_for_mcc, ulp_num; |
| 3696 | 3689 | ||
| 3697 | phwi_ctrlr = phba->phwi_ctrlr; | 3690 | phwi_ctrlr = phba->phwi_ctrlr; |
| 3698 | phwi_context = phwi_ctrlr->phwi_ctxt; | 3691 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| @@ -3729,16 +3722,17 @@ static void hwi_cleanup(struct beiscsi_hba *phba) | |||
| 3729 | if (q->created) | 3722 | if (q->created) |
| 3730 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); | 3723 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); |
| 3731 | } | 3724 | } |
| 3725 | |||
| 3726 | be_mcc_queues_destroy(phba); | ||
| 3732 | if (phba->msix_enabled) | 3727 | if (phba->msix_enabled) |
| 3733 | eq_num = 1; | 3728 | eq_for_mcc = 1; |
| 3734 | else | 3729 | else |
| 3735 | eq_num = 0; | 3730 | eq_for_mcc = 0; |
| 3736 | for (i = 0; i < (phba->num_cpus + eq_num); i++) { | 3731 | for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { |
| 3737 | q = &phwi_context->be_eq[i].q; | 3732 | q = &phwi_context->be_eq[i].q; |
| 3738 | if (q->created) | 3733 | if (q->created) |
| 3739 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); | 3734 | beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); |
| 3740 | } | 3735 | } |
| 3741 | be_mcc_queues_destroy(phba); | ||
| 3742 | be_cmd_fw_uninit(ctrl); | 3736 | be_cmd_fw_uninit(ctrl); |
| 3743 | } | 3737 | } |
| 3744 | 3738 | ||
| @@ -3833,9 +3827,9 @@ static int hwi_init_port(struct beiscsi_hba *phba) | |||
| 3833 | 3827 | ||
| 3834 | phwi_ctrlr = phba->phwi_ctrlr; | 3828 | phwi_ctrlr = phba->phwi_ctrlr; |
| 3835 | phwi_context = phwi_ctrlr->phwi_ctxt; | 3829 | phwi_context = phwi_ctrlr->phwi_ctxt; |
| 3836 | phwi_context->max_eqd = 0; | 3830 | phwi_context->max_eqd = 128; |
| 3837 | phwi_context->min_eqd = 0; | 3831 | phwi_context->min_eqd = 0; |
| 3838 | phwi_context->cur_eqd = 64; | 3832 | phwi_context->cur_eqd = 0; |
| 3839 | be_cmd_fw_initialize(&phba->ctrl); | 3833 | be_cmd_fw_initialize(&phba->ctrl); |
| 3840 | 3834 | ||
| 3841 | status = beiscsi_create_eqs(phba, phwi_context); | 3835 | status = beiscsi_create_eqs(phba, phwi_context); |
| @@ -5290,6 +5284,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba) | |||
| 5290 | return; | 5284 | return; |
| 5291 | } | 5285 | } |
| 5292 | 5286 | ||
| 5287 | static void be_eqd_update(struct beiscsi_hba *phba) | ||
| 5288 | { | ||
| 5289 | struct be_set_eqd set_eqd[MAX_CPUS]; | ||
| 5290 | struct be_aic_obj *aic; | ||
| 5291 | struct be_eq_obj *pbe_eq; | ||
| 5292 | struct hwi_controller *phwi_ctrlr; | ||
| 5293 | struct hwi_context_memory *phwi_context; | ||
| 5294 | int eqd, i, num = 0; | ||
| 5295 | ulong now; | ||
| 5296 | u32 pps, delta; | ||
| 5297 | unsigned int tag; | ||
| 5298 | |||
| 5299 | phwi_ctrlr = phba->phwi_ctrlr; | ||
| 5300 | phwi_context = phwi_ctrlr->phwi_ctxt; | ||
| 5301 | |||
| 5302 | for (i = 0; i <= phba->num_cpus; i++) { | ||
| 5303 | aic = &phba->aic_obj[i]; | ||
| 5304 | pbe_eq = &phwi_context->be_eq[i]; | ||
| 5305 | now = jiffies; | ||
| 5306 | if (!aic->jiffs || time_before(now, aic->jiffs) || | ||
| 5307 | pbe_eq->cq_count < aic->eq_prev) { | ||
| 5308 | aic->jiffs = now; | ||
| 5309 | aic->eq_prev = pbe_eq->cq_count; | ||
| 5310 | continue; | ||
| 5311 | } | ||
| 5312 | delta = jiffies_to_msecs(now - aic->jiffs); | ||
| 5313 | pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); | ||
| 5314 | eqd = (pps / 1500) << 2; | ||
| 5315 | |||
| 5316 | if (eqd < 8) | ||
| 5317 | eqd = 0; | ||
| 5318 | eqd = min_t(u32, eqd, phwi_context->max_eqd); | ||
| 5319 | eqd = max_t(u32, eqd, phwi_context->min_eqd); | ||
| 5320 | |||
| 5321 | aic->jiffs = now; | ||
| 5322 | aic->eq_prev = pbe_eq->cq_count; | ||
| 5323 | |||
| 5324 | if (eqd != aic->prev_eqd) { | ||
| 5325 | set_eqd[num].delay_multiplier = (eqd * 65)/100; | ||
| 5326 | set_eqd[num].eq_id = pbe_eq->q.id; | ||
| 5327 | aic->prev_eqd = eqd; | ||
| 5328 | num++; | ||
| 5329 | } | ||
| 5330 | } | ||
| 5331 | if (num) { | ||
| 5332 | tag = be_cmd_modify_eq_delay(phba, set_eqd, num); | ||
| 5333 | if (tag) | ||
| 5334 | beiscsi_mccq_compl(phba, tag, NULL, NULL); | ||
| 5335 | } | ||
| 5336 | } | ||
| 5337 | |||
| 5293 | /* | 5338 | /* |
| 5294 | * beiscsi_hw_health_check()- Check adapter health | 5339 | * beiscsi_hw_health_check()- Check adapter health |
| 5295 | * @work: work item to check HW health | 5340 | * @work: work item to check HW health |
| @@ -5303,6 +5348,8 @@ beiscsi_hw_health_check(struct work_struct *work) | |||
| 5303 | container_of(work, struct beiscsi_hba, | 5348 | container_of(work, struct beiscsi_hba, |
| 5304 | beiscsi_hw_check_task.work); | 5349 | beiscsi_hw_check_task.work); |
| 5305 | 5350 | ||
| 5351 | be_eqd_update(phba); | ||
| 5352 | |||
| 5306 | beiscsi_ue_detect(phba); | 5353 | beiscsi_ue_detect(phba); |
| 5307 | 5354 | ||
| 5308 | schedule_delayed_work(&phba->beiscsi_hw_check_task, | 5355 | schedule_delayed_work(&phba->beiscsi_hw_check_task, |
| @@ -5579,7 +5626,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, | |||
| 5579 | phba->ctrl.mcc_numtag[i + 1] = 0; | 5626 | phba->ctrl.mcc_numtag[i + 1] = 0; |
| 5580 | phba->ctrl.mcc_tag_available++; | 5627 | phba->ctrl.mcc_tag_available++; |
| 5581 | memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, | 5628 | memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, |
| 5582 | sizeof(struct beiscsi_mcc_tag_state)); | 5629 | sizeof(struct be_dma_mem)); |
| 5583 | } | 5630 | } |
| 5584 | 5631 | ||
| 5585 | phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; | 5632 | phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; |
| @@ -5621,6 +5668,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, | |||
| 5621 | } | 5668 | } |
| 5622 | hwi_enable_intr(phba); | 5669 | hwi_enable_intr(phba); |
| 5623 | 5670 | ||
| 5671 | if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) | ||
| 5672 | goto free_blkenbld; | ||
| 5673 | |||
| 5624 | if (beiscsi_setup_boot_info(phba)) | 5674 | if (beiscsi_setup_boot_info(phba)) |
| 5625 | /* | 5675 | /* |
| 5626 | * log error but continue, because we may not be using | 5676 | * log error but continue, because we may not be using |
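Taken together, the be_main.c hunks add adaptive interrupt coalescing: be_iopoll() now accumulates completions per EQ, and the new be_eqd_update(), run from the periodic health-check work, turns each EQ's completion delta into a completions-per-second figure, maps it onto a delay bounded by min_eqd/max_eqd (now 0..128), and batches a MODIFY_EQ_DELAY command for the EQs whose delay changed (delay_multiplier being eqd * 65 / 100). The per-EQ calculation, as a hedged sketch with the sampled values passed in explicitly (the helper itself is illustrative):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>       /* clamp_t */

static u32 example_calc_eqd(u64 cq_count, u64 prev_count,
                            unsigned long now, unsigned long then,
                            u32 min_eqd, u32 max_eqd)
{
        u32 delta_ms = jiffies_to_msecs(now - then);
        u32 pps, eqd;

        if (!delta_ms)
                return min_eqd;         /* sampled too soon; the driver just skips */

        pps = ((u32)(cq_count - prev_count) * 1000) / delta_ms;
        eqd = (pps / 1500) << 2;        /* empirical scaling used by the hunk */

        if (eqd < 8)
                eqd = 0;                /* low completion rate: no coalescing */

        return clamp_t(u32, eqd, min_eqd, max_eqd);
}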
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 9380b55bdeaf..9ceab426eec9 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h | |||
| @@ -36,7 +36,7 @@ | |||
| 36 | #include <scsi/scsi_transport_iscsi.h> | 36 | #include <scsi/scsi_transport_iscsi.h> |
| 37 | 37 | ||
| 38 | #define DRV_NAME "be2iscsi" | 38 | #define DRV_NAME "be2iscsi" |
| 39 | #define BUILD_STR "10.2.125.0" | 39 | #define BUILD_STR "10.2.273.0" |
| 40 | #define BE_NAME "Emulex OneConnect" \ | 40 | #define BE_NAME "Emulex OneConnect" \ |
| 41 | "Open-iSCSI Driver version" BUILD_STR | 41 | "Open-iSCSI Driver version" BUILD_STR |
| 42 | #define DRV_DESC BE_NAME " " "Driver" | 42 | #define DRV_DESC BE_NAME " " "Driver" |
| @@ -71,8 +71,8 @@ | |||
| 71 | 71 | ||
| 72 | #define BEISCSI_SGLIST_ELEMENTS 30 | 72 | #define BEISCSI_SGLIST_ELEMENTS 30 |
| 73 | 73 | ||
| 74 | #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ | 74 | #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ |
| 75 | #define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ | 75 | #define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */ |
| 76 | #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ | 76 | #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ |
| 77 | 77 | ||
| 78 | #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ | 78 | #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ |
| @@ -427,6 +427,7 @@ struct beiscsi_hba { | |||
| 427 | struct mgmt_session_info boot_sess; | 427 | struct mgmt_session_info boot_sess; |
| 428 | struct invalidate_command_table inv_tbl[128]; | 428 | struct invalidate_command_table inv_tbl[128]; |
| 429 | 429 | ||
| 430 | struct be_aic_obj aic_obj[MAX_CPUS]; | ||
| 430 | unsigned int attr_log_enable; | 431 | unsigned int attr_log_enable; |
| 431 | int (*iotask_fn)(struct iscsi_task *, | 432 | int (*iotask_fn)(struct iscsi_task *, |
| 432 | struct scatterlist *sg, | 433 | struct scatterlist *sg, |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 088bdf752cfa..6045aa78986a 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
| @@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba) | |||
| 155 | } | 155 | } |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, | ||
| 159 | struct be_set_eqd *set_eqd, int num) | ||
| 160 | { | ||
| 161 | struct be_ctrl_info *ctrl = &phba->ctrl; | ||
| 162 | struct be_mcc_wrb *wrb; | ||
| 163 | struct be_cmd_req_modify_eq_delay *req; | ||
| 164 | unsigned int tag = 0; | ||
| 165 | int i; | ||
| 166 | |||
| 167 | spin_lock(&ctrl->mbox_lock); | ||
| 168 | tag = alloc_mcc_tag(phba); | ||
| 169 | if (!tag) { | ||
| 170 | spin_unlock(&ctrl->mbox_lock); | ||
| 171 | return tag; | ||
| 172 | } | ||
| 173 | |||
| 174 | wrb = wrb_from_mccq(phba); | ||
| 175 | req = embedded_payload(wrb); | ||
| 176 | |||
| 177 | wrb->tag0 |= tag; | ||
| 178 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | ||
| 179 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
| 180 | OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); | ||
| 181 | |||
| 182 | req->num_eq = cpu_to_le32(num); | ||
| 183 | for (i = 0; i < num; i++) { | ||
| 184 | req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); | ||
| 185 | req->delay[i].phase = 0; | ||
| 186 | req->delay[i].delay_multiplier = | ||
| 187 | cpu_to_le32(set_eqd[i].delay_multiplier); | ||
| 188 | } | ||
| 189 | |||
| 190 | be_mcc_notify(phba); | ||
| 191 | spin_unlock(&ctrl->mbox_lock); | ||
| 192 | return tag; | ||
| 193 | } | ||
| 194 | |||
| 158 | /** | 195 | /** |
| 159 | * mgmt_reopen_session()- Reopen a session based on reopen_type | 196 | * mgmt_reopen_session()- Reopen a session based on reopen_type |
| 160 | * @phba: Device priv structure instance | 197 | * @phba: Device priv structure instance |
| @@ -447,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, | |||
| 447 | struct be_dma_mem *nonemb_cmd) | 484 | struct be_dma_mem *nonemb_cmd) |
| 448 | { | 485 | { |
| 449 | struct be_cmd_resp_hdr *resp; | 486 | struct be_cmd_resp_hdr *resp; |
| 450 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | 487 | struct be_mcc_wrb *wrb; |
| 451 | struct be_sge *mcc_sge = nonembedded_sgl(wrb); | 488 | struct be_sge *mcc_sge; |
| 452 | unsigned int tag = 0; | 489 | unsigned int tag = 0; |
| 453 | struct iscsi_bsg_request *bsg_req = job->request; | 490 | struct iscsi_bsg_request *bsg_req = job->request; |
| 454 | struct be_bsg_vendor_cmd *req = nonemb_cmd->va; | 491 | struct be_bsg_vendor_cmd *req = nonemb_cmd->va; |
| @@ -465,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, | |||
| 465 | req->sector = sector; | 502 | req->sector = sector; |
| 466 | req->offset = offset; | 503 | req->offset = offset; |
| 467 | spin_lock(&ctrl->mbox_lock); | 504 | spin_lock(&ctrl->mbox_lock); |
| 468 | memset(wrb, 0, sizeof(*wrb)); | ||
| 469 | 505 | ||
| 470 | switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { | 506 | switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { |
| 471 | case BEISCSI_WRITE_FLASH: | 507 | case BEISCSI_WRITE_FLASH: |
| @@ -495,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, | |||
| 495 | return tag; | 531 | return tag; |
| 496 | } | 532 | } |
| 497 | 533 | ||
| 534 | wrb = wrb_from_mccq(phba); | ||
| 535 | mcc_sge = nonembedded_sgl(wrb); | ||
| 498 | be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, | 536 | be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, |
| 499 | job->request_payload.sg_cnt); | 537 | job->request_payload.sg_cnt); |
| 500 | mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | 538 | mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); |
| @@ -525,7 +563,6 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num) | |||
| 525 | int status = 0; | 563 | int status = 0; |
| 526 | 564 | ||
| 527 | spin_lock(&ctrl->mbox_lock); | 565 | spin_lock(&ctrl->mbox_lock); |
| 528 | memset(wrb, 0, sizeof(*wrb)); | ||
| 529 | 566 | ||
| 530 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 567 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
| 531 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | 568 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, |
| @@ -675,7 +712,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 675 | struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; | 712 | struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; |
| 676 | struct be_ctrl_info *ctrl = &phba->ctrl; | 713 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 677 | struct be_mcc_wrb *wrb; | 714 | struct be_mcc_wrb *wrb; |
| 678 | struct tcp_connect_and_offload_in *req; | 715 | struct tcp_connect_and_offload_in_v1 *req; |
| 679 | unsigned short def_hdr_id; | 716 | unsigned short def_hdr_id; |
| 680 | unsigned short def_data_id; | 717 | unsigned short def_data_id; |
| 681 | struct phys_addr template_address = { 0, 0 }; | 718 | struct phys_addr template_address = { 0, 0 }; |
| @@ -702,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 702 | return tag; | 739 | return tag; |
| 703 | } | 740 | } |
| 704 | wrb = wrb_from_mccq(phba); | 741 | wrb = wrb_from_mccq(phba); |
| 705 | memset(wrb, 0, sizeof(*wrb)); | ||
| 706 | sge = nonembedded_sgl(wrb); | 742 | sge = nonembedded_sgl(wrb); |
| 707 | 743 | ||
| 708 | req = nonemb_cmd->va; | 744 | req = nonemb_cmd->va; |
| 709 | memset(req, 0, sizeof(*req)); | 745 | memset(req, 0, sizeof(*req)); |
| 710 | wrb->tag0 |= tag; | 746 | wrb->tag0 |= tag; |
| 711 | 747 | ||
| 712 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); | 748 | be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); |
| 713 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | 749 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, |
| 714 | OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, | 750 | OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, |
| 715 | sizeof(*req)); | 751 | nonemb_cmd->size); |
| 716 | if (dst_addr->sa_family == PF_INET) { | 752 | if (dst_addr->sa_family == PF_INET) { |
| 717 | __be32 s_addr = daddr_in->sin_addr.s_addr; | 753 | __be32 s_addr = daddr_in->sin_addr.s_addr; |
| 718 | req->ip_address.ip_type = BE2_IPV4; | 754 | req->ip_address.ip_type = BE2_IPV4; |
| @@ -758,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
| 758 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | 794 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); |
| 759 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | 795 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); |
| 760 | sge->len = cpu_to_le32(nonemb_cmd->size); | 796 | sge->len = cpu_to_le32(nonemb_cmd->size); |
| 797 | |||
| 798 | if (!is_chip_be2_be3r(phba)) { | ||
| 799 | req->hdr.version = MBX_CMD_VER1; | ||
| 800 | req->tcp_window_size = 0; | ||
| 801 | req->tcp_window_scale_count = 2; | ||
| 802 | } | ||
| 803 | |||
| 761 | be_mcc_notify(phba); | 804 | be_mcc_notify(phba); |
| 762 | spin_unlock(&ctrl->mbox_lock); | 805 | spin_unlock(&ctrl->mbox_lock); |
| 763 | return tag; | 806 | return tag; |
| @@ -804,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, | |||
| 804 | int resp_buf_len) | 847 | int resp_buf_len) |
| 805 | { | 848 | { |
| 806 | struct be_ctrl_info *ctrl = &phba->ctrl; | 849 | struct be_ctrl_info *ctrl = &phba->ctrl; |
| 807 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | 850 | struct be_mcc_wrb *wrb; |
| 808 | struct be_sge *sge; | 851 | struct be_sge *sge; |
| 809 | unsigned int tag; | 852 | unsigned int tag; |
| 810 | int rc = 0; | 853 | int rc = 0; |
| @@ -816,7 +859,8 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, | |||
| 816 | rc = -ENOMEM; | 859 | rc = -ENOMEM; |
| 817 | goto free_cmd; | 860 | goto free_cmd; |
| 818 | } | 861 | } |
| 819 | memset(wrb, 0, sizeof(*wrb)); | 862 | |
| 863 | wrb = wrb_from_mccq(phba); | ||
| 820 | wrb->tag0 |= tag; | 864 | wrb->tag0 |= tag; |
| 821 | sge = nonembedded_sgl(wrb); | 865 | sge = nonembedded_sgl(wrb); |
| 822 | 866 | ||
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 01b8c97284c0..24a8fc577477 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h | |||
| @@ -335,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, | |||
| 335 | void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, | 335 | void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, |
| 336 | struct wrb_handle *pwrb_handle); | 336 | struct wrb_handle *pwrb_handle); |
| 337 | void beiscsi_ue_detect(struct beiscsi_hba *phba); | 337 | void beiscsi_ue_detect(struct beiscsi_hba *phba); |
| 338 | int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, | ||
| 339 | struct be_set_eqd *, int num); | ||
| 338 | 340 | ||
| 339 | #endif | 341 | #endif |
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index cc0fbcdc5192..7593b7c1d336 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c | |||
| @@ -507,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) | |||
| 507 | struct bfad_vport_s *vport; | 507 | struct bfad_vport_s *vport; |
| 508 | int rc; | 508 | int rc; |
| 509 | 509 | ||
| 510 | vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); | 510 | vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC); |
| 511 | if (!vport) { | 511 | if (!vport) { |
| 512 | bfa_trc(bfad, 0); | 512 | bfa_trc(bfad, 0); |
| 513 | return; | 513 | return; |
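The single-line bfad.c change switches the PBC vport allocation from GFP_KERNEL to GFP_ATOMIC, presumably because bfa_fcb_pbc_vport_create() can be reached from a context that is not allowed to sleep, and GFP_KERNEL allocations may block. The general rule, sketched with a hypothetical structure:

#include <linux/slab.h>

struct example_vport { int id; };

/* Process context with nothing held that forbids sleeping: GFP_KERNEL. */
static struct example_vport *alloc_vport_sleepable(void)
{
        return kzalloc(sizeof(struct example_vport), GFP_KERNEL);
}

/* Interrupt context or under a spinlock: the allocator must not sleep,
 * so GFP_ATOMIC is required and the caller must tolerate failure.
 */
static struct example_vport *alloc_vport_atomic(void)
{
        return kzalloc(sizeof(struct example_vport), GFP_ATOMIC);
}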
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 46a37657307f..512aed3ae4f1 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
| @@ -1966,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) | |||
| 1966 | { | 1966 | { |
| 1967 | int i; | 1967 | int i; |
| 1968 | int segment_count; | 1968 | int segment_count; |
| 1969 | int hash_table_size; | ||
| 1970 | u32 *pbl; | 1969 | u32 *pbl; |
| 1971 | 1970 | ||
| 1972 | segment_count = hba->hash_tbl_segment_count; | 1971 | if (hba->hash_tbl_segments) { |
| 1973 | hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * | ||
| 1974 | sizeof(struct fcoe_hash_table_entry); | ||
| 1975 | 1972 | ||
| 1976 | pbl = hba->hash_tbl_pbl; | 1973 | pbl = hba->hash_tbl_pbl; |
| 1977 | for (i = 0; i < segment_count; ++i) { | 1974 | if (pbl) { |
| 1978 | dma_addr_t dma_address; | 1975 | segment_count = hba->hash_tbl_segment_count; |
| 1976 | for (i = 0; i < segment_count; ++i) { | ||
| 1977 | dma_addr_t dma_address; | ||
| 1979 | 1978 | ||
| 1980 | dma_address = le32_to_cpu(*pbl); | 1979 | dma_address = le32_to_cpu(*pbl); |
| 1981 | ++pbl; | 1980 | ++pbl; |
| 1982 | dma_address += ((u64)le32_to_cpu(*pbl)) << 32; | 1981 | dma_address += ((u64)le32_to_cpu(*pbl)) << 32; |
| 1983 | ++pbl; | 1982 | ++pbl; |
| 1984 | dma_free_coherent(&hba->pcidev->dev, | 1983 | dma_free_coherent(&hba->pcidev->dev, |
| 1985 | BNX2FC_HASH_TBL_CHUNK_SIZE, | 1984 | BNX2FC_HASH_TBL_CHUNK_SIZE, |
| 1986 | hba->hash_tbl_segments[i], | 1985 | hba->hash_tbl_segments[i], |
| 1987 | dma_address); | 1986 | dma_address); |
| 1987 | } | ||
| 1988 | } | ||
| 1988 | 1989 | ||
| 1990 | kfree(hba->hash_tbl_segments); | ||
| 1991 | hba->hash_tbl_segments = NULL; | ||
| 1989 | } | 1992 | } |
| 1990 | 1993 | ||
| 1991 | if (hba->hash_tbl_pbl) { | 1994 | if (hba->hash_tbl_pbl) { |
| @@ -2023,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) | |||
| 2023 | dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); | 2026 | dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); |
| 2024 | if (!dma_segment_array) { | 2027 | if (!dma_segment_array) { |
| 2025 | printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); | 2028 | printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); |
| 2026 | return -ENOMEM; | 2029 | goto cleanup_ht; |
| 2027 | } | 2030 | } |
| 2028 | 2031 | ||
| 2029 | for (i = 0; i < segment_count; ++i) { | 2032 | for (i = 0; i < segment_count; ++i) { |
| @@ -2034,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) | |||
| 2034 | GFP_KERNEL); | 2037 | GFP_KERNEL); |
| 2035 | if (!hba->hash_tbl_segments[i]) { | 2038 | if (!hba->hash_tbl_segments[i]) { |
| 2036 | printk(KERN_ERR PFX "hash segment alloc failed\n"); | 2039 | printk(KERN_ERR PFX "hash segment alloc failed\n"); |
| 2037 | while (--i >= 0) { | 2040 | goto cleanup_dma; |
| 2038 | dma_free_coherent(&hba->pcidev->dev, | ||
| 2039 | BNX2FC_HASH_TBL_CHUNK_SIZE, | ||
| 2040 | hba->hash_tbl_segments[i], | ||
| 2041 | dma_segment_array[i]); | ||
| 2042 | hba->hash_tbl_segments[i] = NULL; | ||
| 2043 | } | ||
| 2044 | kfree(dma_segment_array); | ||
| 2045 | return -ENOMEM; | ||
| 2046 | } | 2041 | } |
| 2047 | memset(hba->hash_tbl_segments[i], 0, | 2042 | memset(hba->hash_tbl_segments[i], 0, |
| 2048 | BNX2FC_HASH_TBL_CHUNK_SIZE); | 2043 | BNX2FC_HASH_TBL_CHUNK_SIZE); |
| @@ -2054,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) | |||
| 2054 | GFP_KERNEL); | 2049 | GFP_KERNEL); |
| 2055 | if (!hba->hash_tbl_pbl) { | 2050 | if (!hba->hash_tbl_pbl) { |
| 2056 | printk(KERN_ERR PFX "hash table pbl alloc failed\n"); | 2051 | printk(KERN_ERR PFX "hash table pbl alloc failed\n"); |
| 2057 | kfree(dma_segment_array); | 2052 | goto cleanup_dma; |
| 2058 | return -ENOMEM; | ||
| 2059 | } | 2053 | } |
| 2060 | memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); | 2054 | memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); |
| 2061 | 2055 | ||
| @@ -2080,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) | |||
| 2080 | } | 2074 | } |
| 2081 | kfree(dma_segment_array); | 2075 | kfree(dma_segment_array); |
| 2082 | return 0; | 2076 | return 0; |
| 2077 | |||
| 2078 | cleanup_dma: | ||
| 2079 | for (i = 0; i < segment_count; ++i) { | ||
| 2080 | if (hba->hash_tbl_segments[i]) | ||
| 2081 | dma_free_coherent(&hba->pcidev->dev, | ||
| 2082 | BNX2FC_HASH_TBL_CHUNK_SIZE, | ||
| 2083 | hba->hash_tbl_segments[i], | ||
| 2084 | dma_segment_array[i]); | ||
| 2085 | } | ||
| 2086 | |||
| 2087 | kfree(dma_segment_array); | ||
| 2088 | |||
| 2089 | cleanup_ht: | ||
| 2090 | kfree(hba->hash_tbl_segments); | ||
| 2091 | hba->hash_tbl_segments = NULL; | ||
| 2092 | return -ENOMEM; | ||
| 2083 | } | 2093 | } |
| 2084 | 2094 | ||
| 2085 | /** | 2095 | /** |
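The bnx2fc_allocate_hash_table() hunks replace the open-coded unwind loop with cleanup_dma/cleanup_ht labels so every failure path funnels through one teardown, and bnx2fc_free_hash_table() now checks hash_tbl_segments and hash_tbl_pbl before touching them so it can cope with a partially built table. The shape of that idiom, reduced to a hypothetical example:

#include <linux/errno.h>
#include <linux/slab.h>

#define EXAMPLE_CHUNK_SIZE 4096

struct example_table {
        void **segs;
};

static int example_alloc(struct example_table *tbl, int n)
{
        int i;

        tbl->segs = kcalloc(n, sizeof(*tbl->segs), GFP_KERNEL);
        if (!tbl->segs)
                return -ENOMEM;

        for (i = 0; i < n; i++) {
                tbl->segs[i] = kzalloc(EXAMPLE_CHUNK_SIZE, GFP_KERNEL);
                if (!tbl->segs[i])
                        goto cleanup;   /* unwind only what was allocated */
        }
        return 0;

cleanup:
        while (--i >= 0)
                kfree(tbl->segs[i]);
        kfree(tbl->segs);
        tbl->segs = NULL;
        return -ENOMEM;
}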
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index eb29fe7eaf49..0a667fe05006 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c | |||
| @@ -3,8 +3,6 @@ | |||
| 3 | #define PSEUDO_DMA | 3 | #define PSEUDO_DMA |
| 4 | #define DONT_USE_INTR | 4 | #define DONT_USE_INTR |
| 5 | #define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ | 5 | #define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ |
| 6 | #define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\ | ||
| 7 | NDEBUG_SELECTION+NDEBUG_ARBITRATION) | ||
| 8 | #define DMA_WORKS_RIGHT | 6 | #define DMA_WORKS_RIGHT |
| 9 | 7 | ||
| 10 | 8 | ||
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index f37f3e3dd5d5..6504a195c874 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c | |||
| @@ -390,7 +390,7 @@ static int esas2r_probe(struct pci_dev *pcid, | |||
| 390 | esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), | 390 | esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), |
| 391 | "pci_enable_device() OK"); | 391 | "pci_enable_device() OK"); |
| 392 | esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), | 392 | esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), |
| 393 | "after pci_device_enable() enable_cnt: %d", | 393 | "after pci_enable_device() enable_cnt: %d", |
| 394 | pcid->enable_cnt.counter); | 394 | pcid->enable_cnt.counter); |
| 395 | 395 | ||
| 396 | host = scsi_host_alloc(&driver_template, host_alloc_size); | 396 | host = scsi_host_alloc(&driver_template, host_alloc_size); |
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 528d43b7b569..1d3521e13d77 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
| @@ -39,14 +39,15 @@ | |||
| 39 | 39 | ||
| 40 | #define DRV_NAME "fnic" | 40 | #define DRV_NAME "fnic" |
| 41 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" | 41 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" |
| 42 | #define DRV_VERSION "1.5.0.45" | 42 | #define DRV_VERSION "1.6.0.10" |
| 43 | #define PFX DRV_NAME ": " | 43 | #define PFX DRV_NAME ": " |
| 44 | #define DFX DRV_NAME "%d: " | 44 | #define DFX DRV_NAME "%d: " |
| 45 | 45 | ||
| 46 | #define DESC_CLEAN_LOW_WATERMARK 8 | 46 | #define DESC_CLEAN_LOW_WATERMARK 8 |
| 47 | #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ | 47 | #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ |
| 48 | #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ | 48 | #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ |
| 49 | #define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ | 49 | #define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */ |
| 50 | #define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ | ||
| 50 | #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ | 51 | #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ |
| 51 | #define FNIC_DFLT_QUEUE_DEPTH 32 | 52 | #define FNIC_DFLT_QUEUE_DEPTH 32 |
| 52 | #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ | 53 | #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ |
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index b6073f875761..2c613bdea78f 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c | |||
| @@ -25,6 +25,21 @@ static struct dentry *fnic_trace_debugfs_file; | |||
| 25 | static struct dentry *fnic_trace_enable; | 25 | static struct dentry *fnic_trace_enable; |
| 26 | static struct dentry *fnic_stats_debugfs_root; | 26 | static struct dentry *fnic_stats_debugfs_root; |
| 27 | 27 | ||
| 28 | static struct dentry *fnic_fc_trace_debugfs_file; | ||
| 29 | static struct dentry *fnic_fc_rdata_trace_debugfs_file; | ||
| 30 | static struct dentry *fnic_fc_trace_enable; | ||
| 31 | static struct dentry *fnic_fc_trace_clear; | ||
| 32 | |||
| 33 | struct fc_trace_flag_type { | ||
| 34 | u8 fc_row_file; | ||
| 35 | u8 fc_normal_file; | ||
| 36 | u8 fnic_trace; | ||
| 37 | u8 fc_trace; | ||
| 38 | u8 fc_clear; | ||
| 39 | }; | ||
| 40 | |||
| 41 | static struct fc_trace_flag_type *fc_trc_flag; | ||
| 42 | |||
| 28 | /* | 43 | /* |
| 29 | * fnic_debugfs_init - Initialize debugfs for fnic debug logging | 44 | * fnic_debugfs_init - Initialize debugfs for fnic debug logging |
| 30 | * | 45 | * |
| @@ -56,6 +71,18 @@ int fnic_debugfs_init(void) | |||
| 56 | return rc; | 71 | return rc; |
| 57 | } | 72 | } |
| 58 | 73 | ||
| 74 | /* Allocate memory to structure */ | ||
| 75 | fc_trc_flag = (struct fc_trace_flag_type *) | ||
| 76 | vmalloc(sizeof(struct fc_trace_flag_type)); | ||
| 77 | |||
| 78 | if (fc_trc_flag) { | ||
| 79 | fc_trc_flag->fc_row_file = 0; | ||
| 80 | fc_trc_flag->fc_normal_file = 1; | ||
| 81 | fc_trc_flag->fnic_trace = 2; | ||
| 82 | fc_trc_flag->fc_trace = 3; | ||
| 83 | fc_trc_flag->fc_clear = 4; | ||
| 84 | } | ||
| 85 | |||
| 59 | rc = 0; | 86 | rc = 0; |
| 60 | return rc; | 87 | return rc; |
| 61 | } | 88 | } |
| @@ -74,15 +101,19 @@ void fnic_debugfs_terminate(void) | |||
| 74 | 101 | ||
| 75 | debugfs_remove(fnic_trace_debugfs_root); | 102 | debugfs_remove(fnic_trace_debugfs_root); |
| 76 | fnic_trace_debugfs_root = NULL; | 103 | fnic_trace_debugfs_root = NULL; |
| 104 | |||
| 105 | if (fc_trc_flag) | ||
| 106 | vfree(fc_trc_flag); | ||
| 77 | } | 107 | } |
| 78 | 108 | ||
| 79 | /* | 109 | /* |
| 80 | * fnic_trace_ctrl_open - Open the trace_enable file | 110 | * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace |
| 111 | * Or Open fc_trace_enable file for fc_trace | ||
| 81 | * @inode: The inode pointer. | 112 | * @inode: The inode pointer. |
| 82 | * @file: The file pointer to attach the trace enable/disable flag. | 113 | * @file: The file pointer to attach the trace enable/disable flag. |
| 83 | * | 114 | * |
| 84 | * Description: | 115 | * Description: |
| 85 | * This routine opens a debugsfs file trace_enable. | 116 | * This routine opens a debugfs file trace_enable or fc_trace_enable. |
| 86 | * | 117 | * |
| 87 | * Returns: | 118 | * Returns: |
| 88 | * This function returns zero if successful. | 119 | * This function returns zero if successful. |
| @@ -94,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp) | |||
| 94 | } | 125 | } |
| 95 | 126 | ||
| 96 | /* | 127 | /* |
| 97 | * fnic_trace_ctrl_read - Read a trace_enable debugfs file | 128 | * fnic_trace_ctrl_read - |
| 129 | * Read trace_enable, fc_trace_enable | ||
| 130 | * or fc_trace_clear debugfs file | ||
| 98 | * @filp: The file pointer to read from. | 131 | * @filp: The file pointer to read from. |
| 99 | * @ubuf: The buffer to copy the data to. | 132 | * @ubuf: The buffer to copy the data to. |
| 100 | * @cnt: The number of bytes to read. | 133 | * @cnt: The number of bytes to read. |
| 101 | * @ppos: The position in the file to start reading from. | 134 | * @ppos: The position in the file to start reading from. |
| 102 | * | 135 | * |
| 103 | * Description: | 136 | * Description: |
| 104 | * This routine reads value of variable fnic_tracing_enabled | 137 | * This routine reads value of variable fnic_tracing_enabled or |
| 105 | * and stores into local @buf. It will start reading file at @ppos and | 138 | * fnic_fc_tracing_enabled or fnic_fc_trace_cleared |
| 139 | * and stores into local @buf. | ||
| 140 | * It will start reading file at @ppos and | ||
| 106 | * copy up to @cnt of data to @ubuf from @buf. | 141 | * copy up to @cnt of data to @ubuf from @buf. |
| 107 | * | 142 | * |
| 108 | * Returns: | 143 | * Returns: |
| @@ -114,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp, | |||
| 114 | { | 149 | { |
| 115 | char buf[64]; | 150 | char buf[64]; |
| 116 | int len; | 151 | int len; |
| 117 | len = sprintf(buf, "%u\n", fnic_tracing_enabled); | 152 | u8 *trace_type; |
| 153 | len = 0; | ||
| 154 | trace_type = (u8 *)filp->private_data; | ||
| 155 | if (*trace_type == fc_trc_flag->fnic_trace) | ||
| 156 | len = sprintf(buf, "%u\n", fnic_tracing_enabled); | ||
| 157 | else if (*trace_type == fc_trc_flag->fc_trace) | ||
| 158 | len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled); | ||
| 159 | else if (*trace_type == fc_trc_flag->fc_clear) | ||
| 160 | len = sprintf(buf, "%u\n", fnic_fc_trace_cleared); | ||
| 161 | else | ||
| 162 | pr_err("fnic: Cannot read to any debugfs file\n"); | ||
| 118 | 163 | ||
| 119 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); | 164 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); |
| 120 | } | 165 | } |
| 121 | 166 | ||
| 122 | /* | 167 | /* |
| 123 | * fnic_trace_ctrl_write - Write to trace_enable debugfs file | 168 | * fnic_trace_ctrl_write - |
| 169 | * Write to trace_enable, fc_trace_enable or | ||
| 170 | * fc_trace_clear debugfs file | ||
| 124 | * @filp: The file pointer to write from. | 171 | * @filp: The file pointer to write from. |
| 125 | * @ubuf: The buffer to copy the data from. | 172 | * @ubuf: The buffer to copy the data from. |
| 126 | * @cnt: The number of bytes to write. | 173 | * @cnt: The number of bytes to write. |
| @@ -128,7 +175,8 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp, | |||
| 128 | * | 175 | * |
| 129 | * Description: | 176 | * Description: |
| 130 | * This routine writes data from user buffer @ubuf to buffer @buf and | 177 | * This routine writes data from user buffer @ubuf to buffer @buf and |
| 131 | * sets fnic_tracing_enabled value as per user input. | 178 | * sets fnic_tracing_enabled, fnic_fc_tracing_enabled or fnic_fc_trace_cleared |
| 179 | * value as per user input. | ||
| 132 | * | 180 | * |
| 133 | * Returns: | 181 | * Returns: |
| 134 | * This function returns the amount of data that was written. | 182 | * This function returns the amount of data that was written. |
| @@ -140,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp, | |||
| 140 | char buf[64]; | 188 | char buf[64]; |
| 141 | unsigned long val; | 189 | unsigned long val; |
| 142 | int ret; | 190 | int ret; |
| 191 | u8 *trace_type; | ||
| 192 | trace_type = (u8 *)filp->private_data; | ||
| 143 | 193 | ||
| 144 | if (cnt >= sizeof(buf)) | 194 | if (cnt >= sizeof(buf)) |
| 145 | return -EINVAL; | 195 | return -EINVAL; |
| @@ -153,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp, | |||
| 153 | if (ret < 0) | 203 | if (ret < 0) |
| 154 | return ret; | 204 | return ret; |
| 155 | 205 | ||
| 156 | fnic_tracing_enabled = val; | 206 | if (*trace_type == fc_trc_flag->fnic_trace) |
| 207 | fnic_tracing_enabled = val; | ||
| 208 | else if (*trace_type == fc_trc_flag->fc_trace) | ||
| 209 | fnic_fc_tracing_enabled = val; | ||
| 210 | else if (*trace_type == fc_trc_flag->fc_clear) | ||
| 211 | fnic_fc_trace_cleared = val; | ||
| 212 | else | ||
| 213 | pr_err("fnic: cannot write to any debufs file\n"); | ||
| 214 | |||
| 157 | (*ppos)++; | 215 | (*ppos)++; |
| 158 | 216 | ||
| 159 | return cnt; | 217 | return cnt; |
| 160 | } | 218 | } |
| 161 | 219 | ||
| 220 | static const struct file_operations fnic_trace_ctrl_fops = { | ||
| 221 | .owner = THIS_MODULE, | ||
| 222 | .open = fnic_trace_ctrl_open, | ||
| 223 | .read = fnic_trace_ctrl_read, | ||
| 224 | .write = fnic_trace_ctrl_write, | ||
| 225 | }; | ||
| 226 | |||
| 162 | /* | 227 | /* |
| 163 | * fnic_trace_debugfs_open - Open the fnic trace log | 228 | * fnic_trace_debugfs_open - Open the fnic trace log |
| 164 | * @inode: The inode pointer | 229 | * @inode: The inode pointer |
| @@ -178,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode, | |||
| 178 | struct file *file) | 243 | struct file *file) |
| 179 | { | 244 | { |
| 180 | fnic_dbgfs_t *fnic_dbg_prt; | 245 | fnic_dbgfs_t *fnic_dbg_prt; |
| 246 | u8 *rdata_ptr; | ||
| 247 | rdata_ptr = (u8 *)inode->i_private; | ||
| 181 | fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); | 248 | fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); |
| 182 | if (!fnic_dbg_prt) | 249 | if (!fnic_dbg_prt) |
| 183 | return -ENOMEM; | 250 | return -ENOMEM; |
| 184 | 251 | ||
| 185 | fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE))); | 252 | if (*rdata_ptr == fc_trc_flag->fnic_trace) { |
| 186 | if (!fnic_dbg_prt->buffer) { | 253 | fnic_dbg_prt->buffer = vmalloc(3 * |
| 187 | kfree(fnic_dbg_prt); | 254 | (trace_max_pages * PAGE_SIZE)); |
| 188 | return -ENOMEM; | 255 | if (!fnic_dbg_prt->buffer) { |
| 256 | kfree(fnic_dbg_prt); | ||
| 257 | return -ENOMEM; | ||
| 258 | } | ||
| 259 | memset((void *)fnic_dbg_prt->buffer, 0, | ||
| 260 | 3 * (trace_max_pages * PAGE_SIZE)); | ||
| 261 | fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); | ||
| 262 | } else { | ||
| 263 | fnic_dbg_prt->buffer = | ||
| 264 | vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); | ||
| 265 | if (!fnic_dbg_prt->buffer) { | ||
| 266 | kfree(fnic_dbg_prt); | ||
| 267 | return -ENOMEM; | ||
| 268 | } | ||
| 269 | memset((void *)fnic_dbg_prt->buffer, 0, | ||
| 270 | 3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); | ||
| 271 | fnic_dbg_prt->buffer_len = | ||
| 272 | fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr); | ||
| 189 | } | 273 | } |
| 190 | memset((void *)fnic_dbg_prt->buffer, 0, | ||
| 191 | (3*(trace_max_pages * PAGE_SIZE))); | ||
| 192 | fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); | ||
| 193 | file->private_data = fnic_dbg_prt; | 274 | file->private_data = fnic_dbg_prt; |
| 275 | |||
| 194 | return 0; | 276 | return 0; |
| 195 | } | 277 | } |
| 196 | 278 | ||
| @@ -272,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode, | |||
| 272 | return 0; | 354 | return 0; |
| 273 | } | 355 | } |
| 274 | 356 | ||
| 275 | static const struct file_operations fnic_trace_ctrl_fops = { | ||
| 276 | .owner = THIS_MODULE, | ||
| 277 | .open = fnic_trace_ctrl_open, | ||
| 278 | .read = fnic_trace_ctrl_read, | ||
| 279 | .write = fnic_trace_ctrl_write, | ||
| 280 | }; | ||
| 281 | |||
| 282 | static const struct file_operations fnic_trace_debugfs_fops = { | 357 | static const struct file_operations fnic_trace_debugfs_fops = { |
| 283 | .owner = THIS_MODULE, | 358 | .owner = THIS_MODULE, |
| 284 | .open = fnic_trace_debugfs_open, | 359 | .open = fnic_trace_debugfs_open, |
| @@ -306,9 +381,10 @@ int fnic_trace_debugfs_init(void) | |||
| 306 | return rc; | 381 | return rc; |
| 307 | } | 382 | } |
| 308 | fnic_trace_enable = debugfs_create_file("tracing_enable", | 383 | fnic_trace_enable = debugfs_create_file("tracing_enable", |
| 309 | S_IFREG|S_IRUGO|S_IWUSR, | 384 | S_IFREG|S_IRUGO|S_IWUSR, |
| 310 | fnic_trace_debugfs_root, | 385 | fnic_trace_debugfs_root, |
| 311 | NULL, &fnic_trace_ctrl_fops); | 386 | &(fc_trc_flag->fnic_trace), |
| 387 | &fnic_trace_ctrl_fops); | ||
| 312 | 388 | ||
| 313 | if (!fnic_trace_enable) { | 389 | if (!fnic_trace_enable) { |
| 314 | printk(KERN_DEBUG | 390 | printk(KERN_DEBUG |
| @@ -317,10 +393,10 @@ int fnic_trace_debugfs_init(void) | |||
| 317 | } | 393 | } |
| 318 | 394 | ||
| 319 | fnic_trace_debugfs_file = debugfs_create_file("trace", | 395 | fnic_trace_debugfs_file = debugfs_create_file("trace", |
| 320 | S_IFREG|S_IRUGO|S_IWUSR, | 396 | S_IFREG|S_IRUGO|S_IWUSR, |
| 321 | fnic_trace_debugfs_root, | 397 | fnic_trace_debugfs_root, |
| 322 | NULL, | 398 | &(fc_trc_flag->fnic_trace), |
| 323 | &fnic_trace_debugfs_fops); | 399 | &fnic_trace_debugfs_fops); |
| 324 | 400 | ||
| 325 | if (!fnic_trace_debugfs_file) { | 401 | if (!fnic_trace_debugfs_file) { |
| 326 | printk(KERN_DEBUG | 402 | printk(KERN_DEBUG |
| @@ -340,14 +416,104 @@ int fnic_trace_debugfs_init(void) | |||
| 340 | */ | 416 | */ |
| 341 | void fnic_trace_debugfs_terminate(void) | 417 | void fnic_trace_debugfs_terminate(void) |
| 342 | { | 418 | { |
| 343 | if (fnic_trace_debugfs_file) { | 419 | debugfs_remove(fnic_trace_debugfs_file); |
| 344 | debugfs_remove(fnic_trace_debugfs_file); | 420 | fnic_trace_debugfs_file = NULL; |
| 345 | fnic_trace_debugfs_file = NULL; | 421 | |
| 422 | debugfs_remove(fnic_trace_enable); | ||
| 423 | fnic_trace_enable = NULL; | ||
| 424 | } | ||
| 425 | |||
| 426 | /* | ||
| 427 | * fnic_fc_trace_debugfs_init - | ||
| 428 | * Initialize debugfs for fnic control frame trace logging | ||
| 429 | * | ||
| 430 | * Description: | ||
| 431 | * When Debugfs is configured this routine sets up the fnic_fc debugfs | ||
| 432 | * file system. If not already created, this routine will create the | ||
| 433 | * file fc_trace to log fnic fc trace buffer output into debugfs and | ||
| 434 | * it will also create the file fc_trace_enable to control enable/disable of | ||
| 435 | * trace logging into trace buffer. | ||
| 436 | */ | ||
| 437 | |||
| 438 | int fnic_fc_trace_debugfs_init(void) | ||
| 439 | { | ||
| 440 | int rc = -1; | ||
| 441 | |||
| 442 | if (!fnic_trace_debugfs_root) { | ||
| 443 | pr_err("fnic:Debugfs root directory doesn't exist\n"); | ||
| 444 | return rc; | ||
| 445 | } | ||
| 446 | |||
| 447 | fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable", | ||
| 448 | S_IFREG|S_IRUGO|S_IWUSR, | ||
| 449 | fnic_trace_debugfs_root, | ||
| 450 | &(fc_trc_flag->fc_trace), | ||
| 451 | &fnic_trace_ctrl_fops); | ||
| 452 | |||
| 453 | if (!fnic_fc_trace_enable) { | ||
| 454 | pr_err("fnic: Failed create fc_trace_enable file\n"); | ||
| 455 | return rc; | ||
| 456 | } | ||
| 457 | |||
| 458 | fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear", | ||
| 459 | S_IFREG|S_IRUGO|S_IWUSR, | ||
| 460 | fnic_trace_debugfs_root, | ||
| 461 | &(fc_trc_flag->fc_clear), | ||
| 462 | &fnic_trace_ctrl_fops); | ||
| 463 | |||
| 464 | if (!fnic_fc_trace_clear) { | ||
| 465 | pr_err("fnic: Failed to create fc_trace_enable file\n"); | ||
| 466 | return rc; | ||
| 467 | } | ||
| 468 | |||
| 469 | fnic_fc_rdata_trace_debugfs_file = | ||
| 470 | debugfs_create_file("fc_trace_rdata", | ||
| 471 | S_IFREG|S_IRUGO|S_IWUSR, | ||
| 472 | fnic_trace_debugfs_root, | ||
| 473 | &(fc_trc_flag->fc_normal_file), | ||
| 474 | &fnic_trace_debugfs_fops); | ||
| 475 | |||
| 476 | if (!fnic_fc_rdata_trace_debugfs_file) { | ||
| 477 | pr_err("fnic: Failed create fc_rdata_trace file\n"); | ||
| 478 | return rc; | ||
| 346 | } | 479 | } |
| 347 | if (fnic_trace_enable) { | 480 | |
| 348 | debugfs_remove(fnic_trace_enable); | 481 | fnic_fc_trace_debugfs_file = |
| 349 | fnic_trace_enable = NULL; | 482 | debugfs_create_file("fc_trace", |
| 483 | S_IFREG|S_IRUGO|S_IWUSR, | ||
| 484 | fnic_trace_debugfs_root, | ||
| 485 | &(fc_trc_flag->fc_row_file), | ||
| 486 | &fnic_trace_debugfs_fops); | ||
| 487 | |||
| 488 | if (!fnic_fc_trace_debugfs_file) { | ||
| 489 | pr_err("fnic: Failed to create fc_trace file\n"); | ||
| 490 | return rc; | ||
| 350 | } | 491 | } |
| 492 | rc = 0; | ||
| 493 | return rc; | ||
| 494 | } | ||
| 495 | |||
| 496 | /* | ||
| 497 | * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure | ||
| 498 | * | ||
| 499 | * Description: | ||
| 500 | * When Debugfs is configured this routine removes debugfs file system | ||
| 501 | * elements that are specific to fnic_fc trace logging. | ||
| 502 | */ | ||
| 503 | |||
| 504 | void fnic_fc_trace_debugfs_terminate(void) | ||
| 505 | { | ||
| 506 | debugfs_remove(fnic_fc_trace_debugfs_file); | ||
| 507 | fnic_fc_trace_debugfs_file = NULL; | ||
| 508 | |||
| 509 | debugfs_remove(fnic_fc_rdata_trace_debugfs_file); | ||
| 510 | fnic_fc_rdata_trace_debugfs_file = NULL; | ||
| 511 | |||
| 512 | debugfs_remove(fnic_fc_trace_enable); | ||
| 513 | fnic_fc_trace_enable = NULL; | ||
| 514 | |||
| 515 | debugfs_remove(fnic_fc_trace_clear); | ||
| 516 | fnic_fc_trace_clear = NULL; | ||
| 351 | } | 517 | } |
| 352 | 518 | ||
| 353 | /* | 519 | /* |
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 1671325aec7f..1b948f633fc5 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c | |||
| @@ -66,19 +66,35 @@ void fnic_handle_link(struct work_struct *work) | |||
| 66 | fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); | 66 | fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); |
| 67 | 67 | ||
| 68 | if (old_link_status == fnic->link_status) { | 68 | if (old_link_status == fnic->link_status) { |
| 69 | if (!fnic->link_status) | 69 | if (!fnic->link_status) { |
| 70 | /* DOWN -> DOWN */ | 70 | /* DOWN -> DOWN */ |
| 71 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 71 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 72 | else { | 72 | fnic_fc_trace_set_data(fnic->lport->host->host_no, |
| 73 | FNIC_FC_LE, "Link Status: DOWN->DOWN", | ||
| 74 | strlen("Link Status: DOWN->DOWN")); | ||
| 75 | } else { | ||
| 73 | if (old_link_down_cnt != fnic->link_down_cnt) { | 76 | if (old_link_down_cnt != fnic->link_down_cnt) { |
| 74 | /* UP -> DOWN -> UP */ | 77 | /* UP -> DOWN -> UP */ |
| 75 | fnic->lport->host_stats.link_failure_count++; | 78 | fnic->lport->host_stats.link_failure_count++; |
| 76 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 79 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 80 | fnic_fc_trace_set_data( | ||
| 81 | fnic->lport->host->host_no, | ||
| 82 | FNIC_FC_LE, | ||
| 83 | "Link Status:UP_DOWN_UP", | ||
| 84 | strlen("Link_Status:UP_DOWN_UP") | ||
| 85 | ); | ||
| 77 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 86 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, |
| 78 | "link down\n"); | 87 | "link down\n"); |
| 79 | fcoe_ctlr_link_down(&fnic->ctlr); | 88 | fcoe_ctlr_link_down(&fnic->ctlr); |
| 80 | if (fnic->config.flags & VFCF_FIP_CAPABLE) { | 89 | if (fnic->config.flags & VFCF_FIP_CAPABLE) { |
| 81 | /* start FCoE VLAN discovery */ | 90 | /* start FCoE VLAN discovery */ |
| 91 | fnic_fc_trace_set_data( | ||
| 92 | fnic->lport->host->host_no, | ||
| 93 | FNIC_FC_LE, | ||
| 94 | "Link Status: UP_DOWN_UP_VLAN", | ||
| 95 | strlen( | ||
| 96 | "Link Status: UP_DOWN_UP_VLAN") | ||
| 97 | ); | ||
| 82 | fnic_fcoe_send_vlan_req(fnic); | 98 | fnic_fcoe_send_vlan_req(fnic); |
| 83 | return; | 99 | return; |
| 84 | } | 100 | } |
| @@ -88,22 +104,36 @@ void fnic_handle_link(struct work_struct *work) | |||
| 88 | } else | 104 | } else |
| 89 | /* UP -> UP */ | 105 | /* UP -> UP */ |
| 90 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 106 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 107 | fnic_fc_trace_set_data( | ||
| 108 | fnic->lport->host->host_no, FNIC_FC_LE, | ||
| 109 | "Link Status: UP_UP", | ||
| 110 | strlen("Link Status: UP_UP")); | ||
| 91 | } | 111 | } |
| 92 | } else if (fnic->link_status) { | 112 | } else if (fnic->link_status) { |
| 93 | /* DOWN -> UP */ | 113 | /* DOWN -> UP */ |
| 94 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 114 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 95 | if (fnic->config.flags & VFCF_FIP_CAPABLE) { | 115 | if (fnic->config.flags & VFCF_FIP_CAPABLE) { |
| 96 | /* start FCoE VLAN discovery */ | 116 | /* start FCoE VLAN discovery */ |
| 117 | fnic_fc_trace_set_data( | ||
| 118 | fnic->lport->host->host_no, | ||
| 119 | FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", | ||
| 120 | strlen("Link Status: DOWN_UP_VLAN")); | ||
| 97 | fnic_fcoe_send_vlan_req(fnic); | 121 | fnic_fcoe_send_vlan_req(fnic); |
| 98 | return; | 122 | return; |
| 99 | } | 123 | } |
| 100 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); | 124 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); |
| 125 | fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, | ||
| 126 | "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); | ||
| 101 | fcoe_ctlr_link_up(&fnic->ctlr); | 127 | fcoe_ctlr_link_up(&fnic->ctlr); |
| 102 | } else { | 128 | } else { |
| 103 | /* UP -> DOWN */ | 129 | /* UP -> DOWN */ |
| 104 | fnic->lport->host_stats.link_failure_count++; | 130 | fnic->lport->host_stats.link_failure_count++; |
| 105 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 131 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 106 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); | 132 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); |
| 133 | fnic_fc_trace_set_data( | ||
| 134 | fnic->lport->host->host_no, FNIC_FC_LE, | ||
| 135 | "Link Status: UP_DOWN", | ||
| 136 | strlen("Link Status: UP_DOWN")); | ||
| 107 | fcoe_ctlr_link_down(&fnic->ctlr); | 137 | fcoe_ctlr_link_down(&fnic->ctlr); |
| 108 | } | 138 | } |
| 109 | 139 | ||
| @@ -267,11 +297,6 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, | |||
| 267 | 297 | ||
| 268 | if (desc->fip_dtype == FIP_DT_FLOGI) { | 298 | if (desc->fip_dtype == FIP_DT_FLOGI) { |
| 269 | 299 | ||
| 270 | shost_printk(KERN_DEBUG, lport->host, | ||
| 271 | " FIP TYPE FLOGI: fab name:%llx " | ||
| 272 | "vfid:%d map:%x\n", | ||
| 273 | fip->sel_fcf->fabric_name, fip->sel_fcf->vfid, | ||
| 274 | fip->sel_fcf->fc_map); | ||
| 275 | if (dlen < sizeof(*els) + sizeof(*fh) + 1) | 300 | if (dlen < sizeof(*els) + sizeof(*fh) + 1) |
| 276 | return 0; | 301 | return 0; |
| 277 | 302 | ||
| @@ -616,6 +641,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) | |||
| 616 | "using UCSM\n"); | 641 | "using UCSM\n"); |
| 617 | goto drop; | 642 | goto drop; |
| 618 | } | 643 | } |
| 644 | if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, | ||
| 645 | FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { | ||
| 646 | printk(KERN_ERR "fnic ctlr frame trace error!!!"); | ||
| 647 | } | ||
| 619 | skb_queue_tail(&fnic->fip_frame_queue, skb); | 648 | skb_queue_tail(&fnic->fip_frame_queue, skb); |
| 620 | queue_work(fnic_fip_queue, &fnic->fip_frame_work); | 649 | queue_work(fnic_fip_queue, &fnic->fip_frame_work); |
| 621 | return 1; /* let caller know packet was used */ | 650 | return 1; /* let caller know packet was used */ |
| @@ -844,6 +873,10 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | |||
| 844 | } | 873 | } |
| 845 | fr_dev(fp) = fnic->lport; | 874 | fr_dev(fp) = fnic->lport; |
| 846 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 875 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
| 876 | if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, | ||
| 877 | (char *)skb->data, skb->len)) != 0) { | ||
| 878 | printk(KERN_ERR "fnic ctlr frame trace error!!!"); | ||
| 879 | } | ||
| 847 | 880 | ||
| 848 | skb_queue_tail(&fnic->frame_queue, skb); | 881 | skb_queue_tail(&fnic->frame_queue, skb); |
| 849 | queue_work(fnic_event_queue, &fnic->frame_work); | 882 | queue_work(fnic_event_queue, &fnic->frame_work); |
| @@ -951,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
| 951 | vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); | 984 | vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); |
| 952 | vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; | 985 | vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; |
| 953 | vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); | 986 | vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); |
| 987 | if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, | ||
| 988 | FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { | ||
| 989 | printk(KERN_ERR "fnic ctlr frame trace error!!!"); | ||
| 990 | } | ||
| 991 | } else { | ||
| 992 | if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, | ||
| 993 | FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { | ||
| 994 | printk(KERN_ERR "fnic ctlr frame trace error!!!"); | ||
| 995 | } | ||
| 954 | } | 996 | } |
| 955 | 997 | ||
| 956 | pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | 998 | pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
| @@ -1023,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | |||
| 1023 | 1065 | ||
| 1024 | pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); | 1066 | pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); |
| 1025 | 1067 | ||
| 1068 | if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, | ||
| 1069 | (char *)eth_hdr, tot_len)) != 0) { | ||
| 1070 | printk(KERN_ERR "fnic ctlr frame trace error!!!"); | ||
| 1071 | } | ||
| 1072 | |||
| 1026 | spin_lock_irqsave(&fnic->wq_lock[0], flags); | 1073 | spin_lock_irqsave(&fnic->wq_lock[0], flags); |
| 1027 | 1074 | ||
| 1028 | if (!vnic_wq_desc_avail(wq)) { | 1075 | if (!vnic_wq_desc_avail(wq)) { |
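The fnic_fcs.c hunks above log a control-frame trace record for every link transition (frame type FNIC_FC_LE with a short descriptive string as payload) and for every frame sent or received, OR-ing 0x80 into FNIC_FC_SEND/FNIC_FC_RECV when the buffer still carries the Ethernet/FCoE encapsulation. A small standalone sketch of roughly the link-event labelling follows; it ignores the FIP/VLAN variants and is an illustration, not the driver code.

#include <stdio.h>

/* Illustrative mapping from link-state transition to the label recorded
 * above; "bounced" stands for old_link_down_cnt != fnic->link_down_cnt.
 * The real handler also emits *_VLAN variants when FIP is in use. */
static const char *link_event_label(int old_up, int new_up, int bounced)
{
	if (old_up == new_up) {
		if (!new_up)
			return "Link Status: DOWN->DOWN";
		return bounced ? "Link Status:UP_DOWN_UP" : "Link Status: UP_UP";
	}
	return new_up ? "Link Status: DOWN_UP" : "Link Status: UP_DOWN";
}

int main(void)
{
	printf("%s\n", link_event_label(1, 1, 1));	/* UP -> DOWN -> UP */
	printf("%s\n", link_event_label(1, 0, 0));	/* UP -> DOWN */
	printf("%s\n", link_event_label(0, 1, 0));	/* DOWN -> UP */
	return 0;
}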
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 33e4ec2bfe73..8c56fdc3a456 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
| @@ -74,6 +74,11 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); | |||
| 74 | MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " | 74 | MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " |
| 75 | "for fnic trace buffer"); | 75 | "for fnic trace buffer"); |
| 76 | 76 | ||
| 77 | unsigned int fnic_fc_trace_max_pages = 64; | ||
| 78 | module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR); | ||
| 79 | MODULE_PARM_DESC(fnic_fc_trace_max_pages, | ||
| 80 | "Total allocated memory pages for fc trace buffer"); | ||
| 81 | |||
| 77 | static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; | 82 | static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; |
| 78 | module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); | 83 | module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); |
| 79 | MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); | 84 | MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); |
| @@ -111,7 +116,7 @@ static struct scsi_host_template fnic_host_template = { | |||
| 111 | .change_queue_type = fc_change_queue_type, | 116 | .change_queue_type = fc_change_queue_type, |
| 112 | .this_id = -1, | 117 | .this_id = -1, |
| 113 | .cmd_per_lun = 3, | 118 | .cmd_per_lun = 3, |
| 114 | .can_queue = FNIC_MAX_IO_REQ, | 119 | .can_queue = FNIC_DFLT_IO_REQ, |
| 115 | .use_clustering = ENABLE_CLUSTERING, | 120 | .use_clustering = ENABLE_CLUSTERING, |
| 116 | .sg_tablesize = FNIC_MAX_SG_DESC_CNT, | 121 | .sg_tablesize = FNIC_MAX_SG_DESC_CNT, |
| 117 | .max_sectors = 0xffff, | 122 | .max_sectors = 0xffff, |
| @@ -773,6 +778,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 773 | shost_printk(KERN_INFO, fnic->lport->host, | 778 | shost_printk(KERN_INFO, fnic->lport->host, |
| 774 | "firmware uses non-FIP mode\n"); | 779 | "firmware uses non-FIP mode\n"); |
| 775 | fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); | 780 | fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); |
| 781 | fnic->ctlr.state = FIP_ST_NON_FIP; | ||
| 776 | } | 782 | } |
| 777 | fnic->state = FNIC_IN_FC_MODE; | 783 | fnic->state = FNIC_IN_FC_MODE; |
| 778 | 784 | ||
| @@ -1033,11 +1039,20 @@ static int __init fnic_init_module(void) | |||
| 1033 | /* Allocate memory for trace buffer */ | 1039 | /* Allocate memory for trace buffer */ |
| 1034 | err = fnic_trace_buf_init(); | 1040 | err = fnic_trace_buf_init(); |
| 1035 | if (err < 0) { | 1041 | if (err < 0) { |
| 1036 | printk(KERN_ERR PFX "Trace buffer initialization Failed " | 1042 | printk(KERN_ERR PFX |
| 1037 | "Fnic Tracing utility is disabled\n"); | 1043 | "Trace buffer initialization Failed. " |
| 1044 | "Fnic Tracing utility is disabled\n"); | ||
| 1038 | fnic_trace_free(); | 1045 | fnic_trace_free(); |
| 1039 | } | 1046 | } |
| 1040 | 1047 | ||
| 1048 | /* Allocate memory for fc trace buffer */ | ||
| 1049 | err = fnic_fc_trace_init(); | ||
| 1050 | if (err < 0) { | ||
| 1051 | printk(KERN_ERR PFX "FC trace buffer initialization Failed " | ||
| 1052 | "FC frame tracing utility is disabled\n"); | ||
| 1053 | fnic_fc_trace_free(); | ||
| 1054 | } | ||
| 1055 | |||
| 1041 | /* Create a cache for allocation of default size sgls */ | 1056 | /* Create a cache for allocation of default size sgls */ |
| 1042 | len = sizeof(struct fnic_dflt_sgl_list); | 1057 | len = sizeof(struct fnic_dflt_sgl_list); |
| 1043 | fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create | 1058 | fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create |
| @@ -1118,6 +1133,7 @@ err_create_fnic_sgl_slab_max: | |||
| 1118 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); | 1133 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); |
| 1119 | err_create_fnic_sgl_slab_dflt: | 1134 | err_create_fnic_sgl_slab_dflt: |
| 1120 | fnic_trace_free(); | 1135 | fnic_trace_free(); |
| 1136 | fnic_fc_trace_free(); | ||
| 1121 | fnic_debugfs_terminate(); | 1137 | fnic_debugfs_terminate(); |
| 1122 | return err; | 1138 | return err; |
| 1123 | } | 1139 | } |
| @@ -1135,6 +1151,7 @@ static void __exit fnic_cleanup_module(void) | |||
| 1135 | kmem_cache_destroy(fnic_io_req_cache); | 1151 | kmem_cache_destroy(fnic_io_req_cache); |
| 1136 | fc_release_transport(fnic_fc_transport); | 1152 | fc_release_transport(fnic_fc_transport); |
| 1137 | fnic_trace_free(); | 1153 | fnic_trace_free(); |
| 1154 | fnic_fc_trace_free(); | ||
| 1138 | fnic_debugfs_terminate(); | 1155 | fnic_debugfs_terminate(); |
| 1139 | } | 1156 | } |
| 1140 | 1157 | ||
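The new fnic_fc_trace_max_pages parameter above sizes the control-frame ring at module load time; with the usual 4 KiB pages and the 256-byte entries used by the trace code, the default of 64 pages yields the 1024 slots mentioned in the fnic_trace.c comments. A trivial check of that arithmetic (the page size is an assumption here, and this is not driver code):

#include <stdio.h>

/* Assumes 4 KiB pages and the 256-byte trace entry used by the driver. */
int main(void)
{
	unsigned int pages = 64;		/* default fnic_fc_trace_max_pages */
	unsigned int page_size = 4096;		/* typical PAGE_SIZE */
	unsigned int entry = 256;		/* FC_TRC_SIZE_BYTES */

	printf("entries = %u\n", (pages * page_size) / entry);	/* 1024 */
	return 0;
}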
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 0521436d05d6..ea28b5ca4c73 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
| @@ -1312,8 +1312,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) | |||
| 1312 | 1312 | ||
| 1313 | cleanup_scsi_cmd: | 1313 | cleanup_scsi_cmd: |
| 1314 | sc->result = DID_TRANSPORT_DISRUPTED << 16; | 1314 | sc->result = DID_TRANSPORT_DISRUPTED << 16; |
| 1315 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" | 1315 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
| 1316 | " DID_TRANSPORT_DISRUPTED\n"); | 1316 | "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n", |
| 1317 | __func__, (jiffies - start_time)); | ||
| 1317 | 1318 | ||
| 1318 | if (atomic64_read(&fnic->io_cmpl_skip)) | 1319 | if (atomic64_read(&fnic->io_cmpl_skip)) |
| 1319 | atomic64_dec(&fnic->io_cmpl_skip); | 1320 | atomic64_dec(&fnic->io_cmpl_skip); |
| @@ -1733,6 +1734,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
| 1733 | struct fnic_stats *fnic_stats; | 1734 | struct fnic_stats *fnic_stats; |
| 1734 | struct abort_stats *abts_stats; | 1735 | struct abort_stats *abts_stats; |
| 1735 | struct terminate_stats *term_stats; | 1736 | struct terminate_stats *term_stats; |
| 1737 | enum fnic_ioreq_state old_ioreq_state; | ||
| 1736 | int tag; | 1738 | int tag; |
| 1737 | DECLARE_COMPLETION_ONSTACK(tm_done); | 1739 | DECLARE_COMPLETION_ONSTACK(tm_done); |
| 1738 | 1740 | ||
| @@ -1793,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
| 1793 | * the completion won't be done till mid-layer, since abort | 1795 | * the completion won't be done till mid-layer, since abort |
| 1794 | * has already started. | 1796 | * has already started. |
| 1795 | */ | 1797 | */ |
| 1798 | old_ioreq_state = CMD_STATE(sc); | ||
| 1796 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | 1799 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; |
| 1797 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | 1800 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; |
| 1798 | 1801 | ||
| @@ -1816,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
| 1816 | if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, | 1819 | if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, |
| 1817 | fc_lun.scsi_lun, io_req)) { | 1820 | fc_lun.scsi_lun, io_req)) { |
| 1818 | spin_lock_irqsave(io_lock, flags); | 1821 | spin_lock_irqsave(io_lock, flags); |
| 1822 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) | ||
| 1823 | CMD_STATE(sc) = old_ioreq_state; | ||
| 1819 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1824 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
| 1820 | if (io_req) | 1825 | if (io_req) |
| 1821 | io_req->abts_done = NULL; | 1826 | io_req->abts_done = NULL; |
| @@ -1859,12 +1864,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
| 1859 | if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { | 1864 | if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { |
| 1860 | spin_unlock_irqrestore(io_lock, flags); | 1865 | spin_unlock_irqrestore(io_lock, flags); |
| 1861 | if (task_req == FCPIO_ITMF_ABT_TASK) { | 1866 | if (task_req == FCPIO_ITMF_ABT_TASK) { |
| 1862 | FNIC_SCSI_DBG(KERN_INFO, | ||
| 1863 | fnic->lport->host, "Abort Driver Timeout\n"); | ||
| 1864 | atomic64_inc(&abts_stats->abort_drv_timeouts); | 1867 | atomic64_inc(&abts_stats->abort_drv_timeouts); |
| 1865 | } else { | 1868 | } else { |
| 1866 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, | ||
| 1867 | "Terminate Driver Timeout\n"); | ||
| 1868 | atomic64_inc(&term_stats->terminate_drv_timeouts); | 1869 | atomic64_inc(&term_stats->terminate_drv_timeouts); |
| 1869 | } | 1870 | } |
| 1870 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; | 1871 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; |
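The fnic_abort_cmd() change above saves the I/O state before switching it to FNIC_IOREQ_ABTS_PENDING and restores it if queuing the abort to the firmware fails, so a failed attempt no longer leaves the command stranded in the pending state. The save/conditionally-restore idiom in isolation (hypothetical names, not the driver code):

#include <stdio.h>

enum io_state { IO_ACTIVE, IO_ABTS_PENDING, IO_DONE };

/* Sketch of the save/restore-on-failure idiom; queue_abort() is a stand-in. */
static int queue_abort(void) { return -1; }	/* pretend the firmware queue is full */

static void try_abort(enum io_state *state)
{
	enum io_state old_state = *state;	/* remember what we interrupted */

	*state = IO_ABTS_PENDING;
	if (queue_abort() != 0) {
		/* Only roll back if nobody else already moved the state on. */
		if (*state == IO_ABTS_PENDING)
			*state = old_state;
	}
}

int main(void)
{
	enum io_state s = IO_ACTIVE;

	try_abort(&s);
	printf("state=%d\n", s);	/* back to IO_ACTIVE (0) */
	return 0;
}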
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index e002e7187dc0..c77285926827 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
| 21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
| 22 | #include <linux/kallsyms.h> | 22 | #include <linux/kallsyms.h> |
| 23 | #include <linux/time.h> | ||
| 23 | #include "fnic_io.h" | 24 | #include "fnic_io.h" |
| 24 | #include "fnic.h" | 25 | #include "fnic.h" |
| 25 | 26 | ||
| @@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock); | |||
| 32 | static fnic_trace_dbg_t fnic_trace_entries; | 33 | static fnic_trace_dbg_t fnic_trace_entries; |
| 33 | int fnic_tracing_enabled = 1; | 34 | int fnic_tracing_enabled = 1; |
| 34 | 35 | ||
| 36 | /* static char *fnic_fc_ctlr_trace_buf_p; */ | ||
| 37 | |||
| 38 | static int fc_trace_max_entries; | ||
| 39 | static unsigned long fnic_fc_ctlr_trace_buf_p; | ||
| 40 | static fnic_trace_dbg_t fc_trace_entries; | ||
| 41 | int fnic_fc_tracing_enabled = 1; | ||
| 42 | int fnic_fc_trace_cleared = 1; | ||
| 43 | static DEFINE_SPINLOCK(fnic_fc_trace_lock); | ||
| 44 | |||
| 45 | |||
| 35 | /* | 46 | /* |
| 36 | * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information | 47 | * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information |
| 37 | * | 48 | * |
| @@ -428,10 +439,10 @@ int fnic_trace_buf_init(void) | |||
| 428 | } | 439 | } |
| 429 | err = fnic_trace_debugfs_init(); | 440 | err = fnic_trace_debugfs_init(); |
| 430 | if (err < 0) { | 441 | if (err < 0) { |
| 431 | printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n"); | 442 | pr_err("fnic: Failed to initialize debugfs for tracing\n"); |
| 432 | goto err_fnic_trace_debugfs_init; | 443 | goto err_fnic_trace_debugfs_init; |
| 433 | } | 444 | } |
| 434 | printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n"); | 445 | pr_info("fnic: Successfully Initialized Trace Buffer\n"); |
| 435 | return err; | 446 | return err; |
| 436 | err_fnic_trace_debugfs_init: | 447 | err_fnic_trace_debugfs_init: |
| 437 | fnic_trace_free(); | 448 | fnic_trace_free(); |
| @@ -456,3 +467,314 @@ void fnic_trace_free(void) | |||
| 456 | } | 467 | } |
| 457 | printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); | 468 | printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); |
| 458 | } | 469 | } |
| 470 | |||
| 471 | /* | ||
| 472 | * fnic_fc_trace_init - | ||
| 473 | * Initialize trace buffer to log fnic control frames | ||
| 474 | * Description: | ||
| 475 | * Initialize trace buffer data structure by allocating | ||
| 476 | * required memory for trace data as well as for Indexes. | ||
| 477 | * Frame size is 256 bytes and | ||
| 478 | * memory is allocated for 1024 entries of 256 bytes. | ||
| 479 | * Page_offset(Index) is set to the address of trace entry | ||
| 480 | * and page_offset is initialized by adding frame size | ||
| 481 | * to the previous page_offset entry. | ||
| 482 | */ | ||
| 483 | |||
| 484 | int fnic_fc_trace_init(void) | ||
| 485 | { | ||
| 486 | unsigned long fc_trace_buf_head; | ||
| 487 | int err = 0; | ||
| 488 | int i; | ||
| 489 | |||
| 490 | fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ | ||
| 491 | FC_TRC_SIZE_BYTES; | ||
| 492 | fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc( | ||
| 493 | fnic_fc_trace_max_pages * PAGE_SIZE); | ||
| 494 | if (!fnic_fc_ctlr_trace_buf_p) { | ||
| 495 | pr_err("fnic: Failed to allocate memory for " | ||
| 496 | "FC Control Trace Buf\n"); | ||
| 497 | err = -ENOMEM; | ||
| 498 | goto err_fnic_fc_ctlr_trace_buf_init; | ||
| 499 | } | ||
| 500 | |||
| 501 | memset((void *)fnic_fc_ctlr_trace_buf_p, 0, | ||
| 502 | fnic_fc_trace_max_pages * PAGE_SIZE); | ||
| 503 | |||
| 504 | /* Allocate memory for page offset */ | ||
| 505 | fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries * | ||
| 506 | sizeof(unsigned long)); | ||
| 507 | if (!fc_trace_entries.page_offset) { | ||
| 508 | pr_err("fnic:Failed to allocate memory for page_offset\n"); | ||
| 509 | if (fnic_fc_ctlr_trace_buf_p) { | ||
| 510 | pr_err("fnic: Freeing FC Control Trace Buf\n"); | ||
| 511 | vfree((void *)fnic_fc_ctlr_trace_buf_p); | ||
| 512 | fnic_fc_ctlr_trace_buf_p = 0; | ||
| 513 | } | ||
| 514 | err = -ENOMEM; | ||
| 515 | goto err_fnic_fc_ctlr_trace_buf_init; | ||
| 516 | } | ||
| 517 | memset((void *)fc_trace_entries.page_offset, 0, | ||
| 518 | (fc_trace_max_entries * sizeof(unsigned long))); | ||
| 519 | |||
| 520 | fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; | ||
| 521 | fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; | ||
| 522 | |||
| 523 | /* | ||
| 524 | * Set up fc_trace_entries.page_offset field with memory location | ||
| 525 | * for every trace entry | ||
| 526 | */ | ||
| 527 | for (i = 0; i < fc_trace_max_entries; i++) { | ||
| 528 | fc_trace_entries.page_offset[i] = fc_trace_buf_head; | ||
| 529 | fc_trace_buf_head += FC_TRC_SIZE_BYTES; | ||
| 530 | } | ||
| 531 | err = fnic_fc_trace_debugfs_init(); | ||
| 532 | if (err < 0) { | ||
| 533 | pr_err("fnic: Failed to initialize FC_CTLR tracing.\n"); | ||
| 534 | goto err_fnic_fc_ctlr_trace_debugfs_init; | ||
| 535 | } | ||
| 536 | pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n"); | ||
| 537 | return err; | ||
| 538 | |||
| 539 | err_fnic_fc_ctlr_trace_debugfs_init: | ||
| 540 | fnic_fc_trace_free(); | ||
| 541 | err_fnic_fc_ctlr_trace_buf_init: | ||
| 542 | return err; | ||
| 543 | } | ||
| 544 | |||
| 545 | /* | ||
| 546 | * fnic_fc_trace_free - Free memory of fnic_fc trace data structures. | ||
| 547 | */ | ||
| 548 | void fnic_fc_trace_free(void) | ||
| 549 | { | ||
| 550 | fnic_fc_tracing_enabled = 0; | ||
| 551 | fnic_fc_trace_debugfs_terminate(); | ||
| 552 | if (fc_trace_entries.page_offset) { | ||
| 553 | vfree((void *)fc_trace_entries.page_offset); | ||
| 554 | fc_trace_entries.page_offset = NULL; | ||
| 555 | } | ||
| 556 | if (fnic_fc_ctlr_trace_buf_p) { | ||
| 557 | vfree((void *)fnic_fc_ctlr_trace_buf_p); | ||
| 558 | fnic_fc_ctlr_trace_buf_p = 0; | ||
| 559 | } | ||
| 560 | pr_info("fnic:Successfully FC_CTLR Freed Trace Buffer\n"); | ||
| 561 | } | ||
| 562 | |||
| 563 | /* | ||
| 564 | * fnic_fc_trace_set_data: | ||
| 565 | * Maintain rd & wr idx accordingly and set data | ||
| 566 | * Passed parameters: | ||
| 567 | * host_no: host number associated with fnic | ||
| 568 | * frame_type: send_frame, receive_frame or link event | ||
| 569 | * fc_frame: pointer to fc_frame | ||
| 570 | * frame_len: Length of the fc_frame | ||
| 571 | * Description: | ||
| 572 | * This routine will get next available wr_idx and | ||
| 573 | * copy all passed trace data to the buffer pointed by wr_idx | ||
| 574 | * and increment wr_idx. It will also make sure that we don't | ||
| 575 | * overwrite the entry which we are reading and also | ||
| 576 | * wrap around if we reach the maximum entries. | ||
| 577 | * Returned Value: | ||
| 578 | * It will return 0 for success or -1 for failure | ||
| 579 | */ | ||
| 580 | int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, | ||
| 581 | char *frame, u32 fc_trc_frame_len) | ||
| 582 | { | ||
| 583 | unsigned long flags; | ||
| 584 | struct fc_trace_hdr *fc_buf; | ||
| 585 | unsigned long eth_fcoe_hdr_len; | ||
| 586 | char *fc_trace; | ||
| 587 | |||
| 588 | if (fnic_fc_tracing_enabled == 0) | ||
| 589 | return 0; | ||
| 590 | |||
| 591 | spin_lock_irqsave(&fnic_fc_trace_lock, flags); | ||
| 592 | |||
| 593 | if (fnic_fc_trace_cleared == 1) { | ||
| 594 | fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; | ||
| 595 | pr_info("fnic: Reseting the read idx\n"); | ||
| 596 | memset((void *)fnic_fc_ctlr_trace_buf_p, 0, | ||
| 597 | fnic_fc_trace_max_pages * PAGE_SIZE); | ||
| 598 | fnic_fc_trace_cleared = 0; | ||
| 599 | } | ||
| 600 | |||
| 601 | fc_buf = (struct fc_trace_hdr *) | ||
| 602 | fc_trace_entries.page_offset[fc_trace_entries.wr_idx]; | ||
| 603 | |||
| 604 | fc_trace_entries.wr_idx++; | ||
| 605 | |||
| 606 | if (fc_trace_entries.wr_idx >= fc_trace_max_entries) | ||
| 607 | fc_trace_entries.wr_idx = 0; | ||
| 608 | |||
| 609 | if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { | ||
| 610 | fc_trace_entries.rd_idx++; | ||
| 611 | if (fc_trace_entries.rd_idx >= fc_trace_max_entries) | ||
| 612 | fc_trace_entries.rd_idx = 0; | ||
| 613 | } | ||
| 614 | |||
| 615 | fc_buf->time_stamp = CURRENT_TIME; | ||
| 616 | fc_buf->host_no = host_no; | ||
| 617 | fc_buf->frame_type = frame_type; | ||
| 618 | |||
| 619 | fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf); | ||
| 620 | |||
| 621 | /* During the receive path, we do not have the eth hdr or the fcoe hdr | ||
| 622 | * at the trace entry point, so we stuff 0xff just to keep it generic. | ||
| 623 | */ | ||
| 624 | if (frame_type == FNIC_FC_RECV) { | ||
| 625 | eth_fcoe_hdr_len = sizeof(struct ethhdr) + | ||
| 626 | sizeof(struct fcoe_hdr); | ||
| 627 | fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len; | ||
| 628 | memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); | ||
| 629 | /* Copy the rest of data frame */ | ||
| 630 | memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, | ||
| 631 | min_t(u8, fc_trc_frame_len, | ||
| 632 | (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); | ||
| 633 | } else { | ||
| 634 | memcpy((char *)fc_trace, (void *)frame, | ||
| 635 | min_t(u8, fc_trc_frame_len, | ||
| 636 | (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); | ||
| 637 | } | ||
| 638 | |||
| 639 | /* Store the actual received length */ | ||
| 640 | fc_buf->frame_len = fc_trc_frame_len; | ||
| 641 | |||
| 642 | spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); | ||
| 643 | return 0; | ||
| 644 | } | ||
| 645 | |||
| 646 | /* | ||
| 647 | * fnic_fc_trace_get_data: Copy trace buffer to a memory file | ||
| 648 | * Passed parameter: | ||
| 649 | * @fnic_dbgfs_t: pointer to debugfs trace buffer | ||
| 650 | * rdata_flag: 1 => Unformatted file | ||
| 651 | * 0 => formatted file | ||
| 652 | * Description: | ||
| 653 | * This routine will copy the trace data to memory file with | ||
| 654 | * proper formatting and also copy to another memory | ||
| 655 | * file without formatting for further processing. | ||
| 656 | * Return Value: | ||
| 657 | * Number of bytes that were dumped into fnic_dbgfs_t | ||
| 658 | */ | ||
| 659 | |||
| 660 | int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag) | ||
| 661 | { | ||
| 662 | int rd_idx, wr_idx; | ||
| 663 | unsigned long flags; | ||
| 664 | int len = 0, j; | ||
| 665 | struct fc_trace_hdr *tdata; | ||
| 666 | char *fc_trace; | ||
| 667 | |||
| 668 | spin_lock_irqsave(&fnic_fc_trace_lock, flags); | ||
| 669 | if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { | ||
| 670 | spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); | ||
| 671 | pr_info("fnic: Buffer is empty\n"); | ||
| 672 | return 0; | ||
| 673 | } | ||
| 674 | rd_idx = fc_trace_entries.rd_idx; | ||
| 675 | wr_idx = fc_trace_entries.wr_idx; | ||
| 676 | if (rdata_flag == 0) { | ||
| 677 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 678 | (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, | ||
| 679 | "Time Stamp (UTC)\t\t" | ||
| 680 | "Host No: F Type: len: FCoE_FRAME:\n"); | ||
| 681 | } | ||
| 682 | |||
| 683 | while (rd_idx != wr_idx) { | ||
| 684 | tdata = (struct fc_trace_hdr *) | ||
| 685 | fc_trace_entries.page_offset[rd_idx]; | ||
| 686 | if (!tdata) { | ||
| 687 | pr_info("fnic: Rd data is NULL\n"); | ||
| 688 | spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); | ||
| 689 | return 0; | ||
| 690 | } | ||
| 691 | if (rdata_flag == 0) { | ||
| 692 | copy_and_format_trace_data(tdata, | ||
| 693 | fnic_dbgfs_prt, &len, rdata_flag); | ||
| 694 | } else { | ||
| 695 | fc_trace = (char *)tdata; | ||
| 696 | for (j = 0; j < FC_TRC_SIZE_BYTES; j++) { | ||
| 697 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 698 | (fnic_fc_trace_max_pages * PAGE_SIZE * 3) | ||
| 699 | - len, "%02x", fc_trace[j] & 0xff); | ||
| 700 | } /* for loop */ | ||
| 701 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 702 | (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, | ||
| 703 | "\n"); | ||
| 704 | } | ||
| 705 | rd_idx++; | ||
| 706 | if (rd_idx > (fc_trace_max_entries - 1)) | ||
| 707 | rd_idx = 0; | ||
| 708 | } | ||
| 709 | |||
| 710 | spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); | ||
| 711 | return len; | ||
| 712 | } | ||
| 713 | |||
| 714 | /* | ||
| 715 | * copy_and_format_trace_data: Copy formatted data to char * buffer | ||
| 716 | * Passed Parameter: | ||
| 717 | * @fc_trace_hdr_t: pointer to trace data | ||
| 718 | * @fnic_dbgfs_t: pointer to debugfs trace buffer | ||
| 719 | * @orig_len: pointer to len | ||
| 720 | * rdata_flag: 0 => Formatted file, 1 => Unformatted file | ||
| 721 | * Description: | ||
| 722 | * This routine will format and copy the passed trace data | ||
| 723 | * for formatted or unformatted files accordingly. | ||
| 724 | */ | ||
| 725 | |||
| 726 | void copy_and_format_trace_data(struct fc_trace_hdr *tdata, | ||
| 727 | fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len, | ||
| 728 | u8 rdata_flag) | ||
| 729 | { | ||
| 730 | struct tm tm; | ||
| 731 | int j, i = 1, len; | ||
| 732 | char *fc_trace, *fmt; | ||
| 733 | int ethhdr_len = sizeof(struct ethhdr) - 1; | ||
| 734 | int fcoehdr_len = sizeof(struct fcoe_hdr); | ||
| 735 | int fchdr_len = sizeof(struct fc_frame_header); | ||
| 736 | int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3; | ||
| 737 | |||
| 738 | tdata->frame_type = tdata->frame_type & 0x7F; | ||
| 739 | |||
| 740 | len = *orig_len; | ||
| 741 | |||
| 742 | time_to_tm(tdata->time_stamp.tv_sec, 0, &tm); | ||
| 743 | |||
| 744 | fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t"; | ||
| 745 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 746 | (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, | ||
| 747 | fmt, | ||
| 748 | tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900, | ||
| 749 | tm.tm_hour, tm.tm_min, tm.tm_sec, | ||
| 750 | tdata->time_stamp.tv_nsec, tdata->host_no, | ||
| 751 | tdata->frame_type, tdata->frame_len); | ||
| 752 | |||
| 753 | fc_trace = (char *)FC_TRACE_ADDRESS(tdata); | ||
| 754 | |||
| 755 | for (j = 0; j < min_t(u8, tdata->frame_len, | ||
| 756 | (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) { | ||
| 757 | if (tdata->frame_type == FNIC_FC_LE) { | ||
| 758 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 759 | max_size - len, "%c", fc_trace[j]); | ||
| 760 | } else { | ||
| 761 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 762 | max_size - len, "%02x", fc_trace[j] & 0xff); | ||
| 763 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 764 | max_size - len, " "); | ||
| 765 | if (j == ethhdr_len || | ||
| 766 | j == ethhdr_len + fcoehdr_len || | ||
| 767 | j == ethhdr_len + fcoehdr_len + fchdr_len || | ||
| 768 | (i > 3 && j%fchdr_len == 0)) { | ||
| 769 | len += snprintf(fnic_dbgfs_prt->buffer | ||
| 770 | + len, (fnic_fc_trace_max_pages | ||
| 771 | * PAGE_SIZE * 3) - len, | ||
| 772 | "\n\t\t\t\t\t\t\t\t"); | ||
| 773 | i++; | ||
| 774 | } | ||
| 775 | } /* end of else*/ | ||
| 776 | } /* End of for loop*/ | ||
| 777 | len += snprintf(fnic_dbgfs_prt->buffer + len, | ||
| 778 | max_size - len, "\n"); | ||
| 779 | *orig_len = len; | ||
| 780 | } | ||
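fnic_fc_trace_set_data() above implements an overwrite-oldest ring: the writer takes the slot at wr_idx, advances it with wraparound, and if it lands on rd_idx the reader index is pushed forward so the oldest record is dropped rather than blocking the writer. A standalone model of just that index arithmetic (illustrative only, not the driver code):

#include <stdio.h>

#define SLOTS 4

static int wr_idx, rd_idx;

/* Returns the slot to write; the oldest entry is sacrificed when full. */
static int claim_slot(void)
{
	int slot = wr_idx;

	wr_idx = (wr_idx + 1) % SLOTS;
	if (wr_idx == rd_idx)			/* writer caught the reader */
		rd_idx = (rd_idx + 1) % SLOTS;	/* drop the oldest record */
	return slot;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("write #%d -> slot %d (rd_idx now %d)\n",
		       i, claim_slot(), rd_idx);
	return 0;
}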
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h index d412f2ee3c4f..a8aa0578fcb0 100644 --- a/drivers/scsi/fnic/fnic_trace.h +++ b/drivers/scsi/fnic/fnic_trace.h | |||
| @@ -19,6 +19,17 @@ | |||
| 19 | #define __FNIC_TRACE_H__ | 19 | #define __FNIC_TRACE_H__ |
| 20 | 20 | ||
| 21 | #define FNIC_ENTRY_SIZE_BYTES 64 | 21 | #define FNIC_ENTRY_SIZE_BYTES 64 |
| 22 | #define FC_TRC_SIZE_BYTES 256 | ||
| 23 | #define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr) | ||
| 24 | |||
| 25 | /* | ||
| 26 | * First bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type | ||
| 27 | * of frame: 1 => Eth frame, 0 => FC frame | ||
| 28 | */ | ||
| 29 | |||
| 30 | #define FNIC_FC_RECV 0x52 /* Character R */ | ||
| 31 | #define FNIC_FC_SEND 0x54 /* Character T */ | ||
| 32 | #define FNIC_FC_LE 0x4C /* Character L */ | ||
| 22 | 33 | ||
| 23 | extern ssize_t simple_read_from_buffer(void __user *to, | 34 | extern ssize_t simple_read_from_buffer(void __user *to, |
| 24 | size_t count, | 35 | size_t count, |
| @@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages; | |||
| 30 | extern int fnic_tracing_enabled; | 41 | extern int fnic_tracing_enabled; |
| 31 | extern unsigned int trace_max_pages; | 42 | extern unsigned int trace_max_pages; |
| 32 | 43 | ||
| 44 | extern unsigned int fnic_fc_trace_max_pages; | ||
| 45 | extern int fnic_fc_tracing_enabled; | ||
| 46 | extern int fnic_fc_trace_cleared; | ||
| 47 | |||
| 33 | typedef struct fnic_trace_dbg { | 48 | typedef struct fnic_trace_dbg { |
| 34 | int wr_idx; | 49 | int wr_idx; |
| 35 | int rd_idx; | 50 | int rd_idx; |
| @@ -56,6 +71,16 @@ struct fnic_trace_data { | |||
| 56 | 71 | ||
| 57 | typedef struct fnic_trace_data fnic_trace_data_t; | 72 | typedef struct fnic_trace_data fnic_trace_data_t; |
| 58 | 73 | ||
| 74 | struct fc_trace_hdr { | ||
| 75 | struct timespec time_stamp; | ||
| 76 | u32 host_no; | ||
| 77 | u8 frame_type; | ||
| 78 | u8 frame_len; | ||
| 79 | } __attribute__((__packed__)); | ||
| 80 | |||
| 81 | #define FC_TRACE_ADDRESS(a) \ | ||
| 82 | ((unsigned long)(a) + sizeof(struct fc_trace_hdr)) | ||
| 83 | |||
| 59 | #define FNIC_TRACE_ENTRY_SIZE \ | 84 | #define FNIC_TRACE_ENTRY_SIZE \ |
| 60 | (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) | 85 | (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) |
| 61 | 86 | ||
| @@ -88,4 +113,17 @@ int fnic_debugfs_init(void); | |||
| 88 | void fnic_debugfs_terminate(void); | 113 | void fnic_debugfs_terminate(void); |
| 89 | int fnic_trace_debugfs_init(void); | 114 | int fnic_trace_debugfs_init(void); |
| 90 | void fnic_trace_debugfs_terminate(void); | 115 | void fnic_trace_debugfs_terminate(void); |
| 116 | |||
| 117 | /* Fnic FC CTLR Trace related functions */ | ||
| 118 | int fnic_fc_trace_init(void); | ||
| 119 | void fnic_fc_trace_free(void); | ||
| 120 | int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, | ||
| 121 | char *frame, u32 fc_frame_len); | ||
| 122 | int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag); | ||
| 123 | void copy_and_format_trace_data(struct fc_trace_hdr *tdata, | ||
| 124 | fnic_dbgfs_t *fnic_dbgfs_prt, | ||
| 125 | int *len, u8 rdata_flag); | ||
| 126 | int fnic_fc_trace_debugfs_init(void); | ||
| 127 | void fnic_fc_trace_debugfs_terminate(void); | ||
| 128 | |||
| 91 | #endif | 129 | #endif |
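Every 256-byte trace slot defined in fnic_trace.h begins with the packed struct fc_trace_hdr, and FC_TRACE_ADDRESS() simply steps past that header to reach the captured frame bytes, which is why the copy paths above clamp payloads to FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE. A userspace sketch of that layout with stand-in types follows; the timespec and field widths here are approximations for illustration, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Userspace stand-in for the packed trace header; field types only
 * approximate the kernel ones. */
struct my_trace_hdr {
	struct timespec time_stamp;
	uint32_t host_no;
	uint8_t  frame_type;
	uint8_t  frame_len;
} __attribute__((__packed__));

#define MY_TRC_SIZE 256
#define MY_TRACE_ADDRESS(a) ((unsigned long)(a) + sizeof(struct my_trace_hdr))

int main(void)
{
	unsigned char slot[MY_TRC_SIZE];
	char *payload = (char *)MY_TRACE_ADDRESS(slot);

	printf("header %zu bytes, payload room %zu bytes\n",
	       sizeof(struct my_trace_hdr),
	       MY_TRC_SIZE - sizeof(struct my_trace_hdr));
	printf("payload starts at offset %td\n", payload - (char *)slot);
	return 0;
}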
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 7176365e916b..a1bc8ca958e1 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c | |||
| @@ -78,10 +78,6 @@ | |||
| 78 | * | 78 | * |
| 79 | */ | 79 | */ |
| 80 | 80 | ||
| 81 | /* | ||
| 82 | * $Log: generic_NCR5380.c,v $ | ||
| 83 | */ | ||
| 84 | |||
| 85 | /* settings for DTC3181E card with only Mustek scanner attached */ | 81 | /* settings for DTC3181E card with only Mustek scanner attached */ |
| 86 | #define USLEEP | 82 | #define USLEEP |
| 87 | #define USLEEP_POLL 1 | 83 | #define USLEEP_POLL 1 |
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 1bcdb7beb77b..703adf78e0b2 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h | |||
| @@ -25,10 +25,6 @@ | |||
| 25 | * 1+ (800) 334-5454 | 25 | * 1+ (800) 334-5454 |
| 26 | */ | 26 | */ |
| 27 | 27 | ||
| 28 | /* | ||
| 29 | * $Log: generic_NCR5380.h,v $ | ||
| 30 | */ | ||
| 31 | |||
| 32 | #ifndef GENERIC_NCR5380_H | 28 | #ifndef GENERIC_NCR5380_H |
| 33 | #define GENERIC_NCR5380_H | 29 | #define GENERIC_NCR5380_H |
| 34 | 30 | ||
| @@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *); | |||
| 58 | #define CAN_QUEUE 16 | 54 | #define CAN_QUEUE 16 |
| 59 | #endif | 55 | #endif |
| 60 | 56 | ||
| 61 | #ifndef HOSTS_C | ||
| 62 | |||
| 63 | #define __STRVAL(x) #x | 57 | #define __STRVAL(x) #x |
| 64 | #define STRVAL(x) __STRVAL(x) | 58 | #define STRVAL(x) __STRVAL(x) |
| 65 | 59 | ||
| @@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *); | |||
| 131 | #define BOARD_NCR53C400A 2 | 125 | #define BOARD_NCR53C400A 2 |
| 132 | #define BOARD_DTC3181E 3 | 126 | #define BOARD_DTC3181E 3 |
| 133 | 127 | ||
| 134 | #endif /* else def HOSTS_C */ | ||
| 135 | #endif /* ndef ASM */ | 128 | #endif /* ndef ASM */ |
| 136 | #endif /* GENERIC_NCR5380_H */ | 129 | #endif /* GENERIC_NCR5380_H */ |
| 137 | 130 | ||
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9a6e4a2cd072..5858600bfe59 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -115,9 +115,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = { | |||
| 115 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, | 115 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, |
| 116 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, | 116 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, |
| 117 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, | 117 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, |
| 118 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, | ||
| 118 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, | 119 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, |
| 119 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, | 120 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, |
| 120 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, | 121 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, |
| 122 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, | ||
| 123 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, | ||
| 124 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, | ||
| 125 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, | ||
| 126 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, | ||
| 121 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, | 127 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, |
| 122 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, | 128 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, |
| 123 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, | 129 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, |
| @@ -165,9 +171,15 @@ static struct board_type products[] = { | |||
| 165 | {0x21C3103C, "Smart Array", &SA5_access}, | 171 | {0x21C3103C, "Smart Array", &SA5_access}, |
| 166 | {0x21C4103C, "Smart Array", &SA5_access}, | 172 | {0x21C4103C, "Smart Array", &SA5_access}, |
| 167 | {0x21C5103C, "Smart Array", &SA5_access}, | 173 | {0x21C5103C, "Smart Array", &SA5_access}, |
| 174 | {0x21C6103C, "Smart Array", &SA5_access}, | ||
| 168 | {0x21C7103C, "Smart Array", &SA5_access}, | 175 | {0x21C7103C, "Smart Array", &SA5_access}, |
| 169 | {0x21C8103C, "Smart Array", &SA5_access}, | 176 | {0x21C8103C, "Smart Array", &SA5_access}, |
| 170 | {0x21C9103C, "Smart Array", &SA5_access}, | 177 | {0x21C9103C, "Smart Array", &SA5_access}, |
| 178 | {0x21CA103C, "Smart Array", &SA5_access}, | ||
| 179 | {0x21CB103C, "Smart Array", &SA5_access}, | ||
| 180 | {0x21CC103C, "Smart Array", &SA5_access}, | ||
| 181 | {0x21CD103C, "Smart Array", &SA5_access}, | ||
| 182 | {0x21CE103C, "Smart Array", &SA5_access}, | ||
| 171 | {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, | 183 | {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, |
| 172 | {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, | 184 | {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, |
| 173 | {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, | 185 | {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, |
| @@ -2836,6 +2848,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
| 2836 | 2848 | ||
| 2837 | /* Get the list of physical devices */ | 2849 | /* Get the list of physical devices */ |
| 2838 | physicals = kzalloc(reportsize, GFP_KERNEL); | 2850 | physicals = kzalloc(reportsize, GFP_KERNEL); |
| 2851 | if (physicals == NULL) | ||
| 2852 | return 0; | ||
| 2839 | if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, | 2853 | if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, |
| 2840 | reportsize, extended)) { | 2854 | reportsize, extended)) { |
| 2841 | dev_err(&h->pdev->dev, | 2855 | dev_err(&h->pdev->dev, |
| @@ -2963,19 +2977,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, | |||
| 2963 | static int hpsa_hba_mode_enabled(struct ctlr_info *h) | 2977 | static int hpsa_hba_mode_enabled(struct ctlr_info *h) |
| 2964 | { | 2978 | { |
| 2965 | int rc; | 2979 | int rc; |
| 2980 | int hba_mode_enabled; | ||
| 2966 | struct bmic_controller_parameters *ctlr_params; | 2981 | struct bmic_controller_parameters *ctlr_params; |
| 2967 | ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), | 2982 | ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), |
| 2968 | GFP_KERNEL); | 2983 | GFP_KERNEL); |
| 2969 | 2984 | ||
| 2970 | if (!ctlr_params) | 2985 | if (!ctlr_params) |
| 2971 | return 0; | 2986 | return -ENOMEM; |
| 2972 | rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, | 2987 | rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, |
| 2973 | sizeof(struct bmic_controller_parameters)); | 2988 | sizeof(struct bmic_controller_parameters)); |
| 2974 | if (rc != 0) { | 2989 | if (rc) { |
| 2975 | kfree(ctlr_params); | 2990 | kfree(ctlr_params); |
| 2976 | return 0; | 2991 | return rc; |
| 2977 | } | 2992 | } |
| 2978 | return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0; | 2993 | |
| 2994 | hba_mode_enabled = | ||
| 2995 | ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); | ||
| 2996 | kfree(ctlr_params); | ||
| 2997 | return hba_mode_enabled; | ||
| 2979 | } | 2998 | } |
| 2980 | 2999 | ||
| 2981 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | 3000 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
| @@ -3001,7 +3020,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
| 3001 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; | 3020 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; |
| 3002 | int i, n_ext_target_devs, ndevs_to_allocate; | 3021 | int i, n_ext_target_devs, ndevs_to_allocate; |
| 3003 | int raid_ctlr_position; | 3022 | int raid_ctlr_position; |
| 3004 | u8 rescan_hba_mode; | 3023 | int rescan_hba_mode; |
| 3005 | DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); | 3024 | DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); |
| 3006 | 3025 | ||
| 3007 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); | 3026 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); |
| @@ -3016,6 +3035,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
| 3016 | memset(lunzerobits, 0, sizeof(lunzerobits)); | 3035 | memset(lunzerobits, 0, sizeof(lunzerobits)); |
| 3017 | 3036 | ||
| 3018 | rescan_hba_mode = hpsa_hba_mode_enabled(h); | 3037 | rescan_hba_mode = hpsa_hba_mode_enabled(h); |
| 3038 | if (rescan_hba_mode < 0) | ||
| 3039 | goto out; | ||
| 3019 | 3040 | ||
| 3020 | if (!h->hba_mode_enabled && rescan_hba_mode) | 3041 | if (!h->hba_mode_enabled && rescan_hba_mode) |
| 3021 | dev_warn(&h->pdev->dev, "HBA mode enabled\n"); | 3042 | dev_warn(&h->pdev->dev, "HBA mode enabled\n"); |
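The hpsa change lets hpsa_hba_mode_enabled() report a negative errno instead of silently returning 0 on failure, and the caller's local is widened from u8 to int so the new rescan_hba_mode < 0 check can actually observe that error; stored in a u8, -ENOMEM would wrap to a large positive value and the check could never fire. A two-line demonstration of the pitfall (illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int ret = -12;		/* e.g. -ENOMEM from the mode-sense helper */

	uint8_t narrow = ret;	/* old u8 local: value becomes 244 */
	int wide = ret;		/* new int local keeps the sign */

	printf("narrow < 0 ? %d   wide < 0 ? %d\n", narrow < 0, wide < 0);	/* 0 and 1 */
	return 0;
}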
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 44235a27e1b6..1e3cf33a82cf 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
| @@ -90,6 +90,7 @@ struct bmic_controller_parameters { | |||
| 90 | u8 automatic_drive_slamming; | 90 | u8 automatic_drive_slamming; |
| 91 | u8 reserved1; | 91 | u8 reserved1; |
| 92 | u8 nvram_flags; | 92 | u8 nvram_flags; |
| 93 | #define HBA_MODE_ENABLED_FLAG (1 << 3) | ||
| 93 | u8 cache_nvram_flags; | 94 | u8 cache_nvram_flags; |
| 94 | u8 drive_config_flags; | 95 | u8 drive_config_flags; |
| 95 | u16 reserved2; | 96 | u16 reserved2; |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 26dc005bb0f0..ecd7bd304efe 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -1442,9 +1442,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) | |||
| 1442 | conn->task = NULL; | 1442 | conn->task = NULL; |
| 1443 | } | 1443 | } |
| 1444 | /* regular RX path uses back_lock */ | 1444 | /* regular RX path uses back_lock */ |
| 1445 | spin_lock_bh(&conn->session->back_lock); | 1445 | spin_lock(&conn->session->back_lock); |
| 1446 | __iscsi_put_task(task); | 1446 | __iscsi_put_task(task); |
| 1447 | spin_unlock_bh(&conn->session->back_lock); | 1447 | spin_unlock(&conn->session->back_lock); |
| 1448 | return rc; | 1448 | return rc; |
| 1449 | } | 1449 | } |
| 1450 | 1450 | ||
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6bb51f8e3c1b..393662c24df5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
| @@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) | |||
| 265 | return NULL; | 265 | return NULL; |
| 266 | 266 | ||
| 267 | q->hba_index = idx; | 267 | q->hba_index = idx; |
| 268 | |||
| 269 | /* | ||
| 270 | * insert barrier for instruction interlock : data from the hardware | ||
| 271 | * must have the valid bit checked before it can be copied and acted | ||
| 272 | * upon. Given what was seen in lpfc_sli4_cq_get() of speculative | ||
| 273 | * instructions allowing action on content before valid bit checked, | ||
| 274 | * add barrier here as well. May not be needed as "content" is a | ||
| 275 | * single 32-bit entity here (vs multi word structure for cq's). | ||
| 276 | */ | ||
| 277 | mb(); | ||
| 268 | return eqe; | 278 | return eqe; |
| 269 | } | 279 | } |
| 270 | 280 | ||
| @@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) | |||
| 370 | 380 | ||
| 371 | cqe = q->qe[q->hba_index].cqe; | 381 | cqe = q->qe[q->hba_index].cqe; |
| 372 | q->hba_index = idx; | 382 | q->hba_index = idx; |
| 383 | |||
| 384 | /* | ||
| 385 | * insert barrier for instruction interlock : data from the hardware | ||
| 386 | * must have the valid bit checked before it can be copied and acted | ||
| 387 | * upon. Speculative instructions were allowing a bcopy at the start | ||
| 388 | * of lpfc_sli4_fp_handle_wcqe(), which is called immediately | ||
| 389 | * after our return, to copy data before the valid bit check above | ||
| 390 | * was done. As such, some of the copied data was stale. The barrier | ||
| 391 | * ensures the check is before any data is copied. | ||
| 392 | */ | ||
| 393 | mb(); | ||
| 373 | return cqe; | 394 | return cqe; |
| 374 | } | 395 | } |
| 375 | 396 | ||
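Both lpfc hunks enforce the same ordering rule: the valid bit of a hardware-written queue entry must be observed before any of the entry's payload is copied, so a full barrier sits between the check and the consumer. As a rough userspace analogy (C11 acquire/release ordering standing in for the DMA'd valid bit and mb(); this is not lpfc code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct entry {
            int payload;
            atomic_int valid;            /* stands in for the CQE/EQE valid bit */
    };

    static struct entry e;

    static void *producer(void *arg)     /* stands in for the hardware DMA write */
    {
            (void)arg;
            e.payload = 42;
            atomic_store_explicit(&e.valid, 1, memory_order_release);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            pthread_create(&t, NULL, producer, NULL);

            /* Check the valid bit first... */
            while (!atomic_load_explicit(&e.valid, memory_order_acquire))
                    ;
            /* ...and only then copy or act on the payload; without the ordering,
             * the payload read could be satisfied early, which is the staleness
             * the lpfc comment describes. */
            printf("payload = %d\n", e.payload);

            pthread_join(t, NULL);
            return 0;
    }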
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index f5cdc68cd5b6..6a039eb1cbce 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c | |||
| @@ -25,10 +25,6 @@ | |||
| 25 | * 1+ (800) 334-5454 | 25 | * 1+ (800) 334-5454 |
| 26 | */ | 26 | */ |
| 27 | 27 | ||
| 28 | /* | ||
| 29 | * $Log: mac_NCR5380.c,v $ | ||
| 30 | */ | ||
| 31 | |||
| 32 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 33 | #include <linux/stddef.h> | 29 | #include <linux/stddef.h> |
| 34 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
| @@ -58,12 +54,6 @@ | |||
| 58 | 54 | ||
| 59 | #include "NCR5380.h" | 55 | #include "NCR5380.h" |
| 60 | 56 | ||
| 61 | #if 0 | ||
| 62 | #define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION) | ||
| 63 | #else | ||
| 64 | #define NDEBUG (NDEBUG_ABORT) | ||
| 65 | #endif | ||
| 66 | |||
| 67 | #define RESET_BOOT | 57 | #define RESET_BOOT |
| 68 | #define DRIVER_SETUP | 58 | #define DRIVER_SETUP |
| 69 | 59 | ||
diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h index 7dc62fce1c4c..06969b06e54b 100644 --- a/drivers/scsi/mac_scsi.h +++ b/drivers/scsi/mac_scsi.h | |||
| @@ -22,10 +22,6 @@ | |||
| 22 | * 1+ (800) 334-5454 | 22 | * 1+ (800) 334-5454 |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | /* | ||
| 26 | * $Log: cumana_NCR5380.h,v $ | ||
| 27 | */ | ||
| 28 | |||
| 29 | #ifndef MAC_NCR5380_H | 25 | #ifndef MAC_NCR5380_H |
| 30 | #define MAC_NCR5380_H | 26 | #define MAC_NCR5380_H |
| 31 | 27 | ||
| @@ -51,8 +47,6 @@ | |||
| 51 | 47 | ||
| 52 | #include <scsi/scsicam.h> | 48 | #include <scsi/scsicam.h> |
| 53 | 49 | ||
| 54 | #ifndef HOSTS_C | ||
| 55 | |||
| 56 | #define NCR5380_implementation_fields \ | 50 | #define NCR5380_implementation_fields \ |
| 57 | int port, ctrl | 51 | int port, ctrl |
| 58 | 52 | ||
| @@ -75,10 +69,6 @@ | |||
| 75 | #define NCR5380_show_info macscsi_show_info | 69 | #define NCR5380_show_info macscsi_show_info |
| 76 | #define NCR5380_write_info macscsi_write_info | 70 | #define NCR5380_write_info macscsi_write_info |
| 77 | 71 | ||
| 78 | #define BOARD_NORMAL 0 | ||
| 79 | #define BOARD_NCR53C400 1 | ||
| 80 | |||
| 81 | #endif /* ndef HOSTS_C */ | ||
| 82 | #endif /* ndef ASM */ | 72 | #endif /* ndef ASM */ |
| 83 | #endif /* MAC_NCR5380_H */ | 73 | #endif /* MAC_NCR5380_H */ |
| 84 | 74 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d84d02c2aad9..112799b131a9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -3061,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) | |||
| 3061 | u32 cur_state; | 3061 | u32 cur_state; |
| 3062 | u32 abs_state, curr_abs_state; | 3062 | u32 abs_state, curr_abs_state; |
| 3063 | 3063 | ||
| 3064 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; | 3064 | abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); |
| 3065 | fw_state = abs_state & MFI_STATE_MASK; | ||
| 3065 | 3066 | ||
| 3066 | if (fw_state != MFI_STATE_READY) | 3067 | if (fw_state != MFI_STATE_READY) |
| 3067 | printk(KERN_INFO "megasas: Waiting for FW to come to ready" | 3068 | printk(KERN_INFO "megasas: Waiting for FW to come to ready" |
| @@ -3069,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) | |||
| 3069 | 3070 | ||
| 3070 | while (fw_state != MFI_STATE_READY) { | 3071 | while (fw_state != MFI_STATE_READY) { |
| 3071 | 3072 | ||
| 3072 | abs_state = | ||
| 3073 | instance->instancet->read_fw_status_reg(instance->reg_set); | ||
| 3074 | |||
| 3075 | switch (fw_state) { | 3073 | switch (fw_state) { |
| 3076 | 3074 | ||
| 3077 | case MFI_STATE_FAULT: | 3075 | case MFI_STATE_FAULT: |
| @@ -3223,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) | |||
| 3223 | * The cur_state should not last for more than max_wait secs | 3221 | * The cur_state should not last for more than max_wait secs |
| 3224 | */ | 3222 | */ |
| 3225 | for (i = 0; i < (max_wait * 1000); i++) { | 3223 | for (i = 0; i < (max_wait * 1000); i++) { |
| 3226 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & | 3224 | curr_abs_state = instance->instancet-> |
| 3227 | MFI_STATE_MASK ; | 3225 | read_fw_status_reg(instance->reg_set); |
| 3228 | curr_abs_state = | ||
| 3229 | instance->instancet->read_fw_status_reg(instance->reg_set); | ||
| 3230 | 3226 | ||
| 3231 | if (abs_state == curr_abs_state) { | 3227 | if (abs_state == curr_abs_state) { |
| 3232 | msleep(1); | 3228 | msleep(1); |
| @@ -3242,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) | |||
| 3242 | "in %d secs\n", fw_state, max_wait); | 3238 | "in %d secs\n", fw_state, max_wait); |
| 3243 | return -ENODEV; | 3239 | return -ENODEV; |
| 3244 | } | 3240 | } |
| 3241 | |||
| 3242 | abs_state = curr_abs_state; | ||
| 3243 | fw_state = curr_abs_state & MFI_STATE_MASK; | ||
| 3245 | } | 3244 | } |
| 3246 | printk(KERN_INFO "megasas: FW now in Ready state\n"); | 3245 | printk(KERN_INFO "megasas: FW now in Ready state\n"); |
| 3247 | 3246 | ||
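The megaraid_sas hunk restructures the ready-wait loop so the firmware status register is sampled once per pass, with fw_state derived from that same abs_state sample and both carried forward into the next comparison, instead of issuing separate reads that can race against a state change. A condensed standalone model of that loop (simulated register and values, hypothetical names):

    #include <stdio.h>

    #define MFI_STATE_MASK  0xF0000000u
    #define MFI_STATE_READY 0xB0000000u

    static unsigned int fake_reg[] = { 0x40000005, 0x40000005, 0xB0000000 };
    static unsigned int read_fw_status_reg(void)
    {
            static unsigned int i;
            return fake_reg[i < 2 ? i++ : 2];
    }

    int main(void)
    {
            unsigned int abs_state = read_fw_status_reg();
            unsigned int fw_state = abs_state & MFI_STATE_MASK;

            while (fw_state != MFI_STATE_READY) {
                    /* ...kick the firmware as needed for this state... */
                    unsigned int curr_abs_state = read_fw_status_reg(); /* one read per pass */

                    if (abs_state == curr_abs_state) {
                            /* same state as before: keep waiting (msleep(1) in the driver) */
                    }
                    /* carry the sample forward so fw_state and abs_state stay consistent */
                    abs_state = curr_abs_state;
                    fw_state = curr_abs_state & MFI_STATE_MASK;
            }
            printf("FW now in Ready state\n");
            return 0;
    }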
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index bde63f7452bd..8b88118e20e6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
| @@ -1739,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) | |||
| 1739 | list_for_each_entry_safe(chain_req, next, | 1739 | list_for_each_entry_safe(chain_req, next, |
| 1740 | &ioc->scsi_lookup[i].chain_list, tracker_list) { | 1740 | &ioc->scsi_lookup[i].chain_list, tracker_list) { |
| 1741 | list_del_init(&chain_req->tracker_list); | 1741 | list_del_init(&chain_req->tracker_list); |
| 1742 | list_add_tail(&chain_req->tracker_list, | 1742 | list_add(&chain_req->tracker_list, |
| 1743 | &ioc->free_chain_list); | 1743 | &ioc->free_chain_list); |
| 1744 | } | 1744 | } |
| 1745 | } | 1745 | } |
| 1746 | ioc->scsi_lookup[i].cb_idx = 0xFF; | 1746 | ioc->scsi_lookup[i].cb_idx = 0xFF; |
| 1747 | ioc->scsi_lookup[i].scmd = NULL; | 1747 | ioc->scsi_lookup[i].scmd = NULL; |
| 1748 | ioc->scsi_lookup[i].direct_io = 0; | 1748 | ioc->scsi_lookup[i].direct_io = 0; |
| 1749 | list_add_tail(&ioc->scsi_lookup[i].tracker_list, | 1749 | list_add(&ioc->scsi_lookup[i].tracker_list, |
| 1750 | &ioc->free_list); | 1750 | &ioc->free_list); |
| 1751 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | 1751 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
| 1752 | 1752 | ||
| @@ -1764,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) | |||
| 1764 | /* hi-priority */ | 1764 | /* hi-priority */ |
| 1765 | i = smid - ioc->hi_priority_smid; | 1765 | i = smid - ioc->hi_priority_smid; |
| 1766 | ioc->hpr_lookup[i].cb_idx = 0xFF; | 1766 | ioc->hpr_lookup[i].cb_idx = 0xFF; |
| 1767 | list_add_tail(&ioc->hpr_lookup[i].tracker_list, | 1767 | list_add(&ioc->hpr_lookup[i].tracker_list, |
| 1768 | &ioc->hpr_free_list); | 1768 | &ioc->hpr_free_list); |
| 1769 | } else if (smid <= ioc->hba_queue_depth) { | 1769 | } else if (smid <= ioc->hba_queue_depth) { |
| 1770 | /* internal queue */ | 1770 | /* internal queue */ |
| 1771 | i = smid - ioc->internal_smid; | 1771 | i = smid - ioc->internal_smid; |
| 1772 | ioc->internal_lookup[i].cb_idx = 0xFF; | 1772 | ioc->internal_lookup[i].cb_idx = 0xFF; |
| 1773 | list_add_tail(&ioc->internal_lookup[i].tracker_list, | 1773 | list_add(&ioc->internal_lookup[i].tracker_list, |
| 1774 | &ioc->internal_free_list); | 1774 | &ioc->internal_free_list); |
| 1775 | } | 1775 | } |
| 1776 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | 1776 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
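The mpt2sas_base hunk switches the freed request/chain trackers from list_add_tail() to list_add(), i.e. the free lists become LIFO: the most recently released tracker is handed out again first (typically the cache-warm one). The same idea in plain C with a trivial intrusive stack rather than the kernel list API:

    #include <stdio.h>

    struct tracker {
            int smid;
            struct tracker *next;
    };

    static struct tracker *free_head;

    static void free_tracker(struct tracker *t)   /* LIFO: push at the head, like list_add() */
    {
            t->next = free_head;
            free_head = t;
    }

    static struct tracker *get_tracker(void)      /* pop from the head: last freed, first reused */
    {
            struct tracker *t = free_head;
            if (t)
                    free_head = t->next;
            return t;
    }

    int main(void)
    {
            struct tracker a = { 1, NULL }, b = { 2, NULL };

            free_tracker(&a);
            free_tracker(&b);
            printf("reused smid %d first\n", get_tracker()->smid);  /* prints 2 */
            return 0;
    }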
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 1f2ac3a28621..fd3b998c75b1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
| @@ -1065,7 +1065,7 @@ void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | |||
| 1065 | u32 reply); | 1065 | u32 reply); |
| 1066 | int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, | 1066 | int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, |
| 1067 | uint channel, uint id, uint lun, u8 type, u16 smid_task, | 1067 | uint channel, uint id, uint lun, u8 type, u16 smid_task, |
| 1068 | ulong timeout, unsigned long serial_number, enum mutex_type m_type); | 1068 | ulong timeout, enum mutex_type m_type); |
| 1069 | void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); | 1069 | void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); |
| 1070 | void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); | 1070 | void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); |
| 1071 | void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); | 1071 | void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index b7f887c9b0bf..62df8f9d4271 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
| @@ -987,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg, | |||
| 987 | mpt2sas_scsih_issue_tm(ioc, | 987 | mpt2sas_scsih_issue_tm(ioc, |
| 988 | le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, | 988 | le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, |
| 989 | 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, | 989 | 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, |
| 990 | 0, TM_MUTEX_ON); | 990 | TM_MUTEX_ON); |
| 991 | ioc->tm_cmds.status = MPT2_CMD_NOT_USED; | 991 | ioc->tm_cmds.status = MPT2_CMD_NOT_USED; |
| 992 | } else | 992 | } else |
| 993 | mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, | 993 | mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 6fd7d40b2c4d..5055f925d2cd 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
| @@ -2368,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 2368 | * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) | 2368 | * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) |
| 2369 | * @smid_task: smid assigned to the task | 2369 | * @smid_task: smid assigned to the task |
| 2370 | * @timeout: timeout in seconds | 2370 | * @timeout: timeout in seconds |
| 2371 | * @serial_number: the serial_number from scmd | ||
| 2372 | * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF | 2371 | * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF |
| 2373 | * Context: user | 2372 | * Context: user |
| 2374 | * | 2373 | * |
| @@ -2381,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
| 2381 | int | 2380 | int |
| 2382 | mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, | 2381 | mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, |
| 2383 | uint id, uint lun, u8 type, u16 smid_task, ulong timeout, | 2382 | uint id, uint lun, u8 type, u16 smid_task, ulong timeout, |
| 2384 | unsigned long serial_number, enum mutex_type m_type) | 2383 | enum mutex_type m_type) |
| 2385 | { | 2384 | { |
| 2386 | Mpi2SCSITaskManagementRequest_t *mpi_request; | 2385 | Mpi2SCSITaskManagementRequest_t *mpi_request; |
| 2387 | Mpi2SCSITaskManagementReply_t *mpi_reply; | 2386 | Mpi2SCSITaskManagementReply_t *mpi_reply; |
| @@ -2634,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd) | |||
| 2634 | handle = sas_device_priv_data->sas_target->handle; | 2633 | handle = sas_device_priv_data->sas_target->handle; |
| 2635 | r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, | 2634 | r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, |
| 2636 | scmd->device->id, scmd->device->lun, | 2635 | scmd->device->id, scmd->device->lun, |
| 2637 | MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, | 2636 | MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); |
| 2638 | scmd->serial_number, TM_MUTEX_ON); | ||
| 2639 | 2637 | ||
| 2640 | out: | 2638 | out: |
| 2641 | sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", | 2639 | sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", |
| @@ -2696,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) | |||
| 2696 | 2694 | ||
| 2697 | r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, | 2695 | r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, |
| 2698 | scmd->device->id, scmd->device->lun, | 2696 | scmd->device->id, scmd->device->lun, |
| 2699 | MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, | 2697 | MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); |
| 2700 | TM_MUTEX_ON); | ||
| 2701 | 2698 | ||
| 2702 | out: | 2699 | out: |
| 2703 | sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", | 2700 | sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", |
| @@ -2757,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) | |||
| 2757 | 2754 | ||
| 2758 | r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, | 2755 | r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, |
| 2759 | scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, | 2756 | scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, |
| 2760 | 30, 0, TM_MUTEX_ON); | 2757 | 30, TM_MUTEX_ON); |
| 2761 | 2758 | ||
| 2762 | out: | 2759 | out: |
| 2763 | starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", | 2760 | starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", |
| @@ -3953,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | |||
| 3953 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full | 3950 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full |
| 3954 | */ | 3951 | */ |
| 3955 | static int | 3952 | static int |
| 3956 | _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | 3953 | _scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
| 3957 | { | 3954 | { |
| 3958 | struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); | 3955 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); |
| 3959 | struct MPT2SAS_DEVICE *sas_device_priv_data; | 3956 | struct MPT2SAS_DEVICE *sas_device_priv_data; |
| 3960 | struct MPT2SAS_TARGET *sas_target_priv_data; | 3957 | struct MPT2SAS_TARGET *sas_target_priv_data; |
| 3961 | struct _raid_device *raid_device; | 3958 | struct _raid_device *raid_device; |
| @@ -3963,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
| 3963 | u32 mpi_control; | 3960 | u32 mpi_control; |
| 3964 | u16 smid; | 3961 | u16 smid; |
| 3965 | 3962 | ||
| 3966 | scmd->scsi_done = done; | ||
| 3967 | sas_device_priv_data = scmd->device->hostdata; | 3963 | sas_device_priv_data = scmd->device->hostdata; |
| 3968 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 3964 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { |
| 3969 | scmd->result = DID_NO_CONNECT << 16; | 3965 | scmd->result = DID_NO_CONNECT << 16; |
| @@ -4039,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
| 4039 | MPT_TARGET_FLAGS_RAID_COMPONENT) | 4035 | MPT_TARGET_FLAGS_RAID_COMPONENT) |
| 4040 | mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; | 4036 | mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; |
| 4041 | else | 4037 | else |
| 4042 | mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; | 4038 | mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; |
| 4043 | mpi_request->DevHandle = | 4039 | mpi_request->DevHandle = |
| 4044 | cpu_to_le16(sas_device_priv_data->sas_target->handle); | 4040 | cpu_to_le16(sas_device_priv_data->sas_target->handle); |
| 4045 | mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); | 4041 | mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); |
| @@ -4083,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
| 4083 | return SCSI_MLQUEUE_HOST_BUSY; | 4079 | return SCSI_MLQUEUE_HOST_BUSY; |
| 4084 | } | 4080 | } |
| 4085 | 4081 | ||
| 4086 | static DEF_SCSI_QCMD(_scsih_qcmd) | ||
| 4087 | |||
| 4088 | /** | 4082 | /** |
| 4089 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data | 4083 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data |
| 4090 | * @sense_buffer: sense data returned by target | 4084 | * @sense_buffer: sense data returned by target |
| @@ -5880,7 +5874,7 @@ broadcast_aen_retry: | |||
| 5880 | 5874 | ||
| 5881 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | 5875 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
| 5882 | r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, | 5876 | r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, |
| 5883 | MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, | 5877 | MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, |
| 5884 | TM_MUTEX_OFF); | 5878 | TM_MUTEX_OFF); |
| 5885 | if (r == FAILED) { | 5879 | if (r == FAILED) { |
| 5886 | sdev_printk(KERN_WARNING, sdev, | 5880 | sdev_printk(KERN_WARNING, sdev, |
| @@ -5922,7 +5916,7 @@ broadcast_aen_retry: | |||
| 5922 | 5916 | ||
| 5923 | r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, | 5917 | r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, |
| 5924 | sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, | 5918 | sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, |
| 5925 | scmd->serial_number, TM_MUTEX_OFF); | 5919 | TM_MUTEX_OFF); |
| 5926 | if (r == FAILED) { | 5920 | if (r == FAILED) { |
| 5927 | sdev_printk(KERN_WARNING, sdev, | 5921 | sdev_printk(KERN_WARNING, sdev, |
| 5928 | "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " | 5922 | "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 0ebf5d913c80..9b90a6fef706 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h | |||
| @@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); | |||
| 993 | 993 | ||
| 994 | int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, | 994 | int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
| 995 | uint channel, uint id, uint lun, u8 type, u16 smid_task, | 995 | uint channel, uint id, uint lun, u8 type, u16 smid_task, |
| 996 | ulong timeout, unsigned long serial_number, enum mutex_type m_type); | 996 | ulong timeout, enum mutex_type m_type); |
| 997 | void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); | 997 | void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); |
| 998 | void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); | 998 | void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); |
| 999 | void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); | 999 | void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 9b89de14a0a3..ba9cbe598a91 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c | |||
| @@ -980,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, | |||
| 980 | mpt3sas_scsih_issue_tm(ioc, | 980 | mpt3sas_scsih_issue_tm(ioc, |
| 981 | le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, | 981 | le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, |
| 982 | 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, | 982 | 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, |
| 983 | 0, TM_MUTEX_ON); | 983 | TM_MUTEX_ON); |
| 984 | } else | 984 | } else |
| 985 | mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, | 985 | mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, |
| 986 | FORCE_BIG_HAMMER); | 986 | FORCE_BIG_HAMMER); |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index a961fe11b527..18e713db1d32 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
| @@ -2029,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
| 2029 | * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) | 2029 | * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) |
| 2030 | * @smid_task: smid assigned to the task | 2030 | * @smid_task: smid assigned to the task |
| 2031 | * @timeout: timeout in seconds | 2031 | * @timeout: timeout in seconds |
| 2032 | * @serial_number: the serial_number from scmd | ||
| 2033 | * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF | 2032 | * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF |
| 2034 | * Context: user | 2033 | * Context: user |
| 2035 | * | 2034 | * |
| @@ -2042,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
| 2042 | int | 2041 | int |
| 2043 | mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, | 2042 | mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, |
| 2044 | uint id, uint lun, u8 type, u16 smid_task, ulong timeout, | 2043 | uint id, uint lun, u8 type, u16 smid_task, ulong timeout, |
| 2045 | unsigned long serial_number, enum mutex_type m_type) | 2044 | enum mutex_type m_type) |
| 2046 | { | 2045 | { |
| 2047 | Mpi2SCSITaskManagementRequest_t *mpi_request; | 2046 | Mpi2SCSITaskManagementRequest_t *mpi_request; |
| 2048 | Mpi2SCSITaskManagementReply_t *mpi_reply; | 2047 | Mpi2SCSITaskManagementReply_t *mpi_reply; |
| @@ -2293,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd) | |||
| 2293 | handle = sas_device_priv_data->sas_target->handle; | 2292 | handle = sas_device_priv_data->sas_target->handle; |
| 2294 | r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, | 2293 | r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, |
| 2295 | scmd->device->id, scmd->device->lun, | 2294 | scmd->device->id, scmd->device->lun, |
| 2296 | MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, | 2295 | MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); |
| 2297 | scmd->serial_number, TM_MUTEX_ON); | ||
| 2298 | 2296 | ||
| 2299 | out: | 2297 | out: |
| 2300 | sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", | 2298 | sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", |
| @@ -2353,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) | |||
| 2353 | 2351 | ||
| 2354 | r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, | 2352 | r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, |
| 2355 | scmd->device->id, scmd->device->lun, | 2353 | scmd->device->id, scmd->device->lun, |
| 2356 | MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, | 2354 | MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); |
| 2357 | TM_MUTEX_ON); | ||
| 2358 | 2355 | ||
| 2359 | out: | 2356 | out: |
| 2360 | sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", | 2357 | sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", |
| @@ -2414,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) | |||
| 2414 | 2411 | ||
| 2415 | r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, | 2412 | r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, |
| 2416 | scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, | 2413 | scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, |
| 2417 | 30, 0, TM_MUTEX_ON); | 2414 | 30, TM_MUTEX_ON); |
| 2418 | 2415 | ||
| 2419 | out: | 2416 | out: |
| 2420 | starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", | 2417 | starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", |
| @@ -3518,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) | |||
| 3518 | 3515 | ||
| 3519 | 3516 | ||
| 3520 | /** | 3517 | /** |
| 3521 | * _scsih_qcmd_lck - main scsi request entry point | 3518 | * _scsih_qcmd - main scsi request entry point |
| 3522 | * @scmd: pointer to scsi command object | 3519 | * @scmd: pointer to scsi command object |
| 3523 | * @done: function pointer to be invoked on completion | 3520 | * @done: function pointer to be invoked on completion |
| 3524 | * | 3521 | * |
| @@ -3529,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) | |||
| 3529 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full | 3526 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full |
| 3530 | */ | 3527 | */ |
| 3531 | static int | 3528 | static int |
| 3532 | _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | 3529 | _scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
| 3533 | { | 3530 | { |
| 3534 | struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); | 3531 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
| 3535 | struct MPT3SAS_DEVICE *sas_device_priv_data; | 3532 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
| 3536 | struct MPT3SAS_TARGET *sas_target_priv_data; | 3533 | struct MPT3SAS_TARGET *sas_target_priv_data; |
| 3537 | Mpi2SCSIIORequest_t *mpi_request; | 3534 | Mpi2SCSIIORequest_t *mpi_request; |
| @@ -3544,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
| 3544 | scsi_print_command(scmd); | 3541 | scsi_print_command(scmd); |
| 3545 | #endif | 3542 | #endif |
| 3546 | 3543 | ||
| 3547 | scmd->scsi_done = done; | ||
| 3548 | sas_device_priv_data = scmd->device->hostdata; | 3544 | sas_device_priv_data = scmd->device->hostdata; |
| 3549 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 3545 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { |
| 3550 | scmd->result = DID_NO_CONNECT << 16; | 3546 | scmd->result = DID_NO_CONNECT << 16; |
| @@ -3659,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
| 3659 | out: | 3655 | out: |
| 3660 | return SCSI_MLQUEUE_HOST_BUSY; | 3656 | return SCSI_MLQUEUE_HOST_BUSY; |
| 3661 | } | 3657 | } |
| 3662 | static DEF_SCSI_QCMD(_scsih_qcmd) | ||
| 3663 | |||
| 3664 | 3658 | ||
| 3665 | /** | 3659 | /** |
| 3666 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data | 3660 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data |
| @@ -5425,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 5425 | 5419 | ||
| 5426 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | 5420 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
| 5427 | r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, | 5421 | r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, |
| 5428 | MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, | 5422 | MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, |
| 5429 | TM_MUTEX_OFF); | 5423 | TM_MUTEX_OFF); |
| 5430 | if (r == FAILED) { | 5424 | if (r == FAILED) { |
| 5431 | sdev_printk(KERN_WARNING, sdev, | 5425 | sdev_printk(KERN_WARNING, sdev, |
| @@ -5467,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 5467 | 5461 | ||
| 5468 | r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, | 5462 | r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, |
| 5469 | sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, | 5463 | sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, |
| 5470 | scmd->serial_number, TM_MUTEX_OFF); | 5464 | TM_MUTEX_OFF); |
| 5471 | if (r == FAILED) { | 5465 | if (r == FAILED) { |
| 5472 | sdev_printk(KERN_WARNING, sdev, | 5466 | sdev_printk(KERN_WARNING, sdev, |
| 5473 | "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " | 5467 | "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " |
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 5ff978be249d..eacee48a955c 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
| @@ -728,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = { | |||
| 728 | .class_mask = 0, | 728 | .class_mask = 0, |
| 729 | .driver_data = chip_9485, | 729 | .driver_data = chip_9485, |
| 730 | }, | 730 | }, |
| 731 | { | ||
| 732 | .vendor = PCI_VENDOR_ID_MARVELL_EXT, | ||
| 733 | .device = 0x9485, | ||
| 734 | .subvendor = PCI_ANY_ID, | ||
| 735 | .subdevice = 0x9485, | ||
| 736 | .class = 0, | ||
| 737 | .class_mask = 0, | ||
| 738 | .driver_data = chip_9485, | ||
| 739 | }, | ||
| 731 | { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ | 740 | { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ |
| 732 | { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | 741 | { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ |
| 733 | { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ | 742 | { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ |
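The mvsas addition whitelists one more 9485-based board by matching on the subsystem device ID as well as the main device ID. Structurally this is an ordinary pci_device_id entry; a kernel-style sketch of the same match written both with designated initializers and with the PCI_DEVICE_SUB() helper (assuming that helper is available in the tree; this is not mvsas source):

    static const struct pci_device_id example_tbl[] = {
            {
                    .vendor      = PCI_VENDOR_ID_MARVELL_EXT,
                    .device      = 0x9485,
                    .subvendor   = PCI_ANY_ID,
                    .subdevice   = 0x9485,
                    .driver_data = chip_9485,
            },
            /* the same match, using the convenience macro */
            { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9485, PCI_ANY_ID, 0x9485),
              .driver_data = chip_9485 },
            { }     /* terminating entry */
    };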
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index 3721342835e9..aa528f53c533 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h | |||
| @@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *); | |||
| 129 | #define CAN_QUEUE 32 | 129 | #define CAN_QUEUE 32 |
| 130 | #endif | 130 | #endif |
| 131 | 131 | ||
| 132 | #ifndef HOSTS_C | ||
| 133 | |||
| 134 | #define NCR5380_implementation_fields \ | 132 | #define NCR5380_implementation_fields \ |
| 135 | volatile unsigned short io_port | 133 | volatile unsigned short io_port |
| 136 | 134 | ||
| @@ -171,6 +169,5 @@ static int pas16_bus_reset(Scsi_Cmnd *); | |||
| 171 | 169 | ||
| 172 | #define PAS16_IRQS 0xd4a8 | 170 | #define PAS16_IRQS 0xd4a8 |
| 173 | 171 | ||
| 174 | #endif /* else def HOSTS_C */ | ||
| 175 | #endif /* ndef ASM */ | 172 | #endif /* ndef ASM */ |
| 176 | #endif /* PAS16_H */ | 173 | #endif /* PAS16_H */ |
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 28b4e8139153..fe5eee4d0a11 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c | |||
| @@ -395,6 +395,8 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, | |||
| 395 | payload.offset = 0; | 395 | payload.offset = 0; |
| 396 | payload.length = 4096; | 396 | payload.length = 4096; |
| 397 | payload.func_specific = kzalloc(4096, GFP_KERNEL); | 397 | payload.func_specific = kzalloc(4096, GFP_KERNEL); |
| 398 | if (!payload.func_specific) | ||
| 399 | return -ENOMEM; | ||
| 398 | PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); | 400 | PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); |
| 399 | wait_for_completion(&completion); | 401 | wait_for_completion(&completion); |
| 400 | virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; | 402 | virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; |
| @@ -402,6 +404,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, | |||
| 402 | bios_index++) | 404 | bios_index++) |
| 403 | str += sprintf(str, "%c", | 405 | str += sprintf(str, "%c", |
| 404 | *((u8 *)((u8 *)virt_addr+bios_index))); | 406 | *((u8 *)((u8 *)virt_addr+bios_index))); |
| 407 | kfree(payload.func_specific); | ||
| 405 | return str - buf; | 408 | return str - buf; |
| 406 | } | 409 | } |
| 407 | static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); | 410 | static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); |
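The pm8001 hunk adds the two pieces the sysfs show routine was missing: a NULL check on the 4 KB bounce buffer before it is used, and a matching kfree() once the BIOS version string has been formatted, so an allocation failure cannot dereference NULL and a success no longer leaks the buffer. The same shape in standalone C (the query step is a hypothetical stand-in):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    static ssize_t bios_version_show(char *buf, size_t bufsz)
    {
            char *scratch = calloc(1, 4096);     /* kzalloc(4096, GFP_KERNEL) analogue */
            ssize_t len;

            if (!scratch)
                    return -1;                   /* -ENOMEM in the driver */

            /* ...issue the NVM read into scratch (omitted)... */
            strncpy(scratch, "1.14", 4095);

            len = snprintf(buf, bufsz, "%s\n", scratch);
            free(scratch);                       /* the previously missing release */
            return len;
    }

    int main(void)
    {
            char out[64];
            if (bios_version_show(out, sizeof(out)) > 0)
                    fputs(out, stdout);
            return 0;
    }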
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 07befcf365b8..16fe5196e6d9 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -664,7 +664,7 @@ do_read: | |||
| 664 | } | 664 | } |
| 665 | 665 | ||
| 666 | rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, | 666 | rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, |
| 667 | addr, offset, SFP_BLOCK_SIZE, 0); | 667 | addr, offset, SFP_BLOCK_SIZE, BIT_1); |
| 668 | if (rval != QLA_SUCCESS) { | 668 | if (rval != QLA_SUCCESS) { |
| 669 | ql_log(ql_log_warn, vha, 0x706d, | 669 | ql_log(ql_log_warn, vha, 0x706d, |
| 670 | "Unable to read SFP data (%x/%x/%x).\n", rval, | 670 | "Unable to read SFP data (%x/%x/%x).\n", rval, |
| @@ -1495,7 +1495,7 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, | |||
| 1495 | 1495 | ||
| 1496 | if (!ha->fw_dumped) | 1496 | if (!ha->fw_dumped) |
| 1497 | size = 0; | 1497 | size = 0; |
| 1498 | else if (IS_QLA82XX(ha)) | 1498 | else if (IS_P3P_TYPE(ha)) |
| 1499 | size = ha->md_template_size + ha->md_dump_size; | 1499 | size = ha->md_template_size + ha->md_dump_size; |
| 1500 | else | 1500 | else |
| 1501 | size = ha->fw_dump_len; | 1501 | size = ha->fw_dump_len; |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 71ff340f6de4..524f9eb7fcd1 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -2054,9 +2054,49 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job) | |||
| 2054 | bsg_job->reply->reply_payload_rcv_len = sizeof(sr); | 2054 | bsg_job->reply->reply_payload_rcv_len = sizeof(sr); |
| 2055 | break; | 2055 | break; |
| 2056 | default: | 2056 | default: |
| 2057 | ql_log(ql_log_warn, vha, 0x708c, | 2057 | ql_dbg(ql_dbg_user, vha, 0x708c, |
| 2058 | "Unknown serdes cmd %x.\n", sr.cmd); | 2058 | "Unknown serdes cmd %x.\n", sr.cmd); |
| 2059 | rval = -EDOM; | 2059 | rval = -EINVAL; |
| 2060 | break; | ||
| 2061 | } | ||
| 2062 | |||
| 2063 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
| 2064 | rval ? EXT_STATUS_MAILBOX : 0; | ||
| 2065 | |||
| 2066 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
| 2067 | bsg_job->reply->result = DID_OK << 16; | ||
| 2068 | bsg_job->job_done(bsg_job); | ||
| 2069 | return 0; | ||
| 2070 | } | ||
| 2071 | |||
| 2072 | static int | ||
| 2073 | qla8044_serdes_op(struct fc_bsg_job *bsg_job) | ||
| 2074 | { | ||
| 2075 | struct Scsi_Host *host = bsg_job->shost; | ||
| 2076 | scsi_qla_host_t *vha = shost_priv(host); | ||
| 2077 | int rval = 0; | ||
| 2078 | struct qla_serdes_reg_ex sr; | ||
| 2079 | |||
| 2080 | memset(&sr, 0, sizeof(sr)); | ||
| 2081 | |||
| 2082 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | ||
| 2083 | bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); | ||
| 2084 | |||
| 2085 | switch (sr.cmd) { | ||
| 2086 | case INT_SC_SERDES_WRITE_REG: | ||
| 2087 | rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); | ||
| 2088 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
| 2089 | break; | ||
| 2090 | case INT_SC_SERDES_READ_REG: | ||
| 2091 | rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); | ||
| 2092 | sg_copy_from_buffer(bsg_job->reply_payload.sg_list, | ||
| 2093 | bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); | ||
| 2094 | bsg_job->reply->reply_payload_rcv_len = sizeof(sr); | ||
| 2095 | break; | ||
| 2096 | default: | ||
| 2097 | ql_dbg(ql_dbg_user, vha, 0x70cf, | ||
| 2098 | "Unknown serdes cmd %x.\n", sr.cmd); | ||
| 2099 | rval = -EINVAL; | ||
| 2060 | break; | 2100 | break; |
| 2061 | } | 2101 | } |
| 2062 | 2102 | ||
| @@ -2121,6 +2161,9 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) | |||
| 2121 | case QL_VND_SERDES_OP: | 2161 | case QL_VND_SERDES_OP: |
| 2122 | return qla26xx_serdes_op(bsg_job); | 2162 | return qla26xx_serdes_op(bsg_job); |
| 2123 | 2163 | ||
| 2164 | case QL_VND_SERDES_OP_EX: | ||
| 2165 | return qla8044_serdes_op(bsg_job); | ||
| 2166 | |||
| 2124 | default: | 2167 | default: |
| 2125 | return -ENOSYS; | 2168 | return -ENOSYS; |
| 2126 | } | 2169 | } |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index e5c2126221e9..d38f9efa56fa 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -24,6 +24,7 @@ | |||
| 24 | #define QL_VND_READ_I2C 0x11 | 24 | #define QL_VND_READ_I2C 0x11 |
| 25 | #define QL_VND_FX00_MGMT_CMD 0x12 | 25 | #define QL_VND_FX00_MGMT_CMD 0x12 |
| 26 | #define QL_VND_SERDES_OP 0x13 | 26 | #define QL_VND_SERDES_OP 0x13 |
| 27 | #define QL_VND_SERDES_OP_EX 0x14 | ||
| 27 | 28 | ||
| 28 | /* BSG Vendor specific subcode returns */ | 29 | /* BSG Vendor specific subcode returns */ |
| 29 | #define EXT_STATUS_OK 0 | 30 | #define EXT_STATUS_OK 0 |
| @@ -225,4 +226,10 @@ struct qla_serdes_reg { | |||
| 225 | uint16_t val; | 226 | uint16_t val; |
| 226 | } __packed; | 227 | } __packed; |
| 227 | 228 | ||
| 229 | struct qla_serdes_reg_ex { | ||
| 230 | uint16_t cmd; | ||
| 231 | uint32_t addr; | ||
| 232 | uint32_t val; | ||
| 233 | } __packed; | ||
| 234 | |||
| 228 | #endif | 235 | #endif |
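The new QL_VND_SERDES_OP_EX path mirrors the existing serdes handler: copy a small fixed-size request out of the BSG scatter list, dispatch on the read/write sub-command, and copy the (possibly updated) structure back for reads. The wire format is the packed qla_serdes_reg_ex above; a standalone check of why the packing matters for a BSG payload (using the GCC/Clang attribute that __packed expands to):

    #include <stdint.h>
    #include <stdio.h>

    struct qla_serdes_reg_ex {
            uint16_t cmd;
            uint32_t addr;
            uint32_t val;
    } __attribute__((packed));                   /* __packed in the kernel headers */

    int main(void)
    {
            /* No padding after the 16-bit cmd, so the structure occupies 10 bytes
             * on the wire; an unpacked layout would normally round up to 12 and
             * shift the addr/val fields seen by user space. */
            printf("packed size = %zu\n", sizeof(struct qla_serdes_reg_ex));
            return 0;
    }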
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 97255f7c3975..c72ee97bf3f7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -15,7 +15,7 @@ | |||
| 15 | * | | | 0x0144,0x0146 | | 15 | * | | | 0x0144,0x0146 | |
| 16 | * | | | 0x015b-0x0160 | | 16 | * | | | 0x015b-0x0160 | |
| 17 | * | | | 0x016e-0x0170 | | 17 | * | | | 0x016e-0x0170 | |
| 18 | * | Mailbox commands | 0x1187 | 0x1018-0x1019 | | 18 | * | Mailbox commands | 0x118d | 0x1018-0x1019 | |
| 19 | * | | | 0x10ca | | 19 | * | | | 0x10ca | |
| 20 | * | | | 0x1115-0x1116 | | 20 | * | | | 0x1115-0x1116 | |
| 21 | * | | | 0x111a-0x111b | | 21 | * | | | 0x111a-0x111b | |
| @@ -45,12 +45,16 @@ | |||
| 45 | * | | | 0x70ad-0x70ae | | 45 | * | | | 0x70ad-0x70ae | |
| 46 | * | | | 0x70d7-0x70db | | 46 | * | | | 0x70d7-0x70db | |
| 47 | * | | | 0x70de-0x70df | | 47 | * | | | 0x70de-0x70df | |
| 48 | * | Task Management | 0x803d | 0x8025-0x8026 | | 48 | * | Task Management | 0x803d | 0x8000,0x800b | |
| 49 | * | | | 0x800b,0x8039 | | 49 | * | | | 0x8019 | |
| 50 | * | | | 0x8025,0x8026 | | ||
| 51 | * | | | 0x8031,0x8032 | | ||
| 52 | * | | | 0x8039,0x803c | | ||
| 50 | * | AER/EEH | 0x9011 | | | 53 | * | AER/EEH | 0x9011 | | |
| 51 | * | Virtual Port | 0xa007 | | | 54 | * | Virtual Port | 0xa007 | | |
| 52 | * | ISP82XX Specific | 0xb14c | 0xb002,0xb024 | | 55 | * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 | |
| 53 | * | | | 0xb09e,0xb0ae | | 56 | * | | | 0xb09e,0xb0ae | |
| 57 | * | | | 0xb0c3,0xb0c6 | | ||
| 54 | * | | | 0xb0e0-0xb0ef | | 58 | * | | | 0xb0e0-0xb0ef | |
| 55 | * | | | 0xb085,0xb0dc | | 59 | * | | | 0xb085,0xb0dc | |
| 56 | * | | | 0xb107,0xb108 | | 60 | * | | | 0xb107,0xb108 | |
| @@ -60,12 +64,12 @@ | |||
| 60 | * | | | 0xb13c-0xb140 | | 64 | * | | | 0xb13c-0xb140 | |
| 61 | * | | | 0xb149 | | 65 | * | | | 0xb149 | |
| 62 | * | MultiQ | 0xc00c | | | 66 | * | MultiQ | 0xc00c | | |
| 63 | * | Misc | 0xd2ff | 0xd017-0xd019 | | 67 | * | Misc | 0xd212 | 0xd017-0xd019 | |
| 64 | * | | | 0xd020 | | 68 | * | | | 0xd020 | |
| 65 | * | | | 0xd02e-0xd0ff | | 69 | * | | | 0xd030-0xd0ff | |
| 66 | * | | | 0xd101-0xd1fe | | 70 | * | | | 0xd101-0xd1fe | |
| 67 | * | | | 0xd212-0xd2fe | | 71 | * | | | 0xd213-0xd2fe | |
| 68 | * | Target Mode | 0xe070 | 0xe021 | | 72 | * | Target Mode | 0xe078 | | |
| 69 | * | Target Mode Management | 0xf072 | 0xf002-0xf003 | | 73 | * | Target Mode Management | 0xf072 | 0xf002-0xf003 | |
| 70 | * | | | 0xf046-0xf049 | | 74 | * | | | 0xf046-0xf049 | |
| 71 | * | Target Mode Task Management | 0x1000b | | | 75 | * | Target Mode Task Management | 0x1000b | | |
| @@ -277,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram, | |||
| 277 | if (rval != QLA_SUCCESS) | 281 | if (rval != QLA_SUCCESS) |
| 278 | return rval; | 282 | return rval; |
| 279 | 283 | ||
| 284 | set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags); | ||
| 285 | |||
| 280 | /* External Memory. */ | 286 | /* External Memory. */ |
| 281 | return qla24xx_dump_ram(ha, 0x100000, *nxt, | 287 | rval = qla24xx_dump_ram(ha, 0x100000, *nxt, |
| 282 | ha->fw_memory_size - 0x100000 + 1, nxt); | 288 | ha->fw_memory_size - 0x100000 + 1, nxt); |
| 289 | if (rval == QLA_SUCCESS) | ||
| 290 | set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags); | ||
| 291 | |||
| 292 | return rval; | ||
| 283 | } | 293 | } |
| 284 | 294 | ||
| 285 | static uint32_t * | 295 | static uint32_t * |
| @@ -296,23 +306,15 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, | |||
| 296 | return buf; | 306 | return buf; |
| 297 | } | 307 | } |
| 298 | 308 | ||
| 299 | int | 309 | void |
| 300 | qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) | 310 | qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha) |
| 301 | { | 311 | { |
| 302 | int rval = QLA_SUCCESS; | ||
| 303 | uint32_t cnt; | ||
| 304 | |||
| 305 | WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE); | 312 | WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE); |
| 306 | for (cnt = 30000; | ||
| 307 | ((RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) == 0) && | ||
| 308 | rval == QLA_SUCCESS; cnt--) { | ||
| 309 | if (cnt) | ||
| 310 | udelay(100); | ||
| 311 | else | ||
| 312 | rval = QLA_FUNCTION_TIMEOUT; | ||
| 313 | } | ||
| 314 | 313 | ||
| 315 | return rval; | 314 | /* 100 usec delay is sufficient enough for hardware to pause RISC */ |
| 315 | udelay(100); | ||
| 316 | if (RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) | ||
| 317 | set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags); | ||
| 316 | } | 318 | } |
| 317 | 319 | ||
| 318 | int | 320 | int |
| @@ -320,10 +322,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha) | |||
| 320 | { | 322 | { |
| 321 | int rval = QLA_SUCCESS; | 323 | int rval = QLA_SUCCESS; |
| 322 | uint32_t cnt; | 324 | uint32_t cnt; |
| 323 | uint16_t mb0, wd; | 325 | uint16_t wd; |
| 324 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 326 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
| 325 | 327 | ||
| 326 | /* Reset RISC. */ | 328 | /* |
| 329 | * Reset RISC. The delay is dependent on system architecture. | ||
| 330 | * Driver can proceed with the reset sequence after waiting | ||
| 331 | * for a timeout period. | ||
| 332 | */ | ||
| 327 | WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); | 333 | WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); |
| 328 | for (cnt = 0; cnt < 30000; cnt++) { | 334 | for (cnt = 0; cnt < 30000; cnt++) { |
| 329 | if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) | 335 | if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) |
| @@ -331,19 +337,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha) | |||
| 331 | 337 | ||
| 332 | udelay(10); | 338 | udelay(10); |
| 333 | } | 339 | } |
| 340 | if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)) | ||
| 341 | set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); | ||
| 334 | 342 | ||
| 335 | WRT_REG_DWORD(®->ctrl_status, | 343 | WRT_REG_DWORD(®->ctrl_status, |
| 336 | CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); | 344 | CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); |
| 337 | pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); | 345 | pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); |
| 338 | 346 | ||
| 339 | udelay(100); | 347 | udelay(100); |
| 340 | /* Wait for firmware to complete NVRAM accesses. */ | ||
| 341 | mb0 = (uint32_t) RD_REG_WORD(®->mailbox0); | ||
| 342 | for (cnt = 10000 ; cnt && mb0; cnt--) { | ||
| 343 | udelay(5); | ||
| 344 | mb0 = (uint32_t) RD_REG_WORD(®->mailbox0); | ||
| 345 | barrier(); | ||
| 346 | } | ||
| 347 | 348 | ||
| 348 | /* Wait for soft-reset to complete. */ | 349 | /* Wait for soft-reset to complete. */ |
| 349 | for (cnt = 0; cnt < 30000; cnt++) { | 350 | for (cnt = 0; cnt < 30000; cnt++) { |
| @@ -353,16 +354,21 @@ qla24xx_soft_reset(struct qla_hw_data *ha) | |||
| 353 | 354 | ||
| 354 | udelay(10); | 355 | udelay(10); |
| 355 | } | 356 | } |
| 357 | if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) | ||
| 358 | set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); | ||
| 359 | |||
| 356 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); | 360 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); |
| 357 | RD_REG_DWORD(®->hccr); /* PCI Posting. */ | 361 | RD_REG_DWORD(®->hccr); /* PCI Posting. */ |
| 358 | 362 | ||
| 359 | for (cnt = 30000; RD_REG_WORD(®->mailbox0) != 0 && | 363 | for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && |
| 360 | rval == QLA_SUCCESS; cnt--) { | 364 | rval == QLA_SUCCESS; cnt--) { |
| 361 | if (cnt) | 365 | if (cnt) |
| 362 | udelay(100); | 366 | udelay(10); |
| 363 | else | 367 | else |
| 364 | rval = QLA_FUNCTION_TIMEOUT; | 368 | rval = QLA_FUNCTION_TIMEOUT; |
| 365 | } | 369 | } |
| 370 | if (rval == QLA_SUCCESS) | ||
| 371 | set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); | ||
| 366 | 372 | ||
| 367 | return rval; | 373 | return rval; |
| 368 | } | 374 | } |
| @@ -659,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) | |||
| 659 | 665 | ||
| 660 | if (rval != QLA_SUCCESS) { | 666 | if (rval != QLA_SUCCESS) { |
| 661 | ql_log(ql_log_warn, vha, 0xd000, | 667 | ql_log(ql_log_warn, vha, 0xd000, |
| 662 | "Failed to dump firmware (%x).\n", rval); | 668 | "Failed to dump firmware (%x), dump status flags (0x%lx).\n", |
| 669 | rval, ha->fw_dump_cap_flags); | ||
| 663 | ha->fw_dumped = 0; | 670 | ha->fw_dumped = 0; |
| 664 | } else { | 671 | } else { |
| 665 | ql_log(ql_log_info, vha, 0xd001, | 672 | ql_log(ql_log_info, vha, 0xd001, |
| 666 | "Firmware dump saved to temp buffer (%ld/%p).\n", | 673 | "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", |
| 667 | vha->host_no, ha->fw_dump); | 674 | vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); |
| 668 | ha->fw_dumped = 1; | 675 | ha->fw_dumped = 1; |
| 669 | qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); | 676 | qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); |
| 670 | } | 677 | } |
| @@ -1053,6 +1060,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1053 | 1060 | ||
| 1054 | risc_address = ext_mem_cnt = 0; | 1061 | risc_address = ext_mem_cnt = 0; |
| 1055 | flags = 0; | 1062 | flags = 0; |
| 1063 | ha->fw_dump_cap_flags = 0; | ||
| 1056 | 1064 | ||
| 1057 | if (!hardware_locked) | 1065 | if (!hardware_locked) |
| 1058 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1066 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| @@ -1075,10 +1083,11 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1075 | 1083 | ||
| 1076 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); | 1084 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); |
| 1077 | 1085 | ||
| 1078 | /* Pause RISC. */ | 1086 | /* |
| 1079 | rval = qla24xx_pause_risc(reg); | 1087 | * Pause RISC. No need to track timeout, as resetting the chip |
| 1080 | if (rval != QLA_SUCCESS) | 1088 | * is the right approach incase of pause timeout |
| 1081 | goto qla24xx_fw_dump_failed_0; | 1089 | */ |
| 1090 | qla24xx_pause_risc(reg, ha); | ||
| 1082 | 1091 | ||
| 1083 | /* Host interface registers. */ | 1092 | /* Host interface registers. */ |
| 1084 | dmp_reg = ®->flash_addr; | 1093 | dmp_reg = ®->flash_addr; |
| @@ -1302,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1302 | 1311 | ||
| 1303 | risc_address = ext_mem_cnt = 0; | 1312 | risc_address = ext_mem_cnt = 0; |
| 1304 | flags = 0; | 1313 | flags = 0; |
| 1314 | ha->fw_dump_cap_flags = 0; | ||
| 1305 | 1315 | ||
| 1306 | if (!hardware_locked) | 1316 | if (!hardware_locked) |
| 1307 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1317 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| @@ -1325,10 +1335,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1325 | 1335 | ||
| 1326 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); | 1336 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); |
| 1327 | 1337 | ||
| 1328 | /* Pause RISC. */ | 1338 | /* |
| 1329 | rval = qla24xx_pause_risc(reg); | 1339 | * Pause RISC. No need to track timeout, as resetting the chip |
| 1330 | if (rval != QLA_SUCCESS) | 1340 | * is the right approach incase of pause timeout |
| 1331 | goto qla25xx_fw_dump_failed_0; | 1341 | */ |
| 1342 | qla24xx_pause_risc(reg, ha); | ||
| 1332 | 1343 | ||
| 1333 | /* Host/Risc registers. */ | 1344 | /* Host/Risc registers. */ |
| 1334 | iter_reg = fw->host_risc_reg; | 1345 | iter_reg = fw->host_risc_reg; |
| @@ -1619,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1619 | 1630 | ||
| 1620 | risc_address = ext_mem_cnt = 0; | 1631 | risc_address = ext_mem_cnt = 0; |
| 1621 | flags = 0; | 1632 | flags = 0; |
| 1633 | ha->fw_dump_cap_flags = 0; | ||
| 1622 | 1634 | ||
| 1623 | if (!hardware_locked) | 1635 | if (!hardware_locked) |
| 1624 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1636 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| @@ -1641,10 +1653,11 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1641 | 1653 | ||
| 1642 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); | 1654 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); |
| 1643 | 1655 | ||
| 1644 | /* Pause RISC. */ | 1656 | /* |
| 1645 | rval = qla24xx_pause_risc(reg); | 1657 | * Pause RISC. No need to track timeout, as resetting the chip |
| 1646 | if (rval != QLA_SUCCESS) | 1658 | * is the right approach incase of pause timeout |
| 1647 | goto qla81xx_fw_dump_failed_0; | 1659 | */ |
| 1660 | qla24xx_pause_risc(reg, ha); | ||
| 1648 | 1661 | ||
| 1649 | /* Host/Risc registers. */ | 1662 | /* Host/Risc registers. */ |
| 1650 | iter_reg = fw->host_risc_reg; | 1663 | iter_reg = fw->host_risc_reg; |
| @@ -1938,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1938 | 1951 | ||
| 1939 | risc_address = ext_mem_cnt = 0; | 1952 | risc_address = ext_mem_cnt = 0; |
| 1940 | flags = 0; | 1953 | flags = 0; |
| 1954 | ha->fw_dump_cap_flags = 0; | ||
| 1941 | 1955 | ||
| 1942 | if (!hardware_locked) | 1956 | if (!hardware_locked) |
| 1943 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1957 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| @@ -1959,10 +1973,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1959 | 1973 | ||
| 1960 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); | 1974 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); |
| 1961 | 1975 | ||
| 1962 | /* Pause RISC. */ | 1976 | /* |
| 1963 | rval = qla24xx_pause_risc(reg); | 1977 | * Pause RISC. No need to track timeout, as resetting the chip |
| 1964 | if (rval != QLA_SUCCESS) | 1978 | * is the right approach incase of pause timeout |
| 1965 | goto qla83xx_fw_dump_failed_0; | 1979 | */ |
| 1980 | qla24xx_pause_risc(reg, ha); | ||
| 1966 | 1981 | ||
| 1967 | WRT_REG_DWORD(®->iobase_addr, 0x6000); | 1982 | WRT_REG_DWORD(®->iobase_addr, 0x6000); |
| 1968 | dmp_reg = ®->iobase_window; | 1983 | dmp_reg = ®->iobase_window; |
| @@ -2385,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 2385 | nxt += sizeof(fw->code_ram); | 2400 | nxt += sizeof(fw->code_ram); |
| 2386 | nxt += (ha->fw_memory_size - 0x100000 + 1); | 2401 | nxt += (ha->fw_memory_size - 0x100000 + 1); |
| 2387 | goto copy_queue; | 2402 | goto copy_queue; |
| 2388 | } else | 2403 | } else { |
| 2404 | set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); | ||
| 2389 | ql_log(ql_log_warn, vha, 0xd010, | 2405 | ql_log(ql_log_warn, vha, 0xd010, |
| 2390 | "bigger hammer success?\n"); | 2406 | "bigger hammer success?\n"); |
| 2407 | } | ||
| 2391 | } | 2408 | } |
| 2392 | 2409 | ||
| 2393 | rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), | 2410 | rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index cc961040f8b1..e1fc4e66966a 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -353,5 +353,6 @@ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, | |||
| 353 | uint32_t, void **); | 353 | uint32_t, void **); |
| 354 | extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, | 354 | extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, |
| 355 | uint32_t, void **); | 355 | uint32_t, void **); |
| 356 | extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *); | 356 | extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, |
| 357 | struct qla_hw_data *); | ||
| 357 | extern int qla24xx_soft_reset(struct qla_hw_data *); | 358 | extern int qla24xx_soft_reset(struct qla_hw_data *); |
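The qla_dbg changes replace hard failures during the dump sequence with progress tracking: each completed stage (RISC pause, DMA shutdown, soft reset, SRAM and external memory dumps) sets a bit in ha->fw_dump_cap_flags, and the final log line prints the accumulated mask so a partial dump shows exactly which step stalled. A standalone model of that pattern (stage names follow the new flags; the bit assignments and stage functions here are illustrative stand-ins):

    #include <stdio.h>

    enum {
            RISC_PAUSE_CMPL        = 1 << 0,
            DMA_SHUTDOWN_CMPL      = 1 << 1,
            ISP_RESET_CMPL         = 1 << 2,
            RISC_RDY_AFT_RESET     = 1 << 3,
            RISC_SRAM_DUMP_CMPL    = 1 << 4,
            RISC_EXT_MEM_DUMP_CMPL = 1 << 5,
    };

    static int pause_risc(void) { return 1; }
    static int soft_reset(void) { return 1; }
    static int dump_sram(void)  { return 0; }   /* pretend this stage failed */

    int main(void)
    {
            unsigned long cap_flags = 0;

            if (pause_risc())
                    cap_flags |= RISC_PAUSE_CMPL;
            if (soft_reset())
                    cap_flags |= ISP_RESET_CMPL;
            if (dump_sram())
                    cap_flags |= RISC_SRAM_DUMP_CMPL;
            else
                    printf("Failed to dump firmware, dump status flags (0x%lx).\n",
                           cap_flags);
            return 0;
    }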
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6a106136716c..1fa010448666 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -965,6 +965,13 @@ struct mbx_cmd_32 { | |||
| 965 | */ | 965 | */ |
| 966 | #define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */ | 966 | #define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */ |
| 967 | 967 | ||
| 968 | /* | ||
| 969 | * ISP8044 mailbox commands | ||
| 970 | */ | ||
| 971 | #define MBC_SET_GET_ETH_SERDES_REG 0x150 | ||
| 972 | #define HCS_WRITE_SERDES 0x3 | ||
| 973 | #define HCS_READ_SERDES 0x4 | ||
| 974 | |||
| 968 | /* Firmware return data sizes */ | 975 | /* Firmware return data sizes */ |
| 969 | #define FCAL_MAP_SIZE 128 | 976 | #define FCAL_MAP_SIZE 128 |
| 970 | 977 | ||
| @@ -1622,10 +1629,20 @@ typedef struct { | |||
| 1622 | #define PO_MODE_DIF_PASS 2 | 1629 | #define PO_MODE_DIF_PASS 2 |
| 1623 | #define PO_MODE_DIF_REPLACE 3 | 1630 | #define PO_MODE_DIF_REPLACE 3 |
| 1624 | #define PO_MODE_DIF_TCP_CKSUM 6 | 1631 | #define PO_MODE_DIF_TCP_CKSUM 6 |
| 1625 | #define PO_ENABLE_DIF_BUNDLING BIT_8 | ||
| 1626 | #define PO_ENABLE_INCR_GUARD_SEED BIT_3 | 1632 | #define PO_ENABLE_INCR_GUARD_SEED BIT_3 |
| 1627 | #define PO_DISABLE_INCR_REF_TAG BIT_5 | ||
| 1628 | #define PO_DISABLE_GUARD_CHECK BIT_4 | 1633 | #define PO_DISABLE_GUARD_CHECK BIT_4 |
| 1634 | #define PO_DISABLE_INCR_REF_TAG BIT_5 | ||
| 1635 | #define PO_DIS_HEADER_MODE BIT_7 | ||
| 1636 | #define PO_ENABLE_DIF_BUNDLING BIT_8 | ||
| 1637 | #define PO_DIS_FRAME_MODE BIT_9 | ||
| 1638 | #define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */ | ||
| 1639 | #define PO_DIS_VALD_APP_REF_ESC BIT_11 | ||
| 1640 | |||
| 1641 | #define PO_DIS_APP_TAG_REPL BIT_12 /* disable APP Tag replacement */ | ||
| 1642 | #define PO_DIS_REF_TAG_REPL BIT_13 | ||
| 1643 | #define PO_DIS_APP_TAG_VALD BIT_14 /* disable APP Tag validation */ | ||
| 1644 | #define PO_DIS_REF_TAG_VALD BIT_15 | ||
| 1645 | |||
| 1629 | /* | 1646 | /* |
| 1630 | * ISP queue - 64-Bit addressing, continuation crc entry structure definition. | 1647 | * ISP queue - 64-Bit addressing, continuation crc entry structure definition. |
| 1631 | */ | 1648 | */ |
| @@ -1748,6 +1765,8 @@ typedef struct { | |||
| 1748 | #define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */ | 1765 | #define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */ |
| 1749 | #define CS_PORT_BUSY 0x2B /* Port Busy */ | 1766 | #define CS_PORT_BUSY 0x2B /* Port Busy */ |
| 1750 | #define CS_COMPLETE_CHKCOND 0x30 /* Error? */ | 1767 | #define CS_COMPLETE_CHKCOND 0x30 /* Error? */ |
| 1768 | #define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request | ||
| 1769 | failure */ | ||
| 1751 | #define CS_BAD_PAYLOAD 0x80 /* Driver defined */ | 1770 | #define CS_BAD_PAYLOAD 0x80 /* Driver defined */ |
| 1752 | #define CS_UNKNOWN 0x81 /* Driver defined */ | 1771 | #define CS_UNKNOWN 0x81 /* Driver defined */ |
| 1753 | #define CS_RETRY 0x82 /* Driver defined */ | 1772 | #define CS_RETRY 0x82 /* Driver defined */ |
| @@ -2676,6 +2695,7 @@ struct rsp_que { | |||
| 2676 | uint32_t __iomem *rsp_q_out; | 2695 | uint32_t __iomem *rsp_q_out; |
| 2677 | uint16_t ring_index; | 2696 | uint16_t ring_index; |
| 2678 | uint16_t out_ptr; | 2697 | uint16_t out_ptr; |
| 2698 | uint16_t *in_ptr; /* queue shadow in index */ | ||
| 2679 | uint16_t length; | 2699 | uint16_t length; |
| 2680 | uint16_t options; | 2700 | uint16_t options; |
| 2681 | uint16_t rid; | 2701 | uint16_t rid; |
| @@ -2702,6 +2722,7 @@ struct req_que { | |||
| 2702 | uint32_t __iomem *req_q_out; | 2722 | uint32_t __iomem *req_q_out; |
| 2703 | uint16_t ring_index; | 2723 | uint16_t ring_index; |
| 2704 | uint16_t in_ptr; | 2724 | uint16_t in_ptr; |
| 2725 | uint16_t *out_ptr; /* queue shadow out index */ | ||
| 2705 | uint16_t cnt; | 2726 | uint16_t cnt; |
| 2706 | uint16_t length; | 2727 | uint16_t length; |
| 2707 | uint16_t options; | 2728 | uint16_t options; |
| @@ -2907,6 +2928,8 @@ struct qla_hw_data { | |||
| 2907 | #define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 | 2928 | #define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 |
| 2908 | #define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 | 2929 | #define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 |
| 2909 | #define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071 | 2930 | #define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071 |
| 2931 | #define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 | ||
| 2932 | |||
| 2910 | uint32_t device_type; | 2933 | uint32_t device_type; |
| 2911 | #define DT_ISP2100 BIT_0 | 2934 | #define DT_ISP2100 BIT_0 |
| 2912 | #define DT_ISP2200 BIT_1 | 2935 | #define DT_ISP2200 BIT_1 |
| @@ -2928,7 +2951,8 @@ struct qla_hw_data { | |||
| 2928 | #define DT_ISPFX00 BIT_17 | 2951 | #define DT_ISPFX00 BIT_17 |
| 2929 | #define DT_ISP8044 BIT_18 | 2952 | #define DT_ISP8044 BIT_18 |
| 2930 | #define DT_ISP2071 BIT_19 | 2953 | #define DT_ISP2071 BIT_19 |
| 2931 | #define DT_ISP_LAST (DT_ISP2071 << 1) | 2954 | #define DT_ISP2271 BIT_20 |
| 2955 | #define DT_ISP_LAST (DT_ISP2271 << 1) | ||
| 2932 | 2956 | ||
| 2933 | #define DT_T10_PI BIT_25 | 2957 | #define DT_T10_PI BIT_25 |
| 2934 | #define DT_IIDMA BIT_26 | 2958 | #define DT_IIDMA BIT_26 |
| @@ -2959,6 +2983,7 @@ struct qla_hw_data { | |||
| 2959 | #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) | 2983 | #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) |
| 2960 | #define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) | 2984 | #define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) |
| 2961 | #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) | 2985 | #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) |
| 2986 | #define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271) | ||
| 2962 | 2987 | ||
| 2963 | #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ | 2988 | #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ |
| 2964 | IS_QLA6312(ha) || IS_QLA6322(ha)) | 2989 | IS_QLA6312(ha) || IS_QLA6322(ha)) |
| @@ -2967,7 +2992,7 @@ struct qla_hw_data { | |||
| 2967 | #define IS_QLA25XX(ha) (IS_QLA2532(ha)) | 2992 | #define IS_QLA25XX(ha) (IS_QLA2532(ha)) |
| 2968 | #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) | 2993 | #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) |
| 2969 | #define IS_QLA84XX(ha) (IS_QLA8432(ha)) | 2994 | #define IS_QLA84XX(ha) (IS_QLA8432(ha)) |
| 2970 | #define IS_QLA27XX(ha) (IS_QLA2071(ha)) | 2995 | #define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha)) |
| 2971 | #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ | 2996 | #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ |
| 2972 | IS_QLA84XX(ha)) | 2997 | IS_QLA84XX(ha)) |
| 2973 | #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ | 2998 | #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ |
| @@ -3006,6 +3031,7 @@ struct qla_hw_data { | |||
| 3006 | (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) | 3031 | (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) |
| 3007 | #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) | 3032 | #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) |
| 3008 | #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) | 3033 | #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) |
| 3034 | #define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) | ||
| 3009 | 3035 | ||
| 3010 | /* HBA serial number */ | 3036 | /* HBA serial number */ |
| 3011 | uint8_t serial0; | 3037 | uint8_t serial0; |
| @@ -3136,7 +3162,15 @@ struct qla_hw_data { | |||
| 3136 | struct qla2xxx_fw_dump *fw_dump; | 3162 | struct qla2xxx_fw_dump *fw_dump; |
| 3137 | uint32_t fw_dump_len; | 3163 | uint32_t fw_dump_len; |
| 3138 | int fw_dumped; | 3164 | int fw_dumped; |
| 3165 | unsigned long fw_dump_cap_flags; | ||
| 3166 | #define RISC_PAUSE_CMPL 0 | ||
| 3167 | #define DMA_SHUTDOWN_CMPL 1 | ||
| 3168 | #define ISP_RESET_CMPL 2 | ||
| 3169 | #define RISC_RDY_AFT_RESET 3 | ||
| 3170 | #define RISC_SRAM_DUMP_CMPL 4 | ||
| 3171 | #define RISC_EXT_MEM_DUMP_CMPL 5 | ||
| 3139 | int fw_dump_reading; | 3172 | int fw_dump_reading; |
| 3173 | int prev_minidump_failed; | ||
| 3140 | dma_addr_t eft_dma; | 3174 | dma_addr_t eft_dma; |
| 3141 | void *eft; | 3175 | void *eft; |
| 3142 | /* Current size of mctp dump is 0x086064 bytes */ | 3176 | /* Current size of mctp dump is 0x086064 bytes */ |
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 32ab80957688..2ca39b8e7166 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 3a7353eaccbd..eb8f57249f1d 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -371,7 +371,10 @@ struct init_cb_24xx { | |||
| 371 | * BIT 14 = Data Rate bit 1 | 371 | * BIT 14 = Data Rate bit 1 |
| 372 | * BIT 15 = Data Rate bit 2 | 372 | * BIT 15 = Data Rate bit 2 |
| 373 | * BIT 16 = Enable 75 ohm Termination Select | 373 | * BIT 16 = Enable 75 ohm Termination Select |
| 374 | * BIT 17-31 = Reserved | 374 | * BIT 17-28 = Reserved |
| 375 | * BIT 29 = Enable response queue 0 in index shadowing | ||
| 376 | * BIT 30 = Enable request queue 0 out index shadowing | ||
| 377 | * BIT 31 = Reserved | ||
| 375 | */ | 378 | */ |
| 376 | uint32_t firmware_options_3; | 379 | uint32_t firmware_options_3; |
| 377 | uint16_t qos; | 380 | uint16_t qos; |
| @@ -1134,13 +1137,6 @@ struct device_reg_24xx { | |||
| 1134 | #define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */ | 1137 | #define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */ |
| 1135 | #define MAX_MULTI_ID_FABRIC 256 /* ... */ | 1138 | #define MAX_MULTI_ID_FABRIC 256 /* ... */ |
| 1136 | 1139 | ||
| 1137 | #define for_each_mapped_vp_idx(_ha, _idx) \ | ||
| 1138 | for (_idx = find_next_bit((_ha)->vp_idx_map, \ | ||
| 1139 | (_ha)->max_npiv_vports + 1, 1); \ | ||
| 1140 | _idx <= (_ha)->max_npiv_vports; \ | ||
| 1141 | _idx = find_next_bit((_ha)->vp_idx_map, \ | ||
| 1142 | (_ha)->max_npiv_vports + 1, _idx + 1)) \ | ||
| 1143 | |||
| 1144 | struct mid_conf_entry_24xx { | 1140 | struct mid_conf_entry_24xx { |
| 1145 | uint16_t reserved_1; | 1141 | uint16_t reserved_1; |
| 1146 | 1142 | ||
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index e665e8109933..d48dea8fab1b 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); | |||
| 220 | 220 | ||
| 221 | extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); | 221 | extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); |
| 222 | extern int qla2x00_issue_marker(scsi_qla_host_t *, int); | 222 | extern int qla2x00_issue_marker(scsi_qla_host_t *, int); |
| 223 | extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, | ||
| 224 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | ||
| 225 | extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, | ||
| 226 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | ||
| 227 | extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, | ||
| 228 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | ||
| 229 | |||
| 223 | 230 | ||
| 224 | /* | 231 | /* |
| 225 | * Global Function Prototypes in qla_mbx.c source file. | 232 | * Global Function Prototypes in qla_mbx.c source file. |
| @@ -347,6 +354,11 @@ extern int | |||
| 347 | qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *); | 354 | qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *); |
| 348 | 355 | ||
| 349 | extern int | 356 | extern int |
| 357 | qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t); | ||
| 358 | extern int | ||
| 359 | qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *); | ||
| 360 | |||
| 361 | extern int | ||
| 350 | qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); | 362 | qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); |
| 351 | 363 | ||
| 352 | extern int | 364 | extern int |
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index e377f9d2f92a..a0df3b1b3823 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 38aeb54cd9d8..e2184412617d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -1476,6 +1476,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
| 1476 | } | 1476 | } |
| 1477 | 1477 | ||
| 1478 | ha->fw_dumped = 0; | 1478 | ha->fw_dumped = 0; |
| 1479 | ha->fw_dump_cap_flags = 0; | ||
| 1479 | dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; | 1480 | dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; |
| 1480 | req_q_size = rsp_q_size = 0; | 1481 | req_q_size = rsp_q_size = 0; |
| 1481 | 1482 | ||
| @@ -2061,6 +2062,10 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
| 2061 | icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); | 2062 | icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); |
| 2062 | icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); | 2063 | icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); |
| 2063 | 2064 | ||
| 2065 | if (IS_SHADOW_REG_CAPABLE(ha)) | ||
| 2066 | icb->firmware_options_2 |= | ||
| 2067 | __constant_cpu_to_le32(BIT_30|BIT_29); | ||
| 2068 | |||
| 2064 | if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { | 2069 | if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
| 2065 | icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); | 2070 | icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); |
| 2066 | icb->rid = __constant_cpu_to_le16(rid); | 2071 | icb->rid = __constant_cpu_to_le16(rid); |
| @@ -2138,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
| 2138 | req = ha->req_q_map[que]; | 2143 | req = ha->req_q_map[que]; |
| 2139 | if (!req) | 2144 | if (!req) |
| 2140 | continue; | 2145 | continue; |
| 2146 | req->out_ptr = (void *)(req->ring + req->length); | ||
| 2147 | *req->out_ptr = 0; | ||
| 2141 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) | 2148 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) |
| 2142 | req->outstanding_cmds[cnt] = NULL; | 2149 | req->outstanding_cmds[cnt] = NULL; |
| 2143 | 2150 | ||
| @@ -2153,6 +2160,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
| 2153 | rsp = ha->rsp_q_map[que]; | 2160 | rsp = ha->rsp_q_map[que]; |
| 2154 | if (!rsp) | 2161 | if (!rsp) |
| 2155 | continue; | 2162 | continue; |
| 2163 | rsp->in_ptr = (void *)(rsp->ring + rsp->length); | ||
| 2164 | *rsp->in_ptr = 0; | ||
| 2156 | /* Initialize response queue entries */ | 2165 | /* Initialize response queue entries */ |
| 2157 | if (IS_QLAFX00(ha)) | 2166 | if (IS_QLAFX00(ha)) |
| 2158 | qlafx00_init_response_q_entries(rsp); | 2167 | qlafx00_init_response_q_entries(rsp); |
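The init-rings change above places req->out_ptr and rsp->in_ptr one element past the end of each ring, so on shadow-register-capable parts (firmware_options_2 BIT_29/BIT_30 set in qla24xx_config_rings) the firmware can DMA its queue index straight into host memory and the driver can read it without touching a register. A small sketch of that layout, assuming a hypothetical ring of plain 64-byte entries; the extra trailing slot holding the shadow index is the point being illustrated, not the exact qla2xxx structures.

#include <stdint.h>
#include <stdlib.h>

struct entry { uint8_t bytes[64]; };	/* hypothetical 64-byte IOCB */

struct ring {
	struct entry *ring;	/* 'length' entries plus room for a shadow index */
	uint16_t length;
	uint16_t *out_shadow;	/* firmware-updated consumer index */
};

static int ring_init(struct ring *r, uint16_t length)
{
	/* One extra entry's worth of space holds the 16-bit shadow index. */
	r->ring = calloc(length + 1U, sizeof(*r->ring));
	if (!r->ring)
		return -1;
	r->length = length;
	r->out_shadow = (uint16_t *)(r->ring + length);
	*r->out_shadow = 0;
	return 0;
}

int main(void)
{
	struct ring r;

	if (ring_init(&r, 128))
		return 1;
	/* Firmware would DMA its consumer index into *r.out_shadow here. */
	free(r.ring);
	return 0;
}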
| @@ -3406,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
| 3406 | fcport->d_id.b.domain, | 3415 | fcport->d_id.b.domain, |
| 3407 | fcport->d_id.b.area, | 3416 | fcport->d_id.b.area, |
| 3408 | fcport->d_id.b.al_pa); | 3417 | fcport->d_id.b.al_pa); |
| 3409 | fcport->loop_id = FC_NO_LOOP_ID; | 3418 | qla2x00_clear_loop_id(fcport); |
| 3410 | } | 3419 | } |
| 3411 | } | 3420 | } |
| 3412 | } | 3421 | } |
| @@ -4727,7 +4736,6 @@ static int | |||
| 4727 | qla2x00_restart_isp(scsi_qla_host_t *vha) | 4736 | qla2x00_restart_isp(scsi_qla_host_t *vha) |
| 4728 | { | 4737 | { |
| 4729 | int status = 0; | 4738 | int status = 0; |
| 4730 | uint32_t wait_time; | ||
| 4731 | struct qla_hw_data *ha = vha->hw; | 4739 | struct qla_hw_data *ha = vha->hw; |
| 4732 | struct req_que *req = ha->req_q_map[0]; | 4740 | struct req_que *req = ha->req_q_map[0]; |
| 4733 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 4741 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
| @@ -4744,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) | |||
| 4744 | if (!status && !(status = qla2x00_init_rings(vha))) { | 4752 | if (!status && !(status = qla2x00_init_rings(vha))) { |
| 4745 | clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); | 4753 | clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); |
| 4746 | ha->flags.chip_reset_done = 1; | 4754 | ha->flags.chip_reset_done = 1; |
| 4755 | |||
| 4747 | /* Initialize the queues in use */ | 4756 | /* Initialize the queues in use */ |
| 4748 | qla25xx_init_queues(ha); | 4757 | qla25xx_init_queues(ha); |
| 4749 | 4758 | ||
| 4750 | status = qla2x00_fw_ready(vha); | 4759 | status = qla2x00_fw_ready(vha); |
| 4751 | if (!status) { | 4760 | if (!status) { |
| 4752 | ql_dbg(ql_dbg_taskm, vha, 0x8031, | ||
| 4753 | "Start configure loop status = %d.\n", status); | ||
| 4754 | |||
| 4755 | /* Issue a marker after FW becomes ready. */ | 4761 | /* Issue a marker after FW becomes ready. */ |
| 4756 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); | 4762 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); |
| 4757 | 4763 | ||
| @@ -4766,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) | |||
| 4766 | qlt_24xx_process_atio_queue(vha); | 4772 | qlt_24xx_process_atio_queue(vha); |
| 4767 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 4773 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 4768 | 4774 | ||
| 4769 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ | 4775 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); |
| 4770 | wait_time = 256; | ||
| 4771 | do { | ||
| 4772 | clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | ||
| 4773 | qla2x00_configure_loop(vha); | ||
| 4774 | wait_time--; | ||
| 4775 | } while (!atomic_read(&vha->loop_down_timer) && | ||
| 4776 | !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) | ||
| 4777 | && wait_time && (test_bit(LOOP_RESYNC_NEEDED, | ||
| 4778 | &vha->dpc_flags))); | ||
| 4779 | } | 4776 | } |
| 4780 | 4777 | ||
| 4781 | /* if no cable then assume it's good */ | 4778 | /* if no cable then assume it's good */ |
| 4782 | if ((vha->device_flags & DFLG_NO_CABLE)) | 4779 | if ((vha->device_flags & DFLG_NO_CABLE)) |
| 4783 | status = 0; | 4780 | status = 0; |
| 4784 | |||
| 4785 | ql_dbg(ql_dbg_taskm, vha, 0x8032, | ||
| 4786 | "Configure loop done, status = 0x%x.\n", status); | ||
| 4787 | } | 4781 | } |
| 4788 | return (status); | 4782 | return (status); |
| 4789 | } | 4783 | } |
| @@ -6130,7 +6124,6 @@ int | |||
| 6130 | qla82xx_restart_isp(scsi_qla_host_t *vha) | 6124 | qla82xx_restart_isp(scsi_qla_host_t *vha) |
| 6131 | { | 6125 | { |
| 6132 | int status, rval; | 6126 | int status, rval; |
| 6133 | uint32_t wait_time; | ||
| 6134 | struct qla_hw_data *ha = vha->hw; | 6127 | struct qla_hw_data *ha = vha->hw; |
| 6135 | struct req_que *req = ha->req_q_map[0]; | 6128 | struct req_que *req = ha->req_q_map[0]; |
| 6136 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 6129 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
| @@ -6144,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
| 6144 | 6137 | ||
| 6145 | status = qla2x00_fw_ready(vha); | 6138 | status = qla2x00_fw_ready(vha); |
| 6146 | if (!status) { | 6139 | if (!status) { |
| 6147 | ql_log(ql_log_info, vha, 0x803c, | ||
| 6148 | "Start configure loop, status =%d.\n", status); | ||
| 6149 | |||
| 6150 | /* Issue a marker after FW becomes ready. */ | 6140 | /* Issue a marker after FW becomes ready. */ |
| 6151 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); | 6141 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); |
| 6152 | |||
| 6153 | vha->flags.online = 1; | 6142 | vha->flags.online = 1; |
| 6154 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ | 6143 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); |
| 6155 | wait_time = 256; | ||
| 6156 | do { | ||
| 6157 | clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | ||
| 6158 | qla2x00_configure_loop(vha); | ||
| 6159 | wait_time--; | ||
| 6160 | } while (!atomic_read(&vha->loop_down_timer) && | ||
| 6161 | !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) && | ||
| 6162 | wait_time && | ||
| 6163 | (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))); | ||
| 6164 | } | 6144 | } |
| 6165 | 6145 | ||
| 6166 | /* if no cable then assume it's good */ | 6146 | /* if no cable then assume it's good */ |
| 6167 | if ((vha->device_flags & DFLG_NO_CABLE)) | 6147 | if ((vha->device_flags & DFLG_NO_CABLE)) |
| 6168 | status = 0; | 6148 | status = 0; |
| 6169 | |||
| 6170 | ql_log(ql_log_info, vha, 0x8000, | ||
| 6171 | "Configure loop done, status = 0x%x.\n", status); | ||
| 6172 | } | 6149 | } |
| 6173 | 6150 | ||
| 6174 | if (!status) { | 6151 | if (!status) { |
| @@ -6182,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
| 6182 | vha->marker_needed = 1; | 6159 | vha->marker_needed = 1; |
| 6183 | } | 6160 | } |
| 6184 | 6161 | ||
| 6185 | vha->flags.online = 1; | ||
| 6186 | |||
| 6187 | ha->isp_ops->enable_intrs(ha); | 6162 | ha->isp_ops->enable_intrs(ha); |
| 6188 | 6163 | ||
| 6189 | ha->isp_abort_cnt = 0; | 6164 | ha->isp_abort_cnt = 0; |
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index ce8b5fb0f347..b3b1d6fc2d6c 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
| @@ -1,10 +1,11 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include "qla_target.h" | ||
| 8 | /** | 9 | /** |
| 9 | * qla24xx_calc_iocbs() - Determine number of Command Type 3 and | 10 | * qla24xx_calc_iocbs() - Determine number of Command Type 3 and |
| 10 | * Continuation Type 1 IOCBs to allocate. | 11 | * Continuation Type 1 IOCBs to allocate. |
| @@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) { | |||
| 128 | } | 129 | } |
| 129 | 130 | ||
| 130 | static inline void | 131 | static inline void |
| 131 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) | 132 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, |
| 133 | struct qla_tgt_cmd *tc) | ||
| 132 | { | 134 | { |
| 133 | struct dsd_dma *dsd_ptr, *tdsd_ptr; | 135 | struct dsd_dma *dsd_ptr, *tdsd_ptr; |
| 134 | struct crc_context *ctx; | 136 | struct crc_context *ctx; |
| 135 | 137 | ||
| 136 | ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); | 138 | if (sp) |
| 139 | ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); | ||
| 140 | else if (tc) | ||
| 141 | ctx = (struct crc_context *)tc->ctx; | ||
| 142 | else { | ||
| 143 | BUG(); | ||
| 144 | return; | ||
| 145 | } | ||
| 137 | 146 | ||
| 138 | /* clean up allocated prev pool */ | 147 | /* clean up allocated prev pool */ |
| 139 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, | 148 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, |
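qla2x00_clean_dsd_pool() now accepts either an initiator srb_t or a target-mode qla_tgt_cmd and takes the CRC context from whichever one is non-NULL, so a single DSD cleanup path serves both modes. Below is a compact sketch of that either/or dispatch pattern; the clean_ctx() helper and the generic command/context types are made up for illustration and are not the driver's structures.

#include <assert.h>
#include <stddef.h>

struct ctx;				/* shared cleanup context */
struct initiator_cmd { struct ctx *ctx; };
struct target_cmd    { struct ctx *ctx; };

static void release_ctx(struct ctx *c) { (void)c; /* free DSD lists, etc. */ }

/* Exactly one of 'icmd'/'tcmd' is expected to be non-NULL. */
static void clean_ctx(struct initiator_cmd *icmd, struct target_cmd *tcmd)
{
	struct ctx *c;

	if (icmd)
		c = icmd->ctx;
	else if (tcmd)
		c = tcmd->ctx;
	else {
		assert(0);	/* caller bug: mirrors the BUG() in the driver */
		return;
	}
	release_ctx(c);
}

int main(void)
{
	struct initiator_cmd ic = { NULL };

	clean_ctx(&ic, NULL);	/* initiator path */
	return 0;
}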
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index e607568bce49..760931529592 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, | |||
| 936 | return 1; | 936 | return 1; |
| 937 | } | 937 | } |
| 938 | 938 | ||
| 939 | static int | 939 | int |
| 940 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | 940 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, |
| 941 | uint32_t *dsd, uint16_t tot_dsds) | 941 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) |
| 942 | { | 942 | { |
| 943 | void *next_dsd; | 943 | void *next_dsd; |
| 944 | uint8_t avail_dsds = 0; | 944 | uint8_t avail_dsds = 0; |
| @@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | |||
| 948 | uint32_t *cur_dsd = dsd; | 948 | uint32_t *cur_dsd = dsd; |
| 949 | uint16_t used_dsds = tot_dsds; | 949 | uint16_t used_dsds = tot_dsds; |
| 950 | 950 | ||
| 951 | uint32_t prot_int; | 951 | uint32_t prot_int; /* protection interval */ |
| 952 | uint32_t partial; | 952 | uint32_t partial; |
| 953 | struct qla2_sgx sgx; | 953 | struct qla2_sgx sgx; |
| 954 | dma_addr_t sle_dma; | 954 | dma_addr_t sle_dma; |
| 955 | uint32_t sle_dma_len, tot_prot_dma_len = 0; | 955 | uint32_t sle_dma_len, tot_prot_dma_len = 0; |
| 956 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 956 | struct scsi_cmnd *cmd; |
| 957 | 957 | struct scsi_qla_host *vha; | |
| 958 | prot_int = cmd->device->sector_size; | ||
| 959 | 958 | ||
| 960 | memset(&sgx, 0, sizeof(struct qla2_sgx)); | 959 | memset(&sgx, 0, sizeof(struct qla2_sgx)); |
| 961 | sgx.tot_bytes = scsi_bufflen(cmd); | 960 | if (sp) { |
| 962 | sgx.cur_sg = scsi_sglist(cmd); | 961 | vha = sp->fcport->vha; |
| 963 | sgx.sp = sp; | 962 | cmd = GET_CMD_SP(sp); |
| 964 | 963 | prot_int = cmd->device->sector_size; | |
| 965 | sg_prot = scsi_prot_sglist(cmd); | 964 | |
| 965 | sgx.tot_bytes = scsi_bufflen(cmd); | ||
| 966 | sgx.cur_sg = scsi_sglist(cmd); | ||
| 967 | sgx.sp = sp; | ||
| 968 | |||
| 969 | sg_prot = scsi_prot_sglist(cmd); | ||
| 970 | } else if (tc) { | ||
| 971 | vha = tc->vha; | ||
| 972 | prot_int = tc->blk_sz; | ||
| 973 | sgx.tot_bytes = tc->bufflen; | ||
| 974 | sgx.cur_sg = tc->sg; | ||
| 975 | sg_prot = tc->prot_sg; | ||
| 976 | } else { | ||
| 977 | BUG(); | ||
| 978 | return 1; | ||
| 979 | } | ||
| 966 | 980 | ||
| 967 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { | 981 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { |
| 968 | 982 | ||
| @@ -995,10 +1009,18 @@ alloc_and_fill: | |||
| 995 | return 1; | 1009 | return 1; |
| 996 | } | 1010 | } |
| 997 | 1011 | ||
| 998 | list_add_tail(&dsd_ptr->list, | 1012 | if (sp) { |
| 999 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 1013 | list_add_tail(&dsd_ptr->list, |
| 1014 | &((struct crc_context *) | ||
| 1015 | sp->u.scmd.ctx)->dsd_list); | ||
| 1016 | |||
| 1017 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
| 1018 | } else { | ||
| 1019 | list_add_tail(&dsd_ptr->list, | ||
| 1020 | &(tc->ctx->dsd_list)); | ||
| 1021 | tc->ctx_dsd_alloced = 1; | ||
| 1022 | } | ||
| 1000 | 1023 | ||
| 1001 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
| 1002 | 1024 | ||
| 1003 | /* add new list to cmd iocb or last list */ | 1025 | /* add new list to cmd iocb or last list */ |
| 1004 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | 1026 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
| @@ -1033,21 +1055,35 @@ alloc_and_fill: | |||
| 1033 | return 0; | 1055 | return 0; |
| 1034 | } | 1056 | } |
| 1035 | 1057 | ||
| 1036 | static int | 1058 | int |
| 1037 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | 1059 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, |
| 1038 | uint16_t tot_dsds) | 1060 | uint16_t tot_dsds, struct qla_tgt_cmd *tc) |
| 1039 | { | 1061 | { |
| 1040 | void *next_dsd; | 1062 | void *next_dsd; |
| 1041 | uint8_t avail_dsds = 0; | 1063 | uint8_t avail_dsds = 0; |
| 1042 | uint32_t dsd_list_len; | 1064 | uint32_t dsd_list_len; |
| 1043 | struct dsd_dma *dsd_ptr; | 1065 | struct dsd_dma *dsd_ptr; |
| 1044 | struct scatterlist *sg; | 1066 | struct scatterlist *sg, *sgl; |
| 1045 | uint32_t *cur_dsd = dsd; | 1067 | uint32_t *cur_dsd = dsd; |
| 1046 | int i; | 1068 | int i; |
| 1047 | uint16_t used_dsds = tot_dsds; | 1069 | uint16_t used_dsds = tot_dsds; |
| 1048 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 1070 | struct scsi_cmnd *cmd; |
| 1071 | struct scsi_qla_host *vha; | ||
| 1072 | |||
| 1073 | if (sp) { | ||
| 1074 | cmd = GET_CMD_SP(sp); | ||
| 1075 | sgl = scsi_sglist(cmd); | ||
| 1076 | vha = sp->fcport->vha; | ||
| 1077 | } else if (tc) { | ||
| 1078 | sgl = tc->sg; | ||
| 1079 | vha = tc->vha; | ||
| 1080 | } else { | ||
| 1081 | BUG(); | ||
| 1082 | return 1; | ||
| 1083 | } | ||
| 1049 | 1084 | ||
| 1050 | scsi_for_each_sg(cmd, sg, tot_dsds, i) { | 1085 | |
| 1086 | for_each_sg(sgl, sg, tot_dsds, i) { | ||
| 1051 | dma_addr_t sle_dma; | 1087 | dma_addr_t sle_dma; |
| 1052 | 1088 | ||
| 1053 | /* Allocate additional continuation packets? */ | 1089 | /* Allocate additional continuation packets? */ |
| @@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
| 1076 | return 1; | 1112 | return 1; |
| 1077 | } | 1113 | } |
| 1078 | 1114 | ||
| 1079 | list_add_tail(&dsd_ptr->list, | 1115 | if (sp) { |
| 1080 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 1116 | list_add_tail(&dsd_ptr->list, |
| 1117 | &((struct crc_context *) | ||
| 1118 | sp->u.scmd.ctx)->dsd_list); | ||
| 1081 | 1119 | ||
| 1082 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | 1120 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
| 1121 | } else { | ||
| 1122 | list_add_tail(&dsd_ptr->list, | ||
| 1123 | &(tc->ctx->dsd_list)); | ||
| 1124 | tc->ctx_dsd_alloced = 1; | ||
| 1125 | } | ||
| 1083 | 1126 | ||
| 1084 | /* add new list to cmd iocb or last list */ | 1127 | /* add new list to cmd iocb or last list */ |
| 1085 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | 1128 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
| @@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
| 1102 | return 0; | 1145 | return 0; |
| 1103 | } | 1146 | } |
| 1104 | 1147 | ||
| 1105 | static int | 1148 | int |
| 1106 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | 1149 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, |
| 1107 | uint32_t *dsd, | 1150 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) |
| 1108 | uint16_t tot_dsds) | ||
| 1109 | { | 1151 | { |
| 1110 | void *next_dsd; | 1152 | void *next_dsd; |
| 1111 | uint8_t avail_dsds = 0; | 1153 | uint8_t avail_dsds = 0; |
| 1112 | uint32_t dsd_list_len; | 1154 | uint32_t dsd_list_len; |
| 1113 | struct dsd_dma *dsd_ptr; | 1155 | struct dsd_dma *dsd_ptr; |
| 1114 | struct scatterlist *sg; | 1156 | struct scatterlist *sg, *sgl; |
| 1115 | int i; | 1157 | int i; |
| 1116 | struct scsi_cmnd *cmd; | 1158 | struct scsi_cmnd *cmd; |
| 1117 | uint32_t *cur_dsd = dsd; | 1159 | uint32_t *cur_dsd = dsd; |
| 1118 | uint16_t used_dsds = tot_dsds; | 1160 | uint16_t used_dsds = tot_dsds; |
| 1161 | struct scsi_qla_host *vha; | ||
| 1162 | |||
| 1163 | if (sp) { | ||
| 1164 | cmd = GET_CMD_SP(sp); | ||
| 1165 | sgl = scsi_prot_sglist(cmd); | ||
| 1166 | vha = sp->fcport->vha; | ||
| 1167 | } else if (tc) { | ||
| 1168 | vha = tc->vha; | ||
| 1169 | sgl = tc->prot_sg; | ||
| 1170 | } else { | ||
| 1171 | BUG(); | ||
| 1172 | return 1; | ||
| 1173 | } | ||
| 1119 | 1174 | ||
| 1120 | cmd = GET_CMD_SP(sp); | 1175 | ql_dbg(ql_dbg_tgt, vha, 0xe021, |
| 1121 | scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { | 1176 | "%s: enter\n", __func__); |
| 1177 | |||
| 1178 | for_each_sg(sgl, sg, tot_dsds, i) { | ||
| 1122 | dma_addr_t sle_dma; | 1179 | dma_addr_t sle_dma; |
| 1123 | 1180 | ||
| 1124 | /* Allocate additional continuation packets? */ | 1181 | /* Allocate additional continuation packets? */ |
| @@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | |||
| 1147 | return 1; | 1204 | return 1; |
| 1148 | } | 1205 | } |
| 1149 | 1206 | ||
| 1150 | list_add_tail(&dsd_ptr->list, | 1207 | if (sp) { |
| 1151 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 1208 | list_add_tail(&dsd_ptr->list, |
| 1209 | &((struct crc_context *) | ||
| 1210 | sp->u.scmd.ctx)->dsd_list); | ||
| 1152 | 1211 | ||
| 1153 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | 1212 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
| 1213 | } else { | ||
| 1214 | list_add_tail(&dsd_ptr->list, | ||
| 1215 | &(tc->ctx->dsd_list)); | ||
| 1216 | tc->ctx_dsd_alloced = 1; | ||
| 1217 | } | ||
| 1154 | 1218 | ||
| 1155 | /* add new list to cmd iocb or last list */ | 1219 | /* add new list to cmd iocb or last list */ |
| 1156 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | 1220 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
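The three DSD builders above stop iterating the scsi_cmnd directly (scsi_for_each_sg / scsi_for_each_prot_sg) and instead resolve a scatterlist head up front, from the midlayer command on the initiator path or from the target command's sg/prot_sg, and then walk it with for_each_sg(), which lets target mode reuse the same code. A rough sketch of that refactor shape follows; walk_sgl(), the minimal segment type, and the emit callback are illustrative stand-ins, not the driver's types.

#include <stddef.h>
#include <stdint.h>

struct seg { uint64_t dma; uint32_t len; struct seg *next; };
struct host_cmd { struct seg *sgl; };	/* initiator-side command */
struct tgt_cmd  { struct seg *sgl; };	/* target-side command */

/* Shared builder: callers pass whichever command type they have. */
static int walk_sgl(struct host_cmd *hc, struct tgt_cmd *tc,
		    void (*emit)(uint64_t dma, uint32_t len))
{
	struct seg *sgl, *s;

	if (hc)
		sgl = hc->sgl;
	else if (tc)
		sgl = tc->sgl;
	else
		return 1;	/* mirrors the BUG() + error return above */

	for (s = sgl; s; s = s->next)
		emit(s->dma, s->len);
	return 0;
}

static void print_seg(uint64_t dma, uint32_t len) { (void)dma; (void)len; }

int main(void)
{
	struct seg s2 = { 0x2000, 512, NULL };
	struct seg s1 = { 0x1000, 512, &s2 };
	struct tgt_cmd tc = { &s1 };

	return walk_sgl(NULL, &tc, print_seg);	/* target-mode path */
}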
| @@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
| 1386 | 1450 | ||
| 1387 | if (!bundling && tot_prot_dsds) { | 1451 | if (!bundling && tot_prot_dsds) { |
| 1388 | if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, | 1452 | if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, |
| 1389 | cur_dsd, tot_dsds)) | 1453 | cur_dsd, tot_dsds, NULL)) |
| 1390 | goto crc_queuing_error; | 1454 | goto crc_queuing_error; |
| 1391 | } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | 1455 | } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, |
| 1392 | (tot_dsds - tot_prot_dsds))) | 1456 | (tot_dsds - tot_prot_dsds), NULL)) |
| 1393 | goto crc_queuing_error; | 1457 | goto crc_queuing_error; |
| 1394 | 1458 | ||
| 1395 | if (bundling && tot_prot_dsds) { | 1459 | if (bundling && tot_prot_dsds) { |
| @@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
| 1398 | __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); | 1462 | __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); |
| 1399 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; | 1463 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; |
| 1400 | if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, | 1464 | if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, |
| 1401 | tot_prot_dsds)) | 1465 | tot_prot_dsds, NULL)) |
| 1402 | goto crc_queuing_error; | 1466 | goto crc_queuing_error; |
| 1403 | } | 1467 | } |
| 1404 | return QLA_SUCCESS; | 1468 | return QLA_SUCCESS; |
| @@ -1478,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp) | |||
| 1478 | tot_dsds = nseg; | 1542 | tot_dsds = nseg; |
| 1479 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); | 1543 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); |
| 1480 | if (req->cnt < (req_cnt + 2)) { | 1544 | if (req->cnt < (req_cnt + 2)) { |
| 1481 | cnt = RD_REG_DWORD_RELAXED(req->req_q_out); | 1545 | cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
| 1482 | 1546 | RD_REG_DWORD_RELAXED(req->req_q_out); | |
| 1483 | if (req->ring_index < cnt) | 1547 | if (req->ring_index < cnt) |
| 1484 | req->cnt = cnt - req->ring_index; | 1548 | req->cnt = cnt - req->ring_index; |
| 1485 | else | 1549 | else |
| @@ -1697,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
| 1697 | tot_prot_dsds = nseg; | 1761 | tot_prot_dsds = nseg; |
| 1698 | tot_dsds += nseg; | 1762 | tot_dsds += nseg; |
| 1699 | if (req->cnt < (req_cnt + 2)) { | 1763 | if (req->cnt < (req_cnt + 2)) { |
| 1700 | cnt = RD_REG_DWORD_RELAXED(req->req_q_out); | 1764 | cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
| 1701 | 1765 | RD_REG_DWORD_RELAXED(req->req_q_out); | |
| 1702 | if (req->ring_index < cnt) | 1766 | if (req->ring_index < cnt) |
| 1703 | req->cnt = cnt - req->ring_index; | 1767 | req->cnt = cnt - req->ring_index; |
| 1704 | else | 1768 | else |
| @@ -2825,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) | |||
| 2825 | 2889 | ||
| 2826 | /* Check for room on request queue. */ | 2890 | /* Check for room on request queue. */ |
| 2827 | if (req->cnt < req_cnt + 2) { | 2891 | if (req->cnt < req_cnt + 2) { |
| 2828 | cnt = RD_REG_DWORD_RELAXED(req->req_q_out); | 2892 | cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
| 2829 | 2893 | RD_REG_DWORD_RELAXED(req->req_q_out); | |
| 2830 | if (req->ring_index < cnt) | 2894 | if (req->ring_index < cnt) |
| 2831 | req->cnt = cnt - req->ring_index; | 2895 | req->cnt = cnt - req->ring_index; |
| 2832 | else | 2896 | else |
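All three queueing paths above (qla24xx_start_scsi, qla24xx_dif_start_scsi, qla2x00_start_bidir) now read the request-queue out index from the shadow copy in host memory on shadow-capable ISP27xx parts (*req->out_ptr) instead of a PCI register read, then apply the same wraparound arithmetic to find free slots. A standalone sketch of that free-space calculation, with a hypothetical ring_free() helper; the two branches correspond to the consumer index being ahead of or behind ring_index.

#include <stdint.h>

/*
 * Free entries in a circular request ring, given the producer index the
 * driver owns (ring_index) and the consumer index reported by the firmware
 * (out), whether it came from a register read or a shadow copy in memory.
 */
static uint16_t ring_free(uint16_t length, uint16_t ring_index, uint16_t out)
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}

int main(void)
{
	/* 128-entry ring, producer at 120, firmware consumed up to 8: 16 free. */
	return ring_free(128, 120, 8) == 16 ? 0 : 1;
}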
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 95314ef2e505..a56825c73c31 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -2009,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
| 2009 | ql_dbg(ql_dbg_io, vha, 0x3017, | 2009 | ql_dbg(ql_dbg_io, vha, 0x3017, |
| 2010 | "Invalid status handle (0x%x).\n", sts->handle); | 2010 | "Invalid status handle (0x%x).\n", sts->handle); |
| 2011 | 2011 | ||
| 2012 | if (IS_P3P_TYPE(ha)) | 2012 | if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { |
| 2013 | set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); | 2013 | if (IS_P3P_TYPE(ha)) |
| 2014 | else | 2014 | set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); |
| 2015 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 2015 | else |
| 2016 | qla2xxx_wake_dpc(vha); | 2016 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
| 2017 | qla2xxx_wake_dpc(vha); | ||
| 2018 | } | ||
| 2017 | return; | 2019 | return; |
| 2018 | } | 2020 | } |
| 2019 | 2021 | ||
| @@ -2472,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
| 2472 | if (pkt->entry_status != 0) { | 2474 | if (pkt->entry_status != 0) { |
| 2473 | qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); | 2475 | qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); |
| 2474 | 2476 | ||
| 2475 | (void)qlt_24xx_process_response_error(vha, pkt); | 2477 | if (qlt_24xx_process_response_error(vha, pkt)) |
| 2478 | goto process_err; | ||
| 2476 | 2479 | ||
| 2477 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 2480 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
| 2478 | wmb(); | 2481 | wmb(); |
| 2479 | continue; | 2482 | continue; |
| 2480 | } | 2483 | } |
| 2484 | process_err: | ||
| 2481 | 2485 | ||
| 2482 | switch (pkt->entry_type) { | 2486 | switch (pkt->entry_type) { |
| 2483 | case STATUS_TYPE: | 2487 | case STATUS_TYPE: |
| @@ -2494,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
| 2494 | qla24xx_logio_entry(vha, rsp->req, | 2498 | qla24xx_logio_entry(vha, rsp->req, |
| 2495 | (struct logio_entry_24xx *)pkt); | 2499 | (struct logio_entry_24xx *)pkt); |
| 2496 | break; | 2500 | break; |
| 2497 | case CT_IOCB_TYPE: | 2501 | case CT_IOCB_TYPE: |
| 2498 | qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); | 2502 | qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); |
| 2499 | break; | 2503 | break; |
| 2500 | case ELS_IOCB_TYPE: | 2504 | case ELS_IOCB_TYPE: |
| 2501 | qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); | 2505 | qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); |
| 2502 | break; | 2506 | break; |
| 2503 | case ABTS_RECV_24XX: | 2507 | case ABTS_RECV_24XX: |
| @@ -2506,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
| 2506 | case ABTS_RESP_24XX: | 2510 | case ABTS_RESP_24XX: |
| 2507 | case CTIO_TYPE7: | 2511 | case CTIO_TYPE7: |
| 2508 | case NOTIFY_ACK_TYPE: | 2512 | case NOTIFY_ACK_TYPE: |
| 2513 | case CTIO_CRC2: | ||
| 2509 | qlt_response_pkt_all_vps(vha, (response_t *)pkt); | 2514 | qlt_response_pkt_all_vps(vha, (response_t *)pkt); |
| 2510 | break; | 2515 | break; |
| 2511 | case MARKER_TYPE: | 2516 | case MARKER_TYPE: |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2528709c4add..1c33a77db5c2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -1319,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) | |||
| 1319 | 1319 | ||
| 1320 | left = 0; | 1320 | left = 0; |
| 1321 | 1321 | ||
| 1322 | list = kzalloc(dma_size, GFP_KERNEL); | 1322 | list = kmemdup(pmap, dma_size, GFP_KERNEL); |
| 1323 | if (!list) { | 1323 | if (!list) { |
| 1324 | ql_log(ql_log_warn, vha, 0x1140, | 1324 | ql_log(ql_log_warn, vha, 0x1140, |
| 1325 | "%s(%ld): failed to allocate node names list " | 1325 | "%s(%ld): failed to allocate node names list " |
| @@ -1328,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) | |||
| 1328 | goto out_free; | 1328 | goto out_free; |
| 1329 | } | 1329 | } |
| 1330 | 1330 | ||
| 1331 | memcpy(list, pmap, dma_size); | ||
| 1332 | restart: | 1331 | restart: |
| 1333 | dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); | 1332 | dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); |
| 1334 | } | 1333 | } |
| @@ -2644,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp) | |||
| 2644 | ql_dbg(ql_dbg_mbx, vha, 0x1090, | 2643 | ql_dbg(ql_dbg_mbx, vha, 0x1090, |
| 2645 | "Failed to complete IOCB -- completion status (%x).\n", | 2644 | "Failed to complete IOCB -- completion status (%x).\n", |
| 2646 | le16_to_cpu(abt->nport_handle)); | 2645 | le16_to_cpu(abt->nport_handle)); |
| 2647 | rval = QLA_FUNCTION_FAILED; | 2646 | if (abt->nport_handle == CS_IOCB_ERROR) |
| 2647 | rval = QLA_FUNCTION_PARAMETER_ERROR; | ||
| 2648 | else | ||
| 2649 | rval = QLA_FUNCTION_FAILED; | ||
| 2648 | } else { | 2650 | } else { |
| 2649 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, | 2651 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, |
| 2650 | "Done %s.\n", __func__); | 2652 | "Done %s.\n", __func__); |
| @@ -2879,6 +2881,78 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) | |||
| 2879 | return rval; | 2881 | return rval; |
| 2880 | } | 2882 | } |
| 2881 | 2883 | ||
| 2884 | int | ||
| 2885 | qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) | ||
| 2886 | { | ||
| 2887 | int rval; | ||
| 2888 | mbx_cmd_t mc; | ||
| 2889 | mbx_cmd_t *mcp = &mc; | ||
| 2890 | |||
| 2891 | if (!IS_QLA8044(vha->hw)) | ||
| 2892 | return QLA_FUNCTION_FAILED; | ||
| 2893 | |||
| 2894 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186, | ||
| 2895 | "Entered %s.\n", __func__); | ||
| 2896 | |||
| 2897 | mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; | ||
| 2898 | mcp->mb[1] = HCS_WRITE_SERDES; | ||
| 2899 | mcp->mb[3] = LSW(addr); | ||
| 2900 | mcp->mb[4] = MSW(addr); | ||
| 2901 | mcp->mb[5] = LSW(data); | ||
| 2902 | mcp->mb[6] = MSW(data); | ||
| 2903 | mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; | ||
| 2904 | mcp->in_mb = MBX_0; | ||
| 2905 | mcp->tov = MBX_TOV_SECONDS; | ||
| 2906 | mcp->flags = 0; | ||
| 2907 | rval = qla2x00_mailbox_command(vha, mcp); | ||
| 2908 | |||
| 2909 | if (rval != QLA_SUCCESS) { | ||
| 2910 | ql_dbg(ql_dbg_mbx, vha, 0x1187, | ||
| 2911 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
| 2912 | } else { | ||
| 2913 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, | ||
| 2914 | "Done %s.\n", __func__); | ||
| 2915 | } | ||
| 2916 | |||
| 2917 | return rval; | ||
| 2918 | } | ||
| 2919 | |||
| 2920 | int | ||
| 2921 | qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) | ||
| 2922 | { | ||
| 2923 | int rval; | ||
| 2924 | mbx_cmd_t mc; | ||
| 2925 | mbx_cmd_t *mcp = &mc; | ||
| 2926 | |||
| 2927 | if (!IS_QLA8044(vha->hw)) | ||
| 2928 | return QLA_FUNCTION_FAILED; | ||
| 2929 | |||
| 2930 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, | ||
| 2931 | "Entered %s.\n", __func__); | ||
| 2932 | |||
| 2933 | mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; | ||
| 2934 | mcp->mb[1] = HCS_READ_SERDES; | ||
| 2935 | mcp->mb[3] = LSW(addr); | ||
| 2936 | mcp->mb[4] = MSW(addr); | ||
| 2937 | mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; | ||
| 2938 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | ||
| 2939 | mcp->tov = MBX_TOV_SECONDS; | ||
| 2940 | mcp->flags = 0; | ||
| 2941 | rval = qla2x00_mailbox_command(vha, mcp); | ||
| 2942 | |||
| 2943 | *data = mcp->mb[2] << 16 | mcp->mb[1]; | ||
| 2944 | |||
| 2945 | if (rval != QLA_SUCCESS) { | ||
| 2946 | ql_dbg(ql_dbg_mbx, vha, 0x118a, | ||
| 2947 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
| 2948 | } else { | ||
| 2949 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, | ||
| 2950 | "Done %s.\n", __func__); | ||
| 2951 | } | ||
| 2952 | |||
| 2953 | return rval; | ||
| 2954 | } | ||
| 2955 | |||
| 2882 | /** | 2956 | /** |
| 2883 | * qla2x00_set_serdes_params() - | 2957 | * qla2x00_set_serdes_params() - |
| 2884 | * @ha: HA context | 2958 | * @ha: HA context |
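The new qla8044_write_serdes_word()/qla8044_read_serdes_word() pass a 32-bit SerDes address and data value through 16-bit mailbox registers, splitting with LSW/MSW on the way out and reassembling the read result from mb[2] and mb[1]. A tiny sketch of that packing, with a plain mb[] array standing in for the mailbox registers; the values and the main() harness are illustrative only.

#include <assert.h>
#include <stdint.h>

#define LSW(x)	((uint16_t)((x) & 0xffff))
#define MSW(x)	((uint16_t)((x) >> 16))

int main(void)
{
	uint16_t mb[8] = { 0 };
	uint32_t addr = 0x00123456, data = 0xdeadbeef;

	/* Write request: address in mb[3]/mb[4], data in mb[5]/mb[6]. */
	mb[3] = LSW(addr); mb[4] = MSW(addr);
	mb[5] = LSW(data); mb[6] = MSW(data);

	/* Read completion: firmware returns the word in mb[1] (low) and mb[2] (high). */
	mb[1] = LSW(data); mb[2] = MSW(data);
	uint32_t readback = (uint32_t)mb[2] << 16 | mb[1];

	assert(readback == data);
	return 0;
}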
| @@ -3660,6 +3734,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
| 3660 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, | 3734 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, |
| 3661 | "Entered %s.\n", __func__); | 3735 | "Entered %s.\n", __func__); |
| 3662 | 3736 | ||
| 3737 | if (IS_SHADOW_REG_CAPABLE(ha)) | ||
| 3738 | req->options |= BIT_13; | ||
| 3739 | |||
| 3663 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3740 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
| 3664 | mcp->mb[1] = req->options; | 3741 | mcp->mb[1] = req->options; |
| 3665 | mcp->mb[2] = MSW(LSD(req->dma)); | 3742 | mcp->mb[2] = MSW(LSD(req->dma)); |
| @@ -3679,7 +3756,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
| 3679 | /* que in ptr index */ | 3756 | /* que in ptr index */ |
| 3680 | mcp->mb[8] = 0; | 3757 | mcp->mb[8] = 0; |
| 3681 | /* que out ptr index */ | 3758 | /* que out ptr index */ |
| 3682 | mcp->mb[9] = 0; | 3759 | mcp->mb[9] = *req->out_ptr = 0; |
| 3683 | mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| | 3760 | mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| |
| 3684 | MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 3761 | MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
| 3685 | mcp->in_mb = MBX_0; | 3762 | mcp->in_mb = MBX_0; |
| @@ -3688,7 +3765,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
| 3688 | 3765 | ||
| 3689 | if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) | 3766 | if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) |
| 3690 | mcp->in_mb |= MBX_1; | 3767 | mcp->in_mb |= MBX_1; |
| 3691 | if (IS_QLA83XX(ha) || !IS_QLA27XX(ha)) { | 3768 | if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
| 3692 | mcp->out_mb |= MBX_15; | 3769 | mcp->out_mb |= MBX_15; |
| 3693 | /* debug q create issue in SR-IOV */ | 3770 | /* debug q create issue in SR-IOV */ |
| 3694 | mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; | 3771 | mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; |
| @@ -3697,7 +3774,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
| 3697 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3774 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 3698 | if (!(req->options & BIT_0)) { | 3775 | if (!(req->options & BIT_0)) { |
| 3699 | WRT_REG_DWORD(req->req_q_in, 0); | 3776 | WRT_REG_DWORD(req->req_q_in, 0); |
| 3700 | if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha)) | 3777 | if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) |
| 3701 | WRT_REG_DWORD(req->req_q_out, 0); | 3778 | WRT_REG_DWORD(req->req_q_out, 0); |
| 3702 | } | 3779 | } |
| 3703 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 3780 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
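The two condition fixes in this hunk are straight boolean corrections: the old `!IS_QLA83XX(ha) || !IS_QLA27XX(ha)` is always true because no chip is both types, so the guard around the out-index register write never actually gated anything, and `IS_QLA83XX(ha) || !IS_QLA27XX(ha)` was true for everything except a 27xx. By De Morgan's law, "neither 83xx nor 27xx" is `!A && !B`, not `!A || !B`. A two-line illustration with plain booleans:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	/* A device is at most one of these types. */
	bool is83xx = false, is27xx = true;

	/* Old test: always true, so the legacy-chip branch ran even on 27xx. */
	assert((!is83xx || !is27xx) == true);

	/* Fixed test: only true when the chip is neither 83xx nor 27xx. */
	assert((!is83xx && !is27xx) == false);
	return 0;
}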
| @@ -3726,6 +3803,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
| 3726 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, | 3803 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, |
| 3727 | "Entered %s.\n", __func__); | 3804 | "Entered %s.\n", __func__); |
| 3728 | 3805 | ||
| 3806 | if (IS_SHADOW_REG_CAPABLE(ha)) | ||
| 3807 | rsp->options |= BIT_13; | ||
| 3808 | |||
| 3729 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3809 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
| 3730 | mcp->mb[1] = rsp->options; | 3810 | mcp->mb[1] = rsp->options; |
| 3731 | mcp->mb[2] = MSW(LSD(rsp->dma)); | 3811 | mcp->mb[2] = MSW(LSD(rsp->dma)); |
| @@ -3740,7 +3820,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
| 3740 | 3820 | ||
| 3741 | mcp->mb[4] = rsp->id; | 3821 | mcp->mb[4] = rsp->id; |
| 3742 | /* que in ptr index */ | 3822 | /* que in ptr index */ |
| 3743 | mcp->mb[8] = 0; | 3823 | mcp->mb[8] = *rsp->in_ptr = 0; |
| 3744 | /* que out ptr index */ | 3824 | /* que out ptr index */ |
| 3745 | mcp->mb[9] = 0; | 3825 | mcp->mb[9] = 0; |
| 3746 | mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 | 3826 | mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 |
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f0a852257f99..89998244f48d 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 0aaf6a9c87d3..abeb3901498b 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -527,21 +527,63 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) | |||
| 527 | struct qla_hw_data *ha = vha->hw; | 527 | struct qla_hw_data *ha = vha->hw; |
| 528 | int i, core; | 528 | int i, core; |
| 529 | uint32_t cnt; | 529 | uint32_t cnt; |
| 530 | uint32_t reg_val; | ||
| 531 | |||
| 532 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
| 533 | |||
| 534 | QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0); | ||
| 535 | QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0); | ||
| 536 | |||
| 537 | /* stop the XOR DMA engines */ | ||
| 538 | QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02); | ||
| 539 | QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02); | ||
| 540 | QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02); | ||
| 541 | QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02); | ||
| 542 | |||
| 543 | /* stop the IDMA engines */ | ||
| 544 | reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840); | ||
| 545 | reg_val &= ~(1<<12); | ||
| 546 | QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val); | ||
| 547 | |||
| 548 | reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844); | ||
| 549 | reg_val &= ~(1<<12); | ||
| 550 | QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val); | ||
| 551 | |||
| 552 | reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848); | ||
| 553 | reg_val &= ~(1<<12); | ||
| 554 | QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val); | ||
| 555 | |||
| 556 | reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C); | ||
| 557 | reg_val &= ~(1<<12); | ||
| 558 | QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val); | ||
| 559 | |||
| 560 | for (i = 0; i < 100000; i++) { | ||
| 561 | if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 && | ||
| 562 | (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0) | ||
| 563 | break; | ||
| 564 | udelay(100); | ||
| 565 | } | ||
| 530 | 566 | ||
| 531 | /* Set all 4 cores in reset */ | 567 | /* Set all 4 cores in reset */ |
| 532 | for (i = 0; i < 4; i++) { | 568 | for (i = 0; i < 4; i++) { |
| 533 | QLAFX00_SET_HBA_SOC_REG(ha, | 569 | QLAFX00_SET_HBA_SOC_REG(ha, |
| 534 | (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); | 570 | (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); |
| 535 | } | ||
| 536 | |||
| 537 | /* Set all 4 core Clock gating control */ | ||
| 538 | for (i = 0; i < 4; i++) { | ||
| 539 | QLAFX00_SET_HBA_SOC_REG(ha, | 571 | QLAFX00_SET_HBA_SOC_REG(ha, |
| 540 | (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); | 572 | (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); |
| 541 | } | 573 | } |
| 542 | 574 | ||
| 543 | /* Reset all units in Fabric */ | 575 | /* Reset all units in Fabric */ |
| 544 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101)); | 576 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101)); |
| 577 | |||
| 578 | /* */ | ||
| 579 | QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1); | ||
| 580 | QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0); | ||
| 581 | |||
| 582 | /* Set all 4 core Memory Power Down Registers */ | ||
| 583 | for (i = 0; i < 5; i++) { | ||
| 584 | QLAFX00_SET_HBA_SOC_REG(ha, | ||
| 585 | (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0)); | ||
| 586 | } | ||
| 545 | 587 | ||
| 546 | /* Reset all interrupt control registers */ | 588 | /* Reset all interrupt control registers */ |
| 547 | for (i = 0; i < 115; i++) { | 589 | for (i = 0; i < 115; i++) { |
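The expanded qlafx00_soc_cpu_reset() above now quiesces the XOR and IDMA engines and then polls two status registers (bit 28 of 0xd0000 and bit 0 of 0x10600) for up to 100000 iterations of 100 microseconds before putting the cores into reset. Below is a generic sketch of that bounded-poll pattern; the wait_engines_idle() name and the read_reg()/delay_us() callbacks are hypothetical stand-ins for QLAFX00_GET_HBA_SOC_REG and udelay().

#include <stdbool.h>
#include <stdint.h>

/* Poll until the watched bits clear in both registers, or give up. */
bool wait_engines_idle(uint32_t (*read_reg)(uint32_t addr),
		       void (*delay_us)(unsigned int us),
		       unsigned int max_iters)
{
	for (unsigned int i = 0; i < max_iters; i++) {
		if ((read_reg(0xd0000) & 0x10000000) == 0 &&
		    (read_reg(0x10600) & 0x1) == 0)
			return true;	/* both engines quiesced */
		delay_us(100);
	}
	return false;			/* timed out; caller resets anyway */
}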
| @@ -564,20 +606,19 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) | |||
| 564 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); | 606 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); |
| 565 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); | 607 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); |
| 566 | 608 | ||
| 567 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
| 568 | |||
| 569 | /* Kick in Fabric units */ | 609 | /* Kick in Fabric units */ |
| 570 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); | 610 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); |
| 571 | 611 | ||
| 572 | /* Kick in Core0 to start boot process */ | 612 | /* Kick in Core0 to start boot process */ |
| 573 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); | 613 | QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); |
| 574 | 614 | ||
| 615 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
| 616 | |||
| 575 | /* Wait 10secs for soft-reset to complete. */ | 617 | /* Wait 10secs for soft-reset to complete. */ |
| 576 | for (cnt = 10; cnt; cnt--) { | 618 | for (cnt = 10; cnt; cnt--) { |
| 577 | msleep(1000); | 619 | msleep(1000); |
| 578 | barrier(); | 620 | barrier(); |
| 579 | } | 621 | } |
| 580 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
| 581 | } | 622 | } |
| 582 | 623 | ||
| 583 | /** | 624 | /** |
| @@ -597,7 +638,6 @@ qlafx00_soft_reset(scsi_qla_host_t *vha) | |||
| 597 | 638 | ||
| 598 | ha->isp_ops->disable_intrs(ha); | 639 | ha->isp_ops->disable_intrs(ha); |
| 599 | qlafx00_soc_cpu_reset(vha); | 640 | qlafx00_soc_cpu_reset(vha); |
| 600 | ha->isp_ops->enable_intrs(ha); | ||
| 601 | } | 641 | } |
| 602 | 642 | ||
| 603 | /** | 643 | /** |
| @@ -2675,7 +2715,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha, | |||
| 2675 | uint16_t lreq_q_out = 0; | 2715 | uint16_t lreq_q_out = 0; |
| 2676 | 2716 | ||
| 2677 | lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); | 2717 | lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); |
| 2678 | lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out); | 2718 | lreq_q_out = rsp->ring_index; |
| 2679 | 2719 | ||
| 2680 | while (lreq_q_in != lreq_q_out) { | 2720 | while (lreq_q_in != lreq_q_out) { |
| 2681 | lptr = rsp->ring_ptr; | 2721 | lptr = rsp->ring_ptr; |
| @@ -3426,7 +3466,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) | |||
| 3426 | sp->fcport->vha, 0x3047, | 3466 | sp->fcport->vha, 0x3047, |
| 3427 | (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); | 3467 | (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); |
| 3428 | 3468 | ||
| 3429 | memcpy((void *)pfxiocb, &fx_iocb, | 3469 | memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, |
| 3430 | sizeof(struct fxdisc_entry_fx00)); | 3470 | sizeof(struct fxdisc_entry_fx00)); |
| 3431 | wmb(); | 3471 | wmb(); |
| 3432 | } | 3472 | } |
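In the qla_mr.c hunks above, the SoC reset path now clears bit 12 in the 0x60848/0x6084C control registers and then polls the 0xd0000 and 0x10600 status registers for up to 100000 iterations of udelay(100) before holding the cores in reset; the 10-second soft-reset wait also runs outside hardware_lock. The snippet below is a compilable userspace model of that bounded-poll step only; soc_reg_read() and the constants are stand-ins, not the driver's QLAFX00_GET_HBA_SOC_REG() accessor.

```c
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Placeholder for the SoC register read; stubbed so the example compiles. */
static uint32_t soc_reg_read(uint32_t addr)
{
	(void)addr;
	return 0;	/* pretend the busy bits have already cleared */
}

/* Bounded poll: give the hardware up to iters * delay_us microseconds to
 * clear the busy bits, then carry on regardless, like the 100000-iteration
 * loop added to the SoC reset path. */
static int wait_soc_idle(uint32_t addr, uint32_t busy_mask,
			 unsigned int iters, unsigned int delay_us)
{
	unsigned int i;

	for (i = 0; i < iters; i++) {
		if ((soc_reg_read(addr) & busy_mask) == 0)
			return 0;	/* idle */
		usleep(delay_us);
	}
	return -1;			/* timed out; caller proceeds anyway */
}

int main(void)
{
	/* 0xd0000 / bit 28 mirrors one of the registers polled in the patch. */
	if (wait_soc_idle(0xd0000, 1u << 28, 100000, 100))
		fprintf(stderr, "SoC still busy, continuing with reset\n");
	return 0;
}
```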
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h index e529dfaeb854..aeaa1b40b1fc 100644 --- a/drivers/scsi/qla2xxx/qla_mr.h +++ b/drivers/scsi/qla2xxx/qla_mr.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -351,6 +351,7 @@ struct config_info_data { | |||
| 351 | #define SOC_FABRIC_RST_CONTROL_REG 0x0020840 | 351 | #define SOC_FABRIC_RST_CONTROL_REG 0x0020840 |
| 352 | #define SOC_FABRIC_CONTROL_REG 0x0020200 | 352 | #define SOC_FABRIC_CONTROL_REG 0x0020200 |
| 353 | #define SOC_FABRIC_CONFIG_REG 0x0020204 | 353 | #define SOC_FABRIC_CONFIG_REG 0x0020204 |
| 354 | #define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C | ||
| 354 | 355 | ||
| 355 | #define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00 | 356 | #define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00 |
| 356 | #define SOC_CORE_TIMER_REG 0x0021850 | 357 | #define SOC_CORE_TIMER_REG 0x0021850 |
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 5511e24b1f11..58f3c912d96e 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -848,6 +848,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha) | |||
| 848 | { | 848 | { |
| 849 | int done = 0, timeout = 0; | 849 | int done = 0, timeout = 0; |
| 850 | uint32_t lock_owner = 0; | 850 | uint32_t lock_owner = 0; |
| 851 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | ||
| 851 | 852 | ||
| 852 | while (!done) { | 853 | while (!done) { |
| 853 | /* acquire semaphore2 from PCI HW block */ | 854 | /* acquire semaphore2 from PCI HW block */ |
| @@ -856,17 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha) | |||
| 856 | break; | 857 | break; |
| 857 | if (timeout >= qla82xx_rom_lock_timeout) { | 858 | if (timeout >= qla82xx_rom_lock_timeout) { |
| 858 | lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); | 859 | lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); |
| 860 | ql_log(ql_log_warn, vha, 0xb157, | ||
| 861 | "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", | ||
| 862 | __func__, ha->portnum, lock_owner); | ||
| 859 | return -1; | 863 | return -1; |
| 860 | } | 864 | } |
| 861 | timeout++; | 865 | timeout++; |
| 862 | } | 866 | } |
| 863 | qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); | 867 | qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum); |
| 864 | return 0; | 868 | return 0; |
| 865 | } | 869 | } |
| 866 | 870 | ||
| 867 | static void | 871 | static void |
| 868 | qla82xx_rom_unlock(struct qla_hw_data *ha) | 872 | qla82xx_rom_unlock(struct qla_hw_data *ha) |
| 869 | { | 873 | { |
| 874 | qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff); | ||
| 870 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); | 875 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); |
| 871 | } | 876 | } |
| 872 | 877 | ||
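With ROM_LOCK_DRIVER gone, qla82xx_rom_lock() above now stamps QLA82XX_ROM_LOCK_ID with the acquiring function's ha->portnum and qla82xx_rom_unlock() resets it to 0xffffffff, so every timeout path can name the port that actually holds SEM2. A toy model of that owner-ID bookkeeping, with the PCIe semaphore replaced by plain variables, is sketched below.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Toy model of the hardware semaphore + owner-ID register pair: the real
 * driver uses PCIe semaphore registers, here both live in memory. */
static bool sem2_taken;
static uint32_t rom_lock_id = 0xffffffff;	/* "no owner" */

static int rom_lock(uint32_t portnum)
{
	if (sem2_taken)
		return -1;		/* caller logs rom_lock_id as the owner */
	sem2_taken = true;
	rom_lock_id = portnum;		/* record who owns the flash lock */
	return 0;
}

static void rom_unlock(void)
{
	rom_lock_id = 0xffffffff;	/* clear the owner before dropping SEM2 */
	sem2_taken = false;
}

int main(void)
{
	rom_lock(2);
	if (rom_lock(5) < 0)
		printf("flash busy, held by port %u\n", (unsigned)rom_lock_id);
	rom_unlock();
	return 0;
}
```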
| @@ -950,6 +955,7 @@ static int | |||
| 950 | qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) | 955 | qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) |
| 951 | { | 956 | { |
| 952 | int ret, loops = 0; | 957 | int ret, loops = 0; |
| 958 | uint32_t lock_owner = 0; | ||
| 953 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 959 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
| 954 | 960 | ||
| 955 | while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { | 961 | while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { |
| @@ -958,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) | |||
| 958 | loops++; | 964 | loops++; |
| 959 | } | 965 | } |
| 960 | if (loops >= 50000) { | 966 | if (loops >= 50000) { |
| 967 | lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); | ||
| 961 | ql_log(ql_log_fatal, vha, 0x00b9, | 968 | ql_log(ql_log_fatal, vha, 0x00b9, |
| 962 | "Failed to acquire SEM2 lock.\n"); | 969 | "Failed to acquire SEM2 lock, Lock Owner %u.\n", |
| 970 | lock_owner); | ||
| 963 | return -1; | 971 | return -1; |
| 964 | } | 972 | } |
| 965 | ret = qla82xx_do_rom_fast_read(ha, addr, valp); | 973 | ret = qla82xx_do_rom_fast_read(ha, addr, valp); |
| @@ -1057,6 +1065,7 @@ static int | |||
| 1057 | ql82xx_rom_lock_d(struct qla_hw_data *ha) | 1065 | ql82xx_rom_lock_d(struct qla_hw_data *ha) |
| 1058 | { | 1066 | { |
| 1059 | int loops = 0; | 1067 | int loops = 0; |
| 1068 | uint32_t lock_owner = 0; | ||
| 1060 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 1069 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
| 1061 | 1070 | ||
| 1062 | while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { | 1071 | while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { |
| @@ -1065,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha) | |||
| 1065 | loops++; | 1074 | loops++; |
| 1066 | } | 1075 | } |
| 1067 | if (loops >= 50000) { | 1076 | if (loops >= 50000) { |
| 1077 | lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); | ||
| 1068 | ql_log(ql_log_warn, vha, 0xb010, | 1078 | ql_log(ql_log_warn, vha, 0xb010, |
| 1069 | "ROM lock failed.\n"); | 1079 | "ROM lock failed, Lock Owner %u.\n", lock_owner); |
| 1070 | return -1; | 1080 | return -1; |
| 1071 | } | 1081 | } |
| 1072 | return 0; | 1082 | return 0; |
| @@ -2811,12 +2821,14 @@ static void | |||
| 2811 | qla82xx_rom_lock_recovery(struct qla_hw_data *ha) | 2821 | qla82xx_rom_lock_recovery(struct qla_hw_data *ha) |
| 2812 | { | 2822 | { |
| 2813 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 2823 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
| 2824 | uint32_t lock_owner = 0; | ||
| 2814 | 2825 | ||
| 2815 | if (qla82xx_rom_lock(ha)) | 2826 | if (qla82xx_rom_lock(ha)) { |
| 2827 | lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); | ||
| 2816 | /* Someone else is holding the lock. */ | 2828 | /* Someone else is holding the lock. */ |
| 2817 | ql_log(ql_log_info, vha, 0xb022, | 2829 | ql_log(ql_log_info, vha, 0xb022, |
| 2818 | "Resetting rom_lock.\n"); | 2830 | "Resetting rom_lock, Lock Owner %u.\n", lock_owner); |
| 2819 | 2831 | } | |
| 2820 | /* | 2832 | /* |
| 2821 | * Either we got the lock, or someone | 2833 | * Either we got the lock, or someone |
| 2822 | * else died while holding it. | 2834 | * else died while holding it. |
| @@ -2840,47 +2852,30 @@ static int | |||
| 2840 | qla82xx_device_bootstrap(scsi_qla_host_t *vha) | 2852 | qla82xx_device_bootstrap(scsi_qla_host_t *vha) |
| 2841 | { | 2853 | { |
| 2842 | int rval = QLA_SUCCESS; | 2854 | int rval = QLA_SUCCESS; |
| 2843 | int i, timeout; | 2855 | int i; |
| 2844 | uint32_t old_count, count; | 2856 | uint32_t old_count, count; |
| 2845 | struct qla_hw_data *ha = vha->hw; | 2857 | struct qla_hw_data *ha = vha->hw; |
| 2846 | int need_reset = 0, peg_stuck = 1; | 2858 | int need_reset = 0; |
| 2847 | 2859 | ||
| 2848 | need_reset = qla82xx_need_reset(ha); | 2860 | need_reset = qla82xx_need_reset(ha); |
| 2849 | 2861 | ||
| 2850 | old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); | ||
| 2851 | |||
| 2852 | for (i = 0; i < 10; i++) { | ||
| 2853 | timeout = msleep_interruptible(200); | ||
| 2854 | if (timeout) { | ||
| 2855 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | ||
| 2856 | QLA8XXX_DEV_FAILED); | ||
| 2857 | return QLA_FUNCTION_FAILED; | ||
| 2858 | } | ||
| 2859 | |||
| 2860 | count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); | ||
| 2861 | if (count != old_count) | ||
| 2862 | peg_stuck = 0; | ||
| 2863 | } | ||
| 2864 | |||
| 2865 | if (need_reset) { | 2862 | if (need_reset) { |
| 2866 | /* We are trying to perform a recovery here. */ | 2863 | /* We are trying to perform a recovery here. */ |
| 2867 | if (peg_stuck) | 2864 | if (ha->flags.isp82xx_fw_hung) |
| 2868 | qla82xx_rom_lock_recovery(ha); | 2865 | qla82xx_rom_lock_recovery(ha); |
| 2869 | goto dev_initialize; | ||
| 2870 | } else { | 2866 | } else { |
| 2871 | /* Start of day for this ha context. */ | 2867 | old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); |
| 2872 | if (peg_stuck) { | 2868 | for (i = 0; i < 10; i++) { |
| 2873 | /* Either we are the first or recovery in progress. */ | 2869 | msleep(200); |
| 2874 | qla82xx_rom_lock_recovery(ha); | 2870 | count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); |
| 2875 | goto dev_initialize; | 2871 | if (count != old_count) { |
| 2876 | } else | 2872 | rval = QLA_SUCCESS; |
| 2877 | /* Firmware already running. */ | 2873 | goto dev_ready; |
| 2878 | goto dev_ready; | 2874 | } |
| 2875 | } | ||
| 2876 | qla82xx_rom_lock_recovery(ha); | ||
| 2879 | } | 2877 | } |
| 2880 | 2878 | ||
| 2881 | return rval; | ||
| 2882 | |||
| 2883 | dev_initialize: | ||
| 2884 | /* set to DEV_INITIALIZING */ | 2879 | /* set to DEV_INITIALIZING */ |
| 2885 | ql_log(ql_log_info, vha, 0x009e, | 2880 | ql_log(ql_log_info, vha, 0x009e, |
| 2886 | "HW State: INITIALIZING.\n"); | 2881 | "HW State: INITIALIZING.\n"); |
| @@ -3142,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha) | |||
| 3142 | 3137 | ||
| 3143 | if (ql2xmdenable) { | 3138 | if (ql2xmdenable) { |
| 3144 | if (!ha->fw_dumped) { | 3139 | if (!ha->fw_dumped) { |
| 3145 | if (fw_major_version != ha->fw_major_version || | 3140 | if ((fw_major_version != ha->fw_major_version || |
| 3146 | fw_minor_version != ha->fw_minor_version || | 3141 | fw_minor_version != ha->fw_minor_version || |
| 3147 | fw_subminor_version != ha->fw_subminor_version) { | 3142 | fw_subminor_version != ha->fw_subminor_version) || |
| 3143 | (ha->prev_minidump_failed)) { | ||
| 3148 | ql_dbg(ql_dbg_p3p, vha, 0xb02d, | 3144 | ql_dbg(ql_dbg_p3p, vha, 0xb02d, |
| 3149 | "Firmware version differs " | 3145 | "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n", |
| 3150 | "Previous version: %d:%d:%d - " | ||
| 3151 | "New version: %d:%d:%d\n", | ||
| 3152 | fw_major_version, fw_minor_version, | 3146 | fw_major_version, fw_minor_version, |
| 3153 | fw_subminor_version, | 3147 | fw_subminor_version, |
| 3154 | ha->fw_major_version, | 3148 | ha->fw_major_version, |
| 3155 | ha->fw_minor_version, | 3149 | ha->fw_minor_version, |
| 3156 | ha->fw_subminor_version); | 3150 | ha->fw_subminor_version, |
| 3151 | ha->prev_minidump_failed); | ||
| 3157 | /* Release MiniDump resources */ | 3152 | /* Release MiniDump resources */ |
| 3158 | qla82xx_md_free(vha); | 3153 | qla82xx_md_free(vha); |
| 3159 | /* ALlocate MiniDump resources */ | 3154 | /* ALlocate MiniDump resources */ |
| @@ -3682,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) | |||
| 3682 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { | 3677 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { |
| 3683 | sp = req->outstanding_cmds[cnt]; | 3678 | sp = req->outstanding_cmds[cnt]; |
| 3684 | if (sp) { | 3679 | if (sp) { |
| 3685 | if (!sp->u.scmd.ctx || | 3680 | if ((!sp->u.scmd.ctx || |
| 3686 | (sp->flags & SRB_FCP_CMND_DMA_VALID)) { | 3681 | (sp->flags & |
| 3682 | SRB_FCP_CMND_DMA_VALID)) && | ||
| 3683 | !ha->flags.isp82xx_fw_hung) { | ||
| 3687 | spin_unlock_irqrestore( | 3684 | spin_unlock_irqrestore( |
| 3688 | &ha->hardware_lock, flags); | 3685 | &ha->hardware_lock, flags); |
| 3689 | if (ha->isp_ops->abort_command(sp)) { | 3686 | if (ha->isp_ops->abort_command(sp)) { |
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 1bb93dbbccbb..59c477883a73 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -333,9 +333,6 @@ | |||
| 333 | #define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) | 333 | #define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) |
| 334 | #define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) | 334 | #define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) |
| 335 | 335 | ||
| 336 | /* Lock IDs for ROM lock */ | ||
| 337 | #define ROM_LOCK_DRIVER 0x0d417340 | ||
| 338 | |||
| 339 | #define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */ | 336 | #define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */ |
| 340 | #define QLA82XX_PCI_CRB_WINDOW(A) \ | 337 | #define QLA82XX_PCI_CRB_WINDOW(A) \ |
| 341 | (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE) | 338 | (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE) |
| @@ -1186,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, | |||
| 1186 | #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 | 1183 | #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 |
| 1187 | 1184 | ||
| 1188 | #define qla82xx_get_temp_val(x) ((x) >> 16) | 1185 | #define qla82xx_get_temp_val(x) ((x) >> 16) |
| 1186 | #define qla82xx_get_temp_val1(x) ((x) & 0x0000FFFF) | ||
| 1189 | #define qla82xx_get_temp_state(x) ((x) & 0xffff) | 1187 | #define qla82xx_get_temp_state(x) ((x) & 0xffff) |
| 1190 | #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) | 1188 | #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) |
| 1191 | 1189 | ||
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 86cf10815db0..da9e3902f219 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c | |||
| @@ -1,17 +1,20 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <linux/vmalloc.h> | 8 | #include <linux/vmalloc.h> |
| 9 | #include <linux/delay.h> | ||
| 9 | 10 | ||
| 10 | #include "qla_def.h" | 11 | #include "qla_def.h" |
| 11 | #include "qla_gbl.h" | 12 | #include "qla_gbl.h" |
| 12 | 13 | ||
| 13 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 14 | 15 | ||
| 16 | #define TIMEOUT_100_MS 100 | ||
| 17 | |||
| 15 | /* 8044 Flash Read/Write functions */ | 18 | /* 8044 Flash Read/Write functions */ |
| 16 | uint32_t | 19 | uint32_t |
| 17 | qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) | 20 | qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) |
| @@ -117,6 +120,95 @@ qla8044_read_write_crb_reg(struct scsi_qla_host *vha, | |||
| 117 | qla8044_wr_reg_indirect(vha, waddr, value); | 120 | qla8044_wr_reg_indirect(vha, waddr, value); |
| 118 | } | 121 | } |
| 119 | 122 | ||
| 123 | static int | ||
| 124 | qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1, | ||
| 125 | uint32_t mask) | ||
| 126 | { | ||
| 127 | unsigned long timeout; | ||
| 128 | uint32_t temp; | ||
| 129 | |||
| 130 | /* jiffies after 100ms */ | ||
| 131 | timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); | ||
| 132 | do { | ||
| 133 | qla8044_rd_reg_indirect(vha, addr1, &temp); | ||
| 134 | if ((temp & mask) != 0) | ||
| 135 | break; | ||
| 136 | if (time_after_eq(jiffies, timeout)) { | ||
| 137 | ql_log(ql_log_warn, vha, 0xb151, | ||
| 138 | "Error in processing rdmdio entry\n"); | ||
| 139 | return -1; | ||
| 140 | } | ||
| 141 | } while (1); | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static uint32_t | ||
| 147 | qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha, | ||
| 148 | uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr) | ||
| 149 | { | ||
| 150 | uint32_t temp; | ||
| 151 | int ret = 0; | ||
| 152 | |||
| 153 | ret = qla8044_poll_wait_for_ready(vha, addr1, mask); | ||
| 154 | if (ret == -1) | ||
| 155 | return -1; | ||
| 156 | |||
| 157 | temp = (0x40000000 | addr); | ||
| 158 | qla8044_wr_reg_indirect(vha, addr1, temp); | ||
| 159 | |||
| 160 | ret = qla8044_poll_wait_for_ready(vha, addr1, mask); | ||
| 161 | if (ret == -1) | ||
| 162 | return 0; | ||
| 163 | |||
| 164 | qla8044_rd_reg_indirect(vha, addr3, &ret); | ||
| 165 | |||
| 166 | return ret; | ||
| 167 | } | ||
| 168 | |||
| 169 | |||
| 170 | static int | ||
| 171 | qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha, | ||
| 172 | uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask) | ||
| 173 | { | ||
| 174 | unsigned long timeout; | ||
| 175 | uint32_t temp; | ||
| 176 | |||
| 177 | /* jiffies after 100 msecs */ | ||
| 178 | timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); | ||
| 179 | do { | ||
| 180 | temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2); | ||
| 181 | if ((temp & 0x1) != 1) | ||
| 182 | break; | ||
| 183 | if (time_after_eq(jiffies, timeout)) { | ||
| 184 | ql_log(ql_log_warn, vha, 0xb152, | ||
| 185 | "Error in processing mdiobus idle\n"); | ||
| 186 | return -1; | ||
| 187 | } | ||
| 188 | } while (1); | ||
| 189 | |||
| 190 | return 0; | ||
| 191 | } | ||
| 192 | |||
| 193 | static int | ||
| 194 | qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1, | ||
| 195 | uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value) | ||
| 196 | { | ||
| 197 | int ret = 0; | ||
| 198 | |||
| 199 | ret = qla8044_poll_wait_for_ready(vha, addr1, mask); | ||
| 200 | if (ret == -1) | ||
| 201 | return -1; | ||
| 202 | |||
| 203 | qla8044_wr_reg_indirect(vha, addr3, value); | ||
| 204 | qla8044_wr_reg_indirect(vha, addr1, addr); | ||
| 205 | |||
| 206 | ret = qla8044_poll_wait_for_ready(vha, addr1, mask); | ||
| 207 | if (ret == -1) | ||
| 208 | return -1; | ||
| 209 | |||
| 210 | return 0; | ||
| 211 | } | ||
| 120 | /* | 212 | /* |
| 121 | * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, | 213 | * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, |
| 122 | * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. | 214 | * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. |
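qla8044_poll_wait_for_ready() and qla8044_poll_wait_ipmdio_bus_idle() added above both use the jiffies-deadline idiom: compute jiffies + msecs_to_jiffies(TIMEOUT_100_MS) once, then spin on the indirect register read until the expected bit state appears or time_after_eq() says the 100 ms window has passed. A userspace analogue of that deadline pattern, with the register read stubbed, is given below.

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the indirect register read; stubbed so the sketch compiles. */
static uint32_t mdio_status_read(uint32_t addr)
{
	(void)addr;
	return 0x1;	/* pretend the ready bit is already set */
}

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Deadline-based poll, the userspace analogue of
 * "timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
 *  ... if (time_after_eq(jiffies, timeout)) return -1;" */
static int poll_wait_for_ready(uint32_t addr, uint32_t mask,
			       unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	do {
		if (mdio_status_read(addr) & mask)
			return 0;
		if (now_ms() >= deadline)
			return -1;	/* interface never became ready */
	} while (1);
}

int main(void)
{
	if (poll_wait_for_ready(0x20000, 0x1, 100))
		fprintf(stderr, "error processing rdmdio entry\n");
	return 0;
}
```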
| @@ -356,8 +448,8 @@ qla8044_flash_lock(scsi_qla_host_t *vha) | |||
| 356 | lock_owner = qla8044_rd_reg(ha, | 448 | lock_owner = qla8044_rd_reg(ha, |
| 357 | QLA8044_FLASH_LOCK_ID); | 449 | QLA8044_FLASH_LOCK_ID); |
| 358 | ql_log(ql_log_warn, vha, 0xb113, | 450 | ql_log(ql_log_warn, vha, 0xb113, |
| 359 | "%s: flash lock by %d failed, held by %d\n", | 451 | "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", |
| 360 | __func__, ha->portnum, lock_owner); | 452 | __func__, ha->portnum, lock_owner); |
| 361 | ret_val = QLA_FUNCTION_FAILED; | 453 | ret_val = QLA_FUNCTION_FAILED; |
| 362 | break; | 454 | break; |
| 363 | } | 455 | } |
| @@ -1541,7 +1633,7 @@ static void | |||
| 1541 | qla8044_need_reset_handler(struct scsi_qla_host *vha) | 1633 | qla8044_need_reset_handler(struct scsi_qla_host *vha) |
| 1542 | { | 1634 | { |
| 1543 | uint32_t dev_state = 0, drv_state, drv_active; | 1635 | uint32_t dev_state = 0, drv_state, drv_active; |
| 1544 | unsigned long reset_timeout, dev_init_timeout; | 1636 | unsigned long reset_timeout; |
| 1545 | struct qla_hw_data *ha = vha->hw; | 1637 | struct qla_hw_data *ha = vha->hw; |
| 1546 | 1638 | ||
| 1547 | ql_log(ql_log_fatal, vha, 0xb0c2, | 1639 | ql_log(ql_log_fatal, vha, 0xb0c2, |
| @@ -1555,84 +1647,78 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha) | |||
| 1555 | qla8044_idc_lock(ha); | 1647 | qla8044_idc_lock(ha); |
| 1556 | } | 1648 | } |
| 1557 | 1649 | ||
| 1650 | dev_state = qla8044_rd_direct(vha, | ||
| 1651 | QLA8044_CRB_DEV_STATE_INDEX); | ||
| 1558 | drv_state = qla8044_rd_direct(vha, | 1652 | drv_state = qla8044_rd_direct(vha, |
| 1559 | QLA8044_CRB_DRV_STATE_INDEX); | 1653 | QLA8044_CRB_DRV_STATE_INDEX); |
| 1560 | drv_active = qla8044_rd_direct(vha, | 1654 | drv_active = qla8044_rd_direct(vha, |
| 1561 | QLA8044_CRB_DRV_ACTIVE_INDEX); | 1655 | QLA8044_CRB_DRV_ACTIVE_INDEX); |
| 1562 | 1656 | ||
| 1563 | ql_log(ql_log_info, vha, 0xb0c5, | 1657 | ql_log(ql_log_info, vha, 0xb0c5, |
| 1564 | "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", | 1658 | "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n", |
| 1565 | __func__, vha->host_no, drv_state, drv_active); | 1659 | __func__, vha->host_no, drv_state, drv_active, dev_state); |
| 1566 | 1660 | ||
| 1567 | if (!ha->flags.nic_core_reset_owner) { | 1661 | qla8044_set_rst_ready(vha); |
| 1568 | ql_dbg(ql_dbg_p3p, vha, 0xb0c3, | ||
| 1569 | "%s(%ld): reset acknowledged\n", | ||
| 1570 | __func__, vha->host_no); | ||
| 1571 | qla8044_set_rst_ready(vha); | ||
| 1572 | 1662 | ||
| 1573 | /* Non-reset owners ACK Reset and wait for device INIT state | 1663 | /* wait for 10 seconds for reset ack from all functions */ |
| 1574 | * as part of Reset Recovery by Reset Owner | 1664 | reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); |
| 1575 | */ | ||
| 1576 | dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); | ||
| 1577 | 1665 | ||
| 1578 | do { | 1666 | do { |
| 1579 | if (time_after_eq(jiffies, dev_init_timeout)) { | 1667 | if (time_after_eq(jiffies, reset_timeout)) { |
| 1580 | ql_log(ql_log_info, vha, 0xb0c4, | 1668 | ql_log(ql_log_info, vha, 0xb0c4, |
| 1581 | "%s: Non Reset owner: Reset Ack Timeout!\n", | 1669 | "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n", |
| 1582 | __func__); | 1670 | __func__, ha->portnum, drv_state, drv_active); |
| 1583 | break; | 1671 | break; |
| 1584 | } | 1672 | } |
| 1585 | 1673 | ||
| 1586 | qla8044_idc_unlock(ha); | 1674 | qla8044_idc_unlock(ha); |
| 1587 | msleep(1000); | 1675 | msleep(1000); |
| 1588 | qla8044_idc_lock(ha); | 1676 | qla8044_idc_lock(ha); |
| 1589 | 1677 | ||
| 1590 | dev_state = qla8044_rd_direct(vha, | 1678 | dev_state = qla8044_rd_direct(vha, |
| 1591 | QLA8044_CRB_DEV_STATE_INDEX); | 1679 | QLA8044_CRB_DEV_STATE_INDEX); |
| 1592 | } while (((drv_state & drv_active) != drv_active) && | 1680 | drv_state = qla8044_rd_direct(vha, |
| 1593 | (dev_state == QLA8XXX_DEV_NEED_RESET)); | 1681 | QLA8044_CRB_DRV_STATE_INDEX); |
| 1682 | drv_active = qla8044_rd_direct(vha, | ||
| 1683 | QLA8044_CRB_DRV_ACTIVE_INDEX); | ||
| 1684 | } while (((drv_state & drv_active) != drv_active) && | ||
| 1685 | (dev_state == QLA8XXX_DEV_NEED_RESET)); | ||
| 1686 | |||
| 1687 | /* Remove IDC participation of functions not acknowledging */ | ||
| 1688 | if (drv_state != drv_active) { | ||
| 1689 | ql_log(ql_log_info, vha, 0xb0c7, | ||
| 1690 | "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n", | ||
| 1691 | __func__, vha->host_no, ha->portnum, | ||
| 1692 | (drv_active ^ drv_state)); | ||
| 1693 | drv_active = drv_active & drv_state; | ||
| 1694 | qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, | ||
| 1695 | drv_active); | ||
| 1594 | } else { | 1696 | } else { |
| 1595 | qla8044_set_rst_ready(vha); | 1697 | /* |
| 1596 | 1698 | * Reset owner should execute reset recovery, | |
| 1597 | /* wait for 10 seconds for reset ack from all functions */ | 1699 | * if all functions acknowledged |
| 1598 | reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); | 1700 | */ |
| 1599 | 1701 | if ((ha->flags.nic_core_reset_owner) && | |
| 1600 | while ((drv_state & drv_active) != drv_active) { | 1702 | (dev_state == QLA8XXX_DEV_NEED_RESET)) { |
| 1601 | if (time_after_eq(jiffies, reset_timeout)) { | 1703 | ha->flags.nic_core_reset_owner = 0; |
| 1602 | ql_log(ql_log_info, vha, 0xb0c6, | 1704 | qla8044_device_bootstrap(vha); |
| 1603 | "%s: RESET TIMEOUT!" | 1705 | return; |
| 1604 | "drv_state: 0x%08x, drv_active: 0x%08x\n", | ||
| 1605 | QLA2XXX_DRIVER_NAME, drv_state, drv_active); | ||
| 1606 | break; | ||
| 1607 | } | ||
| 1608 | |||
| 1609 | qla8044_idc_unlock(ha); | ||
| 1610 | msleep(1000); | ||
| 1611 | qla8044_idc_lock(ha); | ||
| 1612 | |||
| 1613 | drv_state = qla8044_rd_direct(vha, | ||
| 1614 | QLA8044_CRB_DRV_STATE_INDEX); | ||
| 1615 | drv_active = qla8044_rd_direct(vha, | ||
| 1616 | QLA8044_CRB_DRV_ACTIVE_INDEX); | ||
| 1617 | } | ||
| 1618 | |||
| 1619 | if (drv_state != drv_active) { | ||
| 1620 | ql_log(ql_log_info, vha, 0xb0c7, | ||
| 1621 | "%s(%ld): Reset_owner turning off drv_active " | ||
| 1622 | "of non-acking function 0x%x\n", __func__, | ||
| 1623 | vha->host_no, (drv_active ^ drv_state)); | ||
| 1624 | drv_active = drv_active & drv_state; | ||
| 1625 | qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, | ||
| 1626 | drv_active); | ||
| 1627 | } | 1706 | } |
| 1707 | } | ||
| 1628 | 1708 | ||
| 1629 | /* | 1709 | /* Exit if non active function */ |
| 1630 | * Clear RESET OWNER, will be set at next reset | 1710 | if (!(drv_active & (1 << ha->portnum))) { |
| 1631 | * by next RST_OWNER | ||
| 1632 | */ | ||
| 1633 | ha->flags.nic_core_reset_owner = 0; | 1711 | ha->flags.nic_core_reset_owner = 0; |
| 1712 | return; | ||
| 1713 | } | ||
| 1634 | 1714 | ||
| 1635 | /* Start Reset Recovery */ | 1715 | /* |
| 1716 | * Execute Reset Recovery if Reset Owner or Function 7 | ||
| 1717 | * is the only active function | ||
| 1718 | */ | ||
| 1719 | if (ha->flags.nic_core_reset_owner || | ||
| 1720 | ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) { | ||
| 1721 | ha->flags.nic_core_reset_owner = 0; | ||
| 1636 | qla8044_device_bootstrap(vha); | 1722 | qla8044_device_bootstrap(vha); |
| 1637 | } | 1723 | } |
| 1638 | } | 1724 | } |
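In the rewritten qla8044_need_reset_handler() above, drv_active and drv_state are per-function bitmaps read from the IDC registers: every function sets its own reset-ready bit, waits up to fcoe_reset_timeout for (drv_state & drv_active) == drv_active, and any function that never acks is stripped from drv_active; recovery is then run by the reset owner, or by function 7 when it is the only active function left. The fragment below models just that bitmap bookkeeping; the port numbers and the initial drv_active value are invented for the example.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the IDC drv_active/drv_state bitmaps: bit N set in
 * drv_active means PCI function N participates in IDC, bit N set in
 * drv_state means that function has acknowledged the reset. */
static uint32_t drv_active = 0x0f;	/* functions 0-3 active (assumed) */
static uint32_t drv_state;		/* ack bits, initially none */

static void set_rst_ready(unsigned int portnum)
{
	drv_state |= 1u << portnum;	/* this function acks the reset */
}

/* After the ack window expires, drop any function that never acked so
 * reset recovery is not blocked by a dead or stuck function. */
static void drop_non_acking_functions(void)
{
	if (drv_state != drv_active) {
		printf("turning off drv_active of non-acking functions 0x%x\n",
		       (unsigned)(drv_active ^ drv_state));
		drv_active &= drv_state;
	}
}

int main(void)
{
	set_rst_ready(0);
	set_rst_ready(1);
	set_rst_ready(3);		/* function 2 never responds */
	drop_non_acking_functions();
	printf("drv_active now 0x%x\n", (unsigned)drv_active);
	return 0;
}
```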
| @@ -1655,6 +1741,19 @@ qla8044_set_drv_active(struct scsi_qla_host *vha) | |||
| 1655 | qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); | 1741 | qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); |
| 1656 | } | 1742 | } |
| 1657 | 1743 | ||
| 1744 | static int | ||
| 1745 | qla8044_check_drv_active(struct scsi_qla_host *vha) | ||
| 1746 | { | ||
| 1747 | uint32_t drv_active; | ||
| 1748 | struct qla_hw_data *ha = vha->hw; | ||
| 1749 | |||
| 1750 | drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); | ||
| 1751 | if (drv_active & (1 << ha->portnum)) | ||
| 1752 | return QLA_SUCCESS; | ||
| 1753 | else | ||
| 1754 | return QLA_TEST_FAILED; | ||
| 1755 | } | ||
| 1756 | |||
| 1658 | static void | 1757 | static void |
| 1659 | qla8044_clear_idc_dontreset(struct scsi_qla_host *vha) | 1758 | qla8044_clear_idc_dontreset(struct scsi_qla_host *vha) |
| 1660 | { | 1759 | { |
| @@ -1837,14 +1936,16 @@ qla8044_device_state_handler(struct scsi_qla_host *vha) | |||
| 1837 | 1936 | ||
| 1838 | while (1) { | 1937 | while (1) { |
| 1839 | if (time_after_eq(jiffies, dev_init_timeout)) { | 1938 | if (time_after_eq(jiffies, dev_init_timeout)) { |
| 1840 | ql_log(ql_log_warn, vha, 0xb0cf, | 1939 | if (qla8044_check_drv_active(vha) == QLA_SUCCESS) { |
| 1841 | "%s: Device Init Failed 0x%x = %s\n", | 1940 | ql_log(ql_log_warn, vha, 0xb0cf, |
| 1842 | QLA2XXX_DRIVER_NAME, dev_state, | 1941 | "%s: Device Init Failed 0x%x = %s\n", |
| 1843 | dev_state < MAX_STATES ? | 1942 | QLA2XXX_DRIVER_NAME, dev_state, |
| 1844 | qdev_state(dev_state) : "Unknown"); | 1943 | dev_state < MAX_STATES ? |
| 1845 | 1944 | qdev_state(dev_state) : "Unknown"); | |
| 1846 | qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, | 1945 | qla8044_wr_direct(vha, |
| 1847 | QLA8XXX_DEV_FAILED); | 1946 | QLA8044_CRB_DEV_STATE_INDEX, |
| 1947 | QLA8XXX_DEV_FAILED); | ||
| 1948 | } | ||
| 1848 | } | 1949 | } |
| 1849 | 1950 | ||
| 1850 | dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); | 1951 | dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); |
| @@ -2017,6 +2118,13 @@ qla8044_watchdog(struct scsi_qla_host *vha) | |||
| 2017 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { | 2118 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { |
| 2018 | dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); | 2119 | dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); |
| 2019 | 2120 | ||
| 2121 | if (qla8044_check_fw_alive(vha)) { | ||
| 2122 | ha->flags.isp82xx_fw_hung = 1; | ||
| 2123 | ql_log(ql_log_warn, vha, 0xb10a, | ||
| 2124 | "Firmware hung.\n"); | ||
| 2125 | qla82xx_clear_pending_mbx(vha); | ||
| 2126 | } | ||
| 2127 | |||
| 2020 | if (qla8044_check_temp(vha)) { | 2128 | if (qla8044_check_temp(vha)) { |
| 2021 | set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); | 2129 | set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); |
| 2022 | ha->flags.isp82xx_fw_hung = 1; | 2130 | ha->flags.isp82xx_fw_hung = 1; |
| @@ -2037,7 +2145,7 @@ qla8044_watchdog(struct scsi_qla_host *vha) | |||
| 2037 | qla2xxx_wake_dpc(vha); | 2145 | qla2xxx_wake_dpc(vha); |
| 2038 | } else { | 2146 | } else { |
| 2039 | /* Check firmware health */ | 2147 | /* Check firmware health */ |
| 2040 | if (qla8044_check_fw_alive(vha)) { | 2148 | if (ha->flags.isp82xx_fw_hung) { |
| 2041 | halt_status = qla8044_rd_direct(vha, | 2149 | halt_status = qla8044_rd_direct(vha, |
| 2042 | QLA8044_PEG_HALT_STATUS1_INDEX); | 2150 | QLA8044_PEG_HALT_STATUS1_INDEX); |
| 2043 | if (halt_status & | 2151 | if (halt_status & |
| @@ -2073,12 +2181,8 @@ qla8044_watchdog(struct scsi_qla_host *vha) | |||
| 2073 | __func__); | 2181 | __func__); |
| 2074 | set_bit(ISP_ABORT_NEEDED, | 2182 | set_bit(ISP_ABORT_NEEDED, |
| 2075 | &vha->dpc_flags); | 2183 | &vha->dpc_flags); |
| 2076 | qla82xx_clear_pending_mbx(vha); | ||
| 2077 | } | 2184 | } |
| 2078 | } | 2185 | } |
| 2079 | ha->flags.isp82xx_fw_hung = 1; | ||
| 2080 | ql_log(ql_log_warn, vha, 0xb10a, | ||
| 2081 | "Firmware hung.\n"); | ||
| 2082 | qla2xxx_wake_dpc(vha); | 2186 | qla2xxx_wake_dpc(vha); |
| 2083 | } | 2187 | } |
| 2084 | } | 2188 | } |
| @@ -2286,8 +2390,6 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha, | |||
| 2286 | } | 2390 | } |
| 2287 | 2391 | ||
| 2288 | if (j >= MAX_CTL_CHECK) { | 2392 | if (j >= MAX_CTL_CHECK) { |
| 2289 | printk_ratelimited(KERN_ERR | ||
| 2290 | "%s: failed to read through agent\n", __func__); | ||
| 2291 | write_unlock_irqrestore(&ha->hw_lock, flags); | 2393 | write_unlock_irqrestore(&ha->hw_lock, flags); |
| 2292 | return QLA_SUCCESS; | 2394 | return QLA_SUCCESS; |
| 2293 | } | 2395 | } |
| @@ -2882,6 +2984,231 @@ error_exit: | |||
| 2882 | return rval; | 2984 | return rval; |
| 2883 | } | 2985 | } |
| 2884 | 2986 | ||
| 2987 | static uint32_t | ||
| 2988 | qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, | ||
| 2989 | struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) | ||
| 2990 | { | ||
| 2991 | int loop_cnt; | ||
| 2992 | uint32_t addr1, addr2, value, data, temp, wrVal; | ||
| 2993 | uint8_t stride, stride2; | ||
| 2994 | uint16_t count; | ||
| 2995 | uint32_t poll, mask, data_size, modify_mask; | ||
| 2996 | uint32_t wait_count = 0; | ||
| 2997 | |||
| 2998 | uint32_t *data_ptr = *d_ptr; | ||
| 2999 | |||
| 3000 | struct qla8044_minidump_entry_rddfe *rddfe; | ||
| 3001 | rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr; | ||
| 3002 | |||
| 3003 | addr1 = rddfe->addr_1; | ||
| 3004 | value = rddfe->value; | ||
| 3005 | stride = rddfe->stride; | ||
| 3006 | stride2 = rddfe->stride2; | ||
| 3007 | count = rddfe->count; | ||
| 3008 | |||
| 3009 | poll = rddfe->poll; | ||
| 3010 | mask = rddfe->mask; | ||
| 3011 | modify_mask = rddfe->modify_mask; | ||
| 3012 | data_size = rddfe->data_size; | ||
| 3013 | |||
| 3014 | addr2 = addr1 + stride; | ||
| 3015 | |||
| 3016 | for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { | ||
| 3017 | qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value)); | ||
| 3018 | |||
| 3019 | wait_count = 0; | ||
| 3020 | while (wait_count < poll) { | ||
| 3021 | qla8044_rd_reg_indirect(vha, addr1, &temp); | ||
| 3022 | if ((temp & mask) != 0) | ||
| 3023 | break; | ||
| 3024 | wait_count++; | ||
| 3025 | } | ||
| 3026 | |||
| 3027 | if (wait_count == poll) { | ||
| 3028 | ql_log(ql_log_warn, vha, 0xb153, | ||
| 3029 | "%s: TIMEOUT\n", __func__); | ||
| 3030 | goto error; | ||
| 3031 | } else { | ||
| 3032 | qla8044_rd_reg_indirect(vha, addr2, &temp); | ||
| 3033 | temp = temp & modify_mask; | ||
| 3034 | temp = (temp | ((loop_cnt << 16) | loop_cnt)); | ||
| 3035 | wrVal = ((temp << 16) | temp); | ||
| 3036 | |||
| 3037 | qla8044_wr_reg_indirect(vha, addr2, wrVal); | ||
| 3038 | qla8044_wr_reg_indirect(vha, addr1, value); | ||
| 3039 | |||
| 3040 | wait_count = 0; | ||
| 3041 | while (wait_count < poll) { | ||
| 3042 | qla8044_rd_reg_indirect(vha, addr1, &temp); | ||
| 3043 | if ((temp & mask) != 0) | ||
| 3044 | break; | ||
| 3045 | wait_count++; | ||
| 3046 | } | ||
| 3047 | if (wait_count == poll) { | ||
| 3048 | ql_log(ql_log_warn, vha, 0xb154, | ||
| 3049 | "%s: TIMEOUT\n", __func__); | ||
| 3050 | goto error; | ||
| 3051 | } | ||
| 3052 | |||
| 3053 | qla8044_wr_reg_indirect(vha, addr1, | ||
| 3054 | ((0x40000000 | value) + stride2)); | ||
| 3055 | wait_count = 0; | ||
| 3056 | while (wait_count < poll) { | ||
| 3057 | qla8044_rd_reg_indirect(vha, addr1, &temp); | ||
| 3058 | if ((temp & mask) != 0) | ||
| 3059 | break; | ||
| 3060 | wait_count++; | ||
| 3061 | } | ||
| 3062 | |||
| 3063 | if (wait_count == poll) { | ||
| 3064 | ql_log(ql_log_warn, vha, 0xb155, | ||
| 3065 | "%s: TIMEOUT\n", __func__); | ||
| 3066 | goto error; | ||
| 3067 | } | ||
| 3068 | |||
| 3069 | qla8044_rd_reg_indirect(vha, addr2, &data); | ||
| 3070 | |||
| 3071 | *data_ptr++ = wrVal; | ||
| 3072 | *data_ptr++ = data; | ||
| 3073 | } | ||
| 3074 | |||
| 3075 | } | ||
| 3076 | |||
| 3077 | *d_ptr = data_ptr; | ||
| 3078 | return QLA_SUCCESS; | ||
| 3079 | |||
| 3080 | error: | ||
| 3081 | return -1; | ||
| 3082 | |||
| 3083 | } | ||
| 3084 | |||
| 3085 | static uint32_t | ||
| 3086 | qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, | ||
| 3087 | struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) | ||
| 3088 | { | ||
| 3089 | int ret = 0; | ||
| 3090 | uint32_t addr1, addr2, value1, value2, data, selVal; | ||
| 3091 | uint8_t stride1, stride2; | ||
| 3092 | uint32_t addr3, addr4, addr5, addr6, addr7; | ||
| 3093 | uint16_t count, loop_cnt; | ||
| 3094 | uint32_t poll, mask; | ||
| 3095 | uint32_t *data_ptr = *d_ptr; | ||
| 3096 | |||
| 3097 | struct qla8044_minidump_entry_rdmdio *rdmdio; | ||
| 3098 | |||
| 3099 | rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr; | ||
| 3100 | |||
| 3101 | addr1 = rdmdio->addr_1; | ||
| 3102 | addr2 = rdmdio->addr_2; | ||
| 3103 | value1 = rdmdio->value_1; | ||
| 3104 | stride1 = rdmdio->stride_1; | ||
| 3105 | stride2 = rdmdio->stride_2; | ||
| 3106 | count = rdmdio->count; | ||
| 3107 | |||
| 3108 | poll = rdmdio->poll; | ||
| 3109 | mask = rdmdio->mask; | ||
| 3110 | value2 = rdmdio->value_2; | ||
| 3111 | |||
| 3112 | addr3 = addr1 + stride1; | ||
| 3113 | |||
| 3114 | for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { | ||
| 3115 | ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, | ||
| 3116 | addr3, mask); | ||
| 3117 | if (ret == -1) | ||
| 3118 | goto error; | ||
| 3119 | |||
| 3120 | addr4 = addr2 - stride1; | ||
| 3121 | ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, | ||
| 3122 | value2); | ||
| 3123 | if (ret == -1) | ||
| 3124 | goto error; | ||
| 3125 | |||
| 3126 | addr5 = addr2 - (2 * stride1); | ||
| 3127 | ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, | ||
| 3128 | value1); | ||
| 3129 | if (ret == -1) | ||
| 3130 | goto error; | ||
| 3131 | |||
| 3132 | addr6 = addr2 - (3 * stride1); | ||
| 3133 | ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, | ||
| 3134 | addr6, 0x2); | ||
| 3135 | if (ret == -1) | ||
| 3136 | goto error; | ||
| 3137 | |||
| 3138 | ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, | ||
| 3139 | addr3, mask); | ||
| 3140 | if (ret == -1) | ||
| 3141 | goto error; | ||
| 3142 | |||
| 3143 | addr7 = addr2 - (4 * stride1); | ||
| 3144 | data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, | ||
| 3145 | mask, addr7); | ||
| 3146 | if (data == -1) | ||
| 3147 | goto error; | ||
| 3148 | |||
| 3149 | selVal = (value2 << 18) | (value1 << 2) | 2; | ||
| 3150 | |||
| 3151 | stride2 = rdmdio->stride_2; | ||
| 3152 | *data_ptr++ = selVal; | ||
| 3153 | *data_ptr++ = data; | ||
| 3154 | |||
| 3155 | value1 = value1 + stride2; | ||
| 3156 | *d_ptr = data_ptr; | ||
| 3157 | } | ||
| 3158 | |||
| 3159 | return 0; | ||
| 3160 | |||
| 3161 | error: | ||
| 3162 | return -1; | ||
| 3163 | } | ||
| 3164 | |||
| 3165 | static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, | ||
| 3166 | struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) | ||
| 3167 | { | ||
| 3168 | uint32_t addr1, addr2, value1, value2, poll, mask, r_value; | ||
| 3169 | uint32_t wait_count = 0; | ||
| 3170 | struct qla8044_minidump_entry_pollwr *pollwr_hdr; | ||
| 3171 | |||
| 3172 | pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; | ||
| 3173 | addr1 = pollwr_hdr->addr_1; | ||
| 3174 | addr2 = pollwr_hdr->addr_2; | ||
| 3175 | value1 = pollwr_hdr->value_1; | ||
| 3176 | value2 = pollwr_hdr->value_2; | ||
| 3177 | |||
| 3178 | poll = pollwr_hdr->poll; | ||
| 3179 | mask = pollwr_hdr->mask; | ||
| 3180 | |||
| 3181 | while (wait_count < poll) { | ||
| 3182 | qla8044_rd_reg_indirect(vha, addr1, &r_value); | ||
| 3183 | |||
| 3184 | if ((r_value & poll) != 0) | ||
| 3185 | break; | ||
| 3186 | wait_count++; | ||
| 3187 | } | ||
| 3188 | |||
| 3189 | if (wait_count == poll) { | ||
| 3190 | ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__); | ||
| 3191 | goto error; | ||
| 3192 | } | ||
| 3193 | |||
| 3194 | qla8044_wr_reg_indirect(vha, addr2, value2); | ||
| 3195 | qla8044_wr_reg_indirect(vha, addr1, value1); | ||
| 3196 | |||
| 3197 | wait_count = 0; | ||
| 3198 | while (wait_count < poll) { | ||
| 3199 | qla8044_rd_reg_indirect(vha, addr1, &r_value); | ||
| 3200 | |||
| 3201 | if ((r_value & poll) != 0) | ||
| 3202 | break; | ||
| 3203 | wait_count++; | ||
| 3204 | } | ||
| 3205 | |||
| 3206 | return QLA_SUCCESS; | ||
| 3207 | |||
| 3208 | error: | ||
| 3209 | return -1; | ||
| 3210 | } | ||
| 3211 | |||
| 2885 | /* | 3212 | /* |
| 2886 | * | 3213 | * |
| 2887 | * qla8044_collect_md_data - Retrieve firmware minidump data. | 3214 | * qla8044_collect_md_data - Retrieve firmware minidump data. |
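The three handlers added above (rddfe, rdmdio, pollwr) follow the same contract as the existing minidump entry processors: parse their template entry, poll or read the hardware through the indirect register helpers, append the captured words to the dump buffer through the uint32_t **d_ptr cursor, and return non-zero so the caller can mark the entry skipped on failure. A stripped-down model of that cursor-advancing contract is shown below; capture_register() is a stand-in for the indirect reads.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an indirect register read. */
static uint32_t capture_register(uint32_t addr)
{
	return addr ^ 0xdeadbeef;	/* placeholder value */
}

/* Append (address, value) pairs to the dump buffer and advance the
 * caller's cursor only when the whole entry was captured. */
static int process_entry(uint32_t base, uint32_t count, uint32_t **d_ptr)
{
	uint32_t *data_ptr = *d_ptr;
	uint32_t i;

	for (i = 0; i < count; i++) {
		*data_ptr++ = base + i;				/* what was read */
		*data_ptr++ = capture_register(base + i);	/* value captured */
	}

	*d_ptr = data_ptr;	/* hand the advanced cursor back */
	return 0;
}

int main(void)
{
	uint32_t dump[16], *cursor = dump;

	process_entry(0x20000, 4, &cursor);
	printf("captured %ld words\n", (long)(cursor - dump));
	return 0;
}
```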
| @@ -3089,6 +3416,24 @@ qla8044_collect_md_data(struct scsi_qla_host *vha) | |||
| 3089 | if (rval != QLA_SUCCESS) | 3416 | if (rval != QLA_SUCCESS) |
| 3090 | qla8044_mark_entry_skipped(vha, entry_hdr, i); | 3417 | qla8044_mark_entry_skipped(vha, entry_hdr, i); |
| 3091 | break; | 3418 | break; |
| 3419 | case QLA8044_RDDFE: | ||
| 3420 | rval = qla8044_minidump_process_rddfe(vha, entry_hdr, | ||
| 3421 | &data_ptr); | ||
| 3422 | if (rval != QLA_SUCCESS) | ||
| 3423 | qla8044_mark_entry_skipped(vha, entry_hdr, i); | ||
| 3424 | break; | ||
| 3425 | case QLA8044_RDMDIO: | ||
| 3426 | rval = qla8044_minidump_process_rdmdio(vha, entry_hdr, | ||
| 3427 | &data_ptr); | ||
| 3428 | if (rval != QLA_SUCCESS) | ||
| 3429 | qla8044_mark_entry_skipped(vha, entry_hdr, i); | ||
| 3430 | break; | ||
| 3431 | case QLA8044_POLLWR: | ||
| 3432 | rval = qla8044_minidump_process_pollwr(vha, entry_hdr, | ||
| 3433 | &data_ptr); | ||
| 3434 | if (rval != QLA_SUCCESS) | ||
| 3435 | qla8044_mark_entry_skipped(vha, entry_hdr, i); | ||
| 3436 | break; | ||
| 3092 | case QLA82XX_RDNOP: | 3437 | case QLA82XX_RDNOP: |
| 3093 | default: | 3438 | default: |
| 3094 | qla8044_mark_entry_skipped(vha, entry_hdr, i); | 3439 | qla8044_mark_entry_skipped(vha, entry_hdr, i); |
| @@ -3110,6 +3455,7 @@ skip_nxt_entry: | |||
| 3110 | "Dump data mismatch: Data collected: " | 3455 | "Dump data mismatch: Data collected: " |
| 3111 | "[0x%x], total_data_size:[0x%x]\n", | 3456 | "[0x%x], total_data_size:[0x%x]\n", |
| 3112 | data_collected, ha->md_dump_size); | 3457 | data_collected, ha->md_dump_size); |
| 3458 | rval = QLA_FUNCTION_FAILED; | ||
| 3113 | goto md_failed; | 3459 | goto md_failed; |
| 3114 | } | 3460 | } |
| 3115 | 3461 | ||
| @@ -3134,10 +3480,12 @@ qla8044_get_minidump(struct scsi_qla_host *vha) | |||
| 3134 | 3480 | ||
| 3135 | if (!qla8044_collect_md_data(vha)) { | 3481 | if (!qla8044_collect_md_data(vha)) { |
| 3136 | ha->fw_dumped = 1; | 3482 | ha->fw_dumped = 1; |
| 3483 | ha->prev_minidump_failed = 0; | ||
| 3137 | } else { | 3484 | } else { |
| 3138 | ql_log(ql_log_fatal, vha, 0xb0db, | 3485 | ql_log(ql_log_fatal, vha, 0xb0db, |
| 3139 | "%s: Unable to collect minidump\n", | 3486 | "%s: Unable to collect minidump\n", |
| 3140 | __func__); | 3487 | __func__); |
| 3488 | ha->prev_minidump_failed = 1; | ||
| 3141 | } | 3489 | } |
| 3142 | } | 3490 | } |
| 3143 | 3491 | ||
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h index 2ab2eabab908..ada36057d7cd 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.h +++ b/drivers/scsi/qla2xxx/qla_nx2.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -133,6 +133,7 @@ | |||
| 133 | #define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) | 133 | #define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) |
| 134 | #define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) | 134 | #define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) |
| 135 | #define QLA8044_LINK_SPEED_FACTOR 10 | 135 | #define QLA8044_LINK_SPEED_FACTOR 10 |
| 136 | #define QLA8044_FUN7_ACTIVE_INDEX 0x80 | ||
| 136 | 137 | ||
| 137 | /* FLASH API Defines */ | 138 | /* FLASH API Defines */ |
| 138 | #define QLA8044_FLASH_MAX_WAIT_USEC 100 | 139 | #define QLA8044_FLASH_MAX_WAIT_USEC 100 |
| @@ -431,6 +432,50 @@ struct qla8044_minidump_entry_pollrd { | |||
| 431 | uint32_t rsvd_1; | 432 | uint32_t rsvd_1; |
| 432 | } __packed; | 433 | } __packed; |
| 433 | 434 | ||
| 435 | struct qla8044_minidump_entry_rddfe { | ||
| 436 | struct qla8044_minidump_entry_hdr h; | ||
| 437 | uint32_t addr_1; | ||
| 438 | uint32_t value; | ||
| 439 | uint8_t stride; | ||
| 440 | uint8_t stride2; | ||
| 441 | uint16_t count; | ||
| 442 | uint32_t poll; | ||
| 443 | uint32_t mask; | ||
| 444 | uint32_t modify_mask; | ||
| 445 | uint32_t data_size; | ||
| 446 | uint32_t rsvd; | ||
| 447 | |||
| 448 | } __packed; | ||
| 449 | |||
| 450 | struct qla8044_minidump_entry_rdmdio { | ||
| 451 | struct qla8044_minidump_entry_hdr h; | ||
| 452 | |||
| 453 | uint32_t addr_1; | ||
| 454 | uint32_t addr_2; | ||
| 455 | uint32_t value_1; | ||
| 456 | uint8_t stride_1; | ||
| 457 | uint8_t stride_2; | ||
| 458 | uint16_t count; | ||
| 459 | uint32_t poll; | ||
| 460 | uint32_t mask; | ||
| 461 | uint32_t value_2; | ||
| 462 | uint32_t data_size; | ||
| 463 | |||
| 464 | } __packed; | ||
| 465 | |||
| 466 | struct qla8044_minidump_entry_pollwr { | ||
| 467 | struct qla8044_minidump_entry_hdr h; | ||
| 468 | uint32_t addr_1; | ||
| 469 | uint32_t addr_2; | ||
| 470 | uint32_t value_1; | ||
| 471 | uint32_t value_2; | ||
| 472 | uint32_t poll; | ||
| 473 | uint32_t mask; | ||
| 474 | uint32_t data_size; | ||
| 475 | uint32_t rsvd; | ||
| 476 | |||
| 477 | } __packed; | ||
| 478 | |||
| 434 | /* RDMUX2 Entry */ | 479 | /* RDMUX2 Entry */ |
| 435 | struct qla8044_minidump_entry_rdmux2 { | 480 | struct qla8044_minidump_entry_rdmux2 { |
| 436 | struct qla8044_minidump_entry_hdr h; | 481 | struct qla8044_minidump_entry_hdr h; |
| @@ -516,6 +561,9 @@ static const uint32_t qla8044_reg_tbl[] = { | |||
| 516 | #define QLA8044_DBG_RSVD_ARRAY_LEN 8 | 561 | #define QLA8044_DBG_RSVD_ARRAY_LEN 8 |
| 517 | #define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16 | 562 | #define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16 |
| 518 | #define QLA8044_SS_PCI_INDEX 0 | 563 | #define QLA8044_SS_PCI_INDEX 0 |
| 564 | #define QLA8044_RDDFE 38 | ||
| 565 | #define QLA8044_RDMDIO 39 | ||
| 566 | #define QLA8044_POLLWR 40 | ||
| 519 | 567 | ||
| 520 | struct qla8044_minidump_template_hdr { | 568 | struct qla8044_minidump_template_hdr { |
| 521 | uint32_t entry_type; | 569 | uint32_t entry_type; |
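The packed structures and the QLA8044_RDDFE/RDMDIO/POLLWR constants added to qla_nx2.h mirror entries in the firmware's minidump template: qla8044_collect_md_data() walks the template, reads each entry header, and dispatches on entry_type, falling back to marking the entry skipped. The sketch below illustrates that dispatch shape with a cut-down header; it is not the real qla8044_minidump_entry_hdr layout.

```c
#include <stdint.h>
#include <stdio.h>

enum {
	ENTRY_RDDFE  = 38,	/* QLA8044_RDDFE  */
	ENTRY_RDMDIO = 39,	/* QLA8044_RDMDIO */
	ENTRY_POLLWR = 40,	/* QLA8044_POLLWR */
};

/* Illustrative, cut-down entry header. */
struct entry_hdr {
	uint32_t entry_type;
	uint32_t entry_size;
} __attribute__((packed));

static int process_entry(const struct entry_hdr *h)
{
	switch (h->entry_type) {
	case ENTRY_RDDFE:
		printf("capture DFE registers (%u bytes)\n", h->entry_size);
		return 0;
	case ENTRY_RDMDIO:
		printf("capture MDIO registers (%u bytes)\n", h->entry_size);
		return 0;
	case ENTRY_POLLWR:
		printf("poll then write (%u bytes)\n", h->entry_size);
		return 0;
	default:
		return -1;	/* unknown entry: caller marks it skipped */
	}
}

int main(void)
{
	struct entry_hdr h = { .entry_type = ENTRY_RDMDIO, .entry_size = 64 };

	if (process_entry(&h))
		printf("entry skipped\n");
	return 0;
}
```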
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index afc84814e9bb..d96bfb55e57b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) | |||
| 616 | 616 | ||
| 617 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { | 617 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { |
| 618 | /* List assured to be having elements */ | 618 | /* List assured to be having elements */ |
| 619 | qla2x00_clean_dsd_pool(ha, sp); | 619 | qla2x00_clean_dsd_pool(ha, sp, NULL); |
| 620 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; | 620 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; |
| 621 | } | 621 | } |
| 622 | 622 | ||
| @@ -781,7 +781,7 @@ static int | |||
| 781 | qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) | 781 | qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) |
| 782 | { | 782 | { |
| 783 | #define ABORT_POLLING_PERIOD 1000 | 783 | #define ABORT_POLLING_PERIOD 1000 |
| 784 | #define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) | 784 | #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) |
| 785 | unsigned long wait_iter = ABORT_WAIT_ITER; | 785 | unsigned long wait_iter = ABORT_WAIT_ITER; |
| 786 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 786 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
| 787 | struct qla_hw_data *ha = vha->hw; | 787 | struct qla_hw_data *ha = vha->hw; |
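Reading the macro change above: with ABORT_POLLING_PERIOD fixed at 1000 ms, ABORT_WAIT_ITER now expands to (2 * 1000) / 1000 = 2 instead of (10 * 1000) / 1000 = 10, so qla2x00_eh_wait_on_command() gives an aborted command roughly 2 seconds of 1-second polls to complete rather than 10.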
| @@ -844,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) | |||
| 844 | } | 844 | } |
| 845 | 845 | ||
| 846 | /* | 846 | /* |
| 847 | * qla2x00_wait_for_reset_ready | 847 | * qla2x00_wait_for_hba_ready |
| 848 | * Wait till the HBA is online after going through | 848 | * Wait till the HBA is ready before doing driver unload |
| 849 | * <= MAX_RETRIES_OF_ISP_ABORT or | ||
| 850 | * finally HBA is disabled ie marked offline or flash | ||
| 851 | * operations are in progress. | ||
| 852 | * | 849 | * |
| 853 | * Input: | 850 | * Input: |
| 854 | * ha - pointer to host adapter structure | 851 | * ha - pointer to host adapter structure |
| @@ -857,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) | |||
| 857 | * Does context switching-Release SPIN_LOCK | 854 | * Does context switching-Release SPIN_LOCK |
| 858 | * (if any) before calling this routine. | 855 | * (if any) before calling this routine. |
| 859 | * | 856 | * |
| 860 | * Return: | ||
| 861 | * Success (Adapter is online/no flash ops) : 0 | ||
| 862 | * Failed (Adapter is offline/disabled/flash ops in progress) : 1 | ||
| 863 | */ | 857 | */ |
| 864 | static int | 858 | static void |
| 865 | qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha) | 859 | qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) |
| 866 | { | 860 | { |
| 867 | int return_status; | ||
| 868 | unsigned long wait_online; | ||
| 869 | struct qla_hw_data *ha = vha->hw; | 861 | struct qla_hw_data *ha = vha->hw; |
| 870 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | ||
| 871 | 862 | ||
| 872 | wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); | 863 | while ((!(vha->flags.online) || ha->dpc_active || |
| 873 | while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || | 864 | ha->flags.mbox_busy)) |
| 874 | test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || | ||
| 875 | test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || | ||
| 876 | ha->optrom_state != QLA_SWAITING || | ||
| 877 | ha->dpc_active) && time_before(jiffies, wait_online)) | ||
| 878 | msleep(1000); | 865 | msleep(1000); |
| 879 | |||
| 880 | if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING) | ||
| 881 | return_status = QLA_SUCCESS; | ||
| 882 | else | ||
| 883 | return_status = QLA_FUNCTION_FAILED; | ||
| 884 | |||
| 885 | ql_dbg(ql_dbg_taskm, vha, 0x8019, | ||
| 886 | "%s return status=%d.\n", __func__, return_status); | ||
| 887 | |||
| 888 | return return_status; | ||
| 889 | } | 866 | } |
| 890 | 867 | ||
| 891 | int | 868 | int |
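qla2x00_wait_for_hba_ready(), which replaces qla2x00_wait_for_reset_ready() above, is deliberately simpler: it blocks in 1-second steps until the adapter is online and neither the DPC thread nor a mailbox command is active, and qla2x00_remove_one() calls it before setting UNLOADING so teardown does not race in-flight work. A standalone sketch of that quiesce gate follows; struct hba_state is invented for the example.

```c
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Invented stand-in for the adapter flags consulted by the new helper:
 * online, DPC thread running, mailbox command in flight. */
struct hba_state {
	bool online;
	bool dpc_active;
	bool mbox_busy;
};

/* Block until the adapter is quiescent, polling once per second just as
 * the driver does with msleep(1000). */
static void wait_for_hba_ready(volatile struct hba_state *st)
{
	while (!st->online || st->dpc_active || st->mbox_busy)
		sleep(1);
}

int main(void)
{
	struct hba_state st = { .online = true };	/* already quiescent */

	wait_for_hba_ready(&st);
	puts("safe to unload");
	return 0;
}
```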
| @@ -945,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 945 | int ret; | 922 | int ret; |
| 946 | unsigned int id, lun; | 923 | unsigned int id, lun; |
| 947 | unsigned long flags; | 924 | unsigned long flags; |
| 948 | int wait = 0; | 925 | int rval, wait = 0; |
| 949 | struct qla_hw_data *ha = vha->hw; | 926 | struct qla_hw_data *ha = vha->hw; |
| 950 | 927 | ||
| 951 | if (!CMD_SP(cmd)) | 928 | if (!CMD_SP(cmd)) |
| @@ -974,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 974 | sp_get(sp); | 951 | sp_get(sp); |
| 975 | 952 | ||
| 976 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 953 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 977 | if (ha->isp_ops->abort_command(sp)) { | 954 | rval = ha->isp_ops->abort_command(sp); |
| 978 | ret = FAILED; | 955 | if (rval) { |
| 956 | if (rval == QLA_FUNCTION_PARAMETER_ERROR) { | ||
| 957 | /* | ||
| 958 | * Decrement the ref_count since we can't find the | ||
| 959 | * command | ||
| 960 | */ | ||
| 961 | atomic_dec(&sp->ref_count); | ||
| 962 | ret = SUCCESS; | ||
| 963 | } else | ||
| 964 | ret = FAILED; | ||
| 965 | |||
| 979 | ql_dbg(ql_dbg_taskm, vha, 0x8003, | 966 | ql_dbg(ql_dbg_taskm, vha, 0x8003, |
| 980 | "Abort command mbx failed cmd=%p.\n", cmd); | 967 | "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval); |
| 981 | } else { | 968 | } else { |
| 982 | ql_dbg(ql_dbg_taskm, vha, 0x8004, | 969 | ql_dbg(ql_dbg_taskm, vha, 0x8004, |
| 983 | "Abort command mbx success cmd=%p.\n", cmd); | 970 | "Abort command mbx success cmd=%p.\n", cmd); |
| @@ -985,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 985 | } | 972 | } |
| 986 | 973 | ||
| 987 | spin_lock_irqsave(&ha->hardware_lock, flags); | 974 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 975 | /* | ||
| 976 | * Clear the slot in the outstanding_cmds array if we can't find the | ||
| 977 | * command to reclaim the resources. | ||
| 978 | */ | ||
| 979 | if (rval == QLA_FUNCTION_PARAMETER_ERROR) | ||
| 980 | vha->req->outstanding_cmds[sp->handle] = NULL; | ||
| 988 | sp->done(ha, sp, 0); | 981 | sp->done(ha, sp, 0); |
| 989 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 982 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 990 | 983 | ||
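The abort path above now distinguishes QLA_FUNCTION_PARAMETER_ERROR (the firmware no longer knows the command) from a genuine failure: the handler drops the extra reference taken by sp_get(), reports SUCCESS, and clears the outstanding_cmds slot under hardware_lock before calling sp->done() so the handle is reclaimed. The toy program below walks that reference/slot bookkeeping; constants and struct fields are simplified stand-ins.

```c
#include <stdio.h>

#define QLA_SUCCESS			0
#define QLA_FUNCTION_FAILED		1
#define QLA_FUNCTION_PARAMETER_ERROR	2

struct srb {
	int ref_count;
	int handle;
};

static struct srb *outstanding_cmds[32];

/* Model of the mailbox abort: the firmware cannot find the command. */
static int abort_command(struct srb *sp)
{
	(void)sp;
	return QLA_FUNCTION_PARAMETER_ERROR;
}

static int eh_abort(struct srb *sp)
{
	int rval;

	sp->ref_count++;			/* sp_get() */
	rval = abort_command(sp);
	if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
		sp->ref_count--;		/* drop the extra reference */
		outstanding_cmds[sp->handle] = NULL;	/* reclaim the slot */
		return 0;			/* treat as SUCCESS */
	}
	return rval ? -1 : 0;
}

int main(void)
{
	struct srb sp = { .ref_count = 1, .handle = 3 };
	int ret;

	outstanding_cmds[sp.handle] = &sp;
	ret = eh_abort(&sp);
	printf("abort %s, ref_count=%d, slot %s\n",
	       ret ? "failed" : "succeeded", sp.ref_count,
	       outstanding_cmds[sp.handle] ? "still set" : "cleared");
	return 0;
}
```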
| @@ -1236,7 +1229,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
| 1236 | ql_log(ql_log_info, vha, 0x8018, | 1229 | ql_log(ql_log_info, vha, 0x8018, |
| 1237 | "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); | 1230 | "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); |
| 1238 | 1231 | ||
| 1239 | if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) | 1232 | /* |
| 1233 | * No point in issuing another reset if one is active. Also do not | ||
| 1234 | * attempt a reset if we are updating flash. | ||
| 1235 | */ | ||
| 1236 | if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) | ||
| 1240 | goto eh_host_reset_lock; | 1237 | goto eh_host_reset_lock; |
| 1241 | 1238 | ||
| 1242 | if (vha != base_vha) { | 1239 | if (vha != base_vha) { |
| @@ -2270,6 +2267,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) | |||
| 2270 | ha->device_type |= DT_IIDMA; | 2267 | ha->device_type |= DT_IIDMA; |
| 2271 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; | 2268 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
| 2272 | break; | 2269 | break; |
| 2270 | case PCI_DEVICE_ID_QLOGIC_ISP2271: | ||
| 2271 | ha->device_type |= DT_ISP2271; | ||
| 2272 | ha->device_type |= DT_ZIO_SUPPORTED; | ||
| 2273 | ha->device_type |= DT_FWI2; | ||
| 2274 | ha->device_type |= DT_IIDMA; | ||
| 2275 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; | ||
| 2276 | break; | ||
| 2273 | } | 2277 | } |
| 2274 | 2278 | ||
| 2275 | if (IS_QLA82XX(ha)) | 2279 | if (IS_QLA82XX(ha)) |
| @@ -2346,7 +2350,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2346 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || | 2350 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || |
| 2347 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || | 2351 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || |
| 2348 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || | 2352 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || |
| 2349 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) { | 2353 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || |
| 2354 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) { | ||
| 2350 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | 2355 | bars = pci_select_bars(pdev, IORESOURCE_MEM); |
| 2351 | mem_only = 1; | 2356 | mem_only = 1; |
| 2352 | ql_dbg_pci(ql_dbg_init, pdev, 0x0007, | 2357 | ql_dbg_pci(ql_dbg_init, pdev, 0x0007, |
| @@ -2877,6 +2882,7 @@ skip_dpc: | |||
| 2877 | 2882 | ||
| 2878 | base_vha->flags.init_done = 1; | 2883 | base_vha->flags.init_done = 1; |
| 2879 | base_vha->flags.online = 1; | 2884 | base_vha->flags.online = 1; |
| 2885 | ha->prev_minidump_failed = 0; | ||
| 2880 | 2886 | ||
| 2881 | ql_dbg(ql_dbg_init, base_vha, 0x00f2, | 2887 | ql_dbg(ql_dbg_init, base_vha, 0x00f2, |
| 2882 | "Init done and hba is online.\n"); | 2888 | "Init done and hba is online.\n"); |
| @@ -3136,6 +3142,8 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
| 3136 | base_vha = pci_get_drvdata(pdev); | 3142 | base_vha = pci_get_drvdata(pdev); |
| 3137 | ha = base_vha->hw; | 3143 | ha = base_vha->hw; |
| 3138 | 3144 | ||
| 3145 | qla2x00_wait_for_hba_ready(base_vha); | ||
| 3146 | |||
| 3139 | set_bit(UNLOADING, &base_vha->dpc_flags); | 3147 | set_bit(UNLOADING, &base_vha->dpc_flags); |
| 3140 | 3148 | ||
| 3141 | if (IS_QLAFX00(ha)) | 3149 | if (IS_QLAFX00(ha)) |
| @@ -3645,6 +3653,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha) | |||
| 3645 | ha->eft = NULL; | 3653 | ha->eft = NULL; |
| 3646 | ha->eft_dma = 0; | 3654 | ha->eft_dma = 0; |
| 3647 | ha->fw_dumped = 0; | 3655 | ha->fw_dumped = 0; |
| 3656 | ha->fw_dump_cap_flags = 0; | ||
| 3648 | ha->fw_dump_reading = 0; | 3657 | ha->fw_dump_reading = 0; |
| 3649 | ha->fw_dump = NULL; | 3658 | ha->fw_dump = NULL; |
| 3650 | ha->fw_dump_len = 0; | 3659 | ha->fw_dump_len = 0; |
| @@ -4913,12 +4922,13 @@ qla2x00_do_dpc(void *data) | |||
| 4913 | if (qlafx00_reset_initialize(base_vha)) { | 4922 | if (qlafx00_reset_initialize(base_vha)) { |
| 4914 | /* Failed. Abort isp later. */ | 4923 | /* Failed. Abort isp later. */ |
| 4915 | if (!test_bit(UNLOADING, | 4924 | if (!test_bit(UNLOADING, |
| 4916 | &base_vha->dpc_flags)) | 4925 | &base_vha->dpc_flags)) { |
| 4917 | set_bit(ISP_UNRECOVERABLE, | 4926 | set_bit(ISP_UNRECOVERABLE, |
| 4918 | &base_vha->dpc_flags); | 4927 | &base_vha->dpc_flags); |
| 4919 | ql_dbg(ql_dbg_dpc, base_vha, | 4928 | ql_dbg(ql_dbg_dpc, base_vha, |
| 4920 | 0x4021, | 4929 | 0x4021, |
| 4921 | "Reset Recovery Failed\n"); | 4930 | "Reset Recovery Failed\n"); |
| 4931 | } | ||
| 4922 | } | 4932 | } |
| 4923 | } | 4933 | } |
| 4924 | 4934 | ||
| @@ -5077,8 +5087,10 @@ intr_on_check: | |||
| 5077 | ha->isp_ops->enable_intrs(ha); | 5087 | ha->isp_ops->enable_intrs(ha); |
| 5078 | 5088 | ||
| 5079 | if (test_and_clear_bit(BEACON_BLINK_NEEDED, | 5089 | if (test_and_clear_bit(BEACON_BLINK_NEEDED, |
| 5080 | &base_vha->dpc_flags)) | 5090 | &base_vha->dpc_flags)) { |
| 5081 | ha->isp_ops->beacon_blink(base_vha); | 5091 | if (ha->beacon_blink_led == 1) |
| 5092 | ha->isp_ops->beacon_blink(base_vha); | ||
| 5093 | } | ||
| 5082 | 5094 | ||
| 5083 | if (!IS_QLAFX00(ha)) | 5095 | if (!IS_QLAFX00(ha)) |
| 5084 | qla2x00_do_dpc_all_vps(base_vha); | 5096 | qla2x00_do_dpc_all_vps(base_vha); |
| @@ -5325,7 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
| 5325 | #define FW_ISP82XX 7 | 5337 | #define FW_ISP82XX 7 |
| 5326 | #define FW_ISP2031 8 | 5338 | #define FW_ISP2031 8 |
| 5327 | #define FW_ISP8031 9 | 5339 | #define FW_ISP8031 9 |
| 5328 | #define FW_ISP2071 10 | 5340 | #define FW_ISP27XX 10 |
| 5329 | 5341 | ||
| 5330 | #define FW_FILE_ISP21XX "ql2100_fw.bin" | 5342 | #define FW_FILE_ISP21XX "ql2100_fw.bin" |
| 5331 | #define FW_FILE_ISP22XX "ql2200_fw.bin" | 5343 | #define FW_FILE_ISP22XX "ql2200_fw.bin" |
| @@ -5337,7 +5349,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
| 5337 | #define FW_FILE_ISP82XX "ql8200_fw.bin" | 5349 | #define FW_FILE_ISP82XX "ql8200_fw.bin" |
| 5338 | #define FW_FILE_ISP2031 "ql2600_fw.bin" | 5350 | #define FW_FILE_ISP2031 "ql2600_fw.bin" |
| 5339 | #define FW_FILE_ISP8031 "ql8300_fw.bin" | 5351 | #define FW_FILE_ISP8031 "ql8300_fw.bin" |
| 5340 | #define FW_FILE_ISP2071 "ql2700_fw.bin" | 5352 | #define FW_FILE_ISP27XX "ql2700_fw.bin" |
| 5341 | 5353 | ||
| 5342 | 5354 | ||
| 5343 | static DEFINE_MUTEX(qla_fw_lock); | 5355 | static DEFINE_MUTEX(qla_fw_lock); |
| @@ -5353,7 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = { | |||
| 5353 | { .name = FW_FILE_ISP82XX, }, | 5365 | { .name = FW_FILE_ISP82XX, }, |
| 5354 | { .name = FW_FILE_ISP2031, }, | 5366 | { .name = FW_FILE_ISP2031, }, |
| 5355 | { .name = FW_FILE_ISP8031, }, | 5367 | { .name = FW_FILE_ISP8031, }, |
| 5356 | { .name = FW_FILE_ISP2071, }, | 5368 | { .name = FW_FILE_ISP27XX, }, |
| 5357 | }; | 5369 | }; |
| 5358 | 5370 | ||
| 5359 | struct fw_blob * | 5371 | struct fw_blob * |
| @@ -5382,8 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) | |||
| 5382 | blob = &qla_fw_blobs[FW_ISP2031]; | 5394 | blob = &qla_fw_blobs[FW_ISP2031]; |
| 5383 | } else if (IS_QLA8031(ha)) { | 5395 | } else if (IS_QLA8031(ha)) { |
| 5384 | blob = &qla_fw_blobs[FW_ISP8031]; | 5396 | blob = &qla_fw_blobs[FW_ISP8031]; |
| 5385 | } else if (IS_QLA2071(ha)) { | 5397 | } else if (IS_QLA27XX(ha)) { |
| 5386 | blob = &qla_fw_blobs[FW_ISP2071]; | 5398 | blob = &qla_fw_blobs[FW_ISP27XX]; |
| 5387 | } else { | 5399 | } else { |
| 5388 | return NULL; | 5400 | return NULL; |
| 5389 | } | 5401 | } |
| @@ -5714,6 +5726,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { | |||
| 5714 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, | 5726 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, |
| 5715 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, | 5727 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, |
| 5716 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, | 5728 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, |
| 5729 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, | ||
| 5717 | { 0 }, | 5730 | { 0 }, |
| 5718 | }; | 5731 | }; |
| 5719 | MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); | 5732 | MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); |
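Side note on the firmware-table hunks above: the FW_ISP2071 -> FW_ISP27XX rename only works because the FW_* defines double as indices into qla_fw_blobs[]; the macro keeps the value 10, so the renamed entry still lands in the slot that qla2x00_request_firmware() picks for IS_QLA27XX() adapters. A minimal standalone sketch of that indexing pattern (names and the reduced blob struct are illustrative, not the driver's actual definitions):

    /* Sketch: FW_* macros double as array indices, so value and slot
     * must stay in sync when an entry is renamed. */
    #define FW_ISP21XX      0
    #define FW_ISP27XX      10      /* was FW_ISP2071; value unchanged */

    struct fw_blob_sketch {
            const char *name;
    };

    static struct fw_blob_sketch fw_blobs_sketch[] = {
            [FW_ISP21XX] = { .name = "ql2100_fw.bin" },
            /* ... entries 1-9 elided ... */
            [FW_ISP27XX] = { .name = "ql2700_fw.bin" },
    };

    static const char *fw_name_for_27xx(void)
    {
            return fw_blobs_sketch[FW_ISP27XX].name;   /* "ql2700_fw.bin" */
    }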
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index 46ef0ac48f44..2fb7ebfbbc38 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index f28123e8ed65..bca173e56f16 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -1727,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha) | |||
| 1727 | if (IS_QLA2031(ha)) { | 1727 | if (IS_QLA2031(ha)) { |
| 1728 | led_select_value = qla83xx_select_led_port(ha); | 1728 | led_select_value = qla83xx_select_led_port(ha); |
| 1729 | 1729 | ||
| 1730 | qla83xx_wr_reg(vha, led_select_value, 0x40002000); | 1730 | qla83xx_wr_reg(vha, led_select_value, 0x40000230); |
| 1731 | qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000); | 1731 | qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230); |
| 1732 | msleep(1000); | ||
| 1733 | qla83xx_wr_reg(vha, led_select_value, 0x40004000); | ||
| 1734 | qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000); | ||
| 1735 | } else if (IS_QLA8031(ha)) { | 1732 | } else if (IS_QLA8031(ha)) { |
| 1736 | led_select_value = qla83xx_select_led_port(ha); | 1733 | led_select_value = qla83xx_select_led_port(ha); |
| 1737 | 1734 | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0cb73074c199..b1d10f9935c7 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
| @@ -182,6 +182,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, | |||
| 182 | void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, | 182 | void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, |
| 183 | struct atio_from_isp *atio) | 183 | struct atio_from_isp *atio) |
| 184 | { | 184 | { |
| 185 | ql_dbg(ql_dbg_tgt, vha, 0xe072, | ||
| 186 | "%s: qla_target(%d): type %x ox_id %04x\n", | ||
| 187 | __func__, vha->vp_idx, atio->u.raw.entry_type, | ||
| 188 | be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); | ||
| 189 | |||
| 185 | switch (atio->u.raw.entry_type) { | 190 | switch (atio->u.raw.entry_type) { |
| 186 | case ATIO_TYPE7: | 191 | case ATIO_TYPE7: |
| 187 | { | 192 | { |
| @@ -236,6 +241,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, | |||
| 236 | void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) | 241 | void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) |
| 237 | { | 242 | { |
| 238 | switch (pkt->entry_type) { | 243 | switch (pkt->entry_type) { |
| 244 | case CTIO_CRC2: | ||
| 245 | ql_dbg(ql_dbg_tgt, vha, 0xe073, | ||
| 246 | "qla_target(%d):%s: CRC2 Response pkt\n", | ||
| 247 | vha->vp_idx, __func__); | ||
| 239 | case CTIO_TYPE7: | 248 | case CTIO_TYPE7: |
| 240 | { | 249 | { |
| 241 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; | 250 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; |
| @@ -1350,13 +1359,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) | |||
| 1350 | 1359 | ||
| 1351 | prm->cmd->sg_mapped = 1; | 1360 | prm->cmd->sg_mapped = 1; |
| 1352 | 1361 | ||
| 1353 | /* | 1362 | if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { |
| 1354 | * If greater than four sg entries then we need to allocate | 1363 | /* |
| 1355 | * the continuation entries | 1364 | * If greater than four sg entries then we need to allocate |
| 1356 | */ | 1365 | * the continuation entries |
| 1357 | if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) | 1366 | */ |
| 1358 | prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - | 1367 | if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) |
| 1359 | prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); | 1368 | prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - |
| 1369 | prm->tgt->datasegs_per_cmd, | ||
| 1370 | prm->tgt->datasegs_per_cont); | ||
| 1371 | } else { | ||
| 1372 | /* DIF */ | ||
| 1373 | if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || | ||
| 1374 | (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { | ||
| 1375 | prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); | ||
| 1376 | prm->tot_dsds = prm->seg_cnt; | ||
| 1377 | } else | ||
| 1378 | prm->tot_dsds = prm->seg_cnt; | ||
| 1379 | |||
| 1380 | if (cmd->prot_sg_cnt) { | ||
| 1381 | prm->prot_sg = cmd->prot_sg; | ||
| 1382 | prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev, | ||
| 1383 | cmd->prot_sg, cmd->prot_sg_cnt, | ||
| 1384 | cmd->dma_data_direction); | ||
| 1385 | if (unlikely(prm->prot_seg_cnt == 0)) | ||
| 1386 | goto out_err; | ||
| 1387 | |||
| 1388 | if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || | ||
| 1389 | (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { | ||
| 1390 | /* DIF bundling not supported here */ | ||
| 1391 | prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, | ||
| 1392 | cmd->blk_sz); | ||
| 1393 | prm->tot_dsds += prm->prot_seg_cnt; | ||
| 1394 | } else | ||
| 1395 | prm->tot_dsds += prm->prot_seg_cnt; | ||
| 1396 | } | ||
| 1397 | } | ||
| 1360 | 1398 | ||
| 1361 | ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", | 1399 | ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", |
| 1362 | prm->seg_cnt, prm->req_cnt); | 1400 | prm->seg_cnt, prm->req_cnt); |
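With the DIF cases added above, the descriptor accounting is no longer a plain scatter-gather count: for DIN_INSERT/DOUT_STRIP the counts are derived per logical block, and any mapped protection SG entries are added into tot_dsds. A standalone sketch of that arithmetic, assuming a 256 KiB transfer with 512-byte blocks (the numbers are illustrative only):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int bufflen = 262144;  /* assumed 256 KiB transfer   */
            unsigned int blk_sz  = 512;     /* assumed logical block size */

            /* DIN_INSERT / DOUT_STRIP: data side counted per block ...   */
            unsigned int seg_cnt = DIV_ROUND_UP(bufflen, blk_sz);      /* 512 */
            /* ... and, with no DIF bundling for these ops, the protection
             * side is counted the same way before being added on top.    */
            unsigned int prot_seg_cnt = DIV_ROUND_UP(bufflen, blk_sz);
            unsigned int tot_dsds = seg_cnt + prot_seg_cnt;            /* 1024 */

            printf("seg_cnt=%u tot_dsds=%u\n", seg_cnt, tot_dsds);
            return 0;
    }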
| @@ -1377,6 +1415,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha, | |||
| 1377 | BUG_ON(!cmd->sg_mapped); | 1415 | BUG_ON(!cmd->sg_mapped); |
| 1378 | pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); | 1416 | pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); |
| 1379 | cmd->sg_mapped = 0; | 1417 | cmd->sg_mapped = 0; |
| 1418 | |||
| 1419 | if (cmd->prot_sg_cnt) | ||
| 1420 | pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, | ||
| 1421 | cmd->dma_data_direction); | ||
| 1422 | |||
| 1423 | if (cmd->ctx_dsd_alloced) | ||
| 1424 | qla2x00_clean_dsd_pool(ha, NULL, cmd); | ||
| 1425 | |||
| 1426 | if (cmd->ctx) | ||
| 1427 | dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); | ||
| 1380 | } | 1428 | } |
| 1381 | 1429 | ||
| 1382 | static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, | 1430 | static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, |
| @@ -1665,8 +1713,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, | |||
| 1665 | return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; | 1713 | return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; |
| 1666 | } | 1714 | } |
| 1667 | 1715 | ||
| 1668 | ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n", | 1716 | ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n", |
| 1669 | vha->vp_idx, cmd->tag); | 1717 | vha->vp_idx, cmd->tag, |
| 1718 | be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); | ||
| 1670 | 1719 | ||
| 1671 | prm->cmd = cmd; | 1720 | prm->cmd = cmd; |
| 1672 | prm->tgt = tgt; | 1721 | prm->tgt = tgt; |
| @@ -1902,6 +1951,323 @@ skip_explict_conf: | |||
| 1902 | /* Sense with len > 24, is it possible ??? */ | 1951 | /* Sense with len > 24, is it possible ??? */ |
| 1903 | } | 1952 | } |
| 1904 | 1953 | ||
| 1954 | |||
| 1955 | |||
| 1956 | /* T10-DIF support helpers */ | ||
| 1957 | static inline int | ||
| 1958 | qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) | ||
| 1959 | { | ||
| 1960 | /* | ||
| 1961 | * Uncomment when corresponding SCSI changes are done. | ||
| 1962 | * | ||
| 1963 | if (!sp->cmd->prot_chk) | ||
| 1964 | return 0; | ||
| 1965 | * | ||
| 1966 | */ | ||
| 1967 | switch (se_cmd->prot_op) { | ||
| 1968 | case TARGET_PROT_DOUT_INSERT: | ||
| 1969 | case TARGET_PROT_DIN_STRIP: | ||
| 1970 | if (ql2xenablehba_err_chk >= 1) | ||
| 1971 | return 1; | ||
| 1972 | break; | ||
| 1973 | case TARGET_PROT_DOUT_PASS: | ||
| 1974 | case TARGET_PROT_DIN_PASS: | ||
| 1975 | if (ql2xenablehba_err_chk >= 2) | ||
| 1976 | return 1; | ||
| 1977 | break; | ||
| 1978 | case TARGET_PROT_DIN_INSERT: | ||
| 1979 | case TARGET_PROT_DOUT_STRIP: | ||
| 1980 | return 1; | ||
| 1981 | default: | ||
| 1982 | break; | ||
| 1983 | } | ||
| 1984 | return 0; | ||
| 1985 | } | ||
| 1986 | |||
| 1987 | /* | ||
| 1988 | * qlt_set_t10dif_tags - Extract Ref and App tags from the SCSI command | ||
| 1989 | * | ||
| 1990 | */ | ||
| 1991 | static inline void | ||
| 1992 | qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) | ||
| 1993 | { | ||
| 1994 | uint32_t lba = 0xffffffff & se_cmd->t_task_lba; | ||
| 1995 | |||
| 1996 | /* Wait until Mode Sense/Select cmd, modepage Ah, subpage 2 | ||
| 1997 | * have been implemented by TCM, before AppTag is available. | ||
| 1998 | * Look for modesense_handlers[] | ||
| 1999 | */ | ||
| 2000 | ctx->app_tag = __constant_cpu_to_le16(0); | ||
| 2001 | ctx->app_tag_mask[0] = 0x0; | ||
| 2002 | ctx->app_tag_mask[1] = 0x0; | ||
| 2003 | |||
| 2004 | switch (se_cmd->prot_type) { | ||
| 2005 | case TARGET_DIF_TYPE0_PROT: | ||
| 2006 | /* | ||
| 2007 | * No check for ql2xenablehba_err_chk, as it would be an | ||
| 2008 | * I/O error if hba tag generation is not done. | ||
| 2009 | */ | ||
| 2010 | ctx->ref_tag = cpu_to_le32(lba); | ||
| 2011 | |||
| 2012 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
| 2013 | break; | ||
| 2014 | |||
| 2015 | /* enable ALL bytes of the ref tag */ | ||
| 2016 | ctx->ref_tag_mask[0] = 0xff; | ||
| 2017 | ctx->ref_tag_mask[1] = 0xff; | ||
| 2018 | ctx->ref_tag_mask[2] = 0xff; | ||
| 2019 | ctx->ref_tag_mask[3] = 0xff; | ||
| 2020 | break; | ||
| 2021 | /* | ||
| 2022 | * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and | ||
| 2023 | * 16 bit app tag. | ||
| 2024 | */ | ||
| 2025 | case TARGET_DIF_TYPE1_PROT: | ||
| 2026 | ctx->ref_tag = cpu_to_le32(lba); | ||
| 2027 | |||
| 2028 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
| 2029 | break; | ||
| 2030 | |||
| 2031 | /* enable ALL bytes of the ref tag */ | ||
| 2032 | ctx->ref_tag_mask[0] = 0xff; | ||
| 2033 | ctx->ref_tag_mask[1] = 0xff; | ||
| 2034 | ctx->ref_tag_mask[2] = 0xff; | ||
| 2035 | ctx->ref_tag_mask[3] = 0xff; | ||
| 2036 | break; | ||
| 2037 | /* | ||
| 2038 | * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to | ||
| 2039 | * match LBA in CDB + N | ||
| 2040 | */ | ||
| 2041 | case TARGET_DIF_TYPE2_PROT: | ||
| 2042 | ctx->ref_tag = cpu_to_le32(lba); | ||
| 2043 | |||
| 2044 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
| 2045 | break; | ||
| 2046 | |||
| 2047 | /* enable ALL bytes of the ref tag */ | ||
| 2048 | ctx->ref_tag_mask[0] = 0xff; | ||
| 2049 | ctx->ref_tag_mask[1] = 0xff; | ||
| 2050 | ctx->ref_tag_mask[2] = 0xff; | ||
| 2051 | ctx->ref_tag_mask[3] = 0xff; | ||
| 2052 | break; | ||
| 2053 | |||
| 2054 | /* For Type 3 protection: 16 bit GUARD only */ | ||
| 2055 | case TARGET_DIF_TYPE3_PROT: | ||
| 2056 | ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = | ||
| 2057 | ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; | ||
| 2058 | break; | ||
| 2059 | } | ||
| 2060 | } | ||
| 2061 | |||
| 2062 | |||
| 2063 | static inline int | ||
| 2064 | qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | ||
| 2065 | { | ||
| 2066 | uint32_t *cur_dsd; | ||
| 2067 | int sgc; | ||
| 2068 | uint32_t transfer_length = 0; | ||
| 2069 | uint32_t data_bytes; | ||
| 2070 | uint32_t dif_bytes; | ||
| 2071 | uint8_t bundling = 1; | ||
| 2072 | uint8_t *clr_ptr; | ||
| 2073 | struct crc_context *crc_ctx_pkt = NULL; | ||
| 2074 | struct qla_hw_data *ha; | ||
| 2075 | struct ctio_crc2_to_fw *pkt; | ||
| 2076 | dma_addr_t crc_ctx_dma; | ||
| 2077 | uint16_t fw_prot_opts = 0; | ||
| 2078 | struct qla_tgt_cmd *cmd = prm->cmd; | ||
| 2079 | struct se_cmd *se_cmd = &cmd->se_cmd; | ||
| 2080 | uint32_t h; | ||
| 2081 | struct atio_from_isp *atio = &prm->cmd->atio; | ||
| 2082 | |||
| 2083 | sgc = 0; | ||
| 2084 | ha = vha->hw; | ||
| 2085 | |||
| 2086 | pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; | ||
| 2087 | prm->pkt = pkt; | ||
| 2088 | memset(pkt, 0, sizeof(*pkt)); | ||
| 2089 | |||
| 2090 | ql_dbg(ql_dbg_tgt, vha, 0xe071, | ||
| 2091 | "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", | ||
| 2092 | vha->vp_idx, __func__, se_cmd, se_cmd->prot_op, | ||
| 2093 | prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); | ||
| 2094 | |||
| 2095 | if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || | ||
| 2096 | (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) | ||
| 2097 | bundling = 0; | ||
| 2098 | |||
| 2099 | /* Compute DIF length and adjust data length to include protection */ | ||
| 2100 | data_bytes = cmd->bufflen; | ||
| 2101 | dif_bytes = (data_bytes / cmd->blk_sz) * 8; | ||
| 2102 | |||
| 2103 | switch (se_cmd->prot_op) { | ||
| 2104 | case TARGET_PROT_DIN_INSERT: | ||
| 2105 | case TARGET_PROT_DOUT_STRIP: | ||
| 2106 | transfer_length = data_bytes; | ||
| 2107 | data_bytes += dif_bytes; | ||
| 2108 | break; | ||
| 2109 | |||
| 2110 | case TARGET_PROT_DIN_STRIP: | ||
| 2111 | case TARGET_PROT_DOUT_INSERT: | ||
| 2112 | case TARGET_PROT_DIN_PASS: | ||
| 2113 | case TARGET_PROT_DOUT_PASS: | ||
| 2114 | transfer_length = data_bytes + dif_bytes; | ||
| 2115 | break; | ||
| 2116 | |||
| 2117 | default: | ||
| 2118 | BUG(); | ||
| 2119 | break; | ||
| 2120 | } | ||
| 2121 | |||
| 2122 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
| 2123 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ | ||
| 2124 | /* HBA error checking enabled */ | ||
| 2125 | else if (IS_PI_UNINIT_CAPABLE(ha)) { | ||
| 2126 | if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || | ||
| 2127 | (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) | ||
| 2128 | fw_prot_opts |= PO_DIS_VALD_APP_ESC; | ||
| 2129 | else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) | ||
| 2130 | fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; | ||
| 2131 | } | ||
| 2132 | |||
| 2133 | switch (se_cmd->prot_op) { | ||
| 2134 | case TARGET_PROT_DIN_INSERT: | ||
| 2135 | case TARGET_PROT_DOUT_INSERT: | ||
| 2136 | fw_prot_opts |= PO_MODE_DIF_INSERT; | ||
| 2137 | break; | ||
| 2138 | case TARGET_PROT_DIN_STRIP: | ||
| 2139 | case TARGET_PROT_DOUT_STRIP: | ||
| 2140 | fw_prot_opts |= PO_MODE_DIF_REMOVE; | ||
| 2141 | break; | ||
| 2142 | case TARGET_PROT_DIN_PASS: | ||
| 2143 | case TARGET_PROT_DOUT_PASS: | ||
| 2144 | fw_prot_opts |= PO_MODE_DIF_PASS; | ||
| 2145 | /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ | ||
| 2146 | break; | ||
| 2147 | default:/* Normal Request */ | ||
| 2148 | fw_prot_opts |= PO_MODE_DIF_PASS; | ||
| 2149 | break; | ||
| 2150 | } | ||
| 2151 | |||
| 2152 | |||
| 2153 | /* ---- PKT ---- */ | ||
| 2154 | /* Update entry type to indicate Command Type CRC_2 IOCB */ | ||
| 2155 | pkt->entry_type = CTIO_CRC2; | ||
| 2156 | pkt->entry_count = 1; | ||
| 2157 | pkt->vp_index = vha->vp_idx; | ||
| 2158 | |||
| 2159 | h = qlt_make_handle(vha); | ||
| 2160 | if (unlikely(h == QLA_TGT_NULL_HANDLE)) { | ||
| 2161 | /* | ||
| 2162 | * CTIO type 7 from the firmware doesn't provide a way to | ||
| 2163 | * know the initiator's LOOP ID, hence we can't find | ||
| 2164 | * the session and thus the command. | ||
| 2165 | */ | ||
| 2166 | return -EAGAIN; | ||
| 2167 | } else | ||
| 2168 | ha->tgt.cmds[h-1] = prm->cmd; | ||
| 2169 | |||
| 2170 | |||
| 2171 | pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; | ||
| 2172 | pkt->nport_handle = prm->cmd->loop_id; | ||
| 2173 | pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); | ||
| 2174 | pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | ||
| 2175 | pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | ||
| 2176 | pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; | ||
| 2177 | pkt->exchange_addr = atio->u.isp24.exchange_addr; | ||
| 2178 | pkt->ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); | ||
| 2179 | pkt->flags |= (atio->u.isp24.attr << 9); | ||
| 2180 | pkt->relative_offset = cpu_to_le32(prm->cmd->offset); | ||
| 2181 | |||
| 2182 | /* Set transfer direction */ | ||
| 2183 | if (cmd->dma_data_direction == DMA_TO_DEVICE) | ||
| 2184 | pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN); | ||
| 2185 | else if (cmd->dma_data_direction == DMA_FROM_DEVICE) | ||
| 2186 | pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT); | ||
| 2187 | |||
| 2188 | |||
| 2189 | pkt->dseg_count = prm->tot_dsds; | ||
| 2190 | /* Fibre channel byte count */ | ||
| 2191 | pkt->transfer_length = cpu_to_le32(transfer_length); | ||
| 2192 | |||
| 2193 | |||
| 2194 | /* ----- CRC context -------- */ | ||
| 2195 | |||
| 2196 | /* Allocate CRC context from global pool */ | ||
| 2197 | crc_ctx_pkt = cmd->ctx = | ||
| 2198 | dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); | ||
| 2199 | |||
| 2200 | if (!crc_ctx_pkt) | ||
| 2201 | goto crc_queuing_error; | ||
| 2202 | |||
| 2203 | /* Zero out CTX area. */ | ||
| 2204 | clr_ptr = (uint8_t *)crc_ctx_pkt; | ||
| 2205 | memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); | ||
| 2206 | |||
| 2207 | crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; | ||
| 2208 | INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); | ||
| 2209 | |||
| 2210 | /* Set handle */ | ||
| 2211 | crc_ctx_pkt->handle = pkt->handle; | ||
| 2212 | |||
| 2213 | qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); | ||
| 2214 | |||
| 2215 | pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); | ||
| 2216 | pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); | ||
| 2217 | pkt->crc_context_len = CRC_CONTEXT_LEN_FW; | ||
| 2218 | |||
| 2219 | |||
| 2220 | if (!bundling) { | ||
| 2221 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; | ||
| 2222 | } else { | ||
| 2223 | /* | ||
| 2224 | * Configure bundling if we need to fetch interleaving | ||
| 2225 | * protection PCI accesses | ||
| 2226 | */ | ||
| 2227 | fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; | ||
| 2228 | crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); | ||
| 2229 | crc_ctx_pkt->u.bundling.dseg_count = | ||
| 2230 | cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); | ||
| 2231 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; | ||
| 2232 | } | ||
| 2233 | |||
| 2234 | /* Finish the common fields of CRC pkt */ | ||
| 2235 | crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); | ||
| 2236 | crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); | ||
| 2237 | crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); | ||
| 2238 | crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); | ||
| 2239 | |||
| 2240 | |||
| 2241 | /* Walks data segments */ | ||
| 2242 | pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR); | ||
| 2243 | |||
| 2244 | if (!bundling && prm->prot_seg_cnt) { | ||
| 2245 | if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, | ||
| 2246 | prm->tot_dsds, cmd)) | ||
| 2247 | goto crc_queuing_error; | ||
| 2248 | } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, | ||
| 2249 | (prm->tot_dsds - prm->prot_seg_cnt), cmd)) | ||
| 2250 | goto crc_queuing_error; | ||
| 2251 | |||
| 2252 | if (bundling && prm->prot_seg_cnt) { | ||
| 2253 | /* Walks dif segments */ | ||
| 2254 | pkt->add_flags |= | ||
| 2255 | __constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA); | ||
| 2256 | |||
| 2257 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; | ||
| 2258 | if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, | ||
| 2259 | prm->prot_seg_cnt, cmd)) | ||
| 2260 | goto crc_queuing_error; | ||
| 2261 | } | ||
| 2262 | return QLA_SUCCESS; | ||
| 2263 | |||
| 2264 | crc_queuing_error: | ||
| 2265 | /* Cleanup will be performed by the caller */ | ||
| 2266 | |||
| 2267 | return QLA_FUNCTION_FAILED; | ||
| 2268 | } | ||
| 2269 | |||
| 2270 | |||
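The length math in qlt_build_ctio_crc2_pkt() boils down to 8 bytes of protection information per logical block: for DIN_INSERT/DOUT_STRIP only the data moves on the wire, while the remaining STRIP/INSERT and PASS cases carry data plus PI. A worked standalone example with assumed sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int data_bytes = 65536;                       /* assumed 64 KiB */
            unsigned int blk_sz     = 512;                         /* assumed blocks */
            unsigned int dif_bytes  = (data_bytes / blk_sz) * 8;   /* 1024 bytes PI  */

            unsigned int xfer_insert_strip = data_bytes;               /* 65536 */
            unsigned int xfer_pass         = data_bytes + dif_bytes;   /* 66560 */

            printf("dif=%u insert/strip=%u pass=%u\n",
                   dif_bytes, xfer_insert_strip, xfer_pass);
            return 0;
    }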
| 1905 | /* | 2271 | /* |
| 1906 | * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * | 2272 | * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * |
| 1907 | * QLA_TGT_XMIT_STATUS for >= 24xx silicon | 2273 | * QLA_TGT_XMIT_STATUS for >= 24xx silicon |
| @@ -1921,9 +2287,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
| 1921 | qlt_check_srr_debug(cmd, &xmit_type); | 2287 | qlt_check_srr_debug(cmd, &xmit_type); |
| 1922 | 2288 | ||
| 1923 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, | 2289 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, |
| 1924 | "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " | 2290 | "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", |
| 1925 | "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? | 2291 | (xmit_type & QLA_TGT_XMIT_STATUS) ? |
| 1926 | 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); | 2292 | 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, |
| 2293 | &cmd->se_cmd); | ||
| 1927 | 2294 | ||
| 1928 | res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, | 2295 | res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, |
| 1929 | &full_req_cnt); | 2296 | &full_req_cnt); |
| @@ -1941,7 +2308,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
| 1941 | if (unlikely(res)) | 2308 | if (unlikely(res)) |
| 1942 | goto out_unmap_unlock; | 2309 | goto out_unmap_unlock; |
| 1943 | 2310 | ||
| 1944 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | 2311 | if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) |
| 2312 | res = qlt_build_ctio_crc2_pkt(&prm, vha); | ||
| 2313 | else | ||
| 2314 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
| 1945 | if (unlikely(res != 0)) | 2315 | if (unlikely(res != 0)) |
| 1946 | goto out_unmap_unlock; | 2316 | goto out_unmap_unlock; |
| 1947 | 2317 | ||
| @@ -1953,7 +2323,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
| 1953 | __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | | 2323 | __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | |
| 1954 | CTIO7_FLAGS_STATUS_MODE_0); | 2324 | CTIO7_FLAGS_STATUS_MODE_0); |
| 1955 | 2325 | ||
| 1956 | qlt_load_data_segments(&prm, vha); | 2326 | if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) |
| 2327 | qlt_load_data_segments(&prm, vha); | ||
| 1957 | 2328 | ||
| 1958 | if (prm.add_status_pkt == 0) { | 2329 | if (prm.add_status_pkt == 0) { |
| 1959 | if (xmit_type & QLA_TGT_XMIT_STATUS) { | 2330 | if (xmit_type & QLA_TGT_XMIT_STATUS) { |
| @@ -1983,8 +2354,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
| 1983 | ql_dbg(ql_dbg_tgt, vha, 0xe019, | 2354 | ql_dbg(ql_dbg_tgt, vha, 0xe019, |
| 1984 | "Building additional status packet\n"); | 2355 | "Building additional status packet\n"); |
| 1985 | 2356 | ||
| 2357 | /* | ||
| 2358 | * T10-DIF: ctio_crc2_to_fw overlays on top of | ||
| 2359 | * ctio7_to_24xx | ||
| 2360 | */ | ||
| 1986 | memcpy(ctio, pkt, sizeof(*ctio)); | 2361 | memcpy(ctio, pkt, sizeof(*ctio)); |
| 2362 | /* reset back to CTIO7 */ | ||
| 1987 | ctio->entry_count = 1; | 2363 | ctio->entry_count = 1; |
| 2364 | ctio->entry_type = CTIO_TYPE7; | ||
| 1988 | ctio->dseg_count = 0; | 2365 | ctio->dseg_count = 0; |
| 1989 | ctio->u.status1.flags &= ~__constant_cpu_to_le16( | 2366 | ctio->u.status1.flags &= ~__constant_cpu_to_le16( |
| 1990 | CTIO7_FLAGS_DATA_IN); | 2367 | CTIO7_FLAGS_DATA_IN); |
| @@ -1993,6 +2370,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
| 1993 | pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; | 2370 | pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; |
| 1994 | pkt->u.status0.flags |= __constant_cpu_to_le16( | 2371 | pkt->u.status0.flags |= __constant_cpu_to_le16( |
| 1995 | CTIO7_FLAGS_DONT_RET_CTIO); | 2372 | CTIO7_FLAGS_DONT_RET_CTIO); |
| 2373 | |||
| 2374 | /* qlt_24xx_init_ctio_to_isp will correct | ||
| 2375 | * all necessary fields that are part of CTIO7. | ||
| 2376 | * There should be no residual of CTIO-CRC2 data. | ||
| 2377 | */ | ||
| 1996 | qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, | 2378 | qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, |
| 1997 | &prm); | 2379 | &prm); |
| 1998 | pr_debug("Status CTIO7: %p\n", ctio); | 2380 | pr_debug("Status CTIO7: %p\n", ctio); |
| @@ -2041,8 +2423,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) | |||
| 2041 | if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) | 2423 | if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) |
| 2042 | return -EIO; | 2424 | return -EIO; |
| 2043 | 2425 | ||
| 2044 | ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", | 2426 | ql_dbg(ql_dbg_tgt, vha, 0xe01b, |
| 2045 | (int)vha->vp_idx); | 2427 | "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n", |
| 2428 | __func__, (int)vha->vp_idx, &cmd->se_cmd, | ||
| 2429 | be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); | ||
| 2046 | 2430 | ||
| 2047 | /* Calculate number of entries and segments required */ | 2431 | /* Calculate number of entries and segments required */ |
| 2048 | if (qlt_pci_map_calc_cnt(&prm) != 0) | 2432 | if (qlt_pci_map_calc_cnt(&prm) != 0) |
| @@ -2054,14 +2438,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) | |||
| 2054 | res = qlt_check_reserve_free_req(vha, prm.req_cnt); | 2438 | res = qlt_check_reserve_free_req(vha, prm.req_cnt); |
| 2055 | if (res != 0) | 2439 | if (res != 0) |
| 2056 | goto out_unlock_free_unmap; | 2440 | goto out_unlock_free_unmap; |
| 2441 | if (cmd->se_cmd.prot_op) | ||
| 2442 | res = qlt_build_ctio_crc2_pkt(&prm, vha); | ||
| 2443 | else | ||
| 2444 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
| 2057 | 2445 | ||
| 2058 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
| 2059 | if (unlikely(res != 0)) | 2446 | if (unlikely(res != 0)) |
| 2060 | goto out_unlock_free_unmap; | 2447 | goto out_unlock_free_unmap; |
| 2061 | pkt = (struct ctio7_to_24xx *)prm.pkt; | 2448 | pkt = (struct ctio7_to_24xx *)prm.pkt; |
| 2062 | pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | | 2449 | pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | |
| 2063 | CTIO7_FLAGS_STATUS_MODE_0); | 2450 | CTIO7_FLAGS_STATUS_MODE_0); |
| 2064 | qlt_load_data_segments(&prm, vha); | 2451 | |
| 2452 | if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) | ||
| 2453 | qlt_load_data_segments(&prm, vha); | ||
| 2065 | 2454 | ||
| 2066 | cmd->state = QLA_TGT_STATE_NEED_DATA; | 2455 | cmd->state = QLA_TGT_STATE_NEED_DATA; |
| 2067 | 2456 | ||
| @@ -2079,6 +2468,143 @@ out_unlock_free_unmap: | |||
| 2079 | } | 2468 | } |
| 2080 | EXPORT_SYMBOL(qlt_rdy_to_xfer); | 2469 | EXPORT_SYMBOL(qlt_rdy_to_xfer); |
| 2081 | 2470 | ||
| 2471 | |||
| 2472 | /* | ||
| 2473 | * Checks the guard or meta-data for the type of error | ||
| 2474 | * detected by the HBA. | ||
| 2475 | */ | ||
| 2476 | static inline int | ||
| 2477 | qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, | ||
| 2478 | struct ctio_crc_from_fw *sts) | ||
| 2479 | { | ||
| 2480 | uint8_t *ap = &sts->actual_dif[0]; | ||
| 2481 | uint8_t *ep = &sts->expected_dif[0]; | ||
| 2482 | uint32_t e_ref_tag, a_ref_tag; | ||
| 2483 | uint16_t e_app_tag, a_app_tag; | ||
| 2484 | uint16_t e_guard, a_guard; | ||
| 2485 | uint64_t lba = cmd->se_cmd.t_task_lba; | ||
| 2486 | |||
| 2487 | a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); | ||
| 2488 | a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); | ||
| 2489 | a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); | ||
| 2490 | |||
| 2491 | e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); | ||
| 2492 | e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); | ||
| 2493 | e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); | ||
| 2494 | |||
| 2495 | ql_dbg(ql_dbg_tgt, vha, 0xe075, | ||
| 2496 | "iocb(s) %p Returned STATUS.\n", sts); | ||
| 2497 | |||
| 2498 | ql_dbg(ql_dbg_tgt, vha, 0xf075, | ||
| 2499 | "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", | ||
| 2500 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
| 2501 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); | ||
| 2502 | |||
| 2503 | /* | ||
| 2504 | * Ignore sector if: | ||
| 2505 | * For type 3: ref & app tag is all 'f's | ||
| 2506 | * For type 0,1,2: app tag is all 'f's | ||
| 2507 | */ | ||
| 2508 | if ((a_app_tag == 0xffff) && | ||
| 2509 | ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || | ||
| 2510 | (a_ref_tag == 0xffffffff))) { | ||
| 2511 | uint32_t blocks_done; | ||
| 2512 | |||
| 2513 | /* 2TB boundary case covered automatically with this */ | ||
| 2514 | blocks_done = e_ref_tag - (uint32_t)lba + 1; | ||
| 2515 | cmd->se_cmd.bad_sector = e_ref_tag; | ||
| 2516 | cmd->se_cmd.pi_err = 0; | ||
| 2517 | ql_dbg(ql_dbg_tgt, vha, 0xf074, | ||
| 2518 | "need to return scsi good\n"); | ||
| 2519 | |||
| 2520 | /* Update protection tag */ | ||
| 2521 | if (cmd->prot_sg_cnt) { | ||
| 2522 | uint32_t i, j = 0, k = 0, num_ent; | ||
| 2523 | struct scatterlist *sg, *sgl; | ||
| 2524 | |||
| 2525 | |||
| 2526 | sgl = cmd->prot_sg; | ||
| 2527 | |||
| 2528 | /* Patch the corresponding protection tags */ | ||
| 2529 | for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { | ||
| 2530 | num_ent = sg_dma_len(sg) / 8; | ||
| 2531 | if (k + num_ent < blocks_done) { | ||
| 2532 | k += num_ent; | ||
| 2533 | continue; | ||
| 2534 | } | ||
| 2535 | j = blocks_done - k - 1; | ||
| 2536 | k = blocks_done; | ||
| 2537 | break; | ||
| 2538 | } | ||
| 2539 | |||
| 2540 | if (k != blocks_done) { | ||
| 2541 | ql_log(ql_log_warn, vha, 0xf076, | ||
| 2542 | "unexpected tag values tag:lba=%u:%llu)\n", | ||
| 2543 | e_ref_tag, (unsigned long long)lba); | ||
| 2544 | goto out; | ||
| 2545 | } | ||
| 2546 | |||
| 2547 | #if 0 | ||
| 2548 | struct sd_dif_tuple *spt; | ||
| 2549 | /* TODO: | ||
| 2550 | * This section came from initiator. Is it valid here? | ||
| 2551 | * Should the ULP value be overridden with the actual value? | ||
| 2552 | */ | ||
| 2553 | spt = page_address(sg_page(sg)) + sg->offset; | ||
| 2554 | spt += j; | ||
| 2555 | |||
| 2556 | spt->app_tag = 0xffff; | ||
| 2557 | if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) | ||
| 2558 | spt->ref_tag = 0xffffffff; | ||
| 2559 | #endif | ||
| 2560 | } | ||
| 2561 | |||
| 2562 | return 0; | ||
| 2563 | } | ||
| 2564 | |||
| 2565 | /* check guard */ | ||
| 2566 | if (e_guard != a_guard) { | ||
| 2567 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; | ||
| 2568 | cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; | ||
| 2569 | |||
| 2570 | ql_log(ql_log_warn, vha, 0xe076, | ||
| 2571 | "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | ||
| 2572 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
| 2573 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | ||
| 2574 | a_guard, e_guard, cmd); | ||
| 2575 | goto out; | ||
| 2576 | } | ||
| 2577 | |||
| 2578 | /* check ref tag */ | ||
| 2579 | if (e_ref_tag != a_ref_tag) { | ||
| 2580 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; | ||
| 2581 | cmd->se_cmd.bad_sector = e_ref_tag; | ||
| 2582 | |||
| 2583 | ql_log(ql_log_warn, vha, 0xe077, | ||
| 2584 | "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | ||
| 2585 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
| 2586 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | ||
| 2587 | a_guard, e_guard, cmd); | ||
| 2588 | goto out; | ||
| 2589 | } | ||
| 2590 | |||
| 2591 | /* check appl tag */ | ||
| 2592 | if (e_app_tag != a_app_tag) { | ||
| 2593 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; | ||
| 2594 | cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; | ||
| 2595 | |||
| 2596 | ql_log(ql_log_warn, vha, 0xe078, | ||
| 2597 | "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | ||
| 2598 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
| 2599 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | ||
| 2600 | a_guard, e_guard, cmd); | ||
| 2601 | goto out; | ||
| 2602 | } | ||
| 2603 | out: | ||
| 2604 | return 1; | ||
| 2605 | } | ||
| 2606 | |||
| 2607 | |||
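The actual_dif[]/expected_dif[] buffers parsed above each hold one 8-byte T10 PI tuple: a 16-bit guard, a 16-bit application tag and a 32-bit reference tag, all big-endian. A standalone sketch of the same decode with a made-up tuple:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>          /* ntohs()/ntohl() for the big-endian fields */

    int main(void)
    {
            /* assumed tuple: guard 0xbeef, app tag 0x0000, ref tag 0x00001000 */
            uint8_t dif[8] = { 0xbe, 0xef, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00 };
            uint16_t guard, app;
            uint32_t ref;

            memcpy(&guard, dif + 0, sizeof(guard));
            memcpy(&app,   dif + 2, sizeof(app));
            memcpy(&ref,   dif + 4, sizeof(ref));

            printf("guard=0x%04x app=0x%04x ref=0x%08x\n",
                   ntohs(guard), ntohs(app), ntohl(ref));
            return 0;
    }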
| 2082 | /* If hardware_lock held on entry, might drop it, then reacquire */ | 2608 | /* If hardware_lock held on entry, might drop it, then reacquire */ |
| 2083 | /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ | 2609 | /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ |
| 2084 | static int __qlt_send_term_exchange(struct scsi_qla_host *vha, | 2610 | static int __qlt_send_term_exchange(struct scsi_qla_host *vha, |
| @@ -2155,18 +2681,36 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, | |||
| 2155 | rc = __qlt_send_term_exchange(vha, cmd, atio); | 2681 | rc = __qlt_send_term_exchange(vha, cmd, atio); |
| 2156 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | 2682 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); |
| 2157 | done: | 2683 | done: |
| 2158 | if (rc == 1) { | 2684 | /* |
| 2685 | * Terminate exchange will tell fw to release any active CTIO | ||
| 2686 | * that's in FW possession and clean up the exchange. | ||
| 2687 | * | ||
| 2688 | * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still | ||
| 2689 | * down at FW. Free the cmd later, when the CTIO comes back | ||
| 2690 | * with aborted (0x2) status. | ||
| 2691 | * | ||
| 2692 | * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already | ||
| 2693 | * back with some error. Free the cmd now. | ||
| 2694 | */ | ||
| 2695 | if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) { | ||
| 2159 | if (!ha_locked && !in_interrupt()) | 2696 | if (!ha_locked && !in_interrupt()) |
| 2160 | msleep(250); /* just in case */ | 2697 | msleep(250); /* just in case */ |
| 2161 | 2698 | ||
| 2699 | if (cmd->sg_mapped) | ||
| 2700 | qlt_unmap_sg(vha, cmd); | ||
| 2162 | vha->hw->tgt.tgt_ops->free_cmd(cmd); | 2701 | vha->hw->tgt.tgt_ops->free_cmd(cmd); |
| 2163 | } | 2702 | } |
| 2703 | return; | ||
| 2164 | } | 2704 | } |
| 2165 | 2705 | ||
| 2166 | void qlt_free_cmd(struct qla_tgt_cmd *cmd) | 2706 | void qlt_free_cmd(struct qla_tgt_cmd *cmd) |
| 2167 | { | 2707 | { |
| 2168 | BUG_ON(cmd->sg_mapped); | 2708 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, |
| 2709 | "%s: se_cmd[%p] ox_id %04x\n", | ||
| 2710 | __func__, &cmd->se_cmd, | ||
| 2711 | be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); | ||
| 2169 | 2712 | ||
| 2713 | BUG_ON(cmd->sg_mapped); | ||
| 2170 | if (unlikely(cmd->free_sg)) | 2714 | if (unlikely(cmd->free_sg)) |
| 2171 | kfree(cmd->sg); | 2715 | kfree(cmd->sg); |
| 2172 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); | 2716 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); |
| @@ -2374,6 +2918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
| 2374 | case CTIO_LIP_RESET: | 2918 | case CTIO_LIP_RESET: |
| 2375 | case CTIO_TARGET_RESET: | 2919 | case CTIO_TARGET_RESET: |
| 2376 | case CTIO_ABORTED: | 2920 | case CTIO_ABORTED: |
| 2921 | /* driver requested abort via Terminate exchange */ | ||
| 2377 | case CTIO_TIMEOUT: | 2922 | case CTIO_TIMEOUT: |
| 2378 | case CTIO_INVALID_RX_ID: | 2923 | case CTIO_INVALID_RX_ID: |
| 2379 | /* They are OK */ | 2924 | /* They are OK */ |
| @@ -2404,18 +2949,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
| 2404 | else | 2949 | else |
| 2405 | return; | 2950 | return; |
| 2406 | 2951 | ||
| 2952 | case CTIO_DIF_ERROR: { | ||
| 2953 | struct ctio_crc_from_fw *crc = | ||
| 2954 | (struct ctio_crc_from_fw *)ctio; | ||
| 2955 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, | ||
| 2956 | "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", | ||
| 2957 | vha->vp_idx, status, cmd->state, se_cmd, | ||
| 2958 | *((u64 *)&crc->actual_dif[0]), | ||
| 2959 | *((u64 *)&crc->expected_dif[0])); | ||
| 2960 | |||
| 2961 | if (qlt_handle_dif_error(vha, cmd, ctio)) { | ||
| 2962 | if (cmd->state == QLA_TGT_STATE_NEED_DATA) { | ||
| 2963 | /* scsi Write/xfer rdy complete */ | ||
| 2964 | goto skip_term; | ||
| 2965 | } else { | ||
| 2966 | /* scsi read/xmit respond complete | ||
| 2967 | * call handle dif to send scsi status | ||
| 2968 | * rather than terminate exchange. | ||
| 2969 | */ | ||
| 2970 | cmd->state = QLA_TGT_STATE_PROCESSED; | ||
| 2971 | ha->tgt.tgt_ops->handle_dif_err(cmd); | ||
| 2972 | return; | ||
| 2973 | } | ||
| 2974 | } else { | ||
| 2975 | /* Need to generate a SCSI good completion | ||
| 2976 | * because FW did not send scsi status. | ||
| 2977 | */ | ||
| 2978 | status = 0; | ||
| 2979 | goto skip_term; | ||
| 2980 | } | ||
| 2981 | break; | ||
| 2982 | } | ||
| 2407 | default: | 2983 | default: |
| 2408 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, | 2984 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, |
| 2409 | "qla_target(%d): CTIO with error status " | 2985 | "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", |
| 2410 | "0x%x received (state %x, se_cmd %p\n", | ||
| 2411 | vha->vp_idx, status, cmd->state, se_cmd); | 2986 | vha->vp_idx, status, cmd->state, se_cmd); |
| 2412 | break; | 2987 | break; |
| 2413 | } | 2988 | } |
| 2414 | 2989 | ||
| 2415 | if (cmd->state != QLA_TGT_STATE_NEED_DATA) | 2990 | |
| 2991 | /* "cmd->state == QLA_TGT_STATE_ABORTED" means | ||
| 2992 | * cmd is already aborted/terminated, so we don't | ||
| 2993 | * need to terminate again. The exchange is already | ||
| 2994 | * cleaned up/freed at FW level. Just clean up at the | ||
| 2995 | * driver level. | ||
| 2996 | */ | ||
| 2997 | if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && | ||
| 2998 | (cmd->state != QLA_TGT_STATE_ABORTED)) { | ||
| 2416 | if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) | 2999 | if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) |
| 2417 | return; | 3000 | return; |
| 3001 | } | ||
| 2418 | } | 3002 | } |
| 3003 | skip_term: | ||
| 2419 | 3004 | ||
| 2420 | if (cmd->state == QLA_TGT_STATE_PROCESSED) { | 3005 | if (cmd->state == QLA_TGT_STATE_PROCESSED) { |
| 2421 | ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); | 3006 | ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); |
| @@ -2444,7 +3029,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
| 2444 | "not return a CTIO complete\n", vha->vp_idx, cmd->state); | 3029 | "not return a CTIO complete\n", vha->vp_idx, cmd->state); |
| 2445 | } | 3030 | } |
| 2446 | 3031 | ||
| 2447 | if (unlikely(status != CTIO_SUCCESS)) { | 3032 | if (unlikely(status != CTIO_SUCCESS) && |
| 3033 | (cmd->state != QLA_TGT_STATE_ABORTED)) { | ||
| 2448 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); | 3034 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); |
| 2449 | dump_stack(); | 3035 | dump_stack(); |
| 2450 | } | 3036 | } |
| @@ -2563,8 +3149,9 @@ static void qlt_do_work(struct work_struct *work) | |||
| 2563 | atio->u.isp24.fcp_cmnd.add_cdb_len])); | 3149 | atio->u.isp24.fcp_cmnd.add_cdb_len])); |
| 2564 | 3150 | ||
| 2565 | ql_dbg(ql_dbg_tgt, vha, 0xe022, | 3151 | ql_dbg(ql_dbg_tgt, vha, 0xe022, |
| 2566 | "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", | 3152 | "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n", |
| 2567 | cmd, cmd->unpacked_lun, cmd->tag); | 3153 | cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length, |
| 3154 | cmd->atio.u.isp24.fcp_hdr.ox_id); | ||
| 2568 | 3155 | ||
| 2569 | ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, | 3156 | ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, |
| 2570 | fcp_task_attr, data_dir, bidi); | 3157 | fcp_task_attr, data_dir, bidi); |
| @@ -3527,11 +4114,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, | |||
| 3527 | switch (atio->u.raw.entry_type) { | 4114 | switch (atio->u.raw.entry_type) { |
| 3528 | case ATIO_TYPE7: | 4115 | case ATIO_TYPE7: |
| 3529 | ql_dbg(ql_dbg_tgt, vha, 0xe02d, | 4116 | ql_dbg(ql_dbg_tgt, vha, 0xe02d, |
| 3530 | "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " | 4117 | "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n", |
| 3531 | "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", | ||
| 3532 | vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, | 4118 | vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, |
| 3533 | atio->u.isp24.fcp_cmnd.rddata, | 4119 | atio->u.isp24.fcp_cmnd.rddata, |
| 3534 | atio->u.isp24.fcp_cmnd.wrdata, | 4120 | atio->u.isp24.fcp_cmnd.wrdata, |
| 4121 | atio->u.isp24.fcp_cmnd.cdb[0], | ||
| 3535 | atio->u.isp24.fcp_cmnd.add_cdb_len, | 4122 | atio->u.isp24.fcp_cmnd.add_cdb_len, |
| 3536 | be32_to_cpu(get_unaligned((uint32_t *) | 4123 | be32_to_cpu(get_unaligned((uint32_t *) |
| 3537 | &atio->u.isp24.fcp_cmnd.add_cdb[ | 4124 | &atio->u.isp24.fcp_cmnd.add_cdb[ |
| @@ -3629,11 +4216,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) | |||
| 3629 | tgt->irq_cmd_count++; | 4216 | tgt->irq_cmd_count++; |
| 3630 | 4217 | ||
| 3631 | switch (pkt->entry_type) { | 4218 | switch (pkt->entry_type) { |
| 4219 | case CTIO_CRC2: | ||
| 3632 | case CTIO_TYPE7: | 4220 | case CTIO_TYPE7: |
| 3633 | { | 4221 | { |
| 3634 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; | 4222 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; |
| 3635 | ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", | 4223 | ql_dbg(ql_dbg_tgt, vha, 0xe030, |
| 3636 | vha->vp_idx); | 4224 | "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", |
| 4225 | entry->entry_type, vha->vp_idx); | ||
| 3637 | qlt_do_ctio_completion(vha, entry->handle, | 4226 | qlt_do_ctio_completion(vha, entry->handle, |
| 3638 | le16_to_cpu(entry->status)|(pkt->entry_status << 16), | 4227 | le16_to_cpu(entry->status)|(pkt->entry_status << 16), |
| 3639 | entry); | 4228 | entry); |
| @@ -4768,6 +5357,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha, | |||
| 4768 | case ABTS_RESP_24XX: | 5357 | case ABTS_RESP_24XX: |
| 4769 | case CTIO_TYPE7: | 5358 | case CTIO_TYPE7: |
| 4770 | case NOTIFY_ACK_TYPE: | 5359 | case NOTIFY_ACK_TYPE: |
| 5360 | case CTIO_CRC2: | ||
| 4771 | return 1; | 5361 | return 1; |
| 4772 | default: | 5362 | default: |
| 4773 | return 0; | 5363 | return 0; |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index ce33d8c26406..f873e10451d2 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
| @@ -293,6 +293,7 @@ struct ctio_to_2xxx { | |||
| 293 | #define CTIO_ABORTED 0x02 | 293 | #define CTIO_ABORTED 0x02 |
| 294 | #define CTIO_INVALID_RX_ID 0x08 | 294 | #define CTIO_INVALID_RX_ID 0x08 |
| 295 | #define CTIO_TIMEOUT 0x0B | 295 | #define CTIO_TIMEOUT 0x0B |
| 296 | #define CTIO_DIF_ERROR 0x0C /* DIF error detected */ | ||
| 296 | #define CTIO_LIP_RESET 0x0E | 297 | #define CTIO_LIP_RESET 0x0E |
| 297 | #define CTIO_TARGET_RESET 0x17 | 298 | #define CTIO_TARGET_RESET 0x17 |
| 298 | #define CTIO_PORT_UNAVAILABLE 0x28 | 299 | #define CTIO_PORT_UNAVAILABLE 0x28 |
| @@ -498,11 +499,12 @@ struct ctio7_from_24xx { | |||
| 498 | #define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 | 499 | #define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 |
| 499 | #define CTIO7_FLAGS_STATUS_MODE_0 0 | 500 | #define CTIO7_FLAGS_STATUS_MODE_0 0 |
| 500 | #define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 | 501 | #define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 |
| 502 | #define CTIO7_FLAGS_STATUS_MODE_2 BIT_7 | ||
| 501 | #define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 | 503 | #define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 |
| 502 | #define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 | 504 | #define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 |
| 503 | #define CTIO7_FLAGS_DSD_PTR BIT_2 | 505 | #define CTIO7_FLAGS_DSD_PTR BIT_2 |
| 504 | #define CTIO7_FLAGS_DATA_IN BIT_1 | 506 | #define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */ |
| 505 | #define CTIO7_FLAGS_DATA_OUT BIT_0 | 507 | #define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */ |
| 506 | 508 | ||
| 507 | #define ELS_PLOGI 0x3 | 509 | #define ELS_PLOGI 0x3 |
| 508 | #define ELS_FLOGI 0x4 | 510 | #define ELS_FLOGI 0x4 |
| @@ -514,6 +516,68 @@ struct ctio7_from_24xx { | |||
| 514 | #define ELS_ADISC 0x52 | 516 | #define ELS_ADISC 0x52 |
| 515 | 517 | ||
| 516 | /* | 518 | /* |
| 519 | *CTIO Type CRC_2 IOCB | ||
| 520 | */ | ||
| 521 | struct ctio_crc2_to_fw { | ||
| 522 | uint8_t entry_type; /* Entry type. */ | ||
| 523 | #define CTIO_CRC2 0x7A | ||
| 524 | uint8_t entry_count; /* Entry count. */ | ||
| 525 | uint8_t sys_define; /* System defined. */ | ||
| 526 | uint8_t entry_status; /* Entry Status. */ | ||
| 527 | |||
| 528 | uint32_t handle; /* System handle. */ | ||
| 529 | uint16_t nport_handle; /* N_PORT handle. */ | ||
| 530 | uint16_t timeout; /* Command timeout. */ | ||
| 531 | |||
| 532 | uint16_t dseg_count; /* Data segment count. */ | ||
| 533 | uint8_t vp_index; | ||
| 534 | uint8_t add_flags; /* additional flags */ | ||
| 535 | #define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 | ||
| 536 | |||
| 537 | uint8_t initiator_id[3]; /* initiator ID */ | ||
| 538 | uint8_t reserved1; | ||
| 539 | uint32_t exchange_addr; /* rcv exchange address */ | ||
| 540 | uint16_t reserved2; | ||
| 541 | uint16_t flags; /* refer to CTIO7 flags values */ | ||
| 542 | uint32_t residual; | ||
| 543 | uint16_t ox_id; | ||
| 544 | uint16_t scsi_status; | ||
| 545 | uint32_t relative_offset; | ||
| 546 | uint32_t reserved5; | ||
| 547 | uint32_t transfer_length; /* total fc transfer length */ | ||
| 548 | uint32_t reserved6; | ||
| 549 | uint32_t crc_context_address[2];/* Data segment address. */ | ||
| 550 | uint16_t crc_context_len; /* Data segment length. */ | ||
| 551 | uint16_t reserved_1; /* MUST be set to 0. */ | ||
| 552 | } __packed; | ||
| 553 | |||
| 554 | /* CTIO Type CRC_x Status IOCB */ | ||
| 555 | struct ctio_crc_from_fw { | ||
| 556 | uint8_t entry_type; /* Entry type. */ | ||
| 557 | uint8_t entry_count; /* Entry count. */ | ||
| 558 | uint8_t sys_define; /* System defined. */ | ||
| 559 | uint8_t entry_status; /* Entry Status. */ | ||
| 560 | |||
| 561 | uint32_t handle; /* System handle. */ | ||
| 562 | uint16_t status; | ||
| 563 | uint16_t timeout; /* Command timeout. */ | ||
| 564 | uint16_t dseg_count; /* Data segment count. */ | ||
| 565 | uint32_t reserved1; | ||
| 566 | uint16_t state_flags; | ||
| 567 | #define CTIO_CRC_SF_DIF_CHOPPED BIT_4 | ||
| 568 | |||
| 569 | uint32_t exchange_address; /* rcv exchange address */ | ||
| 570 | uint16_t reserved2; | ||
| 571 | uint16_t flags; | ||
| 572 | uint32_t resid_xfer_length; | ||
| 573 | uint16_t ox_id; | ||
| 574 | uint8_t reserved3[12]; | ||
| 575 | uint16_t runt_guard; /* reported runt blk guard */ | ||
| 576 | uint8_t actual_dif[8]; | ||
| 577 | uint8_t expected_dif[8]; | ||
| 578 | } __packed; | ||
| 579 | |||
| 580 | /* | ||
| 517 | * ISP queue - ABTS received/response entries structure definition for 24xx. | 581 | * ISP queue - ABTS received/response entries structure definition for 24xx. |
| 518 | */ | 582 | */ |
| 519 | #define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ | 583 | #define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ |
| @@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl { | |||
| 641 | int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, | 705 | int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, |
| 642 | unsigned char *, uint32_t, int, int, int); | 706 | unsigned char *, uint32_t, int, int, int); |
| 643 | void (*handle_data)(struct qla_tgt_cmd *); | 707 | void (*handle_data)(struct qla_tgt_cmd *); |
| 708 | void (*handle_dif_err)(struct qla_tgt_cmd *); | ||
| 644 | int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, | 709 | int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, |
| 645 | uint32_t); | 710 | uint32_t); |
| 646 | void (*free_cmd)(struct qla_tgt_cmd *); | 711 | void (*free_cmd)(struct qla_tgt_cmd *); |
| @@ -829,9 +894,9 @@ struct qla_tgt_sess { | |||
| 829 | }; | 894 | }; |
| 830 | 895 | ||
| 831 | struct qla_tgt_cmd { | 896 | struct qla_tgt_cmd { |
| 897 | struct se_cmd se_cmd; | ||
| 832 | struct qla_tgt_sess *sess; | 898 | struct qla_tgt_sess *sess; |
| 833 | int state; | 899 | int state; |
| 834 | struct se_cmd se_cmd; | ||
| 835 | struct work_struct free_work; | 900 | struct work_struct free_work; |
| 836 | struct work_struct work; | 901 | struct work_struct work; |
| 837 | /* Sense buffer that will be mapped into outgoing status */ | 902 | /* Sense buffer that will be mapped into outgoing status */ |
| @@ -843,6 +908,7 @@ struct qla_tgt_cmd { | |||
| 843 | unsigned int free_sg:1; | 908 | unsigned int free_sg:1; |
| 844 | unsigned int aborted:1; /* Needed in case of SRR */ | 909 | unsigned int aborted:1; /* Needed in case of SRR */ |
| 845 | unsigned int write_data_transferred:1; | 910 | unsigned int write_data_transferred:1; |
| 911 | unsigned int ctx_dsd_alloced:1; | ||
| 846 | 912 | ||
| 847 | struct scatterlist *sg; /* cmd data buffer SG vector */ | 913 | struct scatterlist *sg; /* cmd data buffer SG vector */ |
| 848 | int sg_cnt; /* SG segments count */ | 914 | int sg_cnt; /* SG segments count */ |
| @@ -857,6 +923,12 @@ struct qla_tgt_cmd { | |||
| 857 | struct scsi_qla_host *vha; | 923 | struct scsi_qla_host *vha; |
| 858 | 924 | ||
| 859 | struct atio_from_isp atio; | 925 | struct atio_from_isp atio; |
| 926 | /* t10dif */ | ||
| 927 | struct scatterlist *prot_sg; | ||
| 928 | uint32_t prot_sg_cnt; | ||
| 929 | uint32_t blk_sz; | ||
| 930 | struct crc_context *ctx; | ||
| 931 | |||
| 860 | }; | 932 | }; |
| 861 | 933 | ||
| 862 | struct qla_tgt_sess_work_param { | 934 | struct qla_tgt_sess_work_param { |
| @@ -901,6 +973,10 @@ struct qla_tgt_prm { | |||
| 901 | int sense_buffer_len; | 973 | int sense_buffer_len; |
| 902 | int residual; | 974 | int residual; |
| 903 | int add_status_pkt; | 975 | int add_status_pkt; |
| 976 | /* dif */ | ||
| 977 | struct scatterlist *prot_sg; | ||
| 978 | uint16_t prot_seg_cnt; | ||
| 979 | uint16_t tot_dsds; | ||
| 904 | }; | 980 | }; |
| 905 | 981 | ||
| 906 | struct qla_tgt_srr_imm { | 982 | struct qla_tgt_srr_imm { |
| @@ -976,6 +1052,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *, | |||
| 976 | extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); | 1052 | extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); |
| 977 | extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); | 1053 | extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); |
| 978 | extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); | 1054 | extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); |
| 1055 | extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *); | ||
| 1056 | extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t); | ||
| 979 | extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); | 1057 | extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); |
| 980 | extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); | 1058 | extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); |
| 981 | extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); | 1059 | extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); |
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index a804e9b744bb..cb9a0c4bc419 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -201,7 +201,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg, | |||
| 201 | ql_dbg(ql_dbg_misc, NULL, 0xd014, | 201 | ql_dbg(ql_dbg_misc, NULL, 0xd014, |
| 202 | "%s: @%x\n", __func__, offset); | 202 | "%s: @%x\n", __func__, offset); |
| 203 | } | 203 | } |
| 204 | qla27xx_insert32(offset, buf, len); | ||
| 205 | qla27xx_read32(window, buf, len); | 204 | qla27xx_read32(window, buf, len); |
| 206 | } | 205 | } |
| 207 | 206 | ||
| @@ -220,7 +219,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg, | |||
| 220 | 219 | ||
| 221 | static inline void | 220 | static inline void |
| 222 | qla27xx_read_window(__iomem struct device_reg_24xx *reg, | 221 | qla27xx_read_window(__iomem struct device_reg_24xx *reg, |
| 223 | uint32_t base, uint offset, uint count, uint width, void *buf, | 222 | uint32_t addr, uint offset, uint count, uint width, void *buf, |
| 224 | ulong *len) | 223 | ulong *len) |
| 225 | { | 224 | { |
| 226 | void *window = (void *)reg + offset; | 225 | void *window = (void *)reg + offset; |
| @@ -229,14 +228,14 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg, | |||
| 229 | if (buf) { | 228 | if (buf) { |
| 230 | ql_dbg(ql_dbg_misc, NULL, 0xd016, | 229 | ql_dbg(ql_dbg_misc, NULL, 0xd016, |
| 231 | "%s: base=%x offset=%x count=%x width=%x\n", | 230 | "%s: base=%x offset=%x count=%x width=%x\n", |
| 232 | __func__, base, offset, count, width); | 231 | __func__, addr, offset, count, width); |
| 233 | } | 232 | } |
| 234 | qla27xx_write_reg(reg, IOBASE_ADDR, base, buf); | 233 | qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); |
| 235 | while (count--) { | 234 | while (count--) { |
| 236 | qla27xx_insert32(base, buf, len); | 235 | qla27xx_insert32(addr, buf, len); |
| 237 | readn(window, buf, len); | 236 | readn(window, buf, len); |
| 238 | window += width; | 237 | window += width; |
| 239 | base += width; | 238 | addr++; |
| 240 | } | 239 | } |
| 241 | } | 240 | } |
| 242 | 241 | ||
| @@ -336,7 +335,8 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, | |||
| 336 | 335 | ||
| 337 | ql_dbg(ql_dbg_misc, vha, 0xd204, | 336 | ql_dbg(ql_dbg_misc, vha, 0xd204, |
| 338 | "%s: rdpci [%lx]\n", __func__, *len); | 337 | "%s: rdpci [%lx]\n", __func__, *len); |
| 339 | qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len); | 338 | qla27xx_insert32(ent->t260.pci_offset, buf, len); |
| 339 | qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len); | ||
| 340 | 340 | ||
| 341 | return false; | 341 | return false; |
| 342 | } | 342 | } |
| @@ -349,7 +349,7 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, | |||
| 349 | 349 | ||
| 350 | ql_dbg(ql_dbg_misc, vha, 0xd205, | 350 | ql_dbg(ql_dbg_misc, vha, 0xd205, |
| 351 | "%s: wrpci [%lx]\n", __func__, *len); | 351 | "%s: wrpci [%lx]\n", __func__, *len); |
| 352 | qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf); | 352 | qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf); |
| 353 | 353 | ||
| 354 | return false; | 354 | return false; |
| 355 | } | 355 | } |
| @@ -392,9 +392,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, | |||
| 392 | goto done; | 392 | goto done; |
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | if (end < start) { | 395 | if (end < start || end == 0) { |
| 396 | ql_dbg(ql_dbg_misc, vha, 0xd023, | 396 | ql_dbg(ql_dbg_misc, vha, 0xd023, |
| 397 | "%s: bad range (start=%x end=%x)\n", __func__, | 397 | "%s: unusable range (start=%x end=%x)\n", __func__, |
| 398 | ent->t262.end_addr, ent->t262.start_addr); | 398 | ent->t262.end_addr, ent->t262.start_addr); |
| 399 | qla27xx_skip_entry(ent, buf); | 399 | qla27xx_skip_entry(ent, buf); |
| 400 | goto done; | 400 | goto done; |
| @@ -452,17 +452,15 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, | |||
| 452 | ql_dbg(ql_dbg_misc, vha, 0xd025, | 452 | ql_dbg(ql_dbg_misc, vha, 0xd025, |
| 453 | "%s: unsupported atio queue\n", __func__); | 453 | "%s: unsupported atio queue\n", __func__); |
| 454 | qla27xx_skip_entry(ent, buf); | 454 | qla27xx_skip_entry(ent, buf); |
| 455 | goto done; | ||
| 456 | } else { | 455 | } else { |
| 457 | ql_dbg(ql_dbg_misc, vha, 0xd026, | 456 | ql_dbg(ql_dbg_misc, vha, 0xd026, |
| 458 | "%s: unknown queue %u\n", __func__, ent->t263.queue_type); | 457 | "%s: unknown queue %u\n", __func__, ent->t263.queue_type); |
| 459 | qla27xx_skip_entry(ent, buf); | 458 | qla27xx_skip_entry(ent, buf); |
| 460 | goto done; | ||
| 461 | } | 459 | } |
| 462 | 460 | ||
| 463 | if (buf) | 461 | if (buf) |
| 464 | ent->t263.num_queues = count; | 462 | ent->t263.num_queues = count; |
| 465 | done: | 463 | |
| 466 | return false; | 464 | return false; |
| 467 | } | 465 | } |
| 468 | 466 | ||
| @@ -503,7 +501,7 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, | |||
| 503 | ql_dbg(ql_dbg_misc, vha, 0xd209, | 501 | ql_dbg(ql_dbg_misc, vha, 0xd209, |
| 504 | "%s: pause risc [%lx]\n", __func__, *len); | 502 | "%s: pause risc [%lx]\n", __func__, *len); |
| 505 | if (buf) | 503 | if (buf) |
| 506 | qla24xx_pause_risc(reg); | 504 | qla24xx_pause_risc(reg, vha->hw); |
| 507 | 505 | ||
| 508 | return false; | 506 | return false; |
| 509 | } | 507 | } |
| @@ -590,7 +588,6 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, | |||
| 590 | struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) | 588 | struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) |
| 591 | { | 589 | { |
| 592 | struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); | 590 | struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); |
| 593 | void *window = (void *)reg + 0xc4; | ||
| 594 | ulong dwords = ent->t270.count; | 591 | ulong dwords = ent->t270.count; |
| 595 | ulong addr = ent->t270.addr; | 592 | ulong addr = ent->t270.addr; |
| 596 | 593 | ||
| @@ -599,10 +596,9 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, | |||
| 599 | qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); | 596 | qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); |
| 600 | while (dwords--) { | 597 | while (dwords--) { |
| 601 | qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf); | 598 | qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf); |
| 602 | qla27xx_read_reg(reg, 0xc4, buf, len); | ||
| 603 | qla27xx_insert32(addr, buf, len); | 599 | qla27xx_insert32(addr, buf, len); |
| 604 | qla27xx_read32(window, buf, len); | 600 | qla27xx_read_reg(reg, 0xc4, buf, len); |
| 605 | addr++; | 601 | addr += sizeof(uint32_t); |
| 606 | } | 602 | } |
| 607 | 603 | ||
| 608 | return false; | 604 | return false; |
| @@ -614,12 +610,12 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, | |||
| 614 | { | 610 | { |
| 615 | struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); | 611 | struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); |
| 616 | ulong addr = ent->t271.addr; | 612 | ulong addr = ent->t271.addr; |
| 613 | ulong data = ent->t271.data; | ||
| 617 | 614 | ||
| 618 | ql_dbg(ql_dbg_misc, vha, 0xd20f, | 615 | ql_dbg(ql_dbg_misc, vha, 0xd20f, |
| 619 | "%s: wrremreg [%lx]\n", __func__, *len); | 616 | "%s: wrremreg [%lx]\n", __func__, *len); |
| 620 | qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); | 617 | qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); |
| 621 | qla27xx_read_reg(reg, 0xc4, buf, len); | 618 | qla27xx_write_reg(reg, 0xc4, data, buf); |
| 622 | qla27xx_insert32(addr, buf, len); | ||
| 623 | qla27xx_write_reg(reg, 0xc0, addr, buf); | 619 | qla27xx_write_reg(reg, 0xc0, addr, buf); |
| 624 | 620 | ||
| 625 | return false; | 621 | return false; |
| @@ -662,9 +658,59 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, | |||
| 662 | "%s: failed pcicfg read at %lx\n", __func__, addr); | 658 | "%s: failed pcicfg read at %lx\n", __func__, addr); |
| 663 | qla27xx_insert32(addr, buf, len); | 659 | qla27xx_insert32(addr, buf, len); |
| 664 | qla27xx_insert32(value, buf, len); | 660 | qla27xx_insert32(value, buf, len); |
| 665 | addr += 4; | 661 | addr += sizeof(uint32_t); |
| 662 | } | ||
| 663 | |||
| 664 | return false; | ||
| 665 | } | ||
| 666 | |||
| 667 | static int | ||
| 668 | qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, | ||
| 669 | struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) | ||
| 670 | { | ||
| 671 | uint count = 0; | ||
| 672 | uint i; | ||
| 673 | |||
| 674 | ql_dbg(ql_dbg_misc, vha, 0xd212, | ||
| 675 | "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len); | ||
| 676 | if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { | ||
| 677 | for (i = 0; i < vha->hw->max_req_queues; i++) { | ||
| 678 | struct req_que *req = vha->hw->req_q_map[i]; | ||
| 679 | if (req || !buf) { | ||
| 680 | qla27xx_insert16(i, buf, len); | ||
| 681 | qla27xx_insert16(1, buf, len); | ||
| 682 | qla27xx_insert32(req && req->out_ptr ? | ||
| 683 | *req->out_ptr : 0, buf, len); | ||
| 684 | count++; | ||
| 685 | } | ||
| 686 | } | ||
| 687 | } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { | ||
| 688 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { | ||
| 689 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; | ||
| 690 | if (rsp || !buf) { | ||
| 691 | qla27xx_insert16(i, buf, len); | ||
| 692 | qla27xx_insert16(1, buf, len); | ||
| 693 | qla27xx_insert32(rsp && rsp->in_ptr ? | ||
| 694 | *rsp->in_ptr : 0, buf, len); | ||
| 695 | count++; | ||
| 696 | } | ||
| 697 | } | ||
| 698 | } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { | ||
| 699 | ql_dbg(ql_dbg_misc, vha, 0xd02e, | ||
| 700 | "%s: unsupported atio queue\n", __func__); | ||
| 701 | qla27xx_skip_entry(ent, buf); | ||
| 702 | } else { | ||
| 703 | ql_dbg(ql_dbg_misc, vha, 0xd02f, | ||
| 704 | "%s: unknown queue %u\n", __func__, ent->t274.queue_type); | ||
| 705 | qla27xx_skip_entry(ent, buf); | ||
| 666 | } | 706 | } |
| 667 | 707 | ||
| 708 | if (buf) | ||
| 709 | ent->t274.num_queues = count; | ||
| 710 | |||
| 711 | if (!count) | ||
| 712 | qla27xx_skip_entry(ent, buf); | ||
| 713 | |||
| 668 | return false; | 714 | return false; |
| 669 | } | 715 | } |
| 670 | 716 | ||
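[Editor's note] For each populated request or response queue, the new t274 handler emits a small record: a 16-bit queue id, a 16-bit count of 1, and the 32-bit shadow index (*req->out_ptr or *rsp->in_ptr, 0 when absent). A compilable userspace sketch of that layout and of appending it the way the qla27xx_insert16/insert32 sequence does; the struct and helper names are illustrative, and endianness handling is omitted:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative layout of one t274 shadow-queue record, matching the
     * insert16/insert16/insert32 sequence in qla27xx_fwdt_entry_t274(). */
    struct shadow_record {
        uint16_t queue_id;   /* request or response queue index */
        uint16_t entries;    /* always 1: one shadow value per queue */
        uint32_t shadow_idx; /* *req->out_ptr or *rsp->in_ptr (0 if absent) */
    } __attribute__((packed));

    /* Append one record to a dump buffer and advance the length counter,
     * loosely mirroring the qla27xx_insert*() helpers (which also bump
     * the length when buf is NULL, so the required size can be sized first). */
    static void insert_record(void *buf, unsigned long *len,
                              uint16_t id, uint32_t shadow)
    {
        struct shadow_record rec = {
            .queue_id = id, .entries = 1, .shadow_idx = shadow,
        };

        if (buf)
            memcpy((char *)buf + *len, &rec, sizeof(rec));
        *len += sizeof(rec);
    }

    int main(void)
    {
        unsigned char dump[64];
        unsigned long len = 0;

        insert_record(dump, &len, 0, 0x1234); /* queue 0, shadow index 0x1234 */
        insert_record(dump, &len, 1, 0x0);    /* queue 1, no shadow pointer */
        printf("dump length: %lu bytes\n", len);
        return 0;
    }
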
| @@ -709,6 +755,7 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = { | |||
| 709 | { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } , | 755 | { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } , |
| 710 | { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } , | 756 | { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } , |
| 711 | { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } , | 757 | { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } , |
| 758 | { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } , | ||
| 712 | { -1 , qla27xx_fwdt_entry_other } | 759 | { -1 , qla27xx_fwdt_entry_other } |
| 713 | }; | 760 | }; |
| 714 | 761 | ||
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index c9d2fff4d964..1967424c8e64 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| @@ -52,6 +52,7 @@ struct __packed qla27xx_fwdt_template { | |||
| 52 | #define ENTRY_TYPE_WRREMREG 271 | 52 | #define ENTRY_TYPE_WRREMREG 271 |
| 53 | #define ENTRY_TYPE_RDREMRAM 272 | 53 | #define ENTRY_TYPE_RDREMRAM 272 |
| 54 | #define ENTRY_TYPE_PCICFG 273 | 54 | #define ENTRY_TYPE_PCICFG 273 |
| 55 | #define ENTRY_TYPE_GET_SHADOW 274 | ||
| 55 | 56 | ||
| 56 | #define CAPTURE_FLAG_PHYS_ONLY BIT_0 | 57 | #define CAPTURE_FLAG_PHYS_ONLY BIT_0 |
| 57 | #define CAPTURE_FLAG_PHYS_VIRT BIT_1 | 58 | #define CAPTURE_FLAG_PHYS_VIRT BIT_1 |
| @@ -109,12 +110,12 @@ struct __packed qla27xx_fwdt_entry { | |||
| 109 | } t259; | 110 | } t259; |
| 110 | 111 | ||
| 111 | struct __packed { | 112 | struct __packed { |
| 112 | uint8_t pci_addr; | 113 | uint8_t pci_offset; |
| 113 | uint8_t reserved[3]; | 114 | uint8_t reserved[3]; |
| 114 | } t260; | 115 | } t260; |
| 115 | 116 | ||
| 116 | struct __packed { | 117 | struct __packed { |
| 117 | uint8_t pci_addr; | 118 | uint8_t pci_offset; |
| 118 | uint8_t reserved[3]; | 119 | uint8_t reserved[3]; |
| 119 | uint32_t write_data; | 120 | uint32_t write_data; |
| 120 | } t261; | 121 | } t261; |
| @@ -186,6 +187,12 @@ struct __packed qla27xx_fwdt_entry { | |||
| 186 | uint32_t addr; | 187 | uint32_t addr; |
| 187 | uint32_t count; | 188 | uint32_t count; |
| 188 | } t273; | 189 | } t273; |
| 190 | |||
| 191 | struct __packed { | ||
| 192 | uint32_t num_queues; | ||
| 193 | uint8_t queue_type; | ||
| 194 | uint8_t reserved[3]; | ||
| 195 | } t274; | ||
| 189 | }; | 196 | }; |
| 190 | }; | 197 | }; |
| 191 | 198 | ||
| @@ -202,4 +209,8 @@ struct __packed qla27xx_fwdt_entry { | |||
| 202 | #define T268_BUF_TYPE_EXCH_BUFOFF 2 | 209 | #define T268_BUF_TYPE_EXCH_BUFOFF 2 |
| 203 | #define T268_BUF_TYPE_EXTD_LOGIN 3 | 210 | #define T268_BUF_TYPE_EXTD_LOGIN 3 |
| 204 | 211 | ||
| 212 | #define T274_QUEUE_TYPE_REQ_SHAD 1 | ||
| 213 | #define T274_QUEUE_TYPE_RSP_SHAD 2 | ||
| 214 | #define T274_QUEUE_TYPE_ATIO_SHAD 3 | ||
| 215 | |||
| 205 | #endif | 216 | #endif |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index e36b94712544..4d2c98cbec4f 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
| @@ -1,13 +1,13 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
| 3 | * Copyright (c) 2003-2013 QLogic Corporation | 3 | * Copyright (c) 2003-2014 QLogic Corporation |
| 4 | * | 4 | * |
| 5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| 7 | /* | 7 | /* |
| 8 | * Driver version | 8 | * Driver version |
| 9 | */ | 9 | */ |
| 10 | #define QLA2XXX_VERSION "8.07.00.02-k" | 10 | #define QLA2XXX_VERSION "8.07.00.08-k" |
| 11 | 11 | ||
| 12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
| 13 | #define QLA_DRIVER_MINOR_VER 7 | 13 | #define QLA_DRIVER_MINOR_VER 7 |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68fb66fdb757..896cb23adb77 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) | |||
| 472 | cmd->sg_cnt = se_cmd->t_data_nents; | 472 | cmd->sg_cnt = se_cmd->t_data_nents; |
| 473 | cmd->sg = se_cmd->t_data_sg; | 473 | cmd->sg = se_cmd->t_data_sg; |
| 474 | 474 | ||
| 475 | cmd->prot_sg_cnt = se_cmd->t_prot_nents; | ||
| 476 | cmd->prot_sg = se_cmd->t_prot_sg; | ||
| 477 | cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; | ||
| 478 | se_cmd->pi_err = 0; | ||
| 479 | |||
| 475 | /* | 480 | /* |
| 476 | * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup | 481 | * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup |
| 477 | * the SGL mappings into PCIe memory for incoming FCP WRITE data. | 482 | * the SGL mappings into PCIe memory for incoming FCP WRITE data. |
| @@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | |||
| 567 | return; | 572 | return; |
| 568 | } | 573 | } |
| 569 | 574 | ||
| 570 | transport_generic_request_failure(&cmd->se_cmd, | 575 | if (cmd->se_cmd.pi_err) |
| 571 | TCM_CHECK_CONDITION_ABORT_CMD); | 576 | transport_generic_request_failure(&cmd->se_cmd, |
| 577 | cmd->se_cmd.pi_err); | ||
| 578 | else | ||
| 579 | transport_generic_request_failure(&cmd->se_cmd, | ||
| 580 | TCM_CHECK_CONDITION_ABORT_CMD); | ||
| 581 | |||
| 572 | return; | 582 | return; |
| 573 | } | 583 | } |
| 574 | 584 | ||
| @@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) | |||
| 584 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); | 594 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); |
| 585 | } | 595 | } |
| 586 | 596 | ||
| 597 | static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) | ||
| 598 | { | ||
| 599 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | ||
| 600 | |||
| 601 | /* take an extra kref to prevent the cmd from being freed too early; | ||
| 602 | * we need to wait for the SCSI status/check condition response | ||
| 603 | * generated by transport_generic_request_failure to finish. | ||
| 604 | */ | ||
| 605 | kref_get(&cmd->se_cmd.cmd_kref); | ||
| 606 | transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); | ||
| 607 | } | ||
| 608 | |||
| 609 | /* | ||
| 610 | * Called from qla_target.c:qlt_do_ctio_completion() | ||
| 611 | */ | ||
| 612 | static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) | ||
| 613 | { | ||
| 614 | INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); | ||
| 615 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); | ||
| 616 | } | ||
| 617 | |||
| 587 | /* | 618 | /* |
| 588 | * Called from qla_target.c:qlt_issue_task_mgmt() | 619 | * Called from qla_target.c:qlt_issue_task_mgmt() |
| 589 | */ | 620 | */ |
| @@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) | |||
| 610 | cmd->sg = se_cmd->t_data_sg; | 641 | cmd->sg = se_cmd->t_data_sg; |
| 611 | cmd->offset = 0; | 642 | cmd->offset = 0; |
| 612 | 643 | ||
| 644 | cmd->prot_sg_cnt = se_cmd->t_prot_nents; | ||
| 645 | cmd->prot_sg = se_cmd->t_prot_sg; | ||
| 646 | cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; | ||
| 647 | se_cmd->pi_err = 0; | ||
| 648 | |||
| 613 | /* | 649 | /* |
| 614 | * Now queue completed DATA_IN the qla2xxx LLD and response ring | 650 | * Now queue completed DATA_IN the qla2xxx LLD and response ring |
| 615 | */ | 651 | */ |
| @@ -1600,6 +1636,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, | |||
| 1600 | static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { | 1636 | static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { |
| 1601 | .handle_cmd = tcm_qla2xxx_handle_cmd, | 1637 | .handle_cmd = tcm_qla2xxx_handle_cmd, |
| 1602 | .handle_data = tcm_qla2xxx_handle_data, | 1638 | .handle_data = tcm_qla2xxx_handle_data, |
| 1639 | .handle_dif_err = tcm_qla2xxx_handle_dif_err, | ||
| 1603 | .handle_tmr = tcm_qla2xxx_handle_tmr, | 1640 | .handle_tmr = tcm_qla2xxx_handle_tmr, |
| 1604 | .free_cmd = tcm_qla2xxx_free_cmd, | 1641 | .free_cmd = tcm_qla2xxx_free_cmd, |
| 1605 | .free_mcmd = tcm_qla2xxx_free_mcmd, | 1642 | .free_mcmd = tcm_qla2xxx_free_mcmd, |
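[Editor's note] The write-completion path now prefers a protection-specific sense reason stored in se_cmd->pi_err over the generic abort status, and a new handle_dif_err callback queues that failure from the CTIO completion context. A small self-contained sketch of the selection logic; the enum values are stand-ins for the target core's sense_reason_t constants and the struct is not the real se_cmd:

    #include <stdio.h>

    /* Stand-in sense reasons; the driver uses the target core's
     * sense_reason_t values (the generic abort plus the T10-PI
     * guard/app/ref tag check failures). */
    enum fake_sense_reason {
        FAKE_TCM_NO_SENSE = 0,
        FAKE_TCM_CHECK_CONDITION_ABORT_CMD,
        FAKE_TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED,
    };

    struct fake_se_cmd {
        enum fake_sense_reason pi_err; /* set by the LLD on a DIF error, 0 otherwise */
    };

    /* Mirror of the new tcm_qla2xxx_handle_data_work() failure branch:
     * report the DIF-specific reason when one was recorded. */
    static enum fake_sense_reason pick_failure_reason(const struct fake_se_cmd *se_cmd)
    {
        if (se_cmd->pi_err)
            return se_cmd->pi_err;
        return FAKE_TCM_CHECK_CONDITION_ABORT_CMD;
    }

    int main(void)
    {
        struct fake_se_cmd ok  = { .pi_err = FAKE_TCM_NO_SENSE };
        struct fake_se_cmd dif = { .pi_err = FAKE_TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED };

        printf("no DIF error -> reason %d\n", pick_failure_reason(&ok));
        printf("guard failed -> reason %d\n", pick_failure_reason(&dif));
        return 0;
    }
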
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c index 2eba35365920..556c1525f881 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.c +++ b/drivers/scsi/qla4xxx/ql4_83xx.c | |||
| @@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha) | |||
| 249 | qla4_83xx_flash_unlock(ha); | 249 | qla4_83xx_flash_unlock(ha); |
| 250 | } | 250 | } |
| 251 | 251 | ||
| 252 | /** | ||
| 253 | * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory | ||
| 254 | * @ha: Pointer to adapter structure | ||
| 255 | * @addr: Flash address to write to | ||
| 256 | * @data: Data to be written | ||
| 257 | * @count: word_count to be written | ||
| 258 | * | ||
| 259 | * Return: On success return QLA_SUCCESS | ||
| 260 | * On error return QLA_ERROR | ||
| 261 | **/ | ||
| 262 | int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, | ||
| 263 | uint32_t *data, uint32_t count) | ||
| 264 | { | ||
| 265 | int i, j; | ||
| 266 | uint32_t agt_ctrl; | ||
| 267 | unsigned long flags; | ||
| 268 | int ret_val = QLA_SUCCESS; | ||
| 269 | |||
| 270 | /* Only 128-bit aligned access */ | ||
| 271 | if (addr & 0xF) { | ||
| 272 | ret_val = QLA_ERROR; | ||
| 273 | goto exit_ms_mem_write; | ||
| 274 | } | ||
| 275 | |||
| 276 | write_lock_irqsave(&ha->hw_lock, flags); | ||
| 277 | |||
| 278 | /* Write address */ | ||
| 279 | ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); | ||
| 280 | if (ret_val == QLA_ERROR) { | ||
| 281 | ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", | ||
| 282 | __func__); | ||
| 283 | goto exit_ms_mem_write_unlock; | ||
| 284 | } | ||
| 285 | |||
| 286 | for (i = 0; i < count; i++, addr += 16) { | ||
| 287 | if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, | ||
| 288 | QLA8XXX_ADDR_QDR_NET_MAX)) || | ||
| 289 | (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, | ||
| 290 | QLA8XXX_ADDR_DDR_NET_MAX)))) { | ||
| 291 | ret_val = QLA_ERROR; | ||
| 292 | goto exit_ms_mem_write_unlock; | ||
| 293 | } | ||
| 294 | |||
| 295 | ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO, | ||
| 296 | addr); | ||
| 297 | /* Write data */ | ||
| 298 | ret_val |= qla4_83xx_wr_reg_indirect(ha, | ||
| 299 | MD_MIU_TEST_AGT_WRDATA_LO, | ||
| 300 | *data++); | ||
| 301 | ret_val |= qla4_83xx_wr_reg_indirect(ha, | ||
| 302 | MD_MIU_TEST_AGT_WRDATA_HI, | ||
| 303 | *data++); | ||
| 304 | ret_val |= qla4_83xx_wr_reg_indirect(ha, | ||
| 305 | MD_MIU_TEST_AGT_WRDATA_ULO, | ||
| 306 | *data++); | ||
| 307 | ret_val |= qla4_83xx_wr_reg_indirect(ha, | ||
| 308 | MD_MIU_TEST_AGT_WRDATA_UHI, | ||
| 309 | *data++); | ||
| 310 | if (ret_val == QLA_ERROR) { | ||
| 311 | ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", | ||
| 312 | __func__); | ||
| 313 | goto exit_ms_mem_write_unlock; | ||
| 314 | } | ||
| 315 | |||
| 316 | /* Check write status */ | ||
| 317 | ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, | ||
| 318 | MIU_TA_CTL_WRITE_ENABLE); | ||
| 319 | ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, | ||
| 320 | MIU_TA_CTL_WRITE_START); | ||
| 321 | if (ret_val == QLA_ERROR) { | ||
| 322 | ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", | ||
| 323 | __func__); | ||
| 324 | goto exit_ms_mem_write_unlock; | ||
| 325 | } | ||
| 326 | |||
| 327 | for (j = 0; j < MAX_CTL_CHECK; j++) { | ||
| 328 | ret_val = qla4_83xx_rd_reg_indirect(ha, | ||
| 329 | MD_MIU_TEST_AGT_CTRL, | ||
| 330 | &agt_ctrl); | ||
| 331 | if (ret_val == QLA_ERROR) { | ||
| 332 | ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", | ||
| 333 | __func__); | ||
| 334 | goto exit_ms_mem_write_unlock; | ||
| 335 | } | ||
| 336 | if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) | ||
| 337 | break; | ||
| 338 | } | ||
| 339 | |||
| 340 | /* Status check failed */ | ||
| 341 | if (j >= MAX_CTL_CHECK) { | ||
| 342 | printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", | ||
| 343 | __func__); | ||
| 344 | ret_val = QLA_ERROR; | ||
| 345 | goto exit_ms_mem_write_unlock; | ||
| 346 | } | ||
| 347 | } | ||
| 348 | |||
| 349 | exit_ms_mem_write_unlock: | ||
| 350 | write_unlock_irqrestore(&ha->hw_lock, flags); | ||
| 351 | |||
| 352 | exit_ms_mem_write: | ||
| 353 | return ret_val; | ||
| 354 | } | ||
| 355 | |||
| 356 | #define INTENT_TO_RECOVER 0x01 | 252 | #define INTENT_TO_RECOVER 0x01 |
| 357 | #define PROCEED_TO_RECOVER 0x02 | 253 | #define PROCEED_TO_RECOVER 0x02 |
| 358 | 254 | ||
| @@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha) | |||
| 760 | __func__)); | 656 | __func__)); |
| 761 | 657 | ||
| 762 | /* 128 bit/16 byte write to MS memory */ | 658 | /* 128 bit/16 byte write to MS memory */ |
| 763 | ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, | 659 | ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, |
| 764 | count); | 660 | count); |
| 765 | if (ret_val == QLA_ERROR) { | 661 | if (ret_val == QLA_ERROR) { |
| 766 | ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", | 662 | ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", |
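[Editor's note] qla4_83xx_copy_bootloader() now goes through the shared qla4_8xxx_ms_mem_write_128b() helper, which only accepts 16-byte-aligned destinations and a count expressed in 128-bit (16-byte) words. A compilable sketch of how a caller would derive those arguments under that contract; the wrapper name is illustrative and the real call is shown only in a comment:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative wrapper: convert a byte-sized transfer into the
     * address/count contract expected by qla4_8xxx_ms_mem_write_128b()
     * (128-bit aligned address, count in 16-byte words). */
    static int ms_mem_write_bytes(uint64_t addr, const uint32_t *data, size_t bytes)
    {
        if (addr & 0xF) {
            fprintf(stderr, "destination %#llx not 128-bit aligned\n",
                    (unsigned long long)addr);
            return -1;
        }
        if (bytes % 16) {
            fprintf(stderr, "length %zu not a multiple of 16 bytes\n", bytes);
            return -1;
        }

        /* The real call would be:
         * qla4_8xxx_ms_mem_write_128b(ha, addr, (uint32_t *)data, bytes / 16);
         */
        printf("would write %zu x 128-bit words starting at %#llx\n",
               bytes / 16, (unsigned long long)addr);
        (void)data;
        return 0;
    }

    int main(void)
    {
        uint32_t payload[8] = { 0 }; /* 32 bytes = two 128-bit words */

        ms_mem_write_bytes(0x10000, payload, sizeof(payload));
        ms_mem_write_bytes(0x10008, payload, sizeof(payload)); /* rejected: misaligned */
        return 0;
    }
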
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h index a0de6e25ea5a..775fdf9fcc87 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.h +++ b/drivers/scsi/qla4xxx/ql4_83xx.h | |||
| @@ -254,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd { | |||
| 254 | uint32_t rsvd_1; | 254 | uint32_t rsvd_1; |
| 255 | }; | 255 | }; |
| 256 | 256 | ||
| 257 | struct qla8044_minidump_entry_rddfe { | ||
| 258 | struct qla8xxx_minidump_entry_hdr h; | ||
| 259 | uint32_t addr_1; | ||
| 260 | uint32_t value; | ||
| 261 | uint8_t stride; | ||
| 262 | uint8_t stride2; | ||
| 263 | uint16_t count; | ||
| 264 | uint32_t poll; | ||
| 265 | uint32_t mask; | ||
| 266 | uint32_t modify_mask; | ||
| 267 | uint32_t data_size; | ||
| 268 | uint32_t rsvd; | ||
| 269 | |||
| 270 | } __packed; | ||
| 271 | |||
| 272 | struct qla8044_minidump_entry_rdmdio { | ||
| 273 | struct qla8xxx_minidump_entry_hdr h; | ||
| 274 | |||
| 275 | uint32_t addr_1; | ||
| 276 | uint32_t addr_2; | ||
| 277 | uint32_t value_1; | ||
| 278 | uint8_t stride_1; | ||
| 279 | uint8_t stride_2; | ||
| 280 | uint16_t count; | ||
| 281 | uint32_t poll; | ||
| 282 | uint32_t mask; | ||
| 283 | uint32_t value_2; | ||
| 284 | uint32_t data_size; | ||
| 285 | |||
| 286 | } __packed; | ||
| 287 | |||
| 288 | struct qla8044_minidump_entry_pollwr { | ||
| 289 | struct qla8xxx_minidump_entry_hdr h; | ||
| 290 | uint32_t addr_1; | ||
| 291 | uint32_t addr_2; | ||
| 292 | uint32_t value_1; | ||
| 293 | uint32_t value_2; | ||
| 294 | uint32_t poll; | ||
| 295 | uint32_t mask; | ||
| 296 | uint32_t data_size; | ||
| 297 | uint32_t rsvd; | ||
| 298 | |||
| 299 | } __packed; | ||
| 300 | |||
| 257 | /* RDMUX2 Entry */ | 301 | /* RDMUX2 Entry */ |
| 258 | struct qla83xx_minidump_entry_rdmux2 { | 302 | struct qla83xx_minidump_entry_rdmux2 { |
| 259 | struct qla8xxx_minidump_entry_hdr h; | 303 | struct qla8xxx_minidump_entry_hdr h; |
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 73a502288bde..8f6d0fb2cd80 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
| @@ -601,6 +601,7 @@ struct scsi_qla_host { | |||
| 601 | #define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/ | 601 | #define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/ |
| 602 | #define DPC_POST_IDC_ACK 23 /* 0x00800000 */ | 602 | #define DPC_POST_IDC_ACK 23 /* 0x00800000 */ |
| 603 | #define DPC_RESTORE_ACB 24 /* 0x01000000 */ | 603 | #define DPC_RESTORE_ACB 24 /* 0x01000000 */ |
| 604 | #define DPC_SYSFS_DDB_EXPORT 25 /* 0x02000000 */ | ||
| 604 | 605 | ||
| 605 | struct Scsi_Host *host; /* pointer to host data */ | 606 | struct Scsi_Host *host; /* pointer to host data */ |
| 606 | uint32_t tot_ddbs; | 607 | uint32_t tot_ddbs; |
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 209853ce0bbc..699575efc9ba 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h | |||
| @@ -1415,6 +1415,9 @@ struct ql_iscsi_stats { | |||
| 1415 | #define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16 | 1415 | #define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16 |
| 1416 | #define QLA83XX_SS_OCM_WNDREG_INDEX 3 | 1416 | #define QLA83XX_SS_OCM_WNDREG_INDEX 3 |
| 1417 | #define QLA83XX_SS_PCI_INDEX 0 | 1417 | #define QLA83XX_SS_PCI_INDEX 0 |
| 1418 | #define QLA8022_TEMPLATE_CAP_OFFSET 172 | ||
| 1419 | #define QLA83XX_TEMPLATE_CAP_OFFSET 268 | ||
| 1420 | #define QLA80XX_TEMPLATE_RESERVED_BITS 16 | ||
| 1418 | 1421 | ||
| 1419 | struct qla4_8xxx_minidump_template_hdr { | 1422 | struct qla4_8xxx_minidump_template_hdr { |
| 1420 | uint32_t entry_type; | 1423 | uint32_t entry_type; |
| @@ -1434,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr { | |||
| 1434 | uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN]; | 1437 | uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN]; |
| 1435 | uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN]; | 1438 | uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN]; |
| 1436 | uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN]; | 1439 | uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN]; |
| 1440 | uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS]; | ||
| 1437 | }; | 1441 | }; |
| 1438 | 1442 | ||
| 1439 | #endif /* _QLA4X_FW_H */ | 1443 | #endif /* _QLA4X_FW_H */ |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index b1a19cd8d5b2..5f58b451327e 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
| @@ -274,13 +274,14 @@ int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, | |||
| 274 | int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, | 274 | int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, |
| 275 | uint32_t acb_type, uint32_t len); | 275 | uint32_t acb_type, uint32_t len); |
| 276 | int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config); | 276 | int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config); |
| 277 | int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, | 277 | int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, |
| 278 | uint64_t addr, uint32_t *data, uint32_t count); | 278 | uint64_t addr, uint32_t *data, uint32_t count); |
| 279 | uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state); | 279 | uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state); |
| 280 | int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config); | 280 | int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config); |
| 281 | int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config); | 281 | int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config); |
| 282 | int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha); | 282 | int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha); |
| 283 | int qla4_83xx_is_detached(struct scsi_qla_host *ha); | 283 | int qla4_83xx_is_detached(struct scsi_qla_host *ha); |
| 284 | int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha); | ||
| 284 | 285 | ||
| 285 | extern int ql4xextended_error_logging; | 286 | extern int ql4xextended_error_logging; |
| 286 | extern int ql4xdontresethba; | 287 | extern int ql4xdontresethba; |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 28fbece7e08f..6f12f859b11d 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
| @@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) | |||
| 282 | return ipv4_wait|ipv6_wait; | 282 | return ipv4_wait|ipv6_wait; |
| 283 | } | 283 | } |
| 284 | 284 | ||
| 285 | static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha, | ||
| 286 | struct qla4_8xxx_minidump_template_hdr *md_hdr) | ||
| 287 | { | ||
| 288 | int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET : | ||
| 289 | QLA83XX_TEMPLATE_CAP_OFFSET; | ||
| 290 | int rval = 1; | ||
| 291 | uint32_t *cap_offset; | ||
| 292 | |||
| 293 | cap_offset = (uint32_t *)((char *)md_hdr + offset); | ||
| 294 | |||
| 295 | if (!(le32_to_cpu(*cap_offset) & BIT_0)) { | ||
| 296 | ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n", | ||
| 297 | *cap_offset); | ||
| 298 | rval = 0; | ||
| 299 | } | ||
| 300 | |||
| 301 | return rval; | ||
| 302 | } | ||
| 303 | |||
| 285 | /** | 304 | /** |
| 286 | * qla4xxx_alloc_fw_dump - Allocate memory for minidump data. | 305 | * qla4xxx_alloc_fw_dump - Allocate memory for minidump data. |
| 287 | * @ha: pointer to host adapter structure. | 306 | * @ha: pointer to host adapter structure. |
| @@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) | |||
| 294 | void *md_tmp; | 313 | void *md_tmp; |
| 295 | dma_addr_t md_tmp_dma; | 314 | dma_addr_t md_tmp_dma; |
| 296 | struct qla4_8xxx_minidump_template_hdr *md_hdr; | 315 | struct qla4_8xxx_minidump_template_hdr *md_hdr; |
| 316 | int dma_capable; | ||
| 297 | 317 | ||
| 298 | if (ha->fw_dump) { | 318 | if (ha->fw_dump) { |
| 299 | ql4_printk(KERN_WARNING, ha, | 319 | ql4_printk(KERN_WARNING, ha, |
| @@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) | |||
| 326 | 346 | ||
| 327 | md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; | 347 | md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; |
| 328 | 348 | ||
| 349 | dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr); | ||
| 350 | |||
| 329 | capture_debug_level = md_hdr->capture_debug_level; | 351 | capture_debug_level = md_hdr->capture_debug_level; |
| 330 | 352 | ||
| 331 | /* Get capture mask based on module loadtime setting. */ | 353 | /* Get capture mask based on module loadtime setting. */ |
| 332 | if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) | 354 | if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) || |
| 355 | (ql4xmdcapmask == 0xFF && dma_capable)) { | ||
| 333 | ha->fw_dump_capture_mask = ql4xmdcapmask; | 356 | ha->fw_dump_capture_mask = ql4xmdcapmask; |
| 334 | else | 357 | } else { |
| 358 | if (ql4xmdcapmask == 0xFF) | ||
| 359 | ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n"); | ||
| 335 | ha->fw_dump_capture_mask = capture_debug_level; | 360 | ha->fw_dump_capture_mask = capture_debug_level; |
| 361 | } | ||
| 336 | 362 | ||
| 337 | md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; | 363 | md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; |
| 338 | 364 | ||
| @@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha) | |||
| 864 | if (status == QLA_SUCCESS) { | 890 | if (status == QLA_SUCCESS) { |
| 865 | if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) | 891 | if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) |
| 866 | qla4xxx_get_crash_record(ha); | 892 | qla4xxx_get_crash_record(ha); |
| 893 | |||
| 894 | qla4xxx_init_rings(ha); | ||
| 867 | } else { | 895 | } else { |
| 868 | DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", | 896 | DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", |
| 869 | ha->host_no, __func__)); | 897 | ha->host_no, __func__)); |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index b1925d195f41..081b6b78d2c6 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
| @@ -1526,7 +1526,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) | |||
| 1526 | 1526 | ||
| 1527 | int qla4xxx_request_irqs(struct scsi_qla_host *ha) | 1527 | int qla4xxx_request_irqs(struct scsi_qla_host *ha) |
| 1528 | { | 1528 | { |
| 1529 | int ret; | 1529 | int ret = 0; |
| 1530 | int rval = QLA_ERROR; | 1530 | int rval = QLA_ERROR; |
| 1531 | 1531 | ||
| 1532 | if (is_qla40XX(ha)) | 1532 | if (is_qla40XX(ha)) |
| @@ -1580,15 +1580,13 @@ try_msi: | |||
| 1580 | } | 1580 | } |
| 1581 | } | 1581 | } |
| 1582 | 1582 | ||
| 1583 | /* | 1583 | try_intx: |
| 1584 | * Prevent interrupts from falling back to INTx mode in cases where | ||
| 1585 | * interrupts cannot get acquired through MSI-X or MSI mode. | ||
| 1586 | */ | ||
| 1587 | if (is_qla8022(ha)) { | 1584 | if (is_qla8022(ha)) { |
| 1588 | ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret); | 1585 | ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n", |
| 1586 | __func__); | ||
| 1589 | goto irq_not_attached; | 1587 | goto irq_not_attached; |
| 1590 | } | 1588 | } |
| 1591 | try_intx: | 1589 | |
| 1592 | /* Trying INTx */ | 1590 | /* Trying INTx */ |
| 1593 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, | 1591 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, |
| 1594 | IRQF_SHARED, DRIVER_NAME, ha); | 1592 | IRQF_SHARED, DRIVER_NAME, ha); |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 0a6b782d6fdb..0a3312c6dd6d 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
| @@ -2381,7 +2381,7 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) | |||
| 2381 | ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", | 2381 | ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", |
| 2382 | __func__); | 2382 | __func__); |
| 2383 | rval = QLA_ERROR; | 2383 | rval = QLA_ERROR; |
| 2384 | goto exit_config_acb; | 2384 | goto exit_free_acb; |
| 2385 | } | 2385 | } |
| 2386 | memcpy(ha->saved_acb, acb, acb_len); | 2386 | memcpy(ha->saved_acb, acb, acb_len); |
| 2387 | break; | 2387 | break; |
| @@ -2395,8 +2395,6 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) | |||
| 2395 | } | 2395 | } |
| 2396 | 2396 | ||
| 2397 | memcpy(acb, ha->saved_acb, acb_len); | 2397 | memcpy(acb, ha->saved_acb, acb_len); |
| 2398 | kfree(ha->saved_acb); | ||
| 2399 | ha->saved_acb = NULL; | ||
| 2400 | 2398 | ||
| 2401 | rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); | 2399 | rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); |
| 2402 | if (rval != QLA_SUCCESS) | 2400 | if (rval != QLA_SUCCESS) |
| @@ -2412,6 +2410,10 @@ exit_free_acb: | |||
| 2412 | dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb, | 2410 | dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb, |
| 2413 | acb_dma); | 2411 | acb_dma); |
| 2414 | exit_config_acb: | 2412 | exit_config_acb: |
| 2413 | if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) { | ||
| 2414 | kfree(ha->saved_acb); | ||
| 2415 | ha->saved_acb = NULL; | ||
| 2416 | } | ||
| 2415 | DEBUG2(ql4_printk(KERN_INFO, ha, | 2417 | DEBUG2(ql4_printk(KERN_INFO, ha, |
| 2416 | "%s %s\n", __func__, | 2418 | "%s %s\n", __func__, |
| 2417 | rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); | 2419 | rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); |
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 63328c812b70..9dbdb4be2d8f 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | 14 | ||
| 15 | #include <asm-generic/io-64-nonatomic-lo-hi.h> | 15 | #include <asm-generic/io-64-nonatomic-lo-hi.h> |
| 16 | 16 | ||
| 17 | #define TIMEOUT_100_MS 100 | ||
| 17 | #define MASK(n) DMA_BIT_MASK(n) | 18 | #define MASK(n) DMA_BIT_MASK(n) |
| 18 | #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) | 19 | #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) |
| 19 | #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) | 20 | #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) |
| @@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) | |||
| 1176 | return 0; | 1177 | return 0; |
| 1177 | } | 1178 | } |
| 1178 | 1179 | ||
| 1180 | /** | ||
| 1181 | * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory | ||
| 1182 | * @ha: Pointer to adapter structure | ||
| 1183 | * @addr: MS/off-chip memory address to write to | ||
| 1184 | * @data: Data to be written | ||
| 1185 | * @count: Number of 128-bit words to write | ||
| 1186 | * | ||
| 1187 | * Return: On success return QLA_SUCCESS | ||
| 1188 | * On error return QLA_ERROR | ||
| 1189 | **/ | ||
| 1190 | int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, | ||
| 1191 | uint32_t *data, uint32_t count) | ||
| 1192 | { | ||
| 1193 | int i, j; | ||
| 1194 | uint32_t agt_ctrl; | ||
| 1195 | unsigned long flags; | ||
| 1196 | int ret_val = QLA_SUCCESS; | ||
| 1197 | |||
| 1198 | /* Only 128-bit aligned access */ | ||
| 1199 | if (addr & 0xF) { | ||
| 1200 | ret_val = QLA_ERROR; | ||
| 1201 | goto exit_ms_mem_write; | ||
| 1202 | } | ||
| 1203 | |||
| 1204 | write_lock_irqsave(&ha->hw_lock, flags); | ||
| 1205 | |||
| 1206 | /* Write address */ | ||
| 1207 | ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); | ||
| 1208 | if (ret_val == QLA_ERROR) { | ||
| 1209 | ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", | ||
| 1210 | __func__); | ||
| 1211 | goto exit_ms_mem_write_unlock; | ||
| 1212 | } | ||
| 1213 | |||
| 1214 | for (i = 0; i < count; i++, addr += 16) { | ||
| 1215 | if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, | ||
| 1216 | QLA8XXX_ADDR_QDR_NET_MAX)) || | ||
| 1217 | (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, | ||
| 1218 | QLA8XXX_ADDR_DDR_NET_MAX)))) { | ||
| 1219 | ret_val = QLA_ERROR; | ||
| 1220 | goto exit_ms_mem_write_unlock; | ||
| 1221 | } | ||
| 1222 | |||
| 1223 | ret_val = ha->isp_ops->wr_reg_indirect(ha, | ||
| 1224 | MD_MIU_TEST_AGT_ADDR_LO, | ||
| 1225 | addr); | ||
| 1226 | /* Write data */ | ||
| 1227 | ret_val |= ha->isp_ops->wr_reg_indirect(ha, | ||
| 1228 | MD_MIU_TEST_AGT_WRDATA_LO, | ||
| 1229 | *data++); | ||
| 1230 | ret_val |= ha->isp_ops->wr_reg_indirect(ha, | ||
| 1231 | MD_MIU_TEST_AGT_WRDATA_HI, | ||
| 1232 | *data++); | ||
| 1233 | ret_val |= ha->isp_ops->wr_reg_indirect(ha, | ||
| 1234 | MD_MIU_TEST_AGT_WRDATA_ULO, | ||
| 1235 | *data++); | ||
| 1236 | ret_val |= ha->isp_ops->wr_reg_indirect(ha, | ||
| 1237 | MD_MIU_TEST_AGT_WRDATA_UHI, | ||
| 1238 | *data++); | ||
| 1239 | if (ret_val == QLA_ERROR) { | ||
| 1240 | ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", | ||
| 1241 | __func__); | ||
| 1242 | goto exit_ms_mem_write_unlock; | ||
| 1243 | } | ||
| 1244 | |||
| 1245 | /* Check write status */ | ||
| 1246 | ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, | ||
| 1247 | MIU_TA_CTL_WRITE_ENABLE); | ||
| 1248 | ret_val |= ha->isp_ops->wr_reg_indirect(ha, | ||
| 1249 | MD_MIU_TEST_AGT_CTRL, | ||
| 1250 | MIU_TA_CTL_WRITE_START); | ||
| 1251 | if (ret_val == QLA_ERROR) { | ||
| 1252 | ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", | ||
| 1253 | __func__); | ||
| 1254 | goto exit_ms_mem_write_unlock; | ||
| 1255 | } | ||
| 1256 | |||
| 1257 | for (j = 0; j < MAX_CTL_CHECK; j++) { | ||
| 1258 | ret_val = ha->isp_ops->rd_reg_indirect(ha, | ||
| 1259 | MD_MIU_TEST_AGT_CTRL, | ||
| 1260 | &agt_ctrl); | ||
| 1261 | if (ret_val == QLA_ERROR) { | ||
| 1262 | ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", | ||
| 1263 | __func__); | ||
| 1264 | goto exit_ms_mem_write_unlock; | ||
| 1265 | } | ||
| 1266 | if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) | ||
| 1267 | break; | ||
| 1268 | } | ||
| 1269 | |||
| 1270 | /* Status check failed */ | ||
| 1271 | if (j >= MAX_CTL_CHECK) { | ||
| 1272 | printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", | ||
| 1273 | __func__); | ||
| 1274 | ret_val = QLA_ERROR; | ||
| 1275 | goto exit_ms_mem_write_unlock; | ||
| 1276 | } | ||
| 1277 | } | ||
| 1278 | |||
| 1279 | exit_ms_mem_write_unlock: | ||
| 1280 | write_unlock_irqrestore(&ha->hw_lock, flags); | ||
| 1281 | |||
| 1282 | exit_ms_mem_write: | ||
| 1283 | return ret_val; | ||
| 1284 | } | ||
| 1285 | |||
| 1179 | static int | 1286 | static int |
| 1180 | qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) | 1287 | qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) |
| 1181 | { | 1288 | { |
| @@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha) | |||
| 1714 | qla4_82xx_rom_unlock(ha); | 1821 | qla4_82xx_rom_unlock(ha); |
| 1715 | } | 1822 | } |
| 1716 | 1823 | ||
| 1824 | static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha, | ||
| 1825 | uint32_t addr1, uint32_t mask) | ||
| 1826 | { | ||
| 1827 | unsigned long timeout; | ||
| 1828 | uint32_t rval = QLA_SUCCESS; | ||
| 1829 | uint32_t temp; | ||
| 1830 | |||
| 1831 | timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); | ||
| 1832 | do { | ||
| 1833 | ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); | ||
| 1834 | if ((temp & mask) != 0) | ||
| 1835 | break; | ||
| 1836 | |||
| 1837 | if (time_after_eq(jiffies, timeout)) { | ||
| 1838 | ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n"); | ||
| 1839 | return QLA_ERROR; | ||
| 1840 | } | ||
| 1841 | } while (1); | ||
| 1842 | |||
| 1843 | return rval; | ||
| 1844 | } | ||
| 1845 | |||
| 1846 | uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1, | ||
| 1847 | uint32_t addr3, uint32_t mask, uint32_t addr, | ||
| 1848 | uint32_t *data_ptr) | ||
| 1849 | { | ||
| 1850 | int rval = QLA_SUCCESS; | ||
| 1851 | uint32_t temp; | ||
| 1852 | uint32_t data; | ||
| 1853 | |||
| 1854 | rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); | ||
| 1855 | if (rval) | ||
| 1856 | goto exit_ipmdio_rd_reg; | ||
| 1857 | |||
| 1858 | temp = (0x40000000 | addr); | ||
| 1859 | ha->isp_ops->wr_reg_indirect(ha, addr1, temp); | ||
| 1860 | |||
| 1861 | rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); | ||
| 1862 | if (rval) | ||
| 1863 | goto exit_ipmdio_rd_reg; | ||
| 1864 | |||
| 1865 | ha->isp_ops->rd_reg_indirect(ha, addr3, &data); | ||
| 1866 | *data_ptr = data; | ||
| 1867 | |||
| 1868 | exit_ipmdio_rd_reg: | ||
| 1869 | return rval; | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | |||
| 1873 | static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha, | ||
| 1874 | uint32_t addr1, | ||
| 1875 | uint32_t addr2, | ||
| 1876 | uint32_t addr3, | ||
| 1877 | uint32_t mask) | ||
| 1878 | { | ||
| 1879 | unsigned long timeout; | ||
| 1880 | uint32_t temp; | ||
| 1881 | uint32_t rval = QLA_SUCCESS; | ||
| 1882 | |||
| 1883 | timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); | ||
| 1884 | do { | ||
| 1885 | ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp); | ||
| 1886 | if ((temp & 0x1) != 1) | ||
| 1887 | break; | ||
| 1888 | if (time_after_eq(jiffies, timeout)) { | ||
| 1889 | ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n"); | ||
| 1890 | return QLA_ERROR; | ||
| 1891 | } | ||
| 1892 | } while (1); | ||
| 1893 | |||
| 1894 | return rval; | ||
| 1895 | } | ||
| 1896 | |||
| 1897 | static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha, | ||
| 1898 | uint32_t addr1, uint32_t addr3, | ||
| 1899 | uint32_t mask, uint32_t addr, | ||
| 1900 | uint32_t value) | ||
| 1901 | { | ||
| 1902 | int rval = QLA_SUCCESS; | ||
| 1903 | |||
| 1904 | rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); | ||
| 1905 | if (rval) | ||
| 1906 | goto exit_ipmdio_wr_reg; | ||
| 1907 | |||
| 1908 | ha->isp_ops->wr_reg_indirect(ha, addr3, value); | ||
| 1909 | ha->isp_ops->wr_reg_indirect(ha, addr1, addr); | ||
| 1910 | |||
| 1911 | rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); | ||
| 1912 | if (rval) | ||
| 1913 | goto exit_ipmdio_wr_reg; | ||
| 1914 | |||
| 1915 | exit_ipmdio_wr_reg: | ||
| 1916 | return rval; | ||
| 1917 | } | ||
| 1918 | |||
| 1717 | static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, | 1919 | static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, |
| 1718 | struct qla8xxx_minidump_entry_hdr *entry_hdr, | 1920 | struct qla8xxx_minidump_entry_hdr *entry_hdr, |
| 1719 | uint32_t **d_ptr) | 1921 | uint32_t **d_ptr) |
| @@ -1822,7 +2024,7 @@ error_exit: | |||
| 1822 | return rval; | 2024 | return rval; |
| 1823 | } | 2025 | } |
| 1824 | 2026 | ||
| 1825 | static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha, | 2027 | static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha, |
| 1826 | struct qla8xxx_minidump_entry_hdr *entry_hdr, | 2028 | struct qla8xxx_minidump_entry_hdr *entry_hdr, |
| 1827 | uint32_t **d_ptr) | 2029 | uint32_t **d_ptr) |
| 1828 | { | 2030 | { |
| @@ -1899,11 +2101,11 @@ static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha, | |||
| 1899 | dma_desc.cmd.read_data_size = size; | 2101 | dma_desc.cmd.read_data_size = size; |
| 1900 | 2102 | ||
| 1901 | /* Prepare: Write pex-dma descriptor to MS memory. */ | 2103 | /* Prepare: Write pex-dma descriptor to MS memory. */ |
| 1902 | rval = qla4_83xx_ms_mem_write_128b(ha, | 2104 | rval = qla4_8xxx_ms_mem_write_128b(ha, |
| 1903 | (uint64_t)m_hdr->desc_card_addr, | 2105 | (uint64_t)m_hdr->desc_card_addr, |
| 1904 | (uint32_t *)&dma_desc, | 2106 | (uint32_t *)&dma_desc, |
| 1905 | (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); | 2107 | (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); |
| 1906 | if (rval == -1) { | 2108 | if (rval != QLA_SUCCESS) { |
| 1907 | ql4_printk(KERN_INFO, ha, | 2109 | ql4_printk(KERN_INFO, ha, |
| 1908 | "%s: Error writing rdmem-dma-init to MS !!!\n", | 2110 | "%s: Error writing rdmem-dma-init to MS !!!\n", |
| 1909 | __func__); | 2111 | __func__); |
| @@ -2359,17 +2561,10 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, | |||
| 2359 | uint32_t *data_ptr = *d_ptr; | 2561 | uint32_t *data_ptr = *d_ptr; |
| 2360 | int rval = QLA_SUCCESS; | 2562 | int rval = QLA_SUCCESS; |
| 2361 | 2563 | ||
| 2362 | if (is_qla8032(ha) || is_qla8042(ha)) { | 2564 | rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr); |
| 2363 | rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr, | 2565 | if (rval != QLA_SUCCESS) |
| 2364 | &data_ptr); | ||
| 2365 | if (rval != QLA_SUCCESS) { | ||
| 2366 | rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, | ||
| 2367 | &data_ptr); | ||
| 2368 | } | ||
| 2369 | } else { | ||
| 2370 | rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, | 2566 | rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, |
| 2371 | &data_ptr); | 2567 | &data_ptr); |
| 2372 | } | ||
| 2373 | *d_ptr = data_ptr; | 2568 | *d_ptr = data_ptr; |
| 2374 | return rval; | 2569 | return rval; |
| 2375 | } | 2570 | } |
| @@ -2440,6 +2635,227 @@ exit_process_pollrd: | |||
| 2440 | return rval; | 2635 | return rval; |
| 2441 | } | 2636 | } |
| 2442 | 2637 | ||
| 2638 | static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha, | ||
| 2639 | struct qla8xxx_minidump_entry_hdr *entry_hdr, | ||
| 2640 | uint32_t **d_ptr) | ||
| 2641 | { | ||
| 2642 | int loop_cnt; | ||
| 2643 | uint32_t addr1, addr2, value, data, temp, wrval; | ||
| 2644 | uint8_t stride, stride2; | ||
| 2645 | uint16_t count; | ||
| 2646 | uint32_t poll, mask, data_size, modify_mask; | ||
| 2647 | uint32_t wait_count = 0; | ||
| 2648 | uint32_t *data_ptr = *d_ptr; | ||
| 2649 | struct qla8044_minidump_entry_rddfe *rddfe; | ||
| 2650 | uint32_t rval = QLA_SUCCESS; | ||
| 2651 | |||
| 2652 | rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; | ||
| 2653 | addr1 = le32_to_cpu(rddfe->addr_1); | ||
| 2654 | value = le32_to_cpu(rddfe->value); | ||
| 2655 | stride = le32_to_cpu(rddfe->stride); | ||
| 2656 | stride2 = le32_to_cpu(rddfe->stride2); | ||
| 2657 | count = le32_to_cpu(rddfe->count); | ||
| 2658 | |||
| 2659 | poll = le32_to_cpu(rddfe->poll); | ||
| 2660 | mask = le32_to_cpu(rddfe->mask); | ||
| 2661 | modify_mask = le32_to_cpu(rddfe->modify_mask); | ||
| 2662 | data_size = le32_to_cpu(rddfe->data_size); | ||
| 2663 | |||
| 2664 | addr2 = addr1 + stride; | ||
| 2665 | |||
| 2666 | for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { | ||
| 2667 | ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value)); | ||
| 2668 | |||
| 2669 | wait_count = 0; | ||
| 2670 | while (wait_count < poll) { | ||
| 2671 | ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); | ||
| 2672 | if ((temp & mask) != 0) | ||
| 2673 | break; | ||
| 2674 | wait_count++; | ||
| 2675 | } | ||
| 2676 | |||
| 2677 | if (wait_count == poll) { | ||
| 2678 | ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); | ||
| 2679 | rval = QLA_ERROR; | ||
| 2680 | goto exit_process_rddfe; | ||
| 2681 | } else { | ||
| 2682 | ha->isp_ops->rd_reg_indirect(ha, addr2, &temp); | ||
| 2683 | temp = temp & modify_mask; | ||
| 2684 | temp = (temp | ((loop_cnt << 16) | loop_cnt)); | ||
| 2685 | wrval = ((temp << 16) | temp); | ||
| 2686 | |||
| 2687 | ha->isp_ops->wr_reg_indirect(ha, addr2, wrval); | ||
| 2688 | ha->isp_ops->wr_reg_indirect(ha, addr1, value); | ||
| 2689 | |||
| 2690 | wait_count = 0; | ||
| 2691 | while (wait_count < poll) { | ||
| 2692 | ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); | ||
| 2693 | if ((temp & mask) != 0) | ||
| 2694 | break; | ||
| 2695 | wait_count++; | ||
| 2696 | } | ||
| 2697 | if (wait_count == poll) { | ||
| 2698 | ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", | ||
| 2699 | __func__); | ||
| 2700 | rval = QLA_ERROR; | ||
| 2701 | goto exit_process_rddfe; | ||
| 2702 | } | ||
| 2703 | |||
| 2704 | ha->isp_ops->wr_reg_indirect(ha, addr1, | ||
| 2705 | ((0x40000000 | value) + | ||
| 2706 | stride2)); | ||
| 2707 | wait_count = 0; | ||
| 2708 | while (wait_count < poll) { | ||
| 2709 | ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); | ||
| 2710 | if ((temp & mask) != 0) | ||
| 2711 | break; | ||
| 2712 | wait_count++; | ||
| 2713 | } | ||
| 2714 | |||
| 2715 | if (wait_count == poll) { | ||
| 2716 | ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", | ||
| 2717 | __func__); | ||
| 2718 | rval = QLA_ERROR; | ||
| 2719 | goto exit_process_rddfe; | ||
| 2720 | } | ||
| 2721 | |||
| 2722 | ha->isp_ops->rd_reg_indirect(ha, addr2, &data); | ||
| 2723 | |||
| 2724 | *data_ptr++ = cpu_to_le32(wrval); | ||
| 2725 | *data_ptr++ = cpu_to_le32(data); | ||
| 2726 | } | ||
| 2727 | } | ||
| 2728 | |||
| 2729 | *d_ptr = data_ptr; | ||
| 2730 | exit_process_rddfe: | ||
| 2731 | return rval; | ||
| 2732 | } | ||
| 2733 | |||
| 2734 | static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha, | ||
| 2735 | struct qla8xxx_minidump_entry_hdr *entry_hdr, | ||
| 2736 | uint32_t **d_ptr) | ||
| 2737 | { | ||
| 2738 | int rval = QLA_SUCCESS; | ||
| 2739 | uint32_t addr1, addr2, value1, value2, data, selval; | ||
| 2740 | uint8_t stride1, stride2; | ||
| 2741 | uint32_t addr3, addr4, addr5, addr6, addr7; | ||
| 2742 | uint16_t count, loop_cnt; | ||
| 2743 | uint32_t poll, mask; | ||
| 2744 | uint32_t *data_ptr = *d_ptr; | ||
| 2745 | struct qla8044_minidump_entry_rdmdio *rdmdio; | ||
| 2746 | |||
| 2747 | rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; | ||
| 2748 | addr1 = le32_to_cpu(rdmdio->addr_1); | ||
| 2749 | addr2 = le32_to_cpu(rdmdio->addr_2); | ||
| 2750 | value1 = le32_to_cpu(rdmdio->value_1); | ||
| 2751 | stride1 = le32_to_cpu(rdmdio->stride_1); | ||
| 2752 | stride2 = le32_to_cpu(rdmdio->stride_2); | ||
| 2753 | count = le32_to_cpu(rdmdio->count); | ||
| 2754 | |||
| 2755 | poll = le32_to_cpu(rdmdio->poll); | ||
| 2756 | mask = le32_to_cpu(rdmdio->mask); | ||
| 2757 | value2 = le32_to_cpu(rdmdio->value_2); | ||
| 2758 | |||
| 2759 | addr3 = addr1 + stride1; | ||
| 2760 | |||
| 2761 | for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { | ||
| 2762 | rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, | ||
| 2763 | addr3, mask); | ||
| 2764 | if (rval) | ||
| 2765 | goto exit_process_rdmdio; | ||
| 2766 | |||
| 2767 | addr4 = addr2 - stride1; | ||
| 2768 | rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4, | ||
| 2769 | value2); | ||
| 2770 | if (rval) | ||
| 2771 | goto exit_process_rdmdio; | ||
| 2772 | |||
| 2773 | addr5 = addr2 - (2 * stride1); | ||
| 2774 | rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5, | ||
| 2775 | value1); | ||
| 2776 | if (rval) | ||
| 2777 | goto exit_process_rdmdio; | ||
| 2778 | |||
| 2779 | addr6 = addr2 - (3 * stride1); | ||
| 2780 | rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, | ||
| 2781 | addr6, 0x2); | ||
| 2782 | if (rval) | ||
| 2783 | goto exit_process_rdmdio; | ||
| 2784 | |||
| 2785 | rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, | ||
| 2786 | addr3, mask); | ||
| 2787 | if (rval) | ||
| 2788 | goto exit_process_rdmdio; | ||
| 2789 | |||
| 2790 | addr7 = addr2 - (4 * stride1); | ||
| 2791 | rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, | ||
| 2792 | mask, addr7, &data); | ||
| 2793 | if (rval) | ||
| 2794 | goto exit_process_rdmdio; | ||
| 2795 | |||
| 2796 | selval = (value2 << 18) | (value1 << 2) | 2; | ||
| 2797 | |||
| 2798 | stride2 = le32_to_cpu(rdmdio->stride_2); | ||
| 2799 | *data_ptr++ = cpu_to_le32(selval); | ||
| 2800 | *data_ptr++ = cpu_to_le32(data); | ||
| 2801 | |||
| 2802 | value1 = value1 + stride2; | ||
| 2803 | *d_ptr = data_ptr; | ||
| 2804 | } | ||
| 2805 | |||
| 2806 | exit_process_rdmdio: | ||
| 2807 | return rval; | ||
| 2808 | } | ||
| 2809 | |||
| 2810 | static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha, | ||
| 2811 | struct qla8xxx_minidump_entry_hdr *entry_hdr, | ||
| 2812 | uint32_t **d_ptr) | ||
| 2813 | { | ||
| 2814 | uint32_t addr1, addr2, value1, value2, poll, mask, r_value; | ||
| 2815 | struct qla8044_minidump_entry_pollwr *pollwr_hdr; | ||
| 2816 | uint32_t wait_count = 0; | ||
| 2817 | uint32_t rval = QLA_SUCCESS; | ||
| 2818 | |||
| 2819 | pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; | ||
| 2820 | addr1 = le32_to_cpu(pollwr_hdr->addr_1); | ||
| 2821 | addr2 = le32_to_cpu(pollwr_hdr->addr_2); | ||
| 2822 | value1 = le32_to_cpu(pollwr_hdr->value_1); | ||
| 2823 | value2 = le32_to_cpu(pollwr_hdr->value_2); | ||
| 2824 | |||
| 2825 | poll = le32_to_cpu(pollwr_hdr->poll); | ||
| 2826 | mask = le32_to_cpu(pollwr_hdr->mask); | ||
| 2827 | |||
| 2828 | while (wait_count < poll) { | ||
| 2829 | ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); | ||
| 2830 | |||
| 2831 | if ((r_value & poll) != 0) | ||
| 2832 | break; | ||
| 2833 | |||
| 2834 | wait_count++; | ||
| 2835 | } | ||
| 2836 | |||
| 2837 | if (wait_count == poll) { | ||
| 2838 | ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); | ||
| 2839 | rval = QLA_ERROR; | ||
| 2840 | goto exit_process_pollwr; | ||
| 2841 | } | ||
| 2842 | |||
| 2843 | ha->isp_ops->wr_reg_indirect(ha, addr2, value2); | ||
| 2844 | ha->isp_ops->wr_reg_indirect(ha, addr1, value1); | ||
| 2845 | |||
| 2846 | wait_count = 0; | ||
| 2847 | while (wait_count < poll) { | ||
| 2848 | ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); | ||
| 2849 | |||
| 2850 | if ((r_value & poll) != 0) | ||
| 2851 | break; | ||
| 2852 | wait_count++; | ||
| 2853 | } | ||
| 2854 | |||
| 2855 | exit_process_pollwr: | ||
| 2856 | return rval; | ||
| 2857 | } | ||
| 2858 | |||
| 2443 | static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha, | 2859 | static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha, |
| 2444 | struct qla8xxx_minidump_entry_hdr *entry_hdr, | 2860 | struct qla8xxx_minidump_entry_hdr *entry_hdr, |
| 2445 | uint32_t **d_ptr) | 2861 | uint32_t **d_ptr) |
| @@ -2753,6 +3169,24 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) | |||
| 2753 | if (rval != QLA_SUCCESS) | 3169 | if (rval != QLA_SUCCESS) |
| 2754 | qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); | 3170 | qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); |
| 2755 | break; | 3171 | break; |
| 3172 | case QLA8044_RDDFE: | ||
| 3173 | rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr, | ||
| 3174 | &data_ptr); | ||
| 3175 | if (rval != QLA_SUCCESS) | ||
| 3176 | qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); | ||
| 3177 | break; | ||
| 3178 | case QLA8044_RDMDIO: | ||
| 3179 | rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr, | ||
| 3180 | &data_ptr); | ||
| 3181 | if (rval != QLA_SUCCESS) | ||
| 3182 | qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); | ||
| 3183 | break; | ||
| 3184 | case QLA8044_POLLWR: | ||
| 3185 | rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr, | ||
| 3186 | &data_ptr); | ||
| 3187 | if (rval != QLA_SUCCESS) | ||
| 3188 | qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); | ||
| 3189 | break; | ||
| 2756 | case QLA8XXX_RDNOP: | 3190 | case QLA8XXX_RDNOP: |
| 2757 | default: | 3191 | default: |
| 2758 | qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); | 3192 | qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); |
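The new cases follow the existing pattern in qla4_8xxx_collect_md_data(): run the type-specific handler, and mark the template entry as skipped in the dump header if it fails. A compact user-space sketch of that dispatch-or-skip shape (the handler bodies here are placeholders, not the driver's):

#include <stdio.h>

enum { ENTRY_RDDFE = 38, ENTRY_RDMDIO = 39, ENTRY_POLLWR = 40 };

static int process_rddfe(void)  { return 0; }	/* 0 == success */
static int process_rdmdio(void) { return 0; }
static int process_pollwr(void) { return -1; }	/* pretend this one fails */

static void mark_entry_skipped(int i) { printf("entry %d skipped\n", i); }

static void collect_entry(int type, int i)
{
	int rval;

	switch (type) {
	case ENTRY_RDDFE:	rval = process_rddfe();		break;
	case ENTRY_RDMDIO:	rval = process_rdmdio();	break;
	case ENTRY_POLLWR:	rval = process_pollwr();	break;
	default:		rval = -1;			break;
	}
	if (rval)
		mark_entry_skipped(i);	/* same fallback as the driver's switch */
}

int main(void)
{
	collect_entry(ENTRY_POLLWR, 2);
	return 0;
}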
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index 14500a0f62cc..337d9fcf6417 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h | |||
| @@ -858,6 +858,9 @@ struct crb_addr_pair { | |||
| 858 | #define QLA83XX_POLLRD 35 | 858 | #define QLA83XX_POLLRD 35 |
| 859 | #define QLA83XX_RDMUX2 36 | 859 | #define QLA83XX_RDMUX2 36 |
| 860 | #define QLA83XX_POLLRDMWR 37 | 860 | #define QLA83XX_POLLRDMWR 37 |
| 861 | #define QLA8044_RDDFE 38 | ||
| 862 | #define QLA8044_RDMDIO 39 | ||
| 863 | #define QLA8044_POLLWR 40 | ||
| 861 | #define QLA8XXX_RDROM 71 | 864 | #define QLA8XXX_RDROM 71 |
| 862 | #define QLA8XXX_RDMEM 72 | 865 | #define QLA8XXX_RDMEM 72 |
| 863 | #define QLA8XXX_CNTRL 98 | 866 | #define QLA8XXX_CNTRL 98 |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 459b9f7186fd..320206376206 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
| @@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo, | |||
| 83 | " Target Session Recovery Timeout.\n" | 83 | " Target Session Recovery Timeout.\n" |
| 84 | "\t\t Default: 120 sec."); | 84 | "\t\t Default: 120 sec."); |
| 85 | 85 | ||
| 86 | int ql4xmdcapmask = 0x1F; | 86 | int ql4xmdcapmask = 0; |
| 87 | module_param(ql4xmdcapmask, int, S_IRUGO); | 87 | module_param(ql4xmdcapmask, int, S_IRUGO); |
| 88 | MODULE_PARM_DESC(ql4xmdcapmask, | 88 | MODULE_PARM_DESC(ql4xmdcapmask, |
| 89 | " Set the Minidump driver capture mask level.\n" | 89 | " Set the Minidump driver capture mask level.\n" |
| 90 | "\t\t Default is 0x1F.\n" | 90 | "\t\t Default is 0 (firmware default capture mask)\n" |
| 91 | "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F"); | 91 | "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); |
| 92 | 92 | ||
| 93 | int ql4xenablemd = 1; | 93 | int ql4xenablemd = 1; |
| 94 | module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); | 94 | module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); |
| @@ -1742,6 +1742,9 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, | |||
| 1742 | struct sockaddr *dst_addr; | 1742 | struct sockaddr *dst_addr; |
| 1743 | struct scsi_qla_host *ha; | 1743 | struct scsi_qla_host *ha; |
| 1744 | 1744 | ||
| 1745 | if (!qla_ep) | ||
| 1746 | return -ENOTCONN; | ||
| 1747 | |||
| 1745 | ha = to_qla_host(qla_ep->host); | 1748 | ha = to_qla_host(qla_ep->host); |
| 1746 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, | 1749 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, |
| 1747 | ha->host_no)); | 1750 | ha->host_no)); |
| @@ -1749,9 +1752,6 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, | |||
| 1749 | switch (param) { | 1752 | switch (param) { |
| 1750 | case ISCSI_PARAM_CONN_PORT: | 1753 | case ISCSI_PARAM_CONN_PORT: |
| 1751 | case ISCSI_PARAM_CONN_ADDRESS: | 1754 | case ISCSI_PARAM_CONN_ADDRESS: |
| 1752 | if (!qla_ep) | ||
| 1753 | return -ENOTCONN; | ||
| 1754 | |||
| 1755 | dst_addr = (struct sockaddr *)&qla_ep->dst_addr; | 1755 | dst_addr = (struct sockaddr *)&qla_ep->dst_addr; |
| 1756 | if (!dst_addr) | 1756 | if (!dst_addr) |
| 1757 | return -ENOTCONN; | 1757 | return -ENOTCONN; |
| @@ -2879,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, | |||
| 2879 | struct iscsi_conn *conn; | 2879 | struct iscsi_conn *conn; |
| 2880 | struct qla_conn *qla_conn; | 2880 | struct qla_conn *qla_conn; |
| 2881 | struct sockaddr *dst_addr; | 2881 | struct sockaddr *dst_addr; |
| 2882 | int len = 0; | ||
| 2883 | 2882 | ||
| 2884 | conn = cls_conn->dd_data; | 2883 | conn = cls_conn->dd_data; |
| 2885 | qla_conn = conn->dd_data; | 2884 | qla_conn = conn->dd_data; |
| @@ -2893,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, | |||
| 2893 | default: | 2892 | default: |
| 2894 | return iscsi_conn_get_param(cls_conn, param, buf); | 2893 | return iscsi_conn_get_param(cls_conn, param, buf); |
| 2895 | } | 2894 | } |
| 2896 | |||
| 2897 | return len; | ||
| 2898 | |||
| 2899 | } | 2895 | } |
| 2900 | 2896 | ||
| 2901 | int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) | 2897 | int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) |
| @@ -3569,14 +3565,13 @@ static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, | |||
| 3569 | if (test_bit(OPT_IPV6_DEVICE, &options)) { | 3565 | if (test_bit(OPT_IPV6_DEVICE, &options)) { |
| 3570 | conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; | 3566 | conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; |
| 3571 | 3567 | ||
| 3572 | conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); | 3568 | conn->link_local_ipv6_addr = kmemdup( |
| 3569 | fw_ddb_entry->link_local_ipv6_addr, | ||
| 3570 | IPv6_ADDR_LEN, GFP_KERNEL); | ||
| 3573 | if (!conn->link_local_ipv6_addr) { | 3571 | if (!conn->link_local_ipv6_addr) { |
| 3574 | rc = -ENOMEM; | 3572 | rc = -ENOMEM; |
| 3575 | goto exit_copy; | 3573 | goto exit_copy; |
| 3576 | } | 3574 | } |
| 3577 | |||
| 3578 | memcpy(conn->link_local_ipv6_addr, | ||
| 3579 | fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN); | ||
| 3580 | } else { | 3575 | } else { |
| 3581 | conn->ipv4_tos = fw_ddb_entry->ipv4_tos; | 3576 | conn->ipv4_tos = fw_ddb_entry->ipv4_tos; |
| 3582 | } | 3577 | } |
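The hunk above folds a kzalloc()+memcpy() pair into a single kmemdup() call; behaviour is unchanged because the zeroed buffer was immediately overwritten in full anyway. A user-space sketch of the equivalent helper, where xdup() is a hypothetical stand-in for kmemdup():

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Allocate-and-copy in one step, as kmemdup() does in the kernel. */
static void *xdup(const void *src, size_t len)
{
	void *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;		/* NULL on allocation failure, caller checks */
}

int main(void)
{
	const unsigned char addr[16] = { 0xfe, 0x80 };
	unsigned char *copy = xdup(addr, sizeof(addr));

	if (!copy)
		return 1;	/* mirrors the driver's -ENOMEM path */
	printf("%02x%02x...\n", copy[0], copy[1]);
	free(copy);
	return 0;
}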
| @@ -4565,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) | |||
| 4565 | test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || | 4560 | test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || |
| 4566 | test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || | 4561 | test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || |
| 4567 | test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || | 4562 | test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || |
| 4563 | test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || | ||
| 4568 | test_bit(DPC_AEN, &ha->dpc_flags)) { | 4564 | test_bit(DPC_AEN, &ha->dpc_flags)) { |
| 4569 | DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" | 4565 | DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" |
| 4570 | " - dpc flags = 0x%lx\n", | 4566 | " - dpc flags = 0x%lx\n", |
| @@ -4862,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) | |||
| 4862 | ha->host_no, __func__)); | 4858 | ha->host_no, __func__)); |
| 4863 | status = ha->isp_ops->reset_firmware(ha); | 4859 | status = ha->isp_ops->reset_firmware(ha); |
| 4864 | if (status == QLA_SUCCESS) { | 4860 | if (status == QLA_SUCCESS) { |
| 4865 | if (!test_bit(AF_FW_RECOVERY, &ha->flags)) | ||
| 4866 | qla4xxx_cmd_wait(ha); | ||
| 4867 | |||
| 4868 | ha->isp_ops->disable_intrs(ha); | 4861 | ha->isp_ops->disable_intrs(ha); |
| 4869 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | 4862 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); |
| 4870 | qla4xxx_abort_active_cmds(ha, DID_RESET << 16); | 4863 | qla4xxx_abort_active_cmds(ha, DID_RESET << 16); |
| @@ -5432,6 +5425,11 @@ dpc_post_reset_ha: | |||
| 5432 | qla4xxx_relogin_all_devices(ha); | 5425 | qla4xxx_relogin_all_devices(ha); |
| 5433 | } | 5426 | } |
| 5434 | } | 5427 | } |
| 5428 | if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { | ||
| 5429 | if (qla4xxx_sysfs_ddb_export(ha)) | ||
| 5430 | ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", | ||
| 5431 | __func__); | ||
| 5432 | } | ||
| 5435 | } | 5433 | } |
| 5436 | 5434 | ||
| 5437 | /** | 5435 | /** |
| @@ -8409,7 +8407,7 @@ exit_ddb_del: | |||
| 8409 | * | 8407 | * |
| 8410 | * Export the firmware DDB for all send targets and normal targets to sysfs. | 8408 | * Export the firmware DDB for all send targets and normal targets to sysfs. |
| 8411 | **/ | 8409 | **/ |
| 8412 | static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) | 8410 | int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) |
| 8413 | { | 8411 | { |
| 8414 | struct dev_db_entry *fw_ddb_entry = NULL; | 8412 | struct dev_db_entry *fw_ddb_entry = NULL; |
| 8415 | dma_addr_t fw_ddb_entry_dma; | 8413 | dma_addr_t fw_ddb_entry_dma; |
| @@ -8847,11 +8845,8 @@ skip_retry_init: | |||
| 8847 | ql4_printk(KERN_ERR, ha, | 8845 | ql4_printk(KERN_ERR, ha, |
| 8848 | "%s: No iSCSI boot target configured\n", __func__); | 8846 | "%s: No iSCSI boot target configured\n", __func__); |
| 8849 | 8847 | ||
| 8850 | if (qla4xxx_sysfs_ddb_export(ha)) | 8848 | set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); |
| 8851 | ql4_printk(KERN_ERR, ha, | 8849 | /* Perform the build ddb list and login to each */ |
| 8852 | "%s: Error exporting ddb to sysfs\n", __func__); | ||
| 8853 | |||
| 8854 | /* Perform the build ddb list and login to each */ | ||
| 8855 | qla4xxx_build_ddb_list(ha, INIT_ADAPTER); | 8850 | qla4xxx_build_ddb_list(ha, INIT_ADAPTER); |
| 8856 | iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); | 8851 | iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); |
| 8857 | qla4xxx_wait_login_resp_boot_tgt(ha); | 8852 | qla4xxx_wait_login_resp_boot_tgt(ha); |
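Taken together, the ql4_os.c hunks above move the sysfs DDB export out of the probe path: probe now only sets DPC_SYSFS_DDB_EXPORT (and the timer schedules the DPC while it is set), and the DPC routine test-and-clears the bit before doing the actual export. A compressed user-space sketch of that set-bit/deferred-work shape, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_SYSFS_DDB_EXPORT	(1UL << 0)

static unsigned long dpc_flags;

static bool test_and_clear(unsigned long *flags, unsigned long bit)
{
	bool set = *flags & bit;

	*flags &= ~bit;
	return set;
}

static void probe_path(void)
{
	dpc_flags |= FLAG_SYSFS_DDB_EXPORT;	/* cheap: just request the work */
}

static void dpc_worker(void)
{
	if (test_and_clear(&dpc_flags, FLAG_SYSFS_DDB_EXPORT))
		printf("exporting DDBs to sysfs\n");	/* slow work runs here */
}

int main(void)
{
	probe_path();
	dpc_worker();
	return 0;
}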
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index c6ba0a6b8458..f11eaa773339 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h | |||
| @@ -5,4 +5,4 @@ | |||
| 5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #define QLA4XXX_DRIVER_VERSION "5.04.00-k4" | 8 | #define QLA4XXX_DRIVER_VERSION "5.04.00-k6" |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index f3e9cc038d1d..1328a2621070 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
| @@ -130,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 130 | #define SCSI_DEBUG_OPT_DIF_ERR 32 | 130 | #define SCSI_DEBUG_OPT_DIF_ERR 32 |
| 131 | #define SCSI_DEBUG_OPT_DIX_ERR 64 | 131 | #define SCSI_DEBUG_OPT_DIX_ERR 64 |
| 132 | #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 | 132 | #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 |
| 133 | #define SCSI_DEBUG_OPT_SHORT_TRANSFER 256 | ||
| 133 | /* When "every_nth" > 0 then modulo "every_nth" commands: | 134 | /* When "every_nth" > 0 then modulo "every_nth" commands: |
| 134 | * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set | 135 | * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set |
| 135 | * - a RECOVERED_ERROR is simulated on successful read and write | 136 | * - a RECOVERED_ERROR is simulated on successful read and write |
| @@ -3583,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3583 | int inj_transport = 0; | 3584 | int inj_transport = 0; |
| 3584 | int inj_dif = 0; | 3585 | int inj_dif = 0; |
| 3585 | int inj_dix = 0; | 3586 | int inj_dix = 0; |
| 3587 | int inj_short = 0; | ||
| 3586 | int delay_override = 0; | 3588 | int delay_override = 0; |
| 3587 | int unmap = 0; | 3589 | int unmap = 0; |
| 3588 | 3590 | ||
| @@ -3628,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3628 | inj_dif = 1; /* to reads and writes below */ | 3630 | inj_dif = 1; /* to reads and writes below */ |
| 3629 | else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts) | 3631 | else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts) |
| 3630 | inj_dix = 1; /* to reads and writes below */ | 3632 | inj_dix = 1; /* to reads and writes below */ |
| 3633 | else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts) | ||
| 3634 | inj_short = 1; | ||
| 3631 | } | 3635 | } |
| 3632 | 3636 | ||
| 3633 | if (devip->wlun) { | 3637 | if (devip->wlun) { |
| @@ -3744,6 +3748,10 @@ read: | |||
| 3744 | if (scsi_debug_fake_rw) | 3748 | if (scsi_debug_fake_rw) |
| 3745 | break; | 3749 | break; |
| 3746 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); | 3750 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); |
| 3751 | |||
| 3752 | if (inj_short) | ||
| 3753 | num /= 2; | ||
| 3754 | |||
| 3747 | errsts = resp_read(SCpnt, lba, num, devip, ei_lba); | 3755 | errsts = resp_read(SCpnt, lba, num, devip, ei_lba); |
| 3748 | if (inj_recovered && (0 == errsts)) { | 3756 | if (inj_recovered && (0 == errsts)) { |
| 3749 | mk_sense_buffer(devip, RECOVERED_ERROR, | 3757 | mk_sense_buffer(devip, RECOVERED_ERROR, |
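With the new SCSI_DEBUG_OPT_SHORT_TRANSFER bit (256, i.e. 0x100) set in scsi_debug_opts, affected reads report only half the requested blocks, so the mid-layer observes a residual. A quick sketch of the arithmetic, assuming 512-byte sectors purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int requested = 8;		/* blocks asked for */
	unsigned int num = requested / 2;	/* inj_short halves the count */
	unsigned int resid = (requested - num) * 512;

	printf("transferred %u blocks, resid %u bytes\n", num, resid);
	return 0;
}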
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index f17aa7aa7879..47a1ffc4c904 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
| @@ -1029,6 +1029,7 @@ retry: | |||
| 1029 | rtn = NEEDS_RETRY; | 1029 | rtn = NEEDS_RETRY; |
| 1030 | } else { | 1030 | } else { |
| 1031 | timeleft = wait_for_completion_timeout(&done, timeout); | 1031 | timeleft = wait_for_completion_timeout(&done, timeout); |
| 1032 | rtn = SUCCESS; | ||
| 1032 | } | 1033 | } |
| 1033 | 1034 | ||
| 1034 | shost->eh_action = NULL; | 1035 | shost->eh_action = NULL; |
| @@ -2306,6 +2307,12 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
| 2306 | } | 2307 | } |
| 2307 | 2308 | ||
| 2308 | scmd = scsi_get_command(dev, GFP_KERNEL); | 2309 | scmd = scsi_get_command(dev, GFP_KERNEL); |
| 2310 | if (!scmd) { | ||
| 2311 | rtn = FAILED; | ||
| 2312 | put_device(&dev->sdev_gendev); | ||
| 2313 | goto out_put_autopm_host; | ||
| 2314 | } | ||
| 2315 | |||
| 2309 | blk_rq_init(NULL, &req); | 2316 | blk_rq_init(NULL, &req); |
| 2310 | scmd->request = &req; | 2317 | scmd->request = &req; |
| 2311 | 2318 | ||
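The scsi_reset_provider() fix above guards against scsi_get_command() returning NULL: mark the reset FAILED, drop the device reference taken just before, and leave through the existing unwind label instead of dereferencing the missing command. A small user-space sketch of that shape (names are illustrative, not the midlayer's):

#include <stdio.h>

struct cmd { int id; };

static struct cmd *get_command(void)	{ return NULL; }	/* simulate failure */
static void put_device(void)		{ puts("ref dropped"); }

static int reset_provider(void)
{
	int rtn = 0;
	struct cmd *scmd = get_command();

	if (!scmd) {
		rtn = -1;		/* FAILED */
		put_device();		/* undo the earlier reference */
		goto out;
	}
	/* ... build the request and fire the reset using scmd ... */
out:
	return rtn;
}

int main(void)
{
	return reset_provider() ? 1 : 0;
}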
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a0c95cac91f0..be0d5fad999d 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost) | |||
| 512 | scsi_run_queue(sdev->request_queue); | 512 | scsi_run_queue(sdev->request_queue); |
| 513 | } | 513 | } |
| 514 | 514 | ||
| 515 | static void __scsi_release_buffers(struct scsi_cmnd *, int); | ||
| 516 | |||
| 517 | /* | ||
| 518 | * Function: scsi_end_request() | ||
| 519 | * | ||
| 520 | * Purpose: Post-processing of completed commands (usually invoked at end | ||
| 521 | * of upper level post-processing and scsi_io_completion). | ||
| 522 | * | ||
| 523 | * Arguments: cmd - command that is complete. | ||
| 524 | * error - 0 if I/O indicates success, < 0 for I/O error. | ||
| 525 | * bytes - number of bytes of completed I/O | ||
| 526 | * requeue - indicates whether we should requeue leftovers. | ||
| 527 | * | ||
| 528 | * Lock status: Assumed that lock is not held upon entry. | ||
| 529 | * | ||
| 530 | * Returns: cmd if requeue required, NULL otherwise. | ||
| 531 | * | ||
| 532 | * Notes: This is called for block device requests in order to | ||
| 533 | * mark some number of sectors as complete. | ||
| 534 | * | ||
| 535 | * We are guaranteeing that the request queue will be goosed | ||
| 536 | * at some point during this call. | ||
| 537 | * Notes: If cmd was requeued, upon return it will be a stale pointer. | ||
| 538 | */ | ||
| 539 | static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, | ||
| 540 | int bytes, int requeue) | ||
| 541 | { | ||
| 542 | struct request_queue *q = cmd->device->request_queue; | ||
| 543 | struct request *req = cmd->request; | ||
| 544 | |||
| 545 | /* | ||
| 546 | * If there are blocks left over at the end, set up the command | ||
| 547 | * to queue the remainder of them. | ||
| 548 | */ | ||
| 549 | if (blk_end_request(req, error, bytes)) { | ||
| 550 | /* kill remainder if no retrys */ | ||
| 551 | if (error && scsi_noretry_cmd(cmd)) | ||
| 552 | blk_end_request_all(req, error); | ||
| 553 | else { | ||
| 554 | if (requeue) { | ||
| 555 | /* | ||
| 556 | * Bleah. Leftovers again. Stick the | ||
| 557 | * leftovers in the front of the | ||
| 558 | * queue, and goose the queue again. | ||
| 559 | */ | ||
| 560 | scsi_release_buffers(cmd); | ||
| 561 | scsi_requeue_command(q, cmd); | ||
| 562 | cmd = NULL; | ||
| 563 | } | ||
| 564 | return cmd; | ||
| 565 | } | ||
| 566 | } | ||
| 567 | |||
| 568 | /* | ||
| 569 | * This will goose the queue request function at the end, so we don't | ||
| 570 | * need to worry about launching another command. | ||
| 571 | */ | ||
| 572 | __scsi_release_buffers(cmd, 0); | ||
| 573 | scsi_next_command(cmd); | ||
| 574 | return NULL; | ||
| 575 | } | ||
| 576 | |||
| 577 | static inline unsigned int scsi_sgtable_index(unsigned short nents) | 515 | static inline unsigned int scsi_sgtable_index(unsigned short nents) |
| 578 | { | 516 | { |
| 579 | unsigned int index; | 517 | unsigned int index; |
| @@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb) | |||
| 625 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); | 563 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); |
| 626 | } | 564 | } |
| 627 | 565 | ||
| 628 | static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) | ||
| 629 | { | ||
| 630 | |||
| 631 | if (cmd->sdb.table.nents) | ||
| 632 | scsi_free_sgtable(&cmd->sdb); | ||
| 633 | |||
| 634 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); | ||
| 635 | |||
| 636 | if (do_bidi_check && scsi_bidi_cmnd(cmd)) { | ||
| 637 | struct scsi_data_buffer *bidi_sdb = | ||
| 638 | cmd->request->next_rq->special; | ||
| 639 | scsi_free_sgtable(bidi_sdb); | ||
| 640 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); | ||
| 641 | cmd->request->next_rq->special = NULL; | ||
| 642 | } | ||
| 643 | |||
| 644 | if (scsi_prot_sg_count(cmd)) | ||
| 645 | scsi_free_sgtable(cmd->prot_sdb); | ||
| 646 | } | ||
| 647 | |||
| 648 | /* | 566 | /* |
| 649 | * Function: scsi_release_buffers() | 567 | * Function: scsi_release_buffers() |
| 650 | * | 568 | * |
| 651 | * Purpose: Completion processing for block device I/O requests. | 569 | * Purpose: Free resources allocated for a scsi_command. |
| 652 | * | 570 | * |
| 653 | * Arguments: cmd - command that we are bailing. | 571 | * Arguments: cmd - command that we are bailing. |
| 654 | * | 572 | * |
| @@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) | |||
| 659 | * Notes: In the event that an upper level driver rejects a | 577 | * Notes: In the event that an upper level driver rejects a |
| 660 | * command, we must release resources allocated during | 578 | * command, we must release resources allocated during |
| 661 | * the __init_io() function. Primarily this would involve | 579 | * the __init_io() function. Primarily this would involve |
| 662 | * the scatter-gather table, and potentially any bounce | 580 | * the scatter-gather table. |
| 663 | * buffers. | ||
| 664 | */ | 581 | */ |
| 665 | void scsi_release_buffers(struct scsi_cmnd *cmd) | 582 | void scsi_release_buffers(struct scsi_cmnd *cmd) |
| 666 | { | 583 | { |
| 667 | __scsi_release_buffers(cmd, 1); | 584 | if (cmd->sdb.table.nents) |
| 585 | scsi_free_sgtable(&cmd->sdb); | ||
| 586 | |||
| 587 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); | ||
| 588 | |||
| 589 | if (scsi_prot_sg_count(cmd)) | ||
| 590 | scsi_free_sgtable(cmd->prot_sdb); | ||
| 668 | } | 591 | } |
| 669 | EXPORT_SYMBOL(scsi_release_buffers); | 592 | EXPORT_SYMBOL(scsi_release_buffers); |
| 670 | 593 | ||
| 594 | static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) | ||
| 595 | { | ||
| 596 | struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; | ||
| 597 | |||
| 598 | scsi_free_sgtable(bidi_sdb); | ||
| 599 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); | ||
| 600 | cmd->request->next_rq->special = NULL; | ||
| 601 | } | ||
| 602 | |||
| 671 | /** | 603 | /** |
| 672 | * __scsi_error_from_host_byte - translate SCSI error code into errno | 604 | * __scsi_error_from_host_byte - translate SCSI error code into errno |
| 673 | * @cmd: SCSI command (unused) | 605 | * @cmd: SCSI command (unused) |
| @@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) | |||
| 725 | * | 657 | * |
| 726 | * Returns: Nothing | 658 | * Returns: Nothing |
| 727 | * | 659 | * |
| 728 | * Notes: This function is matched in terms of capabilities to | 660 | * Notes: We will finish off the specified number of sectors. If we |
| 729 | * the function that created the scatter-gather list. | 661 | * are done, the command block will be released and the queue |
| 730 | * In other words, if there are no bounce buffers | 662 | * function will be goosed. If we are not done then we have to |
| 731 | * (the normal case for most drivers), we don't need | ||
| 732 | * the logic to deal with cleaning up afterwards. | ||
| 733 | * | ||
| 734 | * We must call scsi_end_request(). This will finish off | ||
| 735 | * the specified number of sectors. If we are done, the | ||
| 736 | * command block will be released and the queue function | ||
| 737 | * will be goosed. If we are not done then we have to | ||
| 738 | * figure out what to do next: | 663 | * figure out what to do next: |
| 739 | * | 664 | * |
| 740 | * a) We can call scsi_requeue_command(). The request | 665 | * a) We can call scsi_requeue_command(). The request |
| @@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) | |||
| 743 | * be used if we made forward progress, or if we want | 668 | * be used if we made forward progress, or if we want |
| 744 | * to switch from READ(10) to READ(6) for example. | 669 | * to switch from READ(10) to READ(6) for example. |
| 745 | * | 670 | * |
| 746 | * b) We can call scsi_queue_insert(). The request will | 671 | * b) We can call __scsi_queue_insert(). The request will |
| 747 | * be put back on the queue and retried using the same | 672 | * be put back on the queue and retried using the same |
| 748 | * command as before, possibly after a delay. | 673 | * command as before, possibly after a delay. |
| 749 | * | 674 | * |
| @@ -801,6 +726,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 801 | req->next_rq->resid_len = scsi_in(cmd)->resid; | 726 | req->next_rq->resid_len = scsi_in(cmd)->resid; |
| 802 | 727 | ||
| 803 | scsi_release_buffers(cmd); | 728 | scsi_release_buffers(cmd); |
| 729 | scsi_release_bidi_buffers(cmd); | ||
| 730 | |||
| 804 | blk_end_request_all(req, 0); | 731 | blk_end_request_all(req, 0); |
| 805 | 732 | ||
| 806 | scsi_next_command(cmd); | 733 | scsi_next_command(cmd); |
| @@ -840,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 840 | } | 767 | } |
| 841 | 768 | ||
| 842 | /* | 769 | /* |
| 843 | * A number of bytes were successfully read. If there | 770 | * If we finished all bytes in the request we are done now. |
| 844 | * are leftovers and there is some kind of error | ||
| 845 | * (result != 0), retry the rest. | ||
| 846 | */ | 771 | */ |
| 847 | if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) | 772 | if (!blk_end_request(req, error, good_bytes)) |
| 848 | return; | 773 | goto next_command; |
| 774 | |||
| 775 | /* | ||
| 776 | * Kill remainder if no retries. | ||
| 777 | */ | ||
| 778 | if (error && scsi_noretry_cmd(cmd)) { | ||
| 779 | blk_end_request_all(req, error); | ||
| 780 | goto next_command; | ||
| 781 | } | ||
| 782 | |||
| 783 | /* | ||
| 784 | * If there had been no error, but we have leftover bytes in the | ||
| 785 | * request, just queue the command up again. | ||
| 786 | */ | ||
| 787 | if (result == 0) | ||
| 788 | goto requeue; | ||
| 849 | 789 | ||
| 850 | error = __scsi_error_from_host_byte(cmd, result); | 790 | error = __scsi_error_from_host_byte(cmd, result); |
| 851 | 791 | ||
| @@ -973,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 973 | switch (action) { | 913 | switch (action) { |
| 974 | case ACTION_FAIL: | 914 | case ACTION_FAIL: |
| 975 | /* Give up and fail the remainder of the request */ | 915 | /* Give up and fail the remainder of the request */ |
| 976 | scsi_release_buffers(cmd); | ||
| 977 | if (!(req->cmd_flags & REQ_QUIET)) { | 916 | if (!(req->cmd_flags & REQ_QUIET)) { |
| 978 | if (description) | 917 | if (description) |
| 979 | scmd_printk(KERN_INFO, cmd, "%s\n", | 918 | scmd_printk(KERN_INFO, cmd, "%s\n", |
| @@ -983,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 983 | scsi_print_sense("", cmd); | 922 | scsi_print_sense("", cmd); |
| 984 | scsi_print_command(cmd); | 923 | scsi_print_command(cmd); |
| 985 | } | 924 | } |
| 986 | if (blk_end_request_err(req, error)) | 925 | if (!blk_end_request_err(req, error)) |
| 987 | scsi_requeue_command(q, cmd); | 926 | goto next_command; |
| 988 | else | 927 | /*FALLTHRU*/ |
| 989 | scsi_next_command(cmd); | ||
| 990 | break; | ||
| 991 | case ACTION_REPREP: | 928 | case ACTION_REPREP: |
| 929 | requeue: | ||
| 992 | /* Unprep the request and put it back at the head of the queue. | 930 | /* Unprep the request and put it back at the head of the queue. |
| 993 | * A new command will be prepared and issued. | 931 | * A new command will be prepared and issued. |
| 994 | */ | 932 | */ |
| @@ -1004,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 1004 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); | 942 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); |
| 1005 | break; | 943 | break; |
| 1006 | } | 944 | } |
| 945 | return; | ||
| 946 | |||
| 947 | next_command: | ||
| 948 | scsi_release_buffers(cmd); | ||
| 949 | scsi_next_command(cmd); | ||
| 1007 | } | 950 | } |
| 1008 | 951 | ||
| 1009 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, | 952 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, |
| @@ -1128,15 +1071,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, | |||
| 1128 | 1071 | ||
| 1129 | int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | 1072 | int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) |
| 1130 | { | 1073 | { |
| 1131 | struct scsi_cmnd *cmd; | 1074 | struct scsi_cmnd *cmd = req->special; |
| 1132 | int ret = scsi_prep_state_check(sdev, req); | ||
| 1133 | |||
| 1134 | if (ret != BLKPREP_OK) | ||
| 1135 | return ret; | ||
| 1136 | |||
| 1137 | cmd = scsi_get_cmd_from_req(sdev, req); | ||
| 1138 | if (unlikely(!cmd)) | ||
| 1139 | return BLKPREP_DEFER; | ||
| 1140 | 1075 | ||
| 1141 | /* | 1076 | /* |
| 1142 | * BLOCK_PC requests may transfer data, in which case they must | 1077 | * BLOCK_PC requests may transfer data, in which case they must |
| @@ -1179,15 +1114,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); | |||
| 1179 | */ | 1114 | */ |
| 1180 | int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) | 1115 | int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) |
| 1181 | { | 1116 | { |
| 1182 | struct scsi_cmnd *cmd; | 1117 | struct scsi_cmnd *cmd = req->special; |
| 1183 | int ret = scsi_prep_state_check(sdev, req); | ||
| 1184 | |||
| 1185 | if (ret != BLKPREP_OK) | ||
| 1186 | return ret; | ||
| 1187 | 1118 | ||
| 1188 | if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh | 1119 | if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh |
| 1189 | && sdev->scsi_dh_data->scsi_dh->prep_fn)) { | 1120 | && sdev->scsi_dh_data->scsi_dh->prep_fn)) { |
| 1190 | ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); | 1121 | int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); |
| 1191 | if (ret != BLKPREP_OK) | 1122 | if (ret != BLKPREP_OK) |
| 1192 | return ret; | 1123 | return ret; |
| 1193 | } | 1124 | } |
| @@ -1197,16 +1128,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) | |||
| 1197 | */ | 1128 | */ |
| 1198 | BUG_ON(!req->nr_phys_segments); | 1129 | BUG_ON(!req->nr_phys_segments); |
| 1199 | 1130 | ||
| 1200 | cmd = scsi_get_cmd_from_req(sdev, req); | ||
| 1201 | if (unlikely(!cmd)) | ||
| 1202 | return BLKPREP_DEFER; | ||
| 1203 | |||
| 1204 | memset(cmd->cmnd, 0, BLK_MAX_CDB); | 1131 | memset(cmd->cmnd, 0, BLK_MAX_CDB); |
| 1205 | return scsi_init_io(cmd, GFP_ATOMIC); | 1132 | return scsi_init_io(cmd, GFP_ATOMIC); |
| 1206 | } | 1133 | } |
| 1207 | EXPORT_SYMBOL(scsi_setup_fs_cmnd); | 1134 | EXPORT_SYMBOL(scsi_setup_fs_cmnd); |
| 1208 | 1135 | ||
| 1209 | int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | 1136 | static int |
| 1137 | scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | ||
| 1210 | { | 1138 | { |
| 1211 | int ret = BLKPREP_OK; | 1139 | int ret = BLKPREP_OK; |
| 1212 | 1140 | ||
| @@ -1258,9 +1186,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | |||
| 1258 | } | 1186 | } |
| 1259 | return ret; | 1187 | return ret; |
| 1260 | } | 1188 | } |
| 1261 | EXPORT_SYMBOL(scsi_prep_state_check); | ||
| 1262 | 1189 | ||
| 1263 | int scsi_prep_return(struct request_queue *q, struct request *req, int ret) | 1190 | static int |
| 1191 | scsi_prep_return(struct request_queue *q, struct request *req, int ret) | ||
| 1264 | { | 1192 | { |
| 1265 | struct scsi_device *sdev = q->queuedata; | 1193 | struct scsi_device *sdev = q->queuedata; |
| 1266 | 1194 | ||
| @@ -1291,18 +1219,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) | |||
| 1291 | 1219 | ||
| 1292 | return ret; | 1220 | return ret; |
| 1293 | } | 1221 | } |
| 1294 | EXPORT_SYMBOL(scsi_prep_return); | ||
| 1295 | 1222 | ||
| 1296 | int scsi_prep_fn(struct request_queue *q, struct request *req) | 1223 | static int scsi_prep_fn(struct request_queue *q, struct request *req) |
| 1297 | { | 1224 | { |
| 1298 | struct scsi_device *sdev = q->queuedata; | 1225 | struct scsi_device *sdev = q->queuedata; |
| 1299 | int ret = BLKPREP_KILL; | 1226 | struct scsi_cmnd *cmd; |
| 1227 | int ret; | ||
| 1300 | 1228 | ||
| 1301 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) | 1229 | ret = scsi_prep_state_check(sdev, req); |
| 1230 | if (ret != BLKPREP_OK) | ||
| 1231 | goto out; | ||
| 1232 | |||
| 1233 | cmd = scsi_get_cmd_from_req(sdev, req); | ||
| 1234 | if (unlikely(!cmd)) { | ||
| 1235 | ret = BLKPREP_DEFER; | ||
| 1236 | goto out; | ||
| 1237 | } | ||
| 1238 | |||
| 1239 | if (req->cmd_type == REQ_TYPE_FS) | ||
| 1240 | ret = scsi_cmd_to_driver(cmd)->init_command(cmd); | ||
| 1241 | else if (req->cmd_type == REQ_TYPE_BLOCK_PC) | ||
| 1302 | ret = scsi_setup_blk_pc_cmnd(sdev, req); | 1242 | ret = scsi_setup_blk_pc_cmnd(sdev, req); |
| 1243 | else | ||
| 1244 | ret = BLKPREP_KILL; | ||
| 1245 | |||
| 1246 | out: | ||
| 1303 | return scsi_prep_return(q, req, ret); | 1247 | return scsi_prep_return(q, req, ret); |
| 1304 | } | 1248 | } |
| 1305 | EXPORT_SYMBOL(scsi_prep_fn); | 1249 | |
| 1250 | static void scsi_unprep_fn(struct request_queue *q, struct request *req) | ||
| 1251 | { | ||
| 1252 | if (req->cmd_type == REQ_TYPE_FS) { | ||
| 1253 | struct scsi_cmnd *cmd = req->special; | ||
| 1254 | struct scsi_driver *drv = scsi_cmd_to_driver(cmd); | ||
| 1255 | |||
| 1256 | if (drv->uninit_command) | ||
| 1257 | drv->uninit_command(cmd); | ||
| 1258 | } | ||
| 1259 | } | ||
| 1306 | 1260 | ||
| 1307 | /* | 1261 | /* |
| 1308 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else | 1262 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else |
| @@ -1723,6 +1677,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |||
| 1723 | return NULL; | 1677 | return NULL; |
| 1724 | 1678 | ||
| 1725 | blk_queue_prep_rq(q, scsi_prep_fn); | 1679 | blk_queue_prep_rq(q, scsi_prep_fn); |
| 1680 | blk_queue_unprep_rq(q, scsi_unprep_fn); | ||
| 1726 | blk_queue_softirq_done(q, scsi_softirq_done); | 1681 | blk_queue_softirq_done(q, scsi_softirq_done); |
| 1727 | blk_queue_rq_timed_out(q, scsi_times_out); | 1682 | blk_queue_rq_timed_out(q, scsi_times_out); |
| 1728 | blk_queue_lld_busy(q, scsi_lld_busy); | 1683 | blk_queue_lld_busy(q, scsi_lld_busy); |
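The net effect of the scsi_lib.c changes above is that scsi_prep_fn() becomes the single prep hook the block layer sees: it performs the state check, allocates the command, then dispatches REQ_TYPE_FS requests to the owning driver's ->init_command() and BLOCK_PC requests to scsi_setup_blk_pc_cmnd(), while scsi_unprep_fn() gives the driver a matching ->uninit_command() callback. A skeletal sketch of that dispatch, with types and return codes simplified for illustration:

#include <stdio.h>

enum req_type { REQ_FS, REQ_BLOCK_PC, REQ_OTHER };
enum prep_ret { PREP_OK, PREP_DEFER, PREP_KILL };

struct driver {
	enum prep_ret (*init_command)(void);
	void (*uninit_command)(void);
};

static enum prep_ret sd_init(void)	{ return PREP_OK; }
static void sd_uninit(void)		{ puts("sd uninit"); }

static struct driver sd_driver = { sd_init, sd_uninit };

static enum prep_ret prep_fn(enum req_type type, struct driver *drv)
{
	/* state check and command allocation would happen first */
	if (type == REQ_FS)
		return drv->init_command();	/* upper driver builds the CDB */
	if (type == REQ_BLOCK_PC)
		return PREP_OK;			/* passthrough setup path */
	return PREP_KILL;
}

static void unprep_fn(enum req_type type, struct driver *drv)
{
	if (type == REQ_FS && drv->uninit_command)
		drv->uninit_command();
}

int main(void)
{
	if (prep_fn(REQ_FS, &sd_driver) == PREP_OK)
		unprep_fn(REQ_FS, &sd_driver);
	return 0;
}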
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 96af195224f2..e9689d57ccb6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -109,6 +109,8 @@ static int sd_suspend_system(struct device *); | |||
| 109 | static int sd_suspend_runtime(struct device *); | 109 | static int sd_suspend_runtime(struct device *); |
| 110 | static int sd_resume(struct device *); | 110 | static int sd_resume(struct device *); |
| 111 | static void sd_rescan(struct device *); | 111 | static void sd_rescan(struct device *); |
| 112 | static int sd_init_command(struct scsi_cmnd *SCpnt); | ||
| 113 | static void sd_uninit_command(struct scsi_cmnd *SCpnt); | ||
| 112 | static int sd_done(struct scsi_cmnd *); | 114 | static int sd_done(struct scsi_cmnd *); |
| 113 | static int sd_eh_action(struct scsi_cmnd *, int); | 115 | static int sd_eh_action(struct scsi_cmnd *, int); |
| 114 | static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); | 116 | static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); |
| @@ -503,6 +505,8 @@ static struct scsi_driver sd_template = { | |||
| 503 | .pm = &sd_pm_ops, | 505 | .pm = &sd_pm_ops, |
| 504 | }, | 506 | }, |
| 505 | .rescan = sd_rescan, | 507 | .rescan = sd_rescan, |
| 508 | .init_command = sd_init_command, | ||
| 509 | .uninit_command = sd_uninit_command, | ||
| 506 | .done = sd_done, | 510 | .done = sd_done, |
| 507 | .eh_action = sd_eh_action, | 511 | .eh_action = sd_eh_action, |
| 508 | }; | 512 | }; |
| @@ -836,9 +840,9 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) | |||
| 836 | return scsi_setup_blk_pc_cmnd(sdp, rq); | 840 | return scsi_setup_blk_pc_cmnd(sdp, rq); |
| 837 | } | 841 | } |
| 838 | 842 | ||
| 839 | static void sd_unprep_fn(struct request_queue *q, struct request *rq) | 843 | static void sd_uninit_command(struct scsi_cmnd *SCpnt) |
| 840 | { | 844 | { |
| 841 | struct scsi_cmnd *SCpnt = rq->special; | 845 | struct request *rq = SCpnt->request; |
| 842 | 846 | ||
| 843 | if (rq->cmd_flags & REQ_DISCARD) | 847 | if (rq->cmd_flags & REQ_DISCARD) |
| 844 | __free_page(rq->completion_data); | 848 | __free_page(rq->completion_data); |
| @@ -850,18 +854,10 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) | |||
| 850 | } | 854 | } |
| 851 | } | 855 | } |
| 852 | 856 | ||
| 853 | /** | 857 | static int sd_init_command(struct scsi_cmnd *SCpnt) |
| 854 | * sd_prep_fn - build a scsi (read or write) command from | ||
| 855 | * information in the request structure. | ||
| 856 | * @SCpnt: pointer to mid-level's per scsi command structure that | ||
| 857 | * contains request and into which the scsi command is written | ||
| 858 | * | ||
| 859 | * Returns 1 if successful and 0 if error (or cannot be done now). | ||
| 860 | **/ | ||
| 861 | static int sd_prep_fn(struct request_queue *q, struct request *rq) | ||
| 862 | { | 858 | { |
| 863 | struct scsi_cmnd *SCpnt; | 859 | struct request *rq = SCpnt->request; |
| 864 | struct scsi_device *sdp = q->queuedata; | 860 | struct scsi_device *sdp = SCpnt->device; |
| 865 | struct gendisk *disk = rq->rq_disk; | 861 | struct gendisk *disk = rq->rq_disk; |
| 866 | struct scsi_disk *sdkp; | 862 | struct scsi_disk *sdkp; |
| 867 | sector_t block = blk_rq_pos(rq); | 863 | sector_t block = blk_rq_pos(rq); |
| @@ -883,12 +879,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
| 883 | } else if (rq->cmd_flags & REQ_FLUSH) { | 879 | } else if (rq->cmd_flags & REQ_FLUSH) { |
| 884 | ret = scsi_setup_flush_cmnd(sdp, rq); | 880 | ret = scsi_setup_flush_cmnd(sdp, rq); |
| 885 | goto out; | 881 | goto out; |
| 886 | } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | ||
| 887 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | ||
| 888 | goto out; | ||
| 889 | } else if (rq->cmd_type != REQ_TYPE_FS) { | ||
| 890 | ret = BLKPREP_KILL; | ||
| 891 | goto out; | ||
| 892 | } | 882 | } |
| 893 | ret = scsi_setup_fs_cmnd(sdp, rq); | 883 | ret = scsi_setup_fs_cmnd(sdp, rq); |
| 894 | if (ret != BLKPREP_OK) | 884 | if (ret != BLKPREP_OK) |
| @@ -900,11 +890,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
| 900 | * is used for a killable error condition */ | 890 | * is used for a killable error condition */ |
| 901 | ret = BLKPREP_KILL; | 891 | ret = BLKPREP_KILL; |
| 902 | 892 | ||
| 903 | SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, | 893 | SCSI_LOG_HLQUEUE(1, |
| 904 | "sd_prep_fn: block=%llu, " | 894 | scmd_printk(KERN_INFO, SCpnt, |
| 905 | "count=%d\n", | 895 | "%s: block=%llu, count=%d\n", |
| 906 | (unsigned long long)block, | 896 | __func__, (unsigned long long)block, this_count)); |
| 907 | this_count)); | ||
| 908 | 897 | ||
| 909 | if (!sdp || !scsi_device_online(sdp) || | 898 | if (!sdp || !scsi_device_online(sdp) || |
| 910 | block + blk_rq_sectors(rq) > get_capacity(disk)) { | 899 | block + blk_rq_sectors(rq) > get_capacity(disk)) { |
| @@ -1124,7 +1113,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
| 1124 | */ | 1113 | */ |
| 1125 | ret = BLKPREP_OK; | 1114 | ret = BLKPREP_OK; |
| 1126 | out: | 1115 | out: |
| 1127 | return scsi_prep_return(q, rq, ret); | 1116 | return ret; |
| 1128 | } | 1117 | } |
| 1129 | 1118 | ||
| 1130 | /** | 1119 | /** |
| @@ -1686,12 +1675,12 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
| 1686 | sshdr.ascq)); | 1675 | sshdr.ascq)); |
| 1687 | } | 1676 | } |
| 1688 | #endif | 1677 | #endif |
| 1678 | sdkp->medium_access_timed_out = 0; | ||
| 1679 | |||
| 1689 | if (driver_byte(result) != DRIVER_SENSE && | 1680 | if (driver_byte(result) != DRIVER_SENSE && |
| 1690 | (!sense_valid || sense_deferred)) | 1681 | (!sense_valid || sense_deferred)) |
| 1691 | goto out; | 1682 | goto out; |
| 1692 | 1683 | ||
| 1693 | sdkp->medium_access_timed_out = 0; | ||
| 1694 | |||
| 1695 | switch (sshdr.sense_key) { | 1684 | switch (sshdr.sense_key) { |
| 1696 | case HARDWARE_ERROR: | 1685 | case HARDWARE_ERROR: |
| 1697 | case MEDIUM_ERROR: | 1686 | case MEDIUM_ERROR: |
| @@ -2875,9 +2864,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) | |||
| 2875 | 2864 | ||
| 2876 | sd_revalidate_disk(gd); | 2865 | sd_revalidate_disk(gd); |
| 2877 | 2866 | ||
| 2878 | blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); | ||
| 2879 | blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn); | ||
| 2880 | |||
| 2881 | gd->driverfs_dev = &sdp->sdev_gendev; | 2867 | gd->driverfs_dev = &sdp->sdev_gendev; |
| 2882 | gd->flags = GENHD_FL_EXT_DEVT; | 2868 | gd->flags = GENHD_FL_EXT_DEVT; |
| 2883 | if (sdp->removable) { | 2869 | if (sdp->removable) { |
| @@ -3025,8 +3011,6 @@ static int sd_remove(struct device *dev) | |||
| 3025 | 3011 | ||
| 3026 | async_synchronize_full_domain(&scsi_sd_pm_domain); | 3012 | async_synchronize_full_domain(&scsi_sd_pm_domain); |
| 3027 | async_synchronize_full_domain(&scsi_sd_probe_domain); | 3013 | async_synchronize_full_domain(&scsi_sd_probe_domain); |
| 3028 | blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); | ||
| 3029 | blk_queue_unprep_rq(sdkp->device->request_queue, NULL); | ||
| 3030 | device_del(&sdkp->dev); | 3014 | device_del(&sdkp->dev); |
| 3031 | del_gendisk(sdkp->disk); | 3015 | del_gendisk(sdkp->disk); |
| 3032 | sd_shutdown(dev); | 3016 | sd_shutdown(dev); |
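With the sd conversion above, payload setup and teardown are paired through the new hooks instead of per-queue prep functions: whatever sd_init_command() allocates for a discard (the page stored in rq->completion_data) is released by sd_uninit_command() when the request is unprepped. A toy sketch of that init/uninit pairing; the struct fields and sizes are invented for illustration:

#include <stdlib.h>

struct request { void *completion_data; int is_discard; };

static int init_command(struct request *rq)
{
	if (rq->is_discard) {
		rq->completion_data = malloc(4096);	/* discard payload page */
		if (!rq->completion_data)
			return -1;
	}
	return 0;
}

static void uninit_command(struct request *rq)
{
	if (rq->is_discard) {
		free(rq->completion_data);		/* always freed on unprep */
		rq->completion_data = NULL;
	}
}

int main(void)
{
	struct request rq = { NULL, 1 };

	if (init_command(&rq) == 0)
		uninit_command(&rq);
	return 0;
}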
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 40d85929aefe..93cbd36c990b 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
| @@ -79,6 +79,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM); | |||
| 79 | static DEFINE_MUTEX(sr_mutex); | 79 | static DEFINE_MUTEX(sr_mutex); |
| 80 | static int sr_probe(struct device *); | 80 | static int sr_probe(struct device *); |
| 81 | static int sr_remove(struct device *); | 81 | static int sr_remove(struct device *); |
| 82 | static int sr_init_command(struct scsi_cmnd *SCpnt); | ||
| 82 | static int sr_done(struct scsi_cmnd *); | 83 | static int sr_done(struct scsi_cmnd *); |
| 83 | static int sr_runtime_suspend(struct device *dev); | 84 | static int sr_runtime_suspend(struct device *dev); |
| 84 | 85 | ||
| @@ -94,6 +95,7 @@ static struct scsi_driver sr_template = { | |||
| 94 | .remove = sr_remove, | 95 | .remove = sr_remove, |
| 95 | .pm = &sr_pm_ops, | 96 | .pm = &sr_pm_ops, |
| 96 | }, | 97 | }, |
| 98 | .init_command = sr_init_command, | ||
| 97 | .done = sr_done, | 99 | .done = sr_done, |
| 98 | }; | 100 | }; |
| 99 | 101 | ||
| @@ -378,21 +380,14 @@ static int sr_done(struct scsi_cmnd *SCpnt) | |||
| 378 | return good_bytes; | 380 | return good_bytes; |
| 379 | } | 381 | } |
| 380 | 382 | ||
| 381 | static int sr_prep_fn(struct request_queue *q, struct request *rq) | 383 | static int sr_init_command(struct scsi_cmnd *SCpnt) |
| 382 | { | 384 | { |
| 383 | int block = 0, this_count, s_size; | 385 | int block = 0, this_count, s_size; |
| 384 | struct scsi_cd *cd; | 386 | struct scsi_cd *cd; |
| 385 | struct scsi_cmnd *SCpnt; | 387 | struct request *rq = SCpnt->request; |
| 386 | struct scsi_device *sdp = q->queuedata; | 388 | struct scsi_device *sdp = SCpnt->device; |
| 387 | int ret; | 389 | int ret; |
| 388 | 390 | ||
| 389 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | ||
| 390 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | ||
| 391 | goto out; | ||
| 392 | } else if (rq->cmd_type != REQ_TYPE_FS) { | ||
| 393 | ret = BLKPREP_KILL; | ||
| 394 | goto out; | ||
| 395 | } | ||
| 396 | ret = scsi_setup_fs_cmnd(sdp, rq); | 391 | ret = scsi_setup_fs_cmnd(sdp, rq); |
| 397 | if (ret != BLKPREP_OK) | 392 | if (ret != BLKPREP_OK) |
| 398 | goto out; | 393 | goto out; |
| @@ -517,7 +512,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq) | |||
| 517 | */ | 512 | */ |
| 518 | ret = BLKPREP_OK; | 513 | ret = BLKPREP_OK; |
| 519 | out: | 514 | out: |
| 520 | return scsi_prep_return(q, rq, ret); | 515 | return ret; |
| 521 | } | 516 | } |
| 522 | 517 | ||
| 523 | static int sr_block_open(struct block_device *bdev, fmode_t mode) | 518 | static int sr_block_open(struct block_device *bdev, fmode_t mode) |
| @@ -718,7 +713,6 @@ static int sr_probe(struct device *dev) | |||
| 718 | 713 | ||
| 719 | /* FIXME: need to handle a get_capabilities failure properly ?? */ | 714 | /* FIXME: need to handle a get_capabilities failure properly ?? */ |
| 720 | get_capabilities(cd); | 715 | get_capabilities(cd); |
| 721 | blk_queue_prep_rq(sdev->request_queue, sr_prep_fn); | ||
| 722 | sr_vendor_init(cd); | 716 | sr_vendor_init(cd); |
| 723 | 717 | ||
| 724 | disk->driverfs_dev = &sdev->sdev_gendev; | 718 | disk->driverfs_dev = &sdev->sdev_gendev; |
| @@ -993,7 +987,6 @@ static int sr_remove(struct device *dev) | |||
| 993 | 987 | ||
| 994 | scsi_autopm_get_device(cd->device); | 988 | scsi_autopm_get_device(cd->device); |
| 995 | 989 | ||
| 996 | blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); | ||
| 997 | del_gendisk(cd->disk); | 990 | del_gendisk(cd->disk); |
| 998 | 991 | ||
| 999 | mutex_lock(&sr_ref_mutex); | 992 | mutex_lock(&sr_ref_mutex); |
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 636bbe0ea84c..88220794cc98 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c | |||
| @@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) | |||
| 364 | return( 0 ); | 364 | return( 0 ); |
| 365 | if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= | 365 | if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= |
| 366 | TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { | 366 | TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { |
| 367 | TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n", | 367 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", |
| 368 | H_NO(cmd), cmd->device->id, cmd->device->lun ); | 368 | H_NO(cmd), cmd->device->id, cmd->device->lun ); |
| 369 | return( 1 ); | 369 | return( 1 ); |
| 370 | } | 370 | } |
| @@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) | |||
| 388 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { | 388 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { |
| 389 | cmd->tag = TAG_NONE; | 389 | cmd->tag = TAG_NONE; |
| 390 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 390 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
| 391 | TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged " | 391 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " |
| 392 | "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); | 392 | "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); |
| 393 | } | 393 | } |
| 394 | else { | 394 | else { |
| @@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) | |||
| 397 | cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); | 397 | cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); |
| 398 | set_bit( cmd->tag, &ta->allocated ); | 398 | set_bit( cmd->tag, &ta->allocated ); |
| 399 | ta->nr_allocated++; | 399 | ta->nr_allocated++; |
| 400 | TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d " | 400 | dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " |
| 401 | "(now %d tags in use)\n", | 401 | "(now %d tags in use)\n", |
| 402 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, | 402 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, |
| 403 | ta->nr_allocated ); | 403 | ta->nr_allocated ); |
| @@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd) | |||
| 415 | 415 | ||
| 416 | if (cmd->tag == TAG_NONE) { | 416 | if (cmd->tag == TAG_NONE) { |
| 417 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 417 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); |
| 418 | TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n", | 418 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", |
| 419 | H_NO(cmd), cmd->device->id, cmd->device->lun ); | 419 | H_NO(cmd), cmd->device->id, cmd->device->lun ); |
| 420 | } | 420 | } |
| 421 | else if (cmd->tag >= MAX_TAGS) { | 421 | else if (cmd->tag >= MAX_TAGS) { |
| @@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd) | |||
| 426 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 426 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
| 427 | clear_bit( cmd->tag, &ta->allocated ); | 427 | clear_bit( cmd->tag, &ta->allocated ); |
| 428 | ta->nr_allocated--; | 428 | ta->nr_allocated--; |
| 429 | TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n", | 429 | dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", |
| 430 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun ); | 430 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun ); |
| 431 | } | 431 | } |
| 432 | } | 432 | } |
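These NCR5380 hunks replace the family of ad-hoc TAG_PRINTK/QU_PRINTK/MAIN_PRINTK/... macros with a single dprintk(flag, ...) gated by the compile-time NDEBUG mask. A minimal user-space sketch of what such a macro looks like; the mask values and the printf backend are assumptions, not the driver's actual header:

#include <stdio.h>

#define NDEBUG_TAGS	(1 << 0)
#define NDEBUG_QUEUES	(1 << 1)
#define NDEBUG_MAIN	(1 << 2)
#define NDEBUG		(NDEBUG_TAGS | NDEBUG_MAIN)	/* enabled categories */

#define dprintk(flg, fmt, ...)					\
	do {							\
		if ((NDEBUG) & (flg))				\
			printf(fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	dprintk(NDEBUG_MAIN, "scsi%d: main loop entered\n", 0);
	dprintk(NDEBUG_QUEUES, "suppressed: NDEBUG_QUEUES not in the mask\n");
	return 0;
}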
| @@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd) | |||
| 484 | 484 | ||
| 485 | #include <linux/delay.h> | 485 | #include <linux/delay.h> |
| 486 | 486 | ||
| 487 | #if 1 | 487 | #if NDEBUG |
| 488 | static struct { | 488 | static struct { |
| 489 | unsigned char mask; | 489 | unsigned char mask; |
| 490 | const char * name;} | 490 | const char * name;} |
| @@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) | |||
| 572 | } | 572 | } |
| 573 | } | 573 | } |
| 574 | 574 | ||
| 575 | #else /* !NDEBUG */ | ||
| 576 | |||
| 577 | /* dummies... */ | ||
| 578 | __inline__ void NCR5380_print(struct Scsi_Host *instance) { }; | ||
| 579 | __inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { }; | ||
| 580 | |||
| 581 | #endif | 575 | #endif |
| 582 | 576 | ||
| 583 | /* | 577 | /* |
| @@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void) | |||
| 618 | { | 612 | { |
| 619 | static int done = 0; | 613 | static int done = 0; |
| 620 | if (!done) { | 614 | if (!done) { |
| 621 | INI_PRINTK("scsi : NCR5380_all_init()\n"); | 615 | dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); |
| 622 | done = 1; | 616 | done = 1; |
| 623 | } | 617 | } |
| 624 | } | 618 | } |
| @@ -681,8 +675,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance) | |||
| 681 | Scsi_Cmnd *ptr; | 675 | Scsi_Cmnd *ptr; |
| 682 | unsigned long flags; | 676 | unsigned long flags; |
| 683 | 677 | ||
| 684 | NCR_PRINT(NDEBUG_ANY); | 678 | NCR5380_dprint(NDEBUG_ANY, instance); |
| 685 | NCR_PRINT_PHASE(NDEBUG_ANY); | 679 | NCR5380_dprint_phase(NDEBUG_ANY, instance); |
| 686 | 680 | ||
| 687 | hostdata = (struct NCR5380_hostdata *)instance->hostdata; | 681 | hostdata = (struct NCR5380_hostdata *)instance->hostdata; |
| 688 | 682 | ||
| @@ -928,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, | |||
| 928 | 922 | ||
| 929 | local_irq_restore(flags); | 923 | local_irq_restore(flags); |
| 930 | 924 | ||
| 931 | QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), | 925 | dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), |
| 932 | (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); | 926 | (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); |
| 933 | 927 | ||
| 934 | /* If queue_command() is called from an interrupt (real one or bottom | 928 | /* If queue_command() is called from an interrupt (real one or bottom |
| @@ -998,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl) | |||
| 998 | done = 1; | 992 | done = 1; |
| 999 | 993 | ||
| 1000 | if (!hostdata->connected) { | 994 | if (!hostdata->connected) { |
| 1001 | MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO ); | 995 | dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO ); |
| 1002 | /* | 996 | /* |
| 1003 | * Search through the issue_queue for a command destined | 997 | * Search through the issue_queue for a command destined |
| 1004 | * for a target that's not busy. | 998 | * for a target that's not busy. |
| @@ -1012,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl) | |||
| 1012 | for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, | 1006 | for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, |
| 1013 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { | 1007 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { |
| 1014 | 1008 | ||
| 1015 | #if (NDEBUG & NDEBUG_LISTS) | ||
| 1016 | if (prev != tmp) | 1009 | if (prev != tmp) |
| 1017 | printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", | 1010 | dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); |
| 1018 | tmp, tmp->target, hostdata->busy[tmp->target], | ||
| 1019 | tmp->lun); | ||
| 1020 | #endif | ||
| 1021 | /* When we find one, remove it from the issue queue. */ | 1011 | /* When we find one, remove it from the issue queue. */ |
| 1022 | /* ++guenther: possible race with Falcon locking */ | 1012 | /* ++guenther: possible race with Falcon locking */ |
| 1023 | if ( | 1013 | if ( |
| @@ -1047,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl) | |||
| 1047 | * On failure, we must add the command back to the | 1037 | * On failure, we must add the command back to the |
| 1048 | * issue queue so we can keep trying. | 1038 | * issue queue so we can keep trying. |
| 1049 | */ | 1039 | */ |
| 1050 | MAIN_PRINTK("scsi%d: main(): command for target %d " | 1040 | dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " |
| 1051 | "lun %d removed from issue_queue\n", | 1041 | "lun %d removed from issue_queue\n", |
| 1052 | HOSTNO, tmp->target, tmp->lun); | 1042 | HOSTNO, tmp->device->id, tmp->device->lun); |
| 1053 | /* | 1043 | /* |
| 1054 | * REQUEST SENSE commands are issued without tagged | 1044 | * REQUEST SENSE commands are issued without tagged |
| 1055 | * queueing, even on SCSI-II devices because the | 1045 | * queueing, even on SCSI-II devices because the |
| @@ -1076,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl) | |||
| 1076 | cmd_free_tag( tmp ); | 1066 | cmd_free_tag( tmp ); |
| 1077 | #endif | 1067 | #endif |
| 1078 | local_irq_restore(flags); | 1068 | local_irq_restore(flags); |
| 1079 | MAIN_PRINTK("scsi%d: main(): select() failed, " | 1069 | dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " |
| 1080 | "returned to issue_queue\n", HOSTNO); | 1070 | "returned to issue_queue\n", HOSTNO); |
| 1081 | if (hostdata->connected) | 1071 | if (hostdata->connected) |
| 1082 | break; | 1072 | break; |
| @@ -1090,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl) | |||
| 1090 | #endif | 1080 | #endif |
| 1091 | ) { | 1081 | ) { |
| 1092 | local_irq_restore(flags); | 1082 | local_irq_restore(flags); |
| 1093 | MAIN_PRINTK("scsi%d: main: performing information transfer\n", | 1083 | dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", |
| 1094 | HOSTNO); | 1084 | HOSTNO); |
| 1095 | NCR5380_information_transfer(instance); | 1085 | NCR5380_information_transfer(instance); |
| 1096 | MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); | 1086 | dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); |
| 1097 | done = 0; | 1087 | done = 0; |
| 1098 | } | 1088 | } |
| 1099 | } while (!done); | 1089 | } while (!done); |
| @@ -1130,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance ) | |||
| 1130 | return; | 1120 | return; |
| 1131 | } | 1121 | } |
| 1132 | 1122 | ||
| 1133 | DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", | 1123 | dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", |
| 1134 | HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), | 1124 | HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), |
| 1135 | NCR5380_read(STATUS_REG)); | 1125 | NCR5380_read(STATUS_REG)); |
| 1136 | 1126 | ||
| @@ -1189,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) | |||
| 1189 | int done = 1, handled = 0; | 1179 | int done = 1, handled = 0; |
| 1190 | unsigned char basr; | 1180 | unsigned char basr; |
| 1191 | 1181 | ||
| 1192 | INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); | 1182 | dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); |
| 1193 | 1183 | ||
| 1194 | /* Look for pending interrupts */ | 1184 | /* Look for pending interrupts */ |
| 1195 | basr = NCR5380_read(BUS_AND_STATUS_REG); | 1185 | basr = NCR5380_read(BUS_AND_STATUS_REG); |
| 1196 | INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); | 1186 | dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); |
| 1197 | /* dispatch to appropriate routine if found and done=0 */ | 1187 | /* dispatch to appropriate routine if found and done=0 */ |
| 1198 | if (basr & BASR_IRQ) { | 1188 | if (basr & BASR_IRQ) { |
| 1199 | NCR_PRINT(NDEBUG_INTR); | 1189 | NCR5380_dprint(NDEBUG_INTR, instance); |
| 1200 | if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { | 1190 | if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { |
| 1201 | done = 0; | 1191 | done = 0; |
| 1202 | // ENABLE_IRQ(); | 1192 | // ENABLE_IRQ(); |
| 1203 | INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); | 1193 | dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); |
| 1204 | NCR5380_reselect(instance); | 1194 | NCR5380_reselect(instance); |
| 1205 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1195 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1206 | } | 1196 | } |
| 1207 | else if (basr & BASR_PARITY_ERROR) { | 1197 | else if (basr & BASR_PARITY_ERROR) { |
| 1208 | INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); | 1198 | dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); |
| 1209 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1199 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1210 | } | 1200 | } |
| 1211 | else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { | 1201 | else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { |
| 1212 | INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); | 1202 | dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); |
| 1213 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1203 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
| 1214 | } | 1204 | } |
| 1215 | else { | 1205 | else { |
| @@ -1229,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) | |||
| 1229 | ((basr & BASR_END_DMA_TRANSFER) || | 1219 | ((basr & BASR_END_DMA_TRANSFER) || |
| 1230 | !(basr & BASR_PHASE_MATCH))) { | 1220 | !(basr & BASR_PHASE_MATCH))) { |
| 1231 | 1221 | ||
| 1232 | INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); | 1222 | dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); |
| 1233 | NCR5380_dma_complete( instance ); | 1223 | NCR5380_dma_complete( instance ); |
| 1234 | done = 0; | 1224 | done = 0; |
| 1235 | // ENABLE_IRQ(); | 1225 | // ENABLE_IRQ(); |
| @@ -1238,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) | |||
| 1238 | { | 1228 | { |
| 1239 | /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ | 1229 | /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ |
| 1240 | if (basr & BASR_PHASE_MATCH) | 1230 | if (basr & BASR_PHASE_MATCH) |
| 1241 | INT_PRINTK("scsi%d: unknown interrupt, " | 1231 | dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, " |
| 1242 | "BASR 0x%x, MR 0x%x, SR 0x%x\n", | 1232 | "BASR 0x%x, MR 0x%x, SR 0x%x\n", |
| 1243 | HOSTNO, basr, NCR5380_read(MODE_REG), | 1233 | HOSTNO, basr, NCR5380_read(MODE_REG), |
| 1244 | NCR5380_read(STATUS_REG)); | 1234 | NCR5380_read(STATUS_REG)); |
| @@ -1262,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) | |||
| 1262 | } | 1252 | } |
| 1263 | 1253 | ||
| 1264 | if (!done) { | 1254 | if (!done) { |
| 1265 | INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); | 1255 | dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); |
| 1266 | /* Put a call to NCR5380_main() on the queue... */ | 1256 | /* Put a call to NCR5380_main() on the queue... */ |
| 1267 | queue_main(); | 1257 | queue_main(); |
| 1268 | } | 1258 | } |
| @@ -1338,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1338 | unsigned long flags; | 1328 | unsigned long flags; |
| 1339 | 1329 | ||
| 1340 | hostdata->restart_select = 0; | 1330 | hostdata->restart_select = 0; |
| 1341 | NCR_PRINT(NDEBUG_ARBITRATION); | 1331 | NCR5380_dprint(NDEBUG_ARBITRATION, instance); |
| 1342 | ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, | 1332 | dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, |
| 1343 | instance->this_id); | 1333 | instance->this_id); |
| 1344 | 1334 | ||
| 1345 | /* | 1335 | /* |
| @@ -1385,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1385 | && !hostdata->connected); | 1375 | && !hostdata->connected); |
| 1386 | #endif | 1376 | #endif |
| 1387 | 1377 | ||
| 1388 | ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); | 1378 | dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); |
| 1389 | 1379 | ||
| 1390 | if (hostdata->connected) { | 1380 | if (hostdata->connected) { |
| 1391 | NCR5380_write(MODE_REG, MR_BASE); | 1381 | NCR5380_write(MODE_REG, MR_BASE); |
| @@ -1406,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1406 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | 1396 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || |
| 1407 | hostdata->connected) { | 1397 | hostdata->connected) { |
| 1408 | NCR5380_write(MODE_REG, MR_BASE); | 1398 | NCR5380_write(MODE_REG, MR_BASE); |
| 1409 | ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", | 1399 | dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", |
| 1410 | HOSTNO); | 1400 | HOSTNO); |
| 1411 | return -1; | 1401 | return -1; |
| 1412 | } | 1402 | } |
| @@ -1421,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1421 | hostdata->connected) { | 1411 | hostdata->connected) { |
| 1422 | NCR5380_write(MODE_REG, MR_BASE); | 1412 | NCR5380_write(MODE_REG, MR_BASE); |
| 1423 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1413 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 1424 | ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", | 1414 | dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", |
| 1425 | HOSTNO); | 1415 | HOSTNO); |
| 1426 | return -1; | 1416 | return -1; |
| 1427 | } | 1417 | } |
| @@ -1444,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1444 | return -1; | 1434 | return -1; |
| 1445 | } | 1435 | } |
| 1446 | 1436 | ||
| 1447 | ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); | 1437 | dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); |
| 1448 | 1438 | ||
| 1449 | /* | 1439 | /* |
| 1450 | * Now that we have won arbitration, start Selection process, asserting | 1440 | * Now that we have won arbitration, start Selection process, asserting |
| @@ -1504,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1504 | 1494 | ||
| 1505 | udelay(1); | 1495 | udelay(1); |
| 1506 | 1496 | ||
| 1507 | SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); | 1497 | dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); |
| 1508 | 1498 | ||
| 1509 | /* | 1499 | /* |
| 1510 | * The SCSI specification calls for a 250 ms timeout for the actual | 1500 | * The SCSI specification calls for a 250 ms timeout for the actual |
| @@ -1559,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1559 | printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); | 1549 | printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); |
| 1560 | if (hostdata->restart_select) | 1550 | if (hostdata->restart_select) |
| 1561 | printk(KERN_NOTICE "\trestart select\n"); | 1551 | printk(KERN_NOTICE "\trestart select\n"); |
| 1562 | NCR_PRINT(NDEBUG_ANY); | 1552 | NCR5380_dprint(NDEBUG_ANY, instance); |
| 1563 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1553 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1564 | return -1; | 1554 | return -1; |
| 1565 | } | 1555 | } |
| @@ -1572,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1572 | #endif | 1562 | #endif |
| 1573 | cmd->scsi_done(cmd); | 1563 | cmd->scsi_done(cmd); |
| 1574 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1564 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1575 | SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); | 1565 | dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); |
| 1576 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1566 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
| 1577 | return 0; | 1567 | return 0; |
| 1578 | } | 1568 | } |
| @@ -1597,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1597 | /* Wait for start of REQ/ACK handshake */ | 1587 | /* Wait for start of REQ/ACK handshake */ |
| 1598 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)); | 1588 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)); |
| 1599 | 1589 | ||
| 1600 | SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", | 1590 | dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", |
| 1601 | HOSTNO, cmd->device->id); | 1591 | HOSTNO, cmd->device->id); |
| 1602 | tmp[0] = IDENTIFY(1, cmd->device->lun); | 1592 | tmp[0] = IDENTIFY(1, cmd->device->lun); |
| 1603 | 1593 | ||
| @@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, | |||
| 1617 | data = tmp; | 1607 | data = tmp; |
| 1618 | phase = PHASE_MSGOUT; | 1608 | phase = PHASE_MSGOUT; |
| 1619 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 1609 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 1620 | SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); | 1610 | dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); |
| 1621 | /* XXX need to handle errors here */ | 1611 | /* XXX need to handle errors here */ |
| 1622 | hostdata->connected = cmd; | 1612 | hostdata->connected = cmd; |
| 1623 | #ifndef SUPPORT_TAGS | 1613 | #ifndef SUPPORT_TAGS |
| @@ -1680,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, | |||
| 1680 | */ | 1670 | */ |
| 1681 | while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); | 1671 | while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); |
| 1682 | 1672 | ||
| 1683 | HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); | 1673 | dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); |
| 1684 | 1674 | ||
| 1685 | /* Check for phase mismatch */ | 1675 | /* Check for phase mismatch */ |
| 1686 | if ((tmp & PHASE_MASK) != p) { | 1676 | if ((tmp & PHASE_MASK) != p) { |
| 1687 | PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); | 1677 | dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); |
| 1688 | NCR_PRINT_PHASE(NDEBUG_PIO); | 1678 | NCR5380_dprint_phase(NDEBUG_PIO, instance); |
| 1689 | break; | 1679 | break; |
| 1690 | } | 1680 | } |
| 1691 | 1681 | ||
| @@ -1708,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, | |||
| 1708 | if (!((p & SR_MSG) && c > 1)) { | 1698 | if (!((p & SR_MSG) && c > 1)) { |
| 1709 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 1699 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| 1710 | ICR_ASSERT_DATA); | 1700 | ICR_ASSERT_DATA); |
| 1711 | NCR_PRINT(NDEBUG_PIO); | 1701 | NCR5380_dprint(NDEBUG_PIO, instance); |
| 1712 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 1702 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| 1713 | ICR_ASSERT_DATA | ICR_ASSERT_ACK); | 1703 | ICR_ASSERT_DATA | ICR_ASSERT_ACK); |
| 1714 | } else { | 1704 | } else { |
| 1715 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 1705 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| 1716 | ICR_ASSERT_DATA | ICR_ASSERT_ATN); | 1706 | ICR_ASSERT_DATA | ICR_ASSERT_ATN); |
| 1717 | NCR_PRINT(NDEBUG_PIO); | 1707 | NCR5380_dprint(NDEBUG_PIO, instance); |
| 1718 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 1708 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| 1719 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); | 1709 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); |
| 1720 | } | 1710 | } |
| 1721 | } else { | 1711 | } else { |
| 1722 | NCR_PRINT(NDEBUG_PIO); | 1712 | NCR5380_dprint(NDEBUG_PIO, instance); |
| 1723 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); | 1713 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); |
| 1724 | } | 1714 | } |
| 1725 | 1715 | ||
| 1726 | while (NCR5380_read(STATUS_REG) & SR_REQ); | 1716 | while (NCR5380_read(STATUS_REG) & SR_REQ); |
| 1727 | 1717 | ||
| 1728 | HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); | 1718 | dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); |
| 1729 | 1719 | ||
| 1730 | /* | 1720 | /* |
| 1731 | * We have several special cases to consider during REQ/ACK handshaking : | 1721 | * We have several special cases to consider during REQ/ACK handshaking : |
| @@ -1746,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, | |||
| 1746 | } | 1736 | } |
| 1747 | } while (--c); | 1737 | } while (--c); |
| 1748 | 1738 | ||
| 1749 | PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); | 1739 | dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); |
| 1750 | 1740 | ||
| 1751 | *count = c; | 1741 | *count = c; |
| 1752 | *data = d; | 1742 | *data = d; |
| @@ -1854,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance, | |||
| 1854 | } | 1844 | } |
| 1855 | hostdata->dma_len = c; | 1845 | hostdata->dma_len = c; |
| 1856 | 1846 | ||
| 1857 | DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", | 1847 | dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", |
| 1858 | HOSTNO, (p & SR_IO) ? "reading" : "writing", | 1848 | HOSTNO, (p & SR_IO) ? "reading" : "writing", |
| 1859 | c, (p & SR_IO) ? "to" : "from", *data); | 1849 | c, (p & SR_IO) ? "to" : "from", *data); |
| 1860 | 1850 | ||
| @@ -1931,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 1931 | phase = (tmp & PHASE_MASK); | 1921 | phase = (tmp & PHASE_MASK); |
| 1932 | if (phase != old_phase) { | 1922 | if (phase != old_phase) { |
| 1933 | old_phase = phase; | 1923 | old_phase = phase; |
| 1934 | NCR_PRINT_PHASE(NDEBUG_INFORMATION); | 1924 | NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); |
| 1935 | } | 1925 | } |
| 1936 | 1926 | ||
| 1937 | if(phase == PHASE_CMDOUT) { | 1927 | if(phase == PHASE_CMDOUT) { |
| @@ -1996,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 1996 | --cmd->SCp.buffers_residual; | 1986 | --cmd->SCp.buffers_residual; |
| 1997 | cmd->SCp.this_residual = cmd->SCp.buffer->length; | 1987 | cmd->SCp.this_residual = cmd->SCp.buffer->length; |
| 1998 | cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); | 1988 | cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); |
| 1999 | INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", | 1989 | dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", |
| 2000 | HOSTNO, cmd->SCp.this_residual, | 1990 | HOSTNO, cmd->SCp.this_residual, |
| 2001 | cmd->SCp.buffers_residual); | 1991 | cmd->SCp.buffers_residual); |
| 2002 | } | 1992 | } |
| @@ -2088,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2088 | /* Accept message by clearing ACK */ | 2078 | /* Accept message by clearing ACK */ |
| 2089 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2079 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2090 | 2080 | ||
| 2091 | LNK_PRINTK("scsi%d: target %d lun %d linked command " | 2081 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " |
| 2092 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2082 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2093 | 2083 | ||
| 2094 | /* Enable reselect interrupts */ | 2084 | /* Enable reselect interrupts */ |
| @@ -2113,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2113 | * and don't free it! */ | 2103 | * and don't free it! */ |
| 2114 | cmd->next_link->tag = cmd->tag; | 2104 | cmd->next_link->tag = cmd->tag; |
| 2115 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2105 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
| 2116 | LNK_PRINTK("scsi%d: target %d lun %d linked request " | 2106 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request " |
| 2117 | "done, calling scsi_done().\n", | 2107 | "done, calling scsi_done().\n", |
| 2118 | HOSTNO, cmd->device->id, cmd->device->lun); | 2108 | HOSTNO, cmd->device->id, cmd->device->lun); |
| 2119 | #ifdef NCR5380_STATS | 2109 | #ifdef NCR5380_STATS |
| @@ -2128,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2128 | /* Accept message by clearing ACK */ | 2118 | /* Accept message by clearing ACK */ |
| 2129 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2119 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2130 | hostdata->connected = NULL; | 2120 | hostdata->connected = NULL; |
| 2131 | QU_PRINTK("scsi%d: command for target %d, lun %d " | 2121 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d " |
| 2132 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2122 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2133 | #ifdef SUPPORT_TAGS | 2123 | #ifdef SUPPORT_TAGS |
| 2134 | cmd_free_tag( cmd ); | 2124 | cmd_free_tag( cmd ); |
| @@ -2142,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2142 | /* ++Andreas: the mid level code knows about | 2132 | /* ++Andreas: the mid level code knows about |
| 2143 | QUEUE_FULL now. */ | 2133 | QUEUE_FULL now. */ |
| 2144 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 2134 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
| 2145 | TAG_PRINTK("scsi%d: target %d lun %d returned " | 2135 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned " |
| 2146 | "QUEUE_FULL after %d commands\n", | 2136 | "QUEUE_FULL after %d commands\n", |
| 2147 | HOSTNO, cmd->device->id, cmd->device->lun, | 2137 | HOSTNO, cmd->device->id, cmd->device->lun, |
| 2148 | ta->nr_allocated); | 2138 | ta->nr_allocated); |
| @@ -2186,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2186 | if ((cmd->cmnd[0] != REQUEST_SENSE) && | 2176 | if ((cmd->cmnd[0] != REQUEST_SENSE) && |
| 2187 | (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { | 2177 | (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { |
| 2188 | scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); | 2178 | scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); |
| 2189 | ASEN_PRINTK("scsi%d: performing request sense\n", | 2179 | dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", |
| 2190 | HOSTNO); | 2180 | HOSTNO); |
| 2191 | /* this is initialized from initialize_SCp | 2181 | /* this is initialized from initialize_SCp |
| 2192 | cmd->SCp.buffer = NULL; | 2182 | cmd->SCp.buffer = NULL; |
| @@ -2198,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2198 | SET_NEXT(cmd, hostdata->issue_queue); | 2188 | SET_NEXT(cmd, hostdata->issue_queue); |
| 2199 | hostdata->issue_queue = (struct scsi_cmnd *) cmd; | 2189 | hostdata->issue_queue = (struct scsi_cmnd *) cmd; |
| 2200 | local_irq_restore(flags); | 2190 | local_irq_restore(flags); |
| 2201 | QU_PRINTK("scsi%d: REQUEST SENSE added to head of " | 2191 | dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " |
| 2202 | "issue queue\n", H_NO(cmd)); | 2192 | "issue queue\n", H_NO(cmd)); |
| 2203 | } else | 2193 | } else |
| 2204 | #endif /* def AUTOSENSE */ | 2194 | #endif /* def AUTOSENSE */ |
| @@ -2238,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2238 | cmd->device->tagged_supported = 0; | 2228 | cmd->device->tagged_supported = 0; |
| 2239 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 2229 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
| 2240 | cmd->tag = TAG_NONE; | 2230 | cmd->tag = TAG_NONE; |
| 2241 | TAG_PRINTK("scsi%d: target %d lun %d rejected " | 2231 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected " |
| 2242 | "QUEUE_TAG message; tagged queuing " | 2232 | "QUEUE_TAG message; tagged queuing " |
| 2243 | "disabled\n", | 2233 | "disabled\n", |
| 2244 | HOSTNO, cmd->device->id, cmd->device->lun); | 2234 | HOSTNO, cmd->device->id, cmd->device->lun); |
| @@ -2255,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2255 | hostdata->connected = NULL; | 2245 | hostdata->connected = NULL; |
| 2256 | hostdata->disconnected_queue = cmd; | 2246 | hostdata->disconnected_queue = cmd; |
| 2257 | local_irq_restore(flags); | 2247 | local_irq_restore(flags); |
| 2258 | QU_PRINTK("scsi%d: command for target %d lun %d was " | 2248 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was " |
| 2259 | "moved from connected to the " | 2249 | "moved from connected to the " |
| 2260 | "disconnected_queue\n", HOSTNO, | 2250 | "disconnected_queue\n", HOSTNO, |
| 2261 | cmd->device->id, cmd->device->lun); | 2251 | cmd->device->id, cmd->device->lun); |
| @@ -2308,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2308 | /* Accept first byte by clearing ACK */ | 2298 | /* Accept first byte by clearing ACK */ |
| 2309 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2299 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2310 | 2300 | ||
| 2311 | EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); | 2301 | dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); |
| 2312 | 2302 | ||
| 2313 | len = 2; | 2303 | len = 2; |
| 2314 | data = extended_msg + 1; | 2304 | data = extended_msg + 1; |
| 2315 | phase = PHASE_MSGIN; | 2305 | phase = PHASE_MSGIN; |
| 2316 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2306 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2317 | EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, | 2307 | dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, |
| 2318 | (int)extended_msg[1], (int)extended_msg[2]); | 2308 | (int)extended_msg[1], (int)extended_msg[2]); |
| 2319 | 2309 | ||
| 2320 | if (!len && extended_msg[1] <= | 2310 | if (!len && extended_msg[1] <= |
| @@ -2326,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2326 | phase = PHASE_MSGIN; | 2316 | phase = PHASE_MSGIN; |
| 2327 | 2317 | ||
| 2328 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2318 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2329 | EXT_PRINTK("scsi%d: message received, residual %d\n", | 2319 | dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", |
| 2330 | HOSTNO, len); | 2320 | HOSTNO, len); |
| 2331 | 2321 | ||
| 2332 | switch (extended_msg[2]) { | 2322 | switch (extended_msg[2]) { |
| @@ -2416,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2416 | break; | 2406 | break; |
| 2417 | default: | 2407 | default: |
| 2418 | printk("scsi%d: unknown phase\n", HOSTNO); | 2408 | printk("scsi%d: unknown phase\n", HOSTNO); |
| 2419 | NCR_PRINT(NDEBUG_ANY); | 2409 | NCR5380_dprint(NDEBUG_ANY, instance); |
| 2420 | } /* switch(phase) */ | 2410 | } /* switch(phase) */ |
| 2421 | } /* if (tmp * SR_REQ) */ | 2411 | } /* if (tmp * SR_REQ) */ |
| 2422 | } /* while (1) */ | 2412 | } /* while (1) */ |
| @@ -2458,7 +2448,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance) | |||
| 2458 | 2448 | ||
| 2459 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); | 2449 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); |
| 2460 | 2450 | ||
| 2461 | RSL_PRINTK("scsi%d: reselect\n", HOSTNO); | 2451 | dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); |
| 2462 | 2452 | ||
| 2463 | /* | 2453 | /* |
| 2464 | * At this point, we have detected that our SCSI ID is on the bus, | 2454 | * At this point, we have detected that our SCSI ID is on the bus, |
| @@ -2580,14 +2570,14 @@ static void NCR5380_reselect (struct Scsi_Host *instance) | |||
| 2580 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && | 2570 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && |
| 2581 | msg[1] == SIMPLE_QUEUE_TAG) | 2571 | msg[1] == SIMPLE_QUEUE_TAG) |
| 2582 | tag = msg[2]; | 2572 | tag = msg[2]; |
| 2583 | TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " | 2573 | dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " |
| 2584 | "reselection\n", HOSTNO, target_mask, lun, tag); | 2574 | "reselection\n", HOSTNO, target_mask, lun, tag); |
| 2585 | } | 2575 | } |
| 2586 | #endif | 2576 | #endif |
| 2587 | 2577 | ||
| 2588 | hostdata->connected = tmp; | 2578 | hostdata->connected = tmp; |
| 2589 | RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", | 2579 | dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", |
| 2590 | HOSTNO, tmp->target, tmp->lun, tmp->tag); | 2580 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); |
| 2591 | } | 2581 | } |
| 2592 | 2582 | ||
| 2593 | 2583 | ||
| @@ -2622,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2622 | 2612 | ||
| 2623 | local_irq_save(flags); | 2613 | local_irq_save(flags); |
| 2624 | 2614 | ||
| 2625 | ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, | 2615 | dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, |
| 2626 | NCR5380_read(BUS_AND_STATUS_REG), | 2616 | NCR5380_read(BUS_AND_STATUS_REG), |
| 2627 | NCR5380_read(STATUS_REG)); | 2617 | NCR5380_read(STATUS_REG)); |
| 2628 | 2618 | ||
| @@ -2635,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2635 | 2625 | ||
| 2636 | if (hostdata->connected == cmd) { | 2626 | if (hostdata->connected == cmd) { |
| 2637 | 2627 | ||
| 2638 | ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); | 2628 | dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO); |
| 2639 | /* | 2629 | /* |
| 2640 | * We should perform BSY checking, and make sure we haven't slipped | 2630 | * We should perform BSY checking, and make sure we haven't slipped |
| 2641 | * into BUS FREE. | 2631 | * into BUS FREE. |
| @@ -2664,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2664 | #endif | 2654 | #endif |
| 2665 | local_irq_restore(flags); | 2655 | local_irq_restore(flags); |
| 2666 | cmd->scsi_done(cmd); | 2656 | cmd->scsi_done(cmd); |
| 2667 | return SCSI_ABORT_SUCCESS; | 2657 | return SUCCESS; |
| 2668 | } else { | 2658 | } else { |
| 2669 | /* local_irq_restore(flags); */ | 2659 | /* local_irq_restore(flags); */ |
| 2670 | printk("scsi%d: abort of connected command failed!\n", HOSTNO); | 2660 | printk("scsi%d: abort of connected command failed!\n", HOSTNO); |
| 2671 | return SCSI_ABORT_ERROR; | 2661 | return FAILED; |
| 2672 | } | 2662 | } |
| 2673 | } | 2663 | } |
| 2674 | #endif | 2664 | #endif |
| @@ -2686,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2686 | SET_NEXT(tmp, NULL); | 2676 | SET_NEXT(tmp, NULL); |
| 2687 | tmp->result = DID_ABORT << 16; | 2677 | tmp->result = DID_ABORT << 16; |
| 2688 | local_irq_restore(flags); | 2678 | local_irq_restore(flags); |
| 2689 | ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", | 2679 | dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", |
| 2690 | HOSTNO); | 2680 | HOSTNO); |
| 2691 | /* Tagged queuing note: no tag to free here, hasn't been assigned | 2681 | /* Tagged queuing note: no tag to free here, hasn't been assigned |
| 2692 | * yet... */ | 2682 | * yet... */ |
| 2693 | tmp->scsi_done(tmp); | 2683 | tmp->scsi_done(tmp); |
| 2694 | return SCSI_ABORT_SUCCESS; | 2684 | return SUCCESS; |
| 2695 | } | 2685 | } |
| 2696 | 2686 | ||
| 2697 | /* | 2687 | /* |
| @@ -2707,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2707 | 2697 | ||
| 2708 | if (hostdata->connected) { | 2698 | if (hostdata->connected) { |
| 2709 | local_irq_restore(flags); | 2699 | local_irq_restore(flags); |
| 2710 | ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); | 2700 | dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); |
| 2711 | return SCSI_ABORT_SNOOZE; | 2701 | return FAILED; |
| 2712 | } | 2702 | } |
| 2713 | 2703 | ||
| 2714 | /* | 2704 | /* |
| @@ -2740,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2740 | tmp = NEXT(tmp)) | 2730 | tmp = NEXT(tmp)) |
| 2741 | if (cmd == tmp) { | 2731 | if (cmd == tmp) { |
| 2742 | local_irq_restore(flags); | 2732 | local_irq_restore(flags); |
| 2743 | ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); | 2733 | dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); |
| 2744 | 2734 | ||
| 2745 | if (NCR5380_select (instance, cmd, (int) cmd->tag)) | 2735 | if (NCR5380_select (instance, cmd, (int) cmd->tag)) |
| 2746 | return SCSI_ABORT_BUSY; | 2736 | return FAILED; |
| 2747 | 2737 | ||
| 2748 | ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); | 2738 | dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); |
| 2749 | 2739 | ||
| 2750 | do_abort (instance); | 2740 | do_abort (instance); |
| 2751 | 2741 | ||
| @@ -2769,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2769 | #endif | 2759 | #endif |
| 2770 | local_irq_restore(flags); | 2760 | local_irq_restore(flags); |
| 2771 | tmp->scsi_done(tmp); | 2761 | tmp->scsi_done(tmp); |
| 2772 | return SCSI_ABORT_SUCCESS; | 2762 | return SUCCESS; |
| 2773 | } | 2763 | } |
| 2774 | } | 2764 | } |
| 2775 | 2765 | ||
| @@ -2786,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2786 | local_irq_restore(flags); | 2776 | local_irq_restore(flags); |
| 2787 | printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); | 2777 | printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); |
| 2788 | 2778 | ||
| 2789 | return SCSI_ABORT_NOT_RUNNING; | 2779 | return FAILED; |
| 2790 | } | 2780 | } |
| 2791 | 2781 | ||
| 2792 | 2782 | ||
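The abort path above also drops the legacy SCSI_ABORT_* return codes in favour of the midlayer's SUCCESS and FAILED, which is what scsi_cmnd error handlers return on current kernels. A minimal sketch of how such handlers are wired up, with a placeholder template name (the real board files install their own wrappers around these functions), might look like:

    static struct scsi_host_template driver_template = {
            /* ... */
            .eh_abort_handler       = NCR5380_abort,
            .eh_bus_reset_handler   = NCR5380_bus_reset,
    };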
| @@ -2795,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) | |||
| 2795 | * | 2785 | * |
| 2796 | * Purpose : reset the SCSI bus. | 2786 | * Purpose : reset the SCSI bus. |
| 2797 | * | 2787 | * |
| 2798 | * Returns : SCSI_RESET_WAKEUP | 2788 | * Returns : SUCCESS or FAILURE |
| 2799 | * | 2789 | * |
| 2800 | */ | 2790 | */ |
| 2801 | 2791 | ||
| @@ -2804,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
| 2804 | SETUP_HOSTDATA(cmd->device->host); | 2794 | SETUP_HOSTDATA(cmd->device->host); |
| 2805 | int i; | 2795 | int i; |
| 2806 | unsigned long flags; | 2796 | unsigned long flags; |
| 2807 | #if 1 | 2797 | #if defined(RESET_RUN_DONE) |
| 2808 | struct scsi_cmnd *connected, *disconnected_queue; | 2798 | struct scsi_cmnd *connected, *disconnected_queue; |
| 2809 | #endif | 2799 | #endif |
| 2810 | 2800 | ||
| @@ -2826,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
| 2826 | * through anymore ... */ | 2816 | * through anymore ... */ |
| 2827 | (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); | 2817 | (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); |
| 2828 | 2818 | ||
| 2829 | #if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ | 2819 | /* MSch 20140115 - looking at the generic NCR5380 driver, all of this |
| 2830 | /* XXX see below XXX */ | 2820 | * should go. |
| 2821 | * Catch-22: if we don't clear all queues, the SCSI driver lock will | ||
| 2822 | * not be released by atari_scsi_reset()! | ||
| 2823 | */ | ||
| 2824 | |||
| 2825 | #if defined(RESET_RUN_DONE) | ||
| 2826 | /* XXX Should now be done by midlevel code, but it's broken XXX */ | ||
| 2827 | /* XXX see below XXX */ | ||
| 2831 | 2828 | ||
| 2832 | /* MSch: old-style reset: actually abort all command processing here */ | 2829 | /* MSch: old-style reset: actually abort all command processing here */ |
| 2833 | 2830 | ||
| @@ -2857,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
| 2857 | */ | 2854 | */ |
| 2858 | 2855 | ||
| 2859 | if ((cmd = connected)) { | 2856 | if ((cmd = connected)) { |
| 2860 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); | 2857 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); |
| 2861 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); | 2858 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); |
| 2862 | cmd->scsi_done( cmd ); | 2859 | cmd->scsi_done( cmd ); |
| 2863 | } | 2860 | } |
| @@ -2869,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
| 2869 | cmd->scsi_done( cmd ); | 2866 | cmd->scsi_done( cmd ); |
| 2870 | } | 2867 | } |
| 2871 | if (i > 0) | 2868 | if (i > 0) |
| 2872 | ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); | 2869 | dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); |
| 2873 | 2870 | ||
| 2874 | 2871 | ||
| 2875 | /* since all commands have been explicitly terminated, we need to tell | 2872 | /* since all commands have been explicitly terminated, we need to tell |
| 2876 | * the midlevel code that the reset was SUCCESSFUL, and there is no | 2873 | * the midlevel code that the reset was SUCCESSFUL, and there is no |
| 2877 | * need to 'wake up' the commands by a request_sense | 2874 | * need to 'wake up' the commands by a request_sense |
| 2878 | */ | 2875 | */ |
| 2879 | return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; | 2876 | return SUCCESS; |
| 2880 | #else /* 1 */ | 2877 | #else /* 1 */ |
| 2881 | 2878 | ||
| 2882 | /* MSch: new-style reset handling: let the mid-level do what it can */ | 2879 | /* MSch: new-style reset handling: let the mid-level do what it can */ |
| @@ -2903,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
| 2903 | */ | 2900 | */ |
| 2904 | 2901 | ||
| 2905 | if (hostdata->issue_queue) | 2902 | if (hostdata->issue_queue) |
| 2906 | ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); | 2903 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); |
| 2907 | if (hostdata->connected) | 2904 | if (hostdata->connected) |
| 2908 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); | 2905 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); |
| 2909 | if (hostdata->disconnected_queue) | 2906 | if (hostdata->disconnected_queue) |
| 2910 | ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); | 2907 | dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); |
| 2911 | 2908 | ||
| 2912 | local_irq_save(flags); | 2909 | local_irq_save(flags); |
| 2913 | hostdata->issue_queue = NULL; | 2910 | hostdata->issue_queue = NULL; |
| @@ -2924,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) | |||
| 2924 | local_irq_restore(flags); | 2921 | local_irq_restore(flags); |
| 2925 | 2922 | ||
| 2926 | /* we did no complete reset of all commands, so a wakeup is required */ | 2923 | /* we did no complete reset of all commands, so a wakeup is required */ |
| 2927 | return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; | 2924 | return SUCCESS; |
| 2928 | #endif /* 1 */ | 2925 | #endif /* 1 */ |
| 2929 | } | 2926 | } |
| 2930 | 2927 | ||
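The reset hunks above replace the bare "#if 1" with "#if defined(RESET_RUN_DONE)", so the choice between the two reset strategies becomes an explicit per-board opt-in: sun3_scsi.c adds "#define RESET_RUN_DONE" further down in this diff and therefore keeps the old-style path that completes every command inside the driver, while a board that leaves the macro undefined gets the path that only flushes the queues and lets the SCSI midlayer recover the rest. In outline (a sketch of the control flow, not the verbatim driver code):

    static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
    {
            /* pulse RST on the bus, clear latched parity/interrupt state */
    #if defined(RESET_RUN_DONE)
            /* old style: complete connected and disconnected commands here */
    #else
            /* new style: empty issue/connected/disconnected queues only,
             * the midlayer re-issues whatever was outstanding */
    #endif
            return SUCCESS;
    }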
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index e2c009b033ce..9707b7494a89 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c | |||
| @@ -3,6 +3,10 @@ | |||
| 3 | * | 3 | * |
| 4 | * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) | 4 | * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) |
| 5 | * | 5 | * |
| 6 | * VME support added by Sam Creasey | ||
| 7 | * | ||
| 8 | * TODO: modify this driver to support multiple Sun3 SCSI VME boards | ||
| 9 | * | ||
| 6 | * Adapted from mac_scsinew.c: | 10 | * Adapted from mac_scsinew.c: |
| 7 | */ | 11 | */ |
| 8 | /* | 12 | /* |
| @@ -45,10 +49,6 @@ | |||
| 45 | * USLEEP - enable support for devices that don't disconnect. Untested. | 49 | * USLEEP - enable support for devices that don't disconnect. Untested. |
| 46 | */ | 50 | */ |
| 47 | 51 | ||
| 48 | /* | ||
| 49 | * $Log: sun3_NCR5380.c,v $ | ||
| 50 | */ | ||
| 51 | |||
| 52 | #define AUTOSENSE | 52 | #define AUTOSENSE |
| 53 | 53 | ||
| 54 | #include <linux/types.h> | 54 | #include <linux/types.h> |
| @@ -69,23 +69,15 @@ | |||
| 69 | #include <asm/idprom.h> | 69 | #include <asm/idprom.h> |
| 70 | #include <asm/machines.h> | 70 | #include <asm/machines.h> |
| 71 | 71 | ||
| 72 | #define NDEBUG 0 | ||
| 73 | |||
| 74 | #define NDEBUG_ABORT 0x00100000 | ||
| 75 | #define NDEBUG_TAGS 0x00200000 | ||
| 76 | #define NDEBUG_MERGING 0x00400000 | ||
| 77 | |||
| 78 | /* dma on! */ | 72 | /* dma on! */ |
| 79 | #define REAL_DMA | 73 | #define REAL_DMA |
| 80 | 74 | ||
| 81 | #include "scsi.h" | 75 | #include "scsi.h" |
| 82 | #include "initio.h" | ||
| 83 | #include <scsi/scsi_host.h> | 76 | #include <scsi/scsi_host.h> |
| 84 | #include "sun3_scsi.h" | 77 | #include "sun3_scsi.h" |
| 78 | #include "NCR5380.h" | ||
| 85 | 79 | ||
| 86 | static void NCR5380_print(struct Scsi_Host *instance); | 80 | extern int sun3_map_test(unsigned long, char *); |
| 87 | |||
| 88 | /* #define OLDDMA */ | ||
| 89 | 81 | ||
| 90 | #define USE_WRAPPER | 82 | #define USE_WRAPPER |
| 91 | /*#define RESET_BOOT */ | 83 | /*#define RESET_BOOT */ |
| @@ -101,7 +93,11 @@ static void NCR5380_print(struct Scsi_Host *instance); | |||
| 101 | 93 | ||
| 102 | /* #define SUPPORT_TAGS */ | 94 | /* #define SUPPORT_TAGS */ |
| 103 | 95 | ||
| 96 | #ifdef SUN3_SCSI_VME | ||
| 97 | #define ENABLE_IRQ() | ||
| 98 | #else | ||
| 104 | #define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); | 99 | #define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); |
| 100 | #endif | ||
| 105 | 101 | ||
| 106 | 102 | ||
| 107 | static irqreturn_t scsi_sun3_intr(int irq, void *dummy); | 103 | static irqreturn_t scsi_sun3_intr(int irq, void *dummy); |
| @@ -123,6 +119,8 @@ module_param(setup_hostid, int, 0); | |||
| 123 | 119 | ||
| 124 | static struct scsi_cmnd *sun3_dma_setup_done = NULL; | 120 | static struct scsi_cmnd *sun3_dma_setup_done = NULL; |
| 125 | 121 | ||
| 122 | #define RESET_RUN_DONE | ||
| 123 | |||
| 126 | #define AFTER_RESET_DELAY (HZ/2) | 124 | #define AFTER_RESET_DELAY (HZ/2) |
| 127 | 125 | ||
| 128 | /* ms to wait after hitting dma regs */ | 126 | /* ms to wait after hitting dma regs */ |
| @@ -136,10 +134,9 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL; | |||
| 136 | 134 | ||
| 137 | static volatile unsigned char *sun3_scsi_regp; | 135 | static volatile unsigned char *sun3_scsi_regp; |
| 138 | static volatile struct sun3_dma_regs *dregs; | 136 | static volatile struct sun3_dma_regs *dregs; |
| 139 | #ifdef OLDDMA | 137 | #ifndef SUN3_SCSI_VME |
| 140 | static unsigned char *dmabuf = NULL; /* dma memory buffer */ | ||
| 141 | #endif | ||
| 142 | static struct sun3_udc_regs *udc_regs = NULL; | 138 | static struct sun3_udc_regs *udc_regs = NULL; |
| 139 | #endif | ||
| 143 | static unsigned char *sun3_dma_orig_addr = NULL; | 140 | static unsigned char *sun3_dma_orig_addr = NULL; |
| 144 | static unsigned long sun3_dma_orig_count = 0; | 141 | static unsigned long sun3_dma_orig_count = 0; |
| 145 | static int sun3_dma_active = 0; | 142 | static int sun3_dma_active = 0; |
| @@ -159,6 +156,7 @@ static inline void sun3scsi_write(int reg, int value) | |||
| 159 | sun3_scsi_regp[reg] = value; | 156 | sun3_scsi_regp[reg] = value; |
| 160 | } | 157 | } |
| 161 | 158 | ||
| 159 | #ifndef SUN3_SCSI_VME | ||
| 162 | /* dma controller register access functions */ | 160 | /* dma controller register access functions */ |
| 163 | 161 | ||
| 164 | static inline unsigned short sun3_udc_read(unsigned char reg) | 162 | static inline unsigned short sun3_udc_read(unsigned char reg) |
| @@ -180,6 +178,7 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg) | |||
| 180 | dregs->udc_data = val; | 178 | dregs->udc_data = val; |
| 181 | udelay(SUN3_DMA_DELAY); | 179 | udelay(SUN3_DMA_DELAY); |
| 182 | } | 180 | } |
| 181 | #endif | ||
| 183 | 182 | ||
| 184 | /* | 183 | /* |
| 185 | * XXX: status debug | 184 | * XXX: status debug |
| @@ -198,17 +197,32 @@ static struct Scsi_Host *default_instance; | |||
| 198 | * | 197 | * |
| 199 | */ | 198 | */ |
| 200 | 199 | ||
| 201 | int __init sun3scsi_detect(struct scsi_host_template * tpnt) | 200 | static int __init sun3scsi_detect(struct scsi_host_template *tpnt) |
| 202 | { | 201 | { |
| 203 | unsigned long ioaddr; | 202 | unsigned long ioaddr, irq; |
| 204 | static int called = 0; | 203 | static int called = 0; |
| 205 | struct Scsi_Host *instance; | 204 | struct Scsi_Host *instance; |
| 205 | #ifdef SUN3_SCSI_VME | ||
| 206 | int i; | ||
| 207 | unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, | ||
| 208 | IOBASE_SUN3_VMESCSI + 0x4000, | ||
| 209 | 0 }; | ||
| 210 | unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, | ||
| 211 | SUN3_VEC_VMESCSI1, | ||
| 212 | 0 }; | ||
| 213 | #endif | ||
| 206 | 214 | ||
| 207 | /* check that this machine has an onboard 5380 */ | 215 | /* check that this machine has an onboard 5380 */ |
| 208 | switch(idprom->id_machtype) { | 216 | switch(idprom->id_machtype) { |
| 217 | #ifdef SUN3_SCSI_VME | ||
| 218 | case SM_SUN3|SM_3_160: | ||
| 219 | case SM_SUN3|SM_3_260: | ||
| 220 | break; | ||
| 221 | #else | ||
| 209 | case SM_SUN3|SM_3_50: | 222 | case SM_SUN3|SM_3_50: |
| 210 | case SM_SUN3|SM_3_60: | 223 | case SM_SUN3|SM_3_60: |
| 211 | break; | 224 | break; |
| 225 | #endif | ||
| 212 | 226 | ||
| 213 | default: | 227 | default: |
| 214 | return 0; | 228 | return 0; |
| @@ -217,7 +231,11 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) | |||
| 217 | if(called) | 231 | if(called) |
| 218 | return 0; | 232 | return 0; |
| 219 | 233 | ||
| 234 | #ifdef SUN3_SCSI_VME | ||
| 235 | tpnt->proc_name = "Sun3 5380 VME SCSI"; | ||
| 236 | #else | ||
| 220 | tpnt->proc_name = "Sun3 5380 SCSI"; | 237 | tpnt->proc_name = "Sun3 5380 SCSI"; |
| 238 | #endif | ||
| 221 | 239 | ||
| 222 | /* setup variables */ | 240 | /* setup variables */ |
| 223 | tpnt->can_queue = | 241 | tpnt->can_queue = |
| @@ -234,6 +252,38 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) | |||
| 234 | tpnt->this_id = 7; | 252 | tpnt->this_id = 7; |
| 235 | } | 253 | } |
| 236 | 254 | ||
| 255 | #ifdef SUN3_SCSI_VME | ||
| 256 | ioaddr = 0; | ||
| 257 | for (i = 0; addrs[i] != 0; i++) { | ||
| 258 | unsigned char x; | ||
| 259 | |||
| 260 | ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, | ||
| 261 | SUN3_PAGE_TYPE_VME16); | ||
| 262 | irq = vecs[i]; | ||
| 263 | sun3_scsi_regp = (unsigned char *)ioaddr; | ||
| 264 | |||
| 265 | dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); | ||
| 266 | |||
| 267 | if (sun3_map_test((unsigned long)dregs, &x)) { | ||
| 268 | unsigned short oldcsr; | ||
| 269 | |||
| 270 | oldcsr = dregs->csr; | ||
| 271 | dregs->csr = 0; | ||
| 272 | udelay(SUN3_DMA_DELAY); | ||
| 273 | if (dregs->csr == 0x1400) | ||
| 274 | break; | ||
| 275 | |||
| 276 | dregs->csr = oldcsr; | ||
| 277 | } | ||
| 278 | |||
| 279 | iounmap((void *)ioaddr); | ||
| 280 | ioaddr = 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | if (!ioaddr) | ||
| 284 | return 0; | ||
| 285 | #else | ||
| 286 | irq = IRQ_SUN3_SCSI; | ||
| 237 | ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); | 287 | ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); |
| 238 | sun3_scsi_regp = (unsigned char *)ioaddr; | 288 | sun3_scsi_regp = (unsigned char *)ioaddr; |
| 239 | 289 | ||
| @@ -244,11 +294,6 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) | |||
| 244 | printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); | 294 | printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); |
| 245 | return 0; | 295 | return 0; |
| 246 | } | 296 | } |
| 247 | #ifdef OLDDMA | ||
| 248 | if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) { | ||
| 249 | printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); | ||
| 250 | return 0; | ||
| 251 | } | ||
| 252 | #endif | 297 | #endif |
| 253 | #ifdef SUPPORT_TAGS | 298 | #ifdef SUPPORT_TAGS |
| 254 | if (setup_use_tagged_queuing < 0) | 299 | if (setup_use_tagged_queuing < 0) |
| @@ -262,7 +307,7 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) | |||
| 262 | default_instance = instance; | 307 | default_instance = instance; |
| 263 | 308 | ||
| 264 | instance->io_port = (unsigned long) ioaddr; | 309 | instance->io_port = (unsigned long) ioaddr; |
| 265 | instance->irq = IRQ_SUN3_SCSI; | 310 | instance->irq = irq; |
| 266 | 311 | ||
| 267 | NCR5380_init(instance, 0); | 312 | NCR5380_init(instance, 0); |
| 268 | 313 | ||
| @@ -283,7 +328,8 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) | |||
| 283 | #endif | 328 | #endif |
| 284 | } | 329 | } |
| 285 | 330 | ||
| 286 | printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port); | 331 | pr_info("scsi%d: %s at port %lX irq", instance->host_no, |
| 332 | tpnt->proc_name, instance->io_port); | ||
| 287 | if (instance->irq == SCSI_IRQ_NONE) | 333 | if (instance->irq == SCSI_IRQ_NONE) |
| 288 | printk ("s disabled"); | 334 | printk ("s disabled"); |
| 289 | else | 335 | else |
| @@ -300,6 +346,15 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) | |||
| 300 | dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; | 346 | dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; |
| 301 | udelay(SUN3_DMA_DELAY); | 347 | udelay(SUN3_DMA_DELAY); |
| 302 | dregs->fifo_count = 0; | 348 | dregs->fifo_count = 0; |
| 349 | #ifdef SUN3_SCSI_VME | ||
| 350 | dregs->fifo_count_hi = 0; | ||
| 351 | dregs->dma_addr_hi = 0; | ||
| 352 | dregs->dma_addr_lo = 0; | ||
| 353 | dregs->dma_count_hi = 0; | ||
| 354 | dregs->dma_count_lo = 0; | ||
| 355 | |||
| 356 | dregs->ivect = VME_DATA24 | (instance->irq & 0xff); | ||
| 357 | #endif | ||
| 303 | 358 | ||
| 304 | called = 1; | 359 | called = 1; |
| 305 | 360 | ||
| @@ -367,7 +422,8 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance) | |||
| 367 | } | 422 | } |
| 368 | #endif | 423 | #endif |
| 369 | 424 | ||
| 370 | const char * sun3scsi_info (struct Scsi_Host *spnt) { | 425 | static const char *sun3scsi_info(struct Scsi_Host *spnt) |
| 426 | { | ||
| 371 | return ""; | 427 | return ""; |
| 372 | } | 428 | } |
| 373 | 429 | ||
| @@ -379,6 +435,10 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy) | |||
| 379 | unsigned short csr = dregs->csr; | 435 | unsigned short csr = dregs->csr; |
| 380 | int handled = 0; | 436 | int handled = 0; |
| 381 | 437 | ||
| 438 | #ifdef SUN3_SCSI_VME | ||
| 439 | dregs->csr &= ~CSR_DMA_ENABLE; | ||
| 440 | #endif | ||
| 441 | |||
| 382 | if(csr & ~CSR_GOOD) { | 442 | if(csr & ~CSR_GOOD) { |
| 383 | if(csr & CSR_DMA_BUSERR) { | 443 | if(csr & CSR_DMA_BUSERR) { |
| 384 | printk("scsi%d: bus error in dma\n", default_instance->host_no); | 444 | printk("scsi%d: bus error in dma\n", default_instance->host_no); |
| @@ -422,31 +482,28 @@ void sun3_sun3_debug (void) | |||
| 422 | /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ | 482 | /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ |
| 423 | static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) | 483 | static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) |
| 424 | { | 484 | { |
| 425 | #ifdef OLDDMA | ||
| 426 | if(write_flag) | ||
| 427 | memcpy(dmabuf, data, count); | ||
| 428 | else { | ||
| 429 | sun3_dma_orig_addr = data; | ||
| 430 | sun3_dma_orig_count = count; | ||
| 431 | } | ||
| 432 | #else | ||
| 433 | void *addr; | 485 | void *addr; |
| 434 | 486 | ||
| 435 | if(sun3_dma_orig_addr != NULL) | 487 | if(sun3_dma_orig_addr != NULL) |
| 436 | dvma_unmap(sun3_dma_orig_addr); | 488 | dvma_unmap(sun3_dma_orig_addr); |
| 437 | 489 | ||
| 438 | // addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); | 490 | #ifdef SUN3_SCSI_VME |
| 491 | addr = (void *)dvma_map_vme((unsigned long) data, count); | ||
| 492 | #else | ||
| 439 | addr = (void *)dvma_map((unsigned long) data, count); | 493 | addr = (void *)dvma_map((unsigned long) data, count); |
| 494 | #endif | ||
| 440 | 495 | ||
| 441 | sun3_dma_orig_addr = addr; | 496 | sun3_dma_orig_addr = addr; |
| 442 | sun3_dma_orig_count = count; | 497 | sun3_dma_orig_count = count; |
| 443 | #endif | 498 | |
| 499 | #ifndef SUN3_SCSI_VME | ||
| 444 | dregs->fifo_count = 0; | 500 | dregs->fifo_count = 0; |
| 445 | sun3_udc_write(UDC_RESET, UDC_CSR); | 501 | sun3_udc_write(UDC_RESET, UDC_CSR); |
| 446 | 502 | ||
| 447 | /* reset fifo */ | 503 | /* reset fifo */ |
| 448 | dregs->csr &= ~CSR_FIFO; | 504 | dregs->csr &= ~CSR_FIFO; |
| 449 | dregs->csr |= CSR_FIFO; | 505 | dregs->csr |= CSR_FIFO; |
| 506 | #endif | ||
| 450 | 507 | ||
| 451 | /* set direction */ | 508 | /* set direction */ |
| 452 | if(write_flag) | 509 | if(write_flag) |
| @@ -454,6 +511,17 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri | |||
| 454 | else | 511 | else |
| 455 | dregs->csr &= ~CSR_SEND; | 512 | dregs->csr &= ~CSR_SEND; |
| 456 | 513 | ||
| 514 | #ifdef SUN3_SCSI_VME | ||
| 515 | dregs->csr |= CSR_PACK_ENABLE; | ||
| 516 | |||
| 517 | dregs->dma_addr_hi = ((unsigned long)addr >> 16); | ||
| 518 | dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); | ||
| 519 | |||
| 520 | dregs->dma_count_hi = 0; | ||
| 521 | dregs->dma_count_lo = 0; | ||
| 522 | dregs->fifo_count_hi = 0; | ||
| 523 | dregs->fifo_count = 0; | ||
| 524 | #else | ||
| 457 | /* byte count for fifo */ | 525 | /* byte count for fifo */ |
| 458 | dregs->fifo_count = count; | 526 | dregs->fifo_count = count; |
| 459 | 527 | ||
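On the VME variant the DVMA address returned by dvma_map_vme() is programmed as two 16-bit halves into dma_addr_hi and dma_addr_lo. A worked example of the split above, using a made-up address purely for illustration:

    unsigned long addr = 0x0002a400;    /* hypothetical dvma_map_vme() result */
    unsigned short hi = addr >> 16;     /* 0x0002 -> dregs->dma_addr_hi */
    unsigned short lo = addr & 0xffff;  /* 0xa400 -> dregs->dma_addr_lo */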
| @@ -467,17 +535,12 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri | |||
| 467 | printk("scsi%d: fifo_mismatch %04x not %04x\n", | 535 | printk("scsi%d: fifo_mismatch %04x not %04x\n", |
| 468 | default_instance->host_no, dregs->fifo_count, | 536 | default_instance->host_no, dregs->fifo_count, |
| 469 | (unsigned int) count); | 537 | (unsigned int) count); |
| 470 | NCR5380_print(default_instance); | 538 | NCR5380_dprint(NDEBUG_DMA, default_instance); |
| 471 | } | 539 | } |
| 472 | 540 | ||
| 473 | /* setup udc */ | 541 | /* setup udc */ |
| 474 | #ifdef OLDDMA | ||
| 475 | udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8); | ||
| 476 | udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff); | ||
| 477 | #else | ||
| 478 | udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); | 542 | udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); |
| 479 | udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); | 543 | udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); |
| 480 | #endif | ||
| 481 | udc_regs->count = count/2; /* count in words */ | 544 | udc_regs->count = count/2; /* count in words */ |
| 482 | udc_regs->mode_hi = UDC_MODE_HIWORD; | 545 | udc_regs->mode_hi = UDC_MODE_HIWORD; |
| 483 | if(write_flag) { | 546 | if(write_flag) { |
| @@ -501,11 +564,13 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri | |||
| 501 | 564 | ||
| 502 | /* interrupt enable */ | 565 | /* interrupt enable */ |
| 503 | sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); | 566 | sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); |
| 567 | #endif | ||
| 504 | 568 | ||
| 505 | return count; | 569 | return count; |
| 506 | 570 | ||
| 507 | } | 571 | } |
| 508 | 572 | ||
| 573 | #ifndef SUN3_SCSI_VME | ||
| 509 | static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) | 574 | static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) |
| 510 | { | 575 | { |
| 511 | unsigned short resid; | 576 | unsigned short resid; |
| @@ -518,6 +583,7 @@ static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) | |||
| 518 | 583 | ||
| 519 | return (unsigned long) resid; | 584 | return (unsigned long) resid; |
| 520 | } | 585 | } |
| 586 | #endif | ||
| 521 | 587 | ||
| 522 | static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) | 588 | static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) |
| 523 | { | 589 | { |
| @@ -536,8 +602,23 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, | |||
| 536 | 602 | ||
| 537 | static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) | 603 | static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) |
| 538 | { | 604 | { |
| 605 | #ifdef SUN3_SCSI_VME | ||
| 606 | unsigned short csr; | ||
| 607 | |||
| 608 | csr = dregs->csr; | ||
| 539 | 609 | ||
| 610 | dregs->dma_count_hi = (sun3_dma_orig_count >> 16); | ||
| 611 | dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); | ||
| 612 | |||
| 613 | dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); | ||
| 614 | dregs->fifo_count = (sun3_dma_orig_count & 0xffff); | ||
| 615 | |||
| 616 | /* if(!(csr & CSR_DMA_ENABLE)) | ||
| 617 | * dregs->csr |= CSR_DMA_ENABLE; | ||
| 618 | */ | ||
| 619 | #else | ||
| 540 | sun3_udc_write(UDC_CHN_START, UDC_CSR); | 620 | sun3_udc_write(UDC_CHN_START, UDC_CSR); |
| 621 | #endif | ||
| 541 | 622 | ||
| 542 | return 0; | 623 | return 0; |
| 543 | } | 624 | } |
| @@ -545,12 +626,46 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) | |||
| 545 | /* clean up after our dma is done */ | 626 | /* clean up after our dma is done */ |
| 546 | static int sun3scsi_dma_finish(int write_flag) | 627 | static int sun3scsi_dma_finish(int write_flag) |
| 547 | { | 628 | { |
| 548 | unsigned short count; | 629 | unsigned short __maybe_unused count; |
| 549 | unsigned short fifo; | 630 | unsigned short fifo; |
| 550 | int ret = 0; | 631 | int ret = 0; |
| 551 | 632 | ||
| 552 | sun3_dma_active = 0; | 633 | sun3_dma_active = 0; |
| 553 | #if 1 | 634 | |
| 635 | #ifdef SUN3_SCSI_VME | ||
| 636 | dregs->csr &= ~CSR_DMA_ENABLE; | ||
| 637 | |||
| 638 | fifo = dregs->fifo_count; | ||
| 639 | if (write_flag) { | ||
| 640 | if ((fifo > 0) && (fifo < sun3_dma_orig_count)) | ||
| 641 | fifo++; | ||
| 642 | } | ||
| 643 | |||
| 644 | last_residual = fifo; | ||
| 645 | /* empty bytes from the fifo which didn't make it */ | ||
| 646 | if ((!write_flag) && (dregs->csr & CSR_LEFT)) { | ||
| 647 | unsigned char *vaddr; | ||
| 648 | |||
| 649 | vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); | ||
| 650 | |||
| 651 | vaddr += (sun3_dma_orig_count - fifo); | ||
| 652 | vaddr--; | ||
| 653 | |||
| 654 | switch (dregs->csr & CSR_LEFT) { | ||
| 655 | case CSR_LEFT_3: | ||
| 656 | *vaddr = (dregs->bpack_lo & 0xff00) >> 8; | ||
| 657 | vaddr--; | ||
| 658 | |||
| 659 | case CSR_LEFT_2: | ||
| 660 | *vaddr = (dregs->bpack_hi & 0x00ff); | ||
| 661 | vaddr--; | ||
| 662 | |||
| 663 | case CSR_LEFT_1: | ||
| 664 | *vaddr = (dregs->bpack_hi & 0xff00) >> 8; | ||
| 665 | break; | ||
| 666 | } | ||
| 667 | } | ||
| 668 | #else | ||
| 554 | // check to empty the fifo on a read | 669 | // check to empty the fifo on a read |
| 555 | if(!write_flag) { | 670 | if(!write_flag) { |
| 556 | int tmo = 20000; /* .2 sec */ | 671 | int tmo = 20000; /* .2 sec */ |
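In the VME branch of sun3scsi_dma_finish() above, CSR_LEFT reports how many bytes (one to three) are still sitting in the byte-pack registers after a read, and the switch deliberately omits break between CSR_LEFT_3, CSR_LEFT_2 and CSR_LEFT_1 so that each case flushes one stranded byte and steps vaddr back. A small self-contained sketch of the same fall-through, with made-up byte values standing in for the bpack register reads:

    unsigned char buf[8] = { 0 };
    unsigned char pack0 = 0x11, pack1 = 0x22, pack2 = 0x33; /* stand-ins for the bpack bytes */
    unsigned char *vaddr = buf + 5;     /* last byte the DMA engine left unfilled */
    int left = 3;                       /* count decoded from CSR_LEFT */

    switch (left) {
    case 3: *vaddr-- = pack2;           /* fall through */
    case 2: *vaddr-- = pack1;           /* fall through */
    case 1: *vaddr   = pack0;
            break;
    }
    /* buf[3], buf[4], buf[5] now hold 0x11, 0x22, 0x33 */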
| @@ -566,28 +681,8 @@ static int sun3scsi_dma_finish(int write_flag) | |||
| 566 | udelay(10); | 681 | udelay(10); |
| 567 | } | 682 | } |
| 568 | } | 683 | } |
| 569 | |||
| 570 | #endif | ||
| 571 | 684 | ||
| 572 | count = sun3scsi_dma_count(default_instance); | 685 | count = sun3scsi_dma_count(default_instance); |
| 573 | #ifdef OLDDMA | ||
| 574 | |||
| 575 | /* if we've finished a read, copy out the data we read */ | ||
| 576 | if(sun3_dma_orig_addr) { | ||
| 577 | /* check for residual bytes after dma end */ | ||
| 578 | if(count && (NCR5380_read(BUS_AND_STATUS_REG) & | ||
| 579 | (BASR_PHASE_MATCH | BASR_ACK))) { | ||
| 580 | printk("scsi%d: sun3_scsi_finish: read overrun baby... ", default_instance->host_no); | ||
| 581 | printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG)); | ||
| 582 | ret = count; | ||
| 583 | } | ||
| 584 | |||
| 585 | /* copy in what we dma'd no matter what */ | ||
| 586 | memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count); | ||
| 587 | sun3_dma_orig_addr = NULL; | ||
| 588 | |||
| 589 | } | ||
| 590 | #else | ||
| 591 | 686 | ||
| 592 | fifo = dregs->fifo_count; | 687 | fifo = dregs->fifo_count; |
| 593 | last_residual = fifo; | 688 | last_residual = fifo; |
| @@ -605,10 +700,23 @@ static int sun3scsi_dma_finish(int write_flag) | |||
| 605 | vaddr[-2] = (data & 0xff00) >> 8; | 700 | vaddr[-2] = (data & 0xff00) >> 8; |
| 606 | vaddr[-1] = (data & 0xff); | 701 | vaddr[-1] = (data & 0xff); |
| 607 | } | 702 | } |
| 703 | #endif | ||
| 608 | 704 | ||
| 609 | dvma_unmap(sun3_dma_orig_addr); | 705 | dvma_unmap(sun3_dma_orig_addr); |
| 610 | sun3_dma_orig_addr = NULL; | 706 | sun3_dma_orig_addr = NULL; |
| 611 | #endif | 707 | |
| 708 | #ifdef SUN3_SCSI_VME | ||
| 709 | dregs->dma_addr_hi = 0; | ||
| 710 | dregs->dma_addr_lo = 0; | ||
| 711 | dregs->dma_count_hi = 0; | ||
| 712 | dregs->dma_count_lo = 0; | ||
| 713 | |||
| 714 | dregs->fifo_count = 0; | ||
| 715 | dregs->fifo_count_hi = 0; | ||
| 716 | |||
| 717 | dregs->csr &= ~CSR_SEND; | ||
| 718 | /* dregs->csr |= CSR_DMA_ENABLE; */ | ||
| 719 | #else | ||
| 612 | sun3_udc_write(UDC_RESET, UDC_CSR); | 720 | sun3_udc_write(UDC_RESET, UDC_CSR); |
| 613 | dregs->fifo_count = 0; | 721 | dregs->fifo_count = 0; |
| 614 | dregs->csr &= ~CSR_SEND; | 722 | dregs->csr &= ~CSR_SEND; |
| @@ -616,6 +724,7 @@ static int sun3scsi_dma_finish(int write_flag) | |||
| 616 | /* reset fifo */ | 724 | /* reset fifo */ |
| 617 | dregs->csr &= ~CSR_FIFO; | 725 | dregs->csr &= ~CSR_FIFO; |
| 618 | dregs->csr |= CSR_FIFO; | 726 | dregs->csr |= CSR_FIFO; |
| 727 | #endif | ||
| 619 | 728 | ||
| 620 | sun3_dma_setup_done = NULL; | 729 | sun3_dma_setup_done = NULL; |
| 621 | 730 | ||
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index a8da9c710fea..e96a37cf06ac 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h | |||
| @@ -29,12 +29,8 @@ | |||
| 29 | * 1+ (800) 334-5454 | 29 | * 1+ (800) 334-5454 |
| 30 | */ | 30 | */ |
| 31 | 31 | ||
| 32 | /* | 32 | #ifndef SUN3_SCSI_H |
| 33 | * $Log: cumana_NCR5380.h,v $ | 33 | #define SUN3_SCSI_H |
| 34 | */ | ||
| 35 | |||
| 36 | #ifndef SUN3_NCR5380_H | ||
| 37 | #define SUN3_NCR5380_H | ||
| 38 | 34 | ||
| 39 | #define SUN3SCSI_PUBLIC_RELEASE 1 | 35 | #define SUN3SCSI_PUBLIC_RELEASE 1 |
| 40 | 36 | ||
| @@ -82,8 +78,6 @@ static int sun3scsi_release (struct Scsi_Host *); | |||
| 82 | #define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" | 78 | #define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" |
| 83 | #endif | 79 | #endif |
| 84 | 80 | ||
| 85 | #ifndef HOSTS_C | ||
| 86 | |||
| 87 | #define NCR5380_implementation_fields \ | 81 | #define NCR5380_implementation_fields \ |
| 88 | int port, ctrl | 82 | int port, ctrl |
| 89 | 83 | ||
| @@ -108,9 +102,6 @@ static int sun3scsi_release (struct Scsi_Host *); | |||
| 108 | #define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0) | 102 | #define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0) |
| 109 | #define NCR5380_dma_residual sun3scsi_dma_residual | 103 | #define NCR5380_dma_residual sun3scsi_dma_residual |
| 110 | 104 | ||
| 111 | #define BOARD_NORMAL 0 | ||
| 112 | #define BOARD_NCR53C400 1 | ||
| 113 | |||
| 114 | /* additional registers - mainly DMA control regs */ | 105 | /* additional registers - mainly DMA control regs */ |
| 115 | /* these start at regbase + 8 -- directly after the NCR regs */ | 106 | /* these start at regbase + 8 -- directly after the NCR regs */ |
| 116 | struct sun3_dma_regs { | 107 | struct sun3_dma_regs { |
| @@ -191,189 +182,5 @@ struct sun3_udc_regs { | |||
| 191 | 182 | ||
| 192 | #define VME_DATA24 0x3d00 | 183 | #define VME_DATA24 0x3d00 |
| 193 | 184 | ||
| 194 | // debugging printk's, taken from atari_scsi.h | 185 | #endif /* SUN3_SCSI_H */ |
| 195 | /* Debugging printk definitions: | ||
| 196 | * | ||
| 197 | * ARB -> arbitration | ||
| 198 | * ASEN -> auto-sense | ||
| 199 | * DMA -> DMA | ||
| 200 | * HSH -> PIO handshake | ||
| 201 | * INF -> information transfer | ||
| 202 | * INI -> initialization | ||
| 203 | * INT -> interrupt | ||
| 204 | * LNK -> linked commands | ||
| 205 | * MAIN -> NCR5380_main() control flow | ||
| 206 | * NDAT -> no data-out phase | ||
| 207 | * NWR -> no write commands | ||
| 208 | * PIO -> PIO transfers | ||
| 209 | * PDMA -> pseudo DMA (unused on Atari) | ||
| 210 | * QU -> queues | ||
| 211 | * RSL -> reselections | ||
| 212 | * SEL -> selections | ||
| 213 | * USL -> usleep cpde (unused on Atari) | ||
| 214 | * LBS -> last byte sent (unused on Atari) | ||
| 215 | * RSS -> restarting of selections | ||
| 216 | * EXT -> extended messages | ||
| 217 | * ABRT -> aborting and resetting | ||
| 218 | * TAG -> queue tag handling | ||
| 219 | * MER -> merging of consec. buffers | ||
| 220 | * | ||
| 221 | */ | ||
| 222 | |||
| 223 | #include "NCR5380.h" | ||
| 224 | |||
| 225 | #if NDEBUG & NDEBUG_ARBITRATION | ||
| 226 | #define ARB_PRINTK(format, args...) \ | ||
| 227 | printk(KERN_DEBUG format , ## args) | ||
| 228 | #else | ||
| 229 | #define ARB_PRINTK(format, args...) | ||
| 230 | #endif | ||
| 231 | #if NDEBUG & NDEBUG_AUTOSENSE | ||
| 232 | #define ASEN_PRINTK(format, args...) \ | ||
| 233 | printk(KERN_DEBUG format , ## args) | ||
| 234 | #else | ||
| 235 | #define ASEN_PRINTK(format, args...) | ||
| 236 | #endif | ||
| 237 | #if NDEBUG & NDEBUG_DMA | ||
| 238 | #define DMA_PRINTK(format, args...) \ | ||
| 239 | printk(KERN_DEBUG format , ## args) | ||
| 240 | #else | ||
| 241 | #define DMA_PRINTK(format, args...) | ||
| 242 | #endif | ||
| 243 | #if NDEBUG & NDEBUG_HANDSHAKE | ||
| 244 | #define HSH_PRINTK(format, args...) \ | ||
| 245 | printk(KERN_DEBUG format , ## args) | ||
| 246 | #else | ||
| 247 | #define HSH_PRINTK(format, args...) | ||
| 248 | #endif | ||
| 249 | #if NDEBUG & NDEBUG_INFORMATION | ||
| 250 | #define INF_PRINTK(format, args...) \ | ||
| 251 | printk(KERN_DEBUG format , ## args) | ||
| 252 | #else | ||
| 253 | #define INF_PRINTK(format, args...) | ||
| 254 | #endif | ||
| 255 | #if NDEBUG & NDEBUG_INIT | ||
| 256 | #define INI_PRINTK(format, args...) \ | ||
| 257 | printk(KERN_DEBUG format , ## args) | ||
| 258 | #else | ||
| 259 | #define INI_PRINTK(format, args...) | ||
| 260 | #endif | ||
| 261 | #if NDEBUG & NDEBUG_INTR | ||
| 262 | #define INT_PRINTK(format, args...) \ | ||
| 263 | printk(KERN_DEBUG format , ## args) | ||
| 264 | #else | ||
| 265 | #define INT_PRINTK(format, args...) | ||
| 266 | #endif | ||
| 267 | #if NDEBUG & NDEBUG_LINKED | ||
| 268 | #define LNK_PRINTK(format, args...) \ | ||
| 269 | printk(KERN_DEBUG format , ## args) | ||
| 270 | #else | ||
| 271 | #define LNK_PRINTK(format, args...) | ||
| 272 | #endif | ||
| 273 | #if NDEBUG & NDEBUG_MAIN | ||
| 274 | #define MAIN_PRINTK(format, args...) \ | ||
| 275 | printk(KERN_DEBUG format , ## args) | ||
| 276 | #else | ||
| 277 | #define MAIN_PRINTK(format, args...) | ||
| 278 | #endif | ||
| 279 | #if NDEBUG & NDEBUG_NO_DATAOUT | ||
| 280 | #define NDAT_PRINTK(format, args...) \ | ||
| 281 | printk(KERN_DEBUG format , ## args) | ||
| 282 | #else | ||
| 283 | #define NDAT_PRINTK(format, args...) | ||
| 284 | #endif | ||
| 285 | #if NDEBUG & NDEBUG_NO_WRITE | ||
| 286 | #define NWR_PRINTK(format, args...) \ | ||
| 287 | printk(KERN_DEBUG format , ## args) | ||
| 288 | #else | ||
| 289 | #define NWR_PRINTK(format, args...) | ||
| 290 | #endif | ||
| 291 | #if NDEBUG & NDEBUG_PIO | ||
| 292 | #define PIO_PRINTK(format, args...) \ | ||
| 293 | printk(KERN_DEBUG format , ## args) | ||
| 294 | #else | ||
| 295 | #define PIO_PRINTK(format, args...) | ||
| 296 | #endif | ||
| 297 | #if NDEBUG & NDEBUG_PSEUDO_DMA | ||
| 298 | #define PDMA_PRINTK(format, args...) \ | ||
| 299 | printk(KERN_DEBUG format , ## args) | ||
| 300 | #else | ||
| 301 | #define PDMA_PRINTK(format, args...) | ||
| 302 | #endif | ||
| 303 | #if NDEBUG & NDEBUG_QUEUES | ||
| 304 | #define QU_PRINTK(format, args...) \ | ||
| 305 | printk(KERN_DEBUG format , ## args) | ||
| 306 | #else | ||
| 307 | #define QU_PRINTK(format, args...) | ||
| 308 | #endif | ||
| 309 | #if NDEBUG & NDEBUG_RESELECTION | ||
| 310 | #define RSL_PRINTK(format, args...) \ | ||
| 311 | printk(KERN_DEBUG format , ## args) | ||
| 312 | #else | ||
| 313 | #define RSL_PRINTK(format, args...) | ||
| 314 | #endif | ||
| 315 | #if NDEBUG & NDEBUG_SELECTION | ||
| 316 | #define SEL_PRINTK(format, args...) \ | ||
| 317 | printk(KERN_DEBUG format , ## args) | ||
| 318 | #else | ||
| 319 | #define SEL_PRINTK(format, args...) | ||
| 320 | #endif | ||
| 321 | #if NDEBUG & NDEBUG_USLEEP | ||
| 322 | #define USL_PRINTK(format, args...) \ | ||
| 323 | printk(KERN_DEBUG format , ## args) | ||
| 324 | #else | ||
| 325 | #define USL_PRINTK(format, args...) | ||
| 326 | #endif | ||
| 327 | #if NDEBUG & NDEBUG_LAST_BYTE_SENT | ||
| 328 | #define LBS_PRINTK(format, args...) \ | ||
| 329 | printk(KERN_DEBUG format , ## args) | ||
| 330 | #else | ||
| 331 | #define LBS_PRINTK(format, args...) | ||
| 332 | #endif | ||
| 333 | #if NDEBUG & NDEBUG_RESTART_SELECT | ||
| 334 | #define RSS_PRINTK(format, args...) \ | ||
| 335 | printk(KERN_DEBUG format , ## args) | ||
| 336 | #else | ||
| 337 | #define RSS_PRINTK(format, args...) | ||
| 338 | #endif | ||
| 339 | #if NDEBUG & NDEBUG_EXTENDED | ||
| 340 | #define EXT_PRINTK(format, args...) \ | ||
| 341 | printk(KERN_DEBUG format , ## args) | ||
| 342 | #else | ||
| 343 | #define EXT_PRINTK(format, args...) | ||
| 344 | #endif | ||
| 345 | #if NDEBUG & NDEBUG_ABORT | ||
| 346 | #define ABRT_PRINTK(format, args...) \ | ||
| 347 | printk(KERN_DEBUG format , ## args) | ||
| 348 | #else | ||
| 349 | #define ABRT_PRINTK(format, args...) | ||
| 350 | #endif | ||
| 351 | #if NDEBUG & NDEBUG_TAGS | ||
| 352 | #define TAG_PRINTK(format, args...) \ | ||
| 353 | printk(KERN_DEBUG format , ## args) | ||
| 354 | #else | ||
| 355 | #define TAG_PRINTK(format, args...) | ||
| 356 | #endif | ||
| 357 | #if NDEBUG & NDEBUG_MERGING | ||
| 358 | #define MER_PRINTK(format, args...) \ | ||
| 359 | printk(KERN_DEBUG format , ## args) | ||
| 360 | #else | ||
| 361 | #define MER_PRINTK(format, args...) | ||
| 362 | #endif | ||
| 363 | |||
| 364 | /* conditional macros for NCR5380_print_{,phase,status} */ | ||
| 365 | |||
| 366 | #define NCR_PRINT(mask) \ | ||
| 367 | ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0) | ||
| 368 | |||
| 369 | #define NCR_PRINT_PHASE(mask) \ | ||
| 370 | ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0) | ||
| 371 | |||
| 372 | #define NCR_PRINT_STATUS(mask) \ | ||
| 373 | ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0) | ||
| 374 | |||
| 375 | |||
| 376 | |||
| 377 | #endif /* ndef HOSTS_C */ | ||
| 378 | #endif /* SUN3_NCR5380_H */ | ||
| 379 | 186 | ||
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index a3dd55d1d2fd..1eeece6e2040 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c | |||
| @@ -1,589 +1,3 @@ | |||
| 1 | /* | ||
| 2 | * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) | ||
| 3 | * | ||
| 4 | * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) | ||
| 5 | * | ||
| 6 | * VME support added by Sam Creasey | ||
| 7 | * | ||
| 8 | * Adapted from sun3_scsi.c -- see there for other headers | ||
| 9 | * | ||
| 10 | * TODO: modify this driver to support multiple Sun3 SCSI VME boards | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 14 | #define AUTOSENSE | ||
| 15 | |||
| 16 | #include <linux/types.h> | ||
| 17 | #include <linux/stddef.h> | ||
| 18 | #include <linux/ctype.h> | ||
| 19 | #include <linux/delay.h> | ||
| 20 | |||
| 21 | #include <linux/module.h> | ||
| 22 | #include <linux/signal.h> | ||
| 23 | #include <linux/ioport.h> | ||
| 24 | #include <linux/init.h> | ||
| 25 | #include <linux/blkdev.h> | ||
| 26 | |||
| 27 | #include <asm/io.h> | ||
| 28 | |||
| 29 | #include <asm/sun3ints.h> | ||
| 30 | #include <asm/dvma.h> | ||
| 31 | #include <asm/idprom.h> | ||
| 32 | #include <asm/machines.h> | ||
| 33 | |||
| 34 | #define SUN3_SCSI_VME | 1 | #define SUN3_SCSI_VME |
| 35 | 2 | ||
| 36 | #undef SUN3_SCSI_DEBUG | 3 | #include "sun3_scsi.c" |
| 37 | |||
| 38 | /* dma on! */ | ||
| 39 | #define REAL_DMA | ||
| 40 | |||
| 41 | #define NDEBUG 0 | ||
| 42 | |||
| 43 | #define NDEBUG_ABORT 0x00100000 | ||
| 44 | #define NDEBUG_TAGS 0x00200000 | ||
| 45 | #define NDEBUG_MERGING 0x00400000 | ||
| 46 | |||
| 47 | #include "scsi.h" | ||
| 48 | #include "initio.h" | ||
| 49 | #include <scsi/scsi_host.h> | ||
| 50 | #include "sun3_scsi.h" | ||
| 51 | |||
| 52 | extern int sun3_map_test(unsigned long, char *); | ||
| 53 | |||
| 54 | #define USE_WRAPPER | ||
| 55 | /*#define RESET_BOOT */ | ||
| 56 | #define DRIVER_SETUP | ||
| 57 | |||
| 58 | /* | ||
| 59 | * BUG can be used to trigger a strange code-size related hang on 2.1 kernels | ||
| 60 | */ | ||
| 61 | #ifdef BUG | ||
| 62 | #undef RESET_BOOT | ||
| 63 | #undef DRIVER_SETUP | ||
| 64 | #endif | ||
| 65 | |||
| 66 | /* #define SUPPORT_TAGS */ | ||
| 67 | |||
| 68 | //#define ENABLE_IRQ() enable_irq( SUN3_VEC_VMESCSI0 ); | ||
| 69 | #define ENABLE_IRQ() | ||
| 70 | |||
| 71 | |||
| 72 | static irqreturn_t scsi_sun3_intr(int irq, void *dummy); | ||
| 73 | static inline unsigned char sun3scsi_read(int reg); | ||
| 74 | static inline void sun3scsi_write(int reg, int value); | ||
| 75 | |||
| 76 | static int setup_can_queue = -1; | ||
| 77 | module_param(setup_can_queue, int, 0); | ||
| 78 | static int setup_cmd_per_lun = -1; | ||
| 79 | module_param(setup_cmd_per_lun, int, 0); | ||
| 80 | static int setup_sg_tablesize = -1; | ||
| 81 | module_param(setup_sg_tablesize, int, 0); | ||
| 82 | #ifdef SUPPORT_TAGS | ||
| 83 | static int setup_use_tagged_queuing = -1; | ||
| 84 | module_param(setup_use_tagged_queuing, int, 0); | ||
| 85 | #endif | ||
| 86 | static int setup_hostid = -1; | ||
| 87 | module_param(setup_hostid, int, 0); | ||
| 88 | |||
| 89 | static struct scsi_cmnd *sun3_dma_setup_done = NULL; | ||
| 90 | |||
| 91 | #define AFTER_RESET_DELAY (HZ/2) | ||
| 92 | |||
| 93 | /* ms to wait after hitting dma regs */ | ||
| 94 | #define SUN3_DMA_DELAY 10 | ||
| 95 | |||
| 96 | /* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ | ||
| 97 | #define SUN3_DVMA_BUFSIZE 0xe000 | ||
| 98 | |||
| 99 | /* minimum number of bytes to do dma on */ | ||
| 100 | #define SUN3_DMA_MINSIZE 128 | ||
| 101 | |||
| 102 | static volatile unsigned char *sun3_scsi_regp; | ||
| 103 | static volatile struct sun3_dma_regs *dregs; | ||
| 104 | #ifdef OLDDMA | ||
| 105 | static unsigned char *dmabuf = NULL; /* dma memory buffer */ | ||
| 106 | #endif | ||
| 107 | static unsigned char *sun3_dma_orig_addr = NULL; | ||
| 108 | static unsigned long sun3_dma_orig_count = 0; | ||
| 109 | static int sun3_dma_active = 0; | ||
| 110 | static unsigned long last_residual = 0; | ||
| 111 | |||
| 112 | /* | ||
| 113 | * NCR 5380 register access functions | ||
| 114 | */ | ||
| 115 | |||
| 116 | static inline unsigned char sun3scsi_read(int reg) | ||
| 117 | { | ||
| 118 | return( sun3_scsi_regp[reg] ); | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline void sun3scsi_write(int reg, int value) | ||
| 122 | { | ||
| 123 | sun3_scsi_regp[reg] = value; | ||
| 124 | } | ||
| 125 | |||
| 126 | /* | ||
| 127 | * XXX: status debug | ||
| 128 | */ | ||
| 129 | static struct Scsi_Host *default_instance; | ||
| 130 | |||
| 131 | /* | ||
| 132 | * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) | ||
| 133 | * | ||
| 134 | * Purpose : initializes mac NCR5380 driver based on the | ||
| 135 | * command line / compile time port and irq definitions. | ||
| 136 | * | ||
| 137 | * Inputs : tpnt - template for this SCSI adapter. | ||
| 138 | * | ||
| 139 | * Returns : 1 if a host adapter was found, 0 if not. | ||
| 140 | * | ||
| 141 | */ | ||
| 142 | |||
| 143 | static int __init sun3scsi_detect(struct scsi_host_template * tpnt) | ||
| 144 | { | ||
| 145 | unsigned long ioaddr, irq = 0; | ||
| 146 | static int called = 0; | ||
| 147 | struct Scsi_Host *instance; | ||
| 148 | int i; | ||
| 149 | unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, | ||
| 150 | IOBASE_SUN3_VMESCSI + 0x4000, | ||
| 151 | 0 }; | ||
| 152 | unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, | ||
| 153 | SUN3_VEC_VMESCSI1, | ||
| 154 | 0 }; | ||
| 155 | /* check that this machine has an onboard 5380 */ | ||
| 156 | switch(idprom->id_machtype) { | ||
| 157 | case SM_SUN3|SM_3_160: | ||
| 158 | case SM_SUN3|SM_3_260: | ||
| 159 | break; | ||
| 160 | |||
| 161 | default: | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | if(called) | ||
| 166 | return 0; | ||
| 167 | |||
| 168 | tpnt->proc_name = "Sun3 5380 VME SCSI"; | ||
| 169 | |||
| 170 | /* setup variables */ | ||
| 171 | tpnt->can_queue = | ||
| 172 | (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; | ||
| 173 | tpnt->cmd_per_lun = | ||
| 174 | (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; | ||
| 175 | tpnt->sg_tablesize = | ||
| 176 | (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; | ||
| 177 | |||
| 178 | if (setup_hostid >= 0) | ||
| 179 | tpnt->this_id = setup_hostid; | ||
| 180 | else { | ||
| 181 | /* use 7 as default */ | ||
| 182 | tpnt->this_id = 7; | ||
| 183 | } | ||
| 184 | |||
| 185 | ioaddr = 0; | ||
| 186 | for(i = 0; addrs[i] != 0; i++) { | ||
| 187 | unsigned char x; | ||
| 188 | |||
| 189 | ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, | ||
| 190 | SUN3_PAGE_TYPE_VME16); | ||
| 191 | irq = vecs[i]; | ||
| 192 | sun3_scsi_regp = (unsigned char *)ioaddr; | ||
| 193 | |||
| 194 | dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); | ||
| 195 | |||
| 196 | if(sun3_map_test((unsigned long)dregs, &x)) { | ||
| 197 | unsigned short oldcsr; | ||
| 198 | |||
| 199 | oldcsr = dregs->csr; | ||
| 200 | dregs->csr = 0; | ||
| 201 | udelay(SUN3_DMA_DELAY); | ||
| 202 | if(dregs->csr == 0x1400) | ||
| 203 | break; | ||
| 204 | |||
| 205 | dregs->csr = oldcsr; | ||
| 206 | } | ||
| 207 | |||
| 208 | iounmap((void *)ioaddr); | ||
| 209 | ioaddr = 0; | ||
| 210 | } | ||
| 211 | |||
| 212 | if(!ioaddr) | ||
| 213 | return 0; | ||
| 214 | |||
| 215 | #ifdef SUPPORT_TAGS | ||
| 216 | if (setup_use_tagged_queuing < 0) | ||
| 217 | setup_use_tagged_queuing = USE_TAGGED_QUEUING; | ||
| 218 | #endif | ||
| 219 | |||
| 220 | instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); | ||
| 221 | if(instance == NULL) | ||
| 222 | return 0; | ||
| 223 | |||
| 224 | default_instance = instance; | ||
| 225 | |||
| 226 | instance->io_port = (unsigned long) ioaddr; | ||
| 227 | instance->irq = irq; | ||
| 228 | |||
| 229 | NCR5380_init(instance, 0); | ||
| 230 | |||
| 231 | instance->n_io_port = 32; | ||
| 232 | |||
| 233 | ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; | ||
| 234 | |||
| 235 | if (request_irq(instance->irq, scsi_sun3_intr, | ||
| 236 | 0, "Sun3SCSI-5380VME", instance)) { | ||
| 237 | #ifndef REAL_DMA | ||
| 238 | printk("scsi%d: IRQ%d not free, interrupts disabled\n", | ||
| 239 | instance->host_no, instance->irq); | ||
| 240 | instance->irq = SCSI_IRQ_NONE; | ||
| 241 | #else | ||
| 242 | printk("scsi%d: IRQ%d not free, bailing out\n", | ||
| 243 | instance->host_no, instance->irq); | ||
| 244 | return 0; | ||
| 245 | #endif | ||
| 246 | } | ||
| 247 | |||
| 248 | printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port); | ||
| 249 | if (instance->irq == SCSI_IRQ_NONE) | ||
| 250 | printk ("s disabled"); | ||
| 251 | else | ||
| 252 | printk (" %d", instance->irq); | ||
| 253 | printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", | ||
| 254 | instance->can_queue, instance->cmd_per_lun, | ||
| 255 | SUN3SCSI_PUBLIC_RELEASE); | ||
| 256 | printk("\nscsi%d:", instance->host_no); | ||
| 257 | NCR5380_print_options(instance); | ||
| 258 | printk("\n"); | ||
| 259 | |||
| 260 | dregs->csr = 0; | ||
| 261 | udelay(SUN3_DMA_DELAY); | ||
| 262 | dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; | ||
| 263 | udelay(SUN3_DMA_DELAY); | ||
| 264 | dregs->fifo_count = 0; | ||
| 265 | dregs->fifo_count_hi = 0; | ||
| 266 | dregs->dma_addr_hi = 0; | ||
| 267 | dregs->dma_addr_lo = 0; | ||
| 268 | dregs->dma_count_hi = 0; | ||
| 269 | dregs->dma_count_lo = 0; | ||
| 270 | |||
| 271 | dregs->ivect = VME_DATA24 | (instance->irq & 0xff); | ||
| 272 | |||
| 273 | called = 1; | ||
| 274 | |||
| 275 | #ifdef RESET_BOOT | ||
| 276 | sun3_scsi_reset_boot(instance); | ||
| 277 | #endif | ||
| 278 | |||
| 279 | return 1; | ||
| 280 | } | ||
| 281 | |||
| 282 | int sun3scsi_release (struct Scsi_Host *shpnt) | ||
| 283 | { | ||
| 284 | if (shpnt->irq != SCSI_IRQ_NONE) | ||
| 285 | free_irq(shpnt->irq, shpnt); | ||
| 286 | |||
| 287 | iounmap((void *)sun3_scsi_regp); | ||
| 288 | |||
| 289 | NCR5380_exit(shpnt); | ||
| 290 | return 0; | ||
| 291 | } | ||
| 292 | |||
| 293 | #ifdef RESET_BOOT | ||
| 294 | /* | ||
| 295 | * Our 'bus reset on boot' function | ||
| 296 | */ | ||
| 297 | |||
| 298 | static void sun3_scsi_reset_boot(struct Scsi_Host *instance) | ||
| 299 | { | ||
| 300 | unsigned long end; | ||
| 301 | |||
| 302 | NCR5380_local_declare(); | ||
| 303 | NCR5380_setup(instance); | ||
| 304 | |||
| 305 | /* | ||
| 306 | * Do a SCSI reset to clean up the bus during initialization. No | ||
| 307 | * messing with the queues, interrupts, or locks necessary here. | ||
| 308 | */ | ||
| 309 | |||
| 310 | printk( "Sun3 SCSI: resetting the SCSI bus..." ); | ||
| 311 | |||
| 312 | /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ | ||
| 313 | // sun3_disable_irq( IRQ_SUN3_SCSI ); | ||
| 314 | |||
| 315 | /* get in phase */ | ||
| 316 | NCR5380_write( TARGET_COMMAND_REG, | ||
| 317 | PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); | ||
| 318 | |||
| 319 | /* assert RST */ | ||
| 320 | NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); | ||
| 321 | |||
| 322 | /* The min. reset hold time is 25us, so 40us should be enough */ | ||
| 323 | udelay( 50 ); | ||
| 324 | |||
| 325 | /* reset RST and interrupt */ | ||
| 326 | NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); | ||
| 327 | NCR5380_read( RESET_PARITY_INTERRUPT_REG ); | ||
| 328 | |||
| 329 | for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) | ||
| 330 | barrier(); | ||
| 331 | |||
| 332 | /* switch on SCSI IRQ again */ | ||
| 333 | // sun3_enable_irq( IRQ_SUN3_SCSI ); | ||
| 334 | |||
| 335 | printk( " done\n" ); | ||
| 336 | } | ||
| 337 | #endif | ||
| 338 | |||
| 339 | static const char * sun3scsi_info (struct Scsi_Host *spnt) { | ||
| 340 | return ""; | ||
| 341 | } | ||
| 342 | |||
| 343 | // safe bits for the CSR | ||
| 344 | #define CSR_GOOD 0x060f | ||
| 345 | |||
| 346 | static irqreturn_t scsi_sun3_intr(int irq, void *dummy) | ||
| 347 | { | ||
| 348 | unsigned short csr = dregs->csr; | ||
| 349 | int handled = 0; | ||
| 350 | |||
| 351 | dregs->csr &= ~CSR_DMA_ENABLE; | ||
| 352 | |||
| 353 | |||
| 354 | #ifdef SUN3_SCSI_DEBUG | ||
| 355 | printk("scsi_intr csr %x\n", csr); | ||
| 356 | #endif | ||
| 357 | |||
| 358 | if(csr & ~CSR_GOOD) { | ||
| 359 | if(csr & CSR_DMA_BUSERR) { | ||
| 360 | printk("scsi%d: bus error in dma\n", default_instance->host_no); | ||
| 361 | #ifdef SUN3_SCSI_DEBUG | ||
| 362 | printk("scsi: residual %x count %x addr %p dmaaddr %x\n", | ||
| 363 | dregs->fifo_count, | ||
| 364 | dregs->dma_count_lo | (dregs->dma_count_hi << 16), | ||
| 365 | sun3_dma_orig_addr, | ||
| 366 | dregs->dma_addr_lo | (dregs->dma_addr_hi << 16)); | ||
| 367 | #endif | ||
| 368 | } | ||
| 369 | |||
| 370 | if(csr & CSR_DMA_CONFLICT) { | ||
| 371 | printk("scsi%d: dma conflict\n", default_instance->host_no); | ||
| 372 | } | ||
| 373 | handled = 1; | ||
| 374 | } | ||
| 375 | |||
| 376 | if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { | ||
| 377 | NCR5380_intr(irq, dummy); | ||
| 378 | handled = 1; | ||
| 379 | } | ||
| 380 | |||
| 381 | return IRQ_RETVAL(handled); | ||
| 382 | } | ||
| 383 | |||
| 384 | /* | ||
| 385 | * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk; | ||
| 386 | * reentering NCR5380_print_status seems to have ugly side effects | ||
| 387 | */ | ||
| 388 | |||
| 389 | /* this doesn't seem to get used at all -- sam */ | ||
| 390 | #if 0 | ||
| 391 | void sun3_sun3_debug (void) | ||
| 392 | { | ||
| 393 | unsigned long flags; | ||
| 394 | NCR5380_local_declare(); | ||
| 395 | |||
| 396 | if (default_instance) { | ||
| 397 | local_irq_save(flags); | ||
| 398 | NCR5380_print_status(default_instance); | ||
| 399 | local_irq_restore(flags); | ||
| 400 | } | ||
| 401 | } | ||
| 402 | #endif | ||
| 403 | |||
| 404 | |||
| 405 | /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ | ||
| 406 | static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) | ||
| 407 | { | ||
| 408 | void *addr; | ||
| 409 | |||
| 410 | if(sun3_dma_orig_addr != NULL) | ||
| 411 | dvma_unmap(sun3_dma_orig_addr); | ||
| 412 | |||
| 413 | // addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); | ||
| 414 | addr = (void *)dvma_map_vme((unsigned long) data, count); | ||
| 415 | |||
| 416 | sun3_dma_orig_addr = addr; | ||
| 417 | sun3_dma_orig_count = count; | ||
| 418 | |||
| 419 | #ifdef SUN3_SCSI_DEBUG | ||
| 420 | printk("scsi: dma_setup addr %p count %x\n", addr, count); | ||
| 421 | #endif | ||
| 422 | |||
| 423 | // dregs->fifo_count = 0; | ||
| 424 | #if 0 | ||
| 425 | /* reset fifo */ | ||
| 426 | dregs->csr &= ~CSR_FIFO; | ||
| 427 | dregs->csr |= CSR_FIFO; | ||
| 428 | #endif | ||
| 429 | /* set direction */ | ||
| 430 | if(write_flag) | ||
| 431 | dregs->csr |= CSR_SEND; | ||
| 432 | else | ||
| 433 | dregs->csr &= ~CSR_SEND; | ||
| 434 | |||
| 435 | /* reset fifo */ | ||
| 436 | // dregs->csr &= ~CSR_FIFO; | ||
| 437 | // dregs->csr |= CSR_FIFO; | ||
| 438 | |||
| 439 | dregs->csr |= CSR_PACK_ENABLE; | ||
| 440 | |||
| 441 | dregs->dma_addr_hi = ((unsigned long)addr >> 16); | ||
| 442 | dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); | ||
| 443 | |||
| 444 | dregs->dma_count_hi = 0; | ||
| 445 | dregs->dma_count_lo = 0; | ||
| 446 | dregs->fifo_count_hi = 0; | ||
| 447 | dregs->fifo_count = 0; | ||
| 448 | |||
| 449 | #ifdef SUN3_SCSI_DEBUG | ||
| 450 | printk("scsi: dma_setup done csr %x\n", dregs->csr); | ||
| 451 | #endif | ||
| 452 | return count; | ||
| 453 | |||
| 454 | } | ||
| 455 | |||
| 456 | static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) | ||
| 457 | { | ||
| 458 | return last_residual; | ||
| 459 | } | ||
| 460 | |||
| 461 | static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, | ||
| 462 | struct scsi_cmnd *cmd, | ||
| 463 | int write_flag) | ||
| 464 | { | ||
| 465 | if (cmd->request->cmd_type == REQ_TYPE_FS) | ||
| 466 | return wanted; | ||
| 467 | else | ||
| 468 | return 0; | ||
| 469 | } | ||
| 470 | |||
| 471 | static int sun3scsi_dma_start(unsigned long count, char *data) | ||
| 472 | { | ||
| 473 | |||
| 474 | unsigned short csr; | ||
| 475 | |||
| 476 | csr = dregs->csr; | ||
| 477 | #ifdef SUN3_SCSI_DEBUG | ||
| 478 | printk("scsi: dma_start data %p count %x csr %x fifo %x\n", data, count, csr, dregs->fifo_count); | ||
| 479 | #endif | ||
| 480 | |||
| 481 | dregs->dma_count_hi = (sun3_dma_orig_count >> 16); | ||
| 482 | dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); | ||
| 483 | |||
| 484 | dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); | ||
| 485 | dregs->fifo_count = (sun3_dma_orig_count & 0xffff); | ||
| 486 | |||
| 487 | // if(!(csr & CSR_DMA_ENABLE)) | ||
| 488 | // dregs->csr |= CSR_DMA_ENABLE; | ||
| 489 | |||
| 490 | return 0; | ||
| 491 | } | ||
| 492 | |||
| 493 | /* clean up after our dma is done */ | ||
| 494 | static int sun3scsi_dma_finish(int write_flag) | ||
| 495 | { | ||
| 496 | unsigned short fifo; | ||
| 497 | int ret = 0; | ||
| 498 | |||
| 499 | sun3_dma_active = 0; | ||
| 500 | |||
| 501 | dregs->csr &= ~CSR_DMA_ENABLE; | ||
| 502 | |||
| 503 | fifo = dregs->fifo_count; | ||
| 504 | if(write_flag) { | ||
| 505 | if((fifo > 0) && (fifo < sun3_dma_orig_count)) | ||
| 506 | fifo++; | ||
| 507 | } | ||
| 508 | |||
| 509 | last_residual = fifo; | ||
| 510 | #ifdef SUN3_SCSI_DEBUG | ||
| 511 | printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count); | ||
| 512 | #endif | ||
| 513 | /* empty bytes from the fifo which didn't make it */ | ||
| 514 | if((!write_flag) && (dregs->csr & CSR_LEFT)) { | ||
| 515 | unsigned char *vaddr; | ||
| 516 | |||
| 517 | #ifdef SUN3_SCSI_DEBUG | ||
| 518 | printk("scsi: got left over bytes\n"); | ||
| 519 | #endif | ||
| 520 | |||
| 521 | vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); | ||
| 522 | |||
| 523 | vaddr += (sun3_dma_orig_count - fifo); | ||
| 524 | vaddr--; | ||
| 525 | |||
| 526 | switch(dregs->csr & CSR_LEFT) { | ||
| 527 | case CSR_LEFT_3: | ||
| 528 | *vaddr = (dregs->bpack_lo & 0xff00) >> 8; | ||
| 529 | vaddr--; | ||
| 530 | |||
| 531 | case CSR_LEFT_2: | ||
| 532 | *vaddr = (dregs->bpack_hi & 0x00ff); | ||
| 533 | vaddr--; | ||
| 534 | |||
| 535 | case CSR_LEFT_1: | ||
| 536 | *vaddr = (dregs->bpack_hi & 0xff00) >> 8; | ||
| 537 | break; | ||
| 538 | } | ||
| 539 | |||
| 540 | |||
| 541 | } | ||
| 542 | |||
| 543 | dvma_unmap(sun3_dma_orig_addr); | ||
| 544 | sun3_dma_orig_addr = NULL; | ||
| 545 | |||
| 546 | dregs->dma_addr_hi = 0; | ||
| 547 | dregs->dma_addr_lo = 0; | ||
| 548 | dregs->dma_count_hi = 0; | ||
| 549 | dregs->dma_count_lo = 0; | ||
| 550 | |||
| 551 | dregs->fifo_count = 0; | ||
| 552 | dregs->fifo_count_hi = 0; | ||
| 553 | |||
| 554 | dregs->csr &= ~CSR_SEND; | ||
| 555 | |||
| 556 | // dregs->csr |= CSR_DMA_ENABLE; | ||
| 557 | |||
| 558 | #if 0 | ||
| 559 | /* reset fifo */ | ||
| 560 | dregs->csr &= ~CSR_FIFO; | ||
| 561 | dregs->csr |= CSR_FIFO; | ||
| 562 | #endif | ||
| 563 | sun3_dma_setup_done = NULL; | ||
| 564 | |||
| 565 | return ret; | ||
| 566 | |||
| 567 | } | ||
| 568 | |||
| 569 | #include "sun3_NCR5380.c" | ||
| 570 | |||
| 571 | static struct scsi_host_template driver_template = { | ||
| 572 | .name = SUN3_SCSI_NAME, | ||
| 573 | .detect = sun3scsi_detect, | ||
| 574 | .release = sun3scsi_release, | ||
| 575 | .info = sun3scsi_info, | ||
| 576 | .queuecommand = sun3scsi_queue_command, | ||
| 577 | .eh_abort_handler = sun3scsi_abort, | ||
| 578 | .eh_bus_reset_handler = sun3scsi_bus_reset, | ||
| 579 | .can_queue = CAN_QUEUE, | ||
| 580 | .this_id = 7, | ||
| 581 | .sg_tablesize = SG_TABLESIZE, | ||
| 582 | .cmd_per_lun = CMD_PER_LUN, | ||
| 583 | .use_clustering = DISABLE_CLUSTERING | ||
| 584 | }; | ||
| 585 | |||
| 586 | |||
| 587 | #include "scsi_module.c" | ||
| 588 | |||
| 589 | MODULE_LICENSE("GPL"); | ||
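With the two drivers merged, the VME variant reduces to selecting a compile-time option and pulling in the shared body; the VME-only register programming lives behind that macro inside sun3_scsi.c. A minimal sketch of the pattern, assuming only the option name shown in the hunks above:

	/* sun3_scsi_vme.c -- the whole file after the merge (pattern sketch) */
	#define SUN3_SCSI_VME		/* select the VME register layout and DMA paths */
	#include "sun3_scsi.c"		/* compile the shared driver body with that option */

	/* inside the shared body, variant-specific code is bracketed like this: */
	#ifdef SUN3_SCSI_VME
		dregs->dma_addr_hi = 0;			/* VME board: clear 32-bit DMA address */
		dregs->dma_addr_lo = 0;
	#else
		sun3_udc_write(UDC_RESET, UDC_CSR);	/* onboard UDC: reset the DMA chip */
	#endif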
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index a4abce9d526e..8cc80931df14 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c | |||
| @@ -102,10 +102,6 @@ | |||
| 102 | * 15 9-11 | 102 | * 15 9-11 |
| 103 | */ | 103 | */ |
| 104 | 104 | ||
| 105 | /* | ||
| 106 | * $Log: t128.c,v $ | ||
| 107 | */ | ||
| 108 | |||
| 109 | #include <linux/signal.h> | 105 | #include <linux/signal.h> |
| 110 | #include <linux/io.h> | 106 | #include <linux/io.h> |
| 111 | #include <linux/blkdev.h> | 107 | #include <linux/blkdev.h> |
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index 1df82c28e56d..fd68cecc62af 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h | |||
| @@ -34,10 +34,6 @@ | |||
| 34 | * 1+ (800) 334-5454 | 34 | * 1+ (800) 334-5454 |
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | /* | ||
| 38 | * $Log: t128.h,v $ | ||
| 39 | */ | ||
| 40 | |||
| 41 | #ifndef T128_H | 37 | #ifndef T128_H |
| 42 | #define T128_H | 38 | #define T128_H |
| 43 | 39 | ||
| @@ -107,8 +103,6 @@ static int t128_bus_reset(struct scsi_cmnd *); | |||
| 107 | #define CAN_QUEUE 32 | 103 | #define CAN_QUEUE 32 |
| 108 | #endif | 104 | #endif |
| 109 | 105 | ||
| 110 | #ifndef HOSTS_C | ||
| 111 | |||
| 112 | #define NCR5380_implementation_fields \ | 106 | #define NCR5380_implementation_fields \ |
| 113 | void __iomem *base | 107 | void __iomem *base |
| 114 | 108 | ||
| @@ -148,6 +142,5 @@ static int t128_bus_reset(struct scsi_cmnd *); | |||
| 148 | 142 | ||
| 149 | #define T128_IRQS 0xc4a8 | 143 | #define T128_IRQS 0xc4a8 |
| 150 | 144 | ||
| 151 | #endif /* else def HOSTS_C */ | ||
| 152 | #endif /* ndef ASM */ | 145 | #endif /* ndef ASM */ |
| 153 | #endif /* T128_H */ | 146 | #endif /* T128_H */ |
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 721050090520..f42d1cee652a 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h | |||
| @@ -196,9 +196,9 @@ enum { | |||
| 196 | * @dword_2: UPIU header DW-2 | 196 | * @dword_2: UPIU header DW-2 |
| 197 | */ | 197 | */ |
| 198 | struct utp_upiu_header { | 198 | struct utp_upiu_header { |
| 199 | u32 dword_0; | 199 | __be32 dword_0; |
| 200 | u32 dword_1; | 200 | __be32 dword_1; |
| 201 | u32 dword_2; | 201 | __be32 dword_2; |
| 202 | }; | 202 | }; |
| 203 | 203 | ||
| 204 | /** | 204 | /** |
| @@ -207,7 +207,7 @@ struct utp_upiu_header { | |||
| 207 | * @cdb: Command Descriptor Block CDB DW-4 to DW-7 | 207 | * @cdb: Command Descriptor Block CDB DW-4 to DW-7 |
| 208 | */ | 208 | */ |
| 209 | struct utp_upiu_cmd { | 209 | struct utp_upiu_cmd { |
| 210 | u32 exp_data_transfer_len; | 210 | __be32 exp_data_transfer_len; |
| 211 | u8 cdb[MAX_CDB_SIZE]; | 211 | u8 cdb[MAX_CDB_SIZE]; |
| 212 | }; | 212 | }; |
| 213 | 213 | ||
| @@ -228,10 +228,10 @@ struct utp_upiu_query { | |||
| 228 | u8 idn; | 228 | u8 idn; |
| 229 | u8 index; | 229 | u8 index; |
| 230 | u8 selector; | 230 | u8 selector; |
| 231 | u16 reserved_osf; | 231 | __be16 reserved_osf; |
| 232 | u16 length; | 232 | __be16 length; |
| 233 | u32 value; | 233 | __be32 value; |
| 234 | u32 reserved[2]; | 234 | __be32 reserved[2]; |
| 235 | }; | 235 | }; |
| 236 | 236 | ||
| 237 | /** | 237 | /** |
| @@ -256,9 +256,9 @@ struct utp_upiu_req { | |||
| 256 | * @sense_data: Sense data field DW-8 to DW-12 | 256 | * @sense_data: Sense data field DW-8 to DW-12 |
| 257 | */ | 257 | */ |
| 258 | struct utp_cmd_rsp { | 258 | struct utp_cmd_rsp { |
| 259 | u32 residual_transfer_count; | 259 | __be32 residual_transfer_count; |
| 260 | u32 reserved[4]; | 260 | __be32 reserved[4]; |
| 261 | u16 sense_data_len; | 261 | __be16 sense_data_len; |
| 262 | u8 sense_data[18]; | 262 | u8 sense_data[18]; |
| 263 | }; | 263 | }; |
| 264 | 264 | ||
| @@ -286,10 +286,10 @@ struct utp_upiu_rsp { | |||
| 286 | */ | 286 | */ |
| 287 | struct utp_upiu_task_req { | 287 | struct utp_upiu_task_req { |
| 288 | struct utp_upiu_header header; | 288 | struct utp_upiu_header header; |
| 289 | u32 input_param1; | 289 | __be32 input_param1; |
| 290 | u32 input_param2; | 290 | __be32 input_param2; |
| 291 | u32 input_param3; | 291 | __be32 input_param3; |
| 292 | u32 reserved[2]; | 292 | __be32 reserved[2]; |
| 293 | }; | 293 | }; |
| 294 | 294 | ||
| 295 | /** | 295 | /** |
| @@ -301,9 +301,9 @@ struct utp_upiu_task_req { | |||
| 301 | */ | 301 | */ |
| 302 | struct utp_upiu_task_rsp { | 302 | struct utp_upiu_task_rsp { |
| 303 | struct utp_upiu_header header; | 303 | struct utp_upiu_header header; |
| 304 | u32 output_param1; | 304 | __be32 output_param1; |
| 305 | u32 output_param2; | 305 | __be32 output_param2; |
| 306 | u32 reserved[3]; | 306 | __be32 reserved[3]; |
| 307 | }; | 307 | }; |
| 308 | 308 | ||
| 309 | /** | 309 | /** |
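The UPIU descriptors are big-endian on the wire, so the header fields become __be16/__be32 and every access point converts explicitly (sparse can then flag a missed conversion). A minimal sketch of the resulting pattern, using the field names from this header; the wrapper functions are illustrative only, not part of the driver:

	/* write side: native-endian values into big-endian descriptor fields */
	static void set_query_fields(struct utp_upiu_query *req, u32 val, u16 len)
	{
		req->value  = cpu_to_be32(val);		/* native -> wire (BE) */
		req->length = cpu_to_be16(len);
	}

	/* read side: big-endian descriptor field back to native endianness */
	static u32 get_query_value(const struct utp_upiu_query *rsp)
	{
		return be32_to_cpu(rsp->value);		/* wire (BE) -> native */
	}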
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 04884d663e4e..0c2877251251 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -55,6 +55,9 @@ | |||
| 55 | /* Query request timeout */ | 55 | /* Query request timeout */ |
| 56 | #define QUERY_REQ_TIMEOUT 30 /* msec */ | 56 | #define QUERY_REQ_TIMEOUT 30 /* msec */ |
| 57 | 57 | ||
| 58 | /* Task management command timeout */ | ||
| 59 | #define TM_CMD_TIMEOUT 100 /* msecs */ | ||
| 60 | |||
| 58 | /* Expose the flag value from utp_upiu_query.value */ | 61 | /* Expose the flag value from utp_upiu_query.value */ |
| 59 | #define MASK_QUERY_UPIU_FLAG_LOC 0xFF | 62 | #define MASK_QUERY_UPIU_FLAG_LOC 0xFF |
| 60 | 63 | ||
| @@ -71,9 +74,22 @@ enum { | |||
| 71 | 74 | ||
| 72 | /* UFSHCD states */ | 75 | /* UFSHCD states */ |
| 73 | enum { | 76 | enum { |
| 74 | UFSHCD_STATE_OPERATIONAL, | ||
| 75 | UFSHCD_STATE_RESET, | 77 | UFSHCD_STATE_RESET, |
| 76 | UFSHCD_STATE_ERROR, | 78 | UFSHCD_STATE_ERROR, |
| 79 | UFSHCD_STATE_OPERATIONAL, | ||
| 80 | }; | ||
| 81 | |||
| 82 | /* UFSHCD error handling flags */ | ||
| 83 | enum { | ||
| 84 | UFSHCD_EH_IN_PROGRESS = (1 << 0), | ||
| 85 | }; | ||
| 86 | |||
| 87 | /* UFSHCD UIC layer error flags */ | ||
| 88 | enum { | ||
| 89 | UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ | ||
| 90 | UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ | ||
| 91 | UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ | ||
| 92 | UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ | ||
| 77 | }; | 93 | }; |
| 78 | 94 | ||
| 79 | /* Interrupt configuration options */ | 95 | /* Interrupt configuration options */ |
| @@ -83,6 +99,18 @@ enum { | |||
| 83 | UFSHCD_INT_CLEAR, | 99 | UFSHCD_INT_CLEAR, |
| 84 | }; | 100 | }; |
| 85 | 101 | ||
| 102 | #define ufshcd_set_eh_in_progress(h) \ | ||
| 103 | (h->eh_flags |= UFSHCD_EH_IN_PROGRESS) | ||
| 104 | #define ufshcd_eh_in_progress(h) \ | ||
| 105 | (h->eh_flags & UFSHCD_EH_IN_PROGRESS) | ||
| 106 | #define ufshcd_clear_eh_in_progress(h) \ | ||
| 107 | (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) | ||
| 108 | |||
| 109 | static void ufshcd_tmc_handler(struct ufs_hba *hba); | ||
| 110 | static void ufshcd_async_scan(void *data, async_cookie_t cookie); | ||
| 111 | static int ufshcd_reset_and_restore(struct ufs_hba *hba); | ||
| 112 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); | ||
| 113 | |||
| 86 | /* | 114 | /* |
| 87 | * ufshcd_wait_for_register - wait for register value to change | 115 | * ufshcd_wait_for_register - wait for register value to change |
| 88 | * @hba - per-adapter interface | 116 | * @hba - per-adapter interface |
| @@ -163,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs) | |||
| 163 | */ | 191 | */ |
| 164 | static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) | 192 | static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) |
| 165 | { | 193 | { |
| 166 | return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; | 194 | return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; |
| 167 | } | 195 | } |
| 168 | 196 | ||
| 169 | /** | 197 | /** |
| @@ -176,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) | |||
| 176 | static inline int | 204 | static inline int |
| 177 | ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) | 205 | ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) |
| 178 | { | 206 | { |
| 179 | return task_req_descp->header.dword_2 & MASK_OCS; | 207 | return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS; |
| 180 | } | 208 | } |
| 181 | 209 | ||
| 182 | /** | 210 | /** |
| 183 | * ufshcd_get_tm_free_slot - get a free slot for task management request | 211 | * ufshcd_get_tm_free_slot - get a free slot for task management request |
| 184 | * @hba: per adapter instance | 212 | * @hba: per adapter instance |
| 213 | * @free_slot: pointer to variable with available slot value | ||
| 185 | * | 214 | * |
| 186 | * Returns maximum number of task management request slots in case of | 215 | * Get a free tag and lock it until ufshcd_put_tm_slot() is called. |
| 187 | * task management queue full or returns the free slot number | 216 | * Returns 0 if free slot is not available, else return 1 with tag value |
| 217 | * in @free_slot. | ||
| 188 | */ | 218 | */ |
| 189 | static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) | 219 | static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) |
| 190 | { | 220 | { |
| 191 | return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); | 221 | int tag; |
| 222 | bool ret = false; | ||
| 223 | |||
| 224 | if (!free_slot) | ||
| 225 | goto out; | ||
| 226 | |||
| 227 | do { | ||
| 228 | tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); | ||
| 229 | if (tag >= hba->nutmrs) | ||
| 230 | goto out; | ||
| 231 | } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); | ||
| 232 | |||
| 233 | *free_slot = tag; | ||
| 234 | ret = true; | ||
| 235 | out: | ||
| 236 | return ret; | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) | ||
| 240 | { | ||
| 241 | clear_bit_unlock(slot, &hba->tm_slots_in_use); | ||
| 192 | } | 242 | } |
| 193 | 243 | ||
| 194 | /** | 244 | /** |
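ufshcd_get_tm_free_slot() now claims a task-management slot atomically (find_first_zero_bit plus test_and_set_bit_lock) instead of merely reporting the first free bit, so two contexts can no longer race to the same slot. A minimal caller-side sketch, with error handling trimmed; the retry policy shown is illustrative only:

	int free_slot;

	if (!ufshcd_get_tm_free_slot(hba, &free_slot))
		return -EBUSY;			/* all TM slots claimed; caller may retry */

	/* ... fill task_req_descp[free_slot] and ring the TM doorbell ... */

	ufshcd_put_tm_slot(hba, free_slot);	/* clear_bit_unlock() releases the slot */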
| @@ -390,26 +440,6 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) | |||
| 390 | } | 440 | } |
| 391 | 441 | ||
| 392 | /** | 442 | /** |
| 393 | * ufshcd_query_to_cpu() - formats the buffer to native cpu endian | ||
| 394 | * @response: upiu query response to convert | ||
| 395 | */ | ||
| 396 | static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response) | ||
| 397 | { | ||
| 398 | response->length = be16_to_cpu(response->length); | ||
| 399 | response->value = be32_to_cpu(response->value); | ||
| 400 | } | ||
| 401 | |||
| 402 | /** | ||
| 403 | * ufshcd_query_to_be() - formats the buffer to big endian | ||
| 404 | * @request: upiu query request to convert | ||
| 405 | */ | ||
| 406 | static inline void ufshcd_query_to_be(struct utp_upiu_query *request) | ||
| 407 | { | ||
| 408 | request->length = cpu_to_be16(request->length); | ||
| 409 | request->value = cpu_to_be32(request->value); | ||
| 410 | } | ||
| 411 | |||
| 412 | /** | ||
| 413 | * ufshcd_copy_query_response() - Copy the Query Response and the data | 443 | * ufshcd_copy_query_response() - Copy the Query Response and the data |
| 414 | * descriptor | 444 | * descriptor |
| 415 | * @hba: per adapter instance | 445 | * @hba: per adapter instance |
| @@ -425,7 +455,6 @@ void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |||
| 425 | UPIU_RSP_CODE_OFFSET; | 455 | UPIU_RSP_CODE_OFFSET; |
| 426 | 456 | ||
| 427 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); | 457 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); |
| 428 | ufshcd_query_to_cpu(&query_res->upiu_res); | ||
| 429 | 458 | ||
| 430 | 459 | ||
| 431 | /* Get the descriptor */ | 460 | /* Get the descriptor */ |
| @@ -749,7 +778,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, | |||
| 749 | { | 778 | { |
| 750 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | 779 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; |
| 751 | struct ufs_query *query = &hba->dev_cmd.query; | 780 | struct ufs_query *query = &hba->dev_cmd.query; |
| 752 | u16 len = query->request.upiu_req.length; | 781 | u16 len = be16_to_cpu(query->request.upiu_req.length); |
| 753 | u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE; | 782 | u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE; |
| 754 | 783 | ||
| 755 | /* Query request header */ | 784 | /* Query request header */ |
| @@ -766,7 +795,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, | |||
| 766 | /* Copy the Query Request buffer as is */ | 795 | /* Copy the Query Request buffer as is */ |
| 767 | memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, | 796 | memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, |
| 768 | QUERY_OSF_SIZE); | 797 | QUERY_OSF_SIZE); |
| 769 | ufshcd_query_to_be(&ucd_req_ptr->qr); | ||
| 770 | 798 | ||
| 771 | /* Copy the Descriptor */ | 799 | /* Copy the Descriptor */ |
| 772 | if ((len > 0) && (query->request.upiu_req.opcode == | 800 | if ((len > 0) && (query->request.upiu_req.opcode == |
| @@ -853,10 +881,25 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
| 853 | 881 | ||
| 854 | tag = cmd->request->tag; | 882 | tag = cmd->request->tag; |
| 855 | 883 | ||
| 856 | if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { | 884 | spin_lock_irqsave(hba->host->host_lock, flags); |
| 885 | switch (hba->ufshcd_state) { | ||
| 886 | case UFSHCD_STATE_OPERATIONAL: | ||
| 887 | break; | ||
| 888 | case UFSHCD_STATE_RESET: | ||
| 857 | err = SCSI_MLQUEUE_HOST_BUSY; | 889 | err = SCSI_MLQUEUE_HOST_BUSY; |
| 858 | goto out; | 890 | goto out_unlock; |
| 891 | case UFSHCD_STATE_ERROR: | ||
| 892 | set_host_byte(cmd, DID_ERROR); | ||
| 893 | cmd->scsi_done(cmd); | ||
| 894 | goto out_unlock; | ||
| 895 | default: | ||
| 896 | dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", | ||
| 897 | __func__, hba->ufshcd_state); | ||
| 898 | set_host_byte(cmd, DID_BAD_TARGET); | ||
| 899 | cmd->scsi_done(cmd); | ||
| 900 | goto out_unlock; | ||
| 859 | } | 901 | } |
| 902 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 860 | 903 | ||
| 861 | /* acquire the tag to make sure device cmds don't use it */ | 904 | /* acquire the tag to make sure device cmds don't use it */ |
| 862 | if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { | 905 | if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { |
| @@ -893,6 +936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
| 893 | /* issue command to the controller */ | 936 | /* issue command to the controller */ |
| 894 | spin_lock_irqsave(hba->host->host_lock, flags); | 937 | spin_lock_irqsave(hba->host->host_lock, flags); |
| 895 | ufshcd_send_command(hba, tag); | 938 | ufshcd_send_command(hba, tag); |
| 939 | out_unlock: | ||
| 896 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 940 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
| 897 | out: | 941 | out: |
| 898 | return err; | 942 | return err; |
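ufshcd_queuecommand() now samples ufshcd_state under the host lock and picks one of three outcomes: issue the command, push back with SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues and retries later, or complete the command immediately with an error host byte. Condensed from the hunk above (simplified sketch):

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;				/* fall out and issue the command */
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues and retries */
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);	/* fail the command back right away */
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);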
| @@ -1151,7 +1195,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, | |||
| 1151 | } | 1195 | } |
| 1152 | 1196 | ||
| 1153 | if (flag_res) | 1197 | if (flag_res) |
| 1154 | *flag_res = (response->upiu_res.value & | 1198 | *flag_res = (be32_to_cpu(response->upiu_res.value) & |
| 1155 | MASK_QUERY_UPIU_FLAG_LOC) & 0x1; | 1199 | MASK_QUERY_UPIU_FLAG_LOC) & 0x1; |
| 1156 | 1200 | ||
| 1157 | out_unlock: | 1201 | out_unlock: |
| @@ -1170,7 +1214,7 @@ out_unlock: | |||
| 1170 | * | 1214 | * |
| 1171 | * Returns 0 for success, non-zero in case of failure | 1215 | * Returns 0 for success, non-zero in case of failure |
| 1172 | */ | 1216 | */ |
| 1173 | int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, | 1217 | static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, |
| 1174 | enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) | 1218 | enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) |
| 1175 | { | 1219 | { |
| 1176 | struct ufs_query_req *request; | 1220 | struct ufs_query_req *request; |
| @@ -1195,7 +1239,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, | |||
| 1195 | switch (opcode) { | 1239 | switch (opcode) { |
| 1196 | case UPIU_QUERY_OPCODE_WRITE_ATTR: | 1240 | case UPIU_QUERY_OPCODE_WRITE_ATTR: |
| 1197 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | 1241 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; |
| 1198 | request->upiu_req.value = *attr_val; | 1242 | request->upiu_req.value = cpu_to_be32(*attr_val); |
| 1199 | break; | 1243 | break; |
| 1200 | case UPIU_QUERY_OPCODE_READ_ATTR: | 1244 | case UPIU_QUERY_OPCODE_READ_ATTR: |
| 1201 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | 1245 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; |
| @@ -1222,7 +1266,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, | |||
| 1222 | goto out_unlock; | 1266 | goto out_unlock; |
| 1223 | } | 1267 | } |
| 1224 | 1268 | ||
| 1225 | *attr_val = response->upiu_res.value; | 1269 | *attr_val = be32_to_cpu(response->upiu_res.value); |
| 1226 | 1270 | ||
| 1227 | out_unlock: | 1271 | out_unlock: |
| 1228 | mutex_unlock(&hba->dev_cmd.lock); | 1272 | mutex_unlock(&hba->dev_cmd.lock); |
| @@ -1481,7 +1525,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); | |||
| 1481 | * | 1525 | * |
| 1482 | * Returns 0 on success, non-zero value on failure | 1526 | * Returns 0 on success, non-zero value on failure |
| 1483 | */ | 1527 | */ |
| 1484 | int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | 1528 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) |
| 1485 | { | 1529 | { |
| 1486 | struct uic_command uic_cmd = {0}; | 1530 | struct uic_command uic_cmd = {0}; |
| 1487 | struct completion pwr_done; | 1531 | struct completion pwr_done; |
| @@ -1701,11 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |||
| 1701 | goto out; | 1745 | goto out; |
| 1702 | } | 1746 | } |
| 1703 | 1747 | ||
| 1704 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) | ||
| 1705 | scsi_unblock_requests(hba->host); | ||
| 1706 | |||
| 1707 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | ||
| 1708 | |||
| 1709 | out: | 1748 | out: |
| 1710 | return err; | 1749 | return err; |
| 1711 | } | 1750 | } |
| @@ -1831,66 +1870,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) | |||
| 1831 | } | 1870 | } |
| 1832 | 1871 | ||
| 1833 | /** | 1872 | /** |
| 1834 | * ufshcd_do_reset - reset the host controller | ||
| 1835 | * @hba: per adapter instance | ||
| 1836 | * | ||
| 1837 | * Returns SUCCESS/FAILED | ||
| 1838 | */ | ||
| 1839 | static int ufshcd_do_reset(struct ufs_hba *hba) | ||
| 1840 | { | ||
| 1841 | struct ufshcd_lrb *lrbp; | ||
| 1842 | unsigned long flags; | ||
| 1843 | int tag; | ||
| 1844 | |||
| 1845 | /* block commands from midlayer */ | ||
| 1846 | scsi_block_requests(hba->host); | ||
| 1847 | |||
| 1848 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 1849 | hba->ufshcd_state = UFSHCD_STATE_RESET; | ||
| 1850 | |||
| 1851 | /* send controller to reset state */ | ||
| 1852 | ufshcd_hba_stop(hba); | ||
| 1853 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 1854 | |||
| 1855 | /* abort outstanding commands */ | ||
| 1856 | for (tag = 0; tag < hba->nutrs; tag++) { | ||
| 1857 | if (test_bit(tag, &hba->outstanding_reqs)) { | ||
| 1858 | lrbp = &hba->lrb[tag]; | ||
| 1859 | if (lrbp->cmd) { | ||
| 1860 | scsi_dma_unmap(lrbp->cmd); | ||
| 1861 | lrbp->cmd->result = DID_RESET << 16; | ||
| 1862 | lrbp->cmd->scsi_done(lrbp->cmd); | ||
| 1863 | lrbp->cmd = NULL; | ||
| 1864 | clear_bit_unlock(tag, &hba->lrb_in_use); | ||
| 1865 | } | ||
| 1866 | } | ||
| 1867 | } | ||
| 1868 | |||
| 1869 | /* complete device management command */ | ||
| 1870 | if (hba->dev_cmd.complete) | ||
| 1871 | complete(hba->dev_cmd.complete); | ||
| 1872 | |||
| 1873 | /* clear outstanding request/task bit maps */ | ||
| 1874 | hba->outstanding_reqs = 0; | ||
| 1875 | hba->outstanding_tasks = 0; | ||
| 1876 | |||
| 1877 | /* Host controller enable */ | ||
| 1878 | if (ufshcd_hba_enable(hba)) { | ||
| 1879 | dev_err(hba->dev, | ||
| 1880 | "Reset: Controller initialization failed\n"); | ||
| 1881 | return FAILED; | ||
| 1882 | } | ||
| 1883 | |||
| 1884 | if (ufshcd_link_startup(hba)) { | ||
| 1885 | dev_err(hba->dev, | ||
| 1886 | "Reset: Link start-up failed\n"); | ||
| 1887 | return FAILED; | ||
| 1888 | } | ||
| 1889 | |||
| 1890 | return SUCCESS; | ||
| 1891 | } | ||
| 1892 | |||
| 1893 | /** | ||
| 1894 | * ufshcd_slave_alloc - handle initial SCSI device configurations | 1873 | * ufshcd_slave_alloc - handle initial SCSI device configurations |
| 1895 | * @sdev: pointer to SCSI device | 1874 | * @sdev: pointer to SCSI device |
| 1896 | * | 1875 | * |
| @@ -1907,6 +1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) | |||
| 1907 | sdev->use_10_for_ms = 1; | 1886 | sdev->use_10_for_ms = 1; |
| 1908 | scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); | 1887 | scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); |
| 1909 | 1888 | ||
| 1889 | /* allow SCSI layer to restart the device in case of errors */ | ||
| 1890 | sdev->allow_restart = 1; | ||
| 1891 | |||
| 1910 | /* | 1892 | /* |
| 1911 | * Inform SCSI Midlayer that the LUN queue depth is same as the | 1893 | * Inform SCSI Midlayer that the LUN queue depth is same as the |
| 1912 | * controller queue depth. If a LUN queue depth is less than the | 1894 | * controller queue depth. If a LUN queue depth is less than the |
| @@ -1934,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) | |||
| 1934 | * ufshcd_task_req_compl - handle task management request completion | 1916 | * ufshcd_task_req_compl - handle task management request completion |
| 1935 | * @hba: per adapter instance | 1917 | * @hba: per adapter instance |
| 1936 | * @index: index of the completed request | 1918 | * @index: index of the completed request |
| 1919 | * @resp: task management service response | ||
| 1937 | * | 1920 | * |
| 1938 | * Returns SUCCESS/FAILED | 1921 | * Returns non-zero value on error, zero on success |
| 1939 | */ | 1922 | */ |
| 1940 | static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) | 1923 | static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) |
| 1941 | { | 1924 | { |
| 1942 | struct utp_task_req_desc *task_req_descp; | 1925 | struct utp_task_req_desc *task_req_descp; |
| 1943 | struct utp_upiu_task_rsp *task_rsp_upiup; | 1926 | struct utp_upiu_task_rsp *task_rsp_upiup; |
| @@ -1958,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) | |||
| 1958 | task_req_descp[index].task_rsp_upiu; | 1941 | task_req_descp[index].task_rsp_upiu; |
| 1959 | task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); | 1942 | task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); |
| 1960 | task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); | 1943 | task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); |
| 1961 | 1944 | if (resp) | |
| 1962 | if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL && | 1945 | *resp = (u8)task_result; |
| 1963 | task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) | ||
| 1964 | task_result = FAILED; | ||
| 1965 | else | ||
| 1966 | task_result = SUCCESS; | ||
| 1967 | } else { | 1946 | } else { |
| 1968 | task_result = FAILED; | 1947 | dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", |
| 1969 | dev_err(hba->dev, | 1948 | __func__, ocs_value); |
| 1970 | "trc: Invalid ocs = %x\n", ocs_value); | ||
| 1971 | } | 1949 | } |
| 1972 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 1950 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
| 1973 | return task_result; | 1951 | |
| 1952 | return ocs_value; | ||
| 1974 | } | 1953 | } |
| 1975 | 1954 | ||
| 1976 | /** | 1955 | /** |
| @@ -2105,6 +2084,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |||
| 2105 | case OCS_ABORTED: | 2084 | case OCS_ABORTED: |
| 2106 | result |= DID_ABORT << 16; | 2085 | result |= DID_ABORT << 16; |
| 2107 | break; | 2086 | break; |
| 2087 | case OCS_INVALID_COMMAND_STATUS: | ||
| 2088 | result |= DID_REQUEUE << 16; | ||
| 2089 | break; | ||
| 2108 | case OCS_INVALID_CMD_TABLE_ATTR: | 2090 | case OCS_INVALID_CMD_TABLE_ATTR: |
| 2109 | case OCS_INVALID_PRDT_ATTR: | 2091 | case OCS_INVALID_PRDT_ATTR: |
| 2110 | case OCS_MISMATCH_DATA_BUF_SIZE: | 2092 | case OCS_MISMATCH_DATA_BUF_SIZE: |
| @@ -2422,41 +2404,145 @@ out: | |||
| 2422 | } | 2404 | } |
| 2423 | 2405 | ||
| 2424 | /** | 2406 | /** |
| 2425 | * ufshcd_fatal_err_handler - handle fatal errors | 2407 | * ufshcd_err_handler - handle UFS errors that require s/w attention |
| 2426 | * @hba: per adapter instance | 2408 | * @work: pointer to work structure |
| 2427 | */ | 2409 | */ |
| 2428 | static void ufshcd_fatal_err_handler(struct work_struct *work) | 2410 | static void ufshcd_err_handler(struct work_struct *work) |
| 2429 | { | 2411 | { |
| 2430 | struct ufs_hba *hba; | 2412 | struct ufs_hba *hba; |
| 2431 | hba = container_of(work, struct ufs_hba, feh_workq); | 2413 | unsigned long flags; |
| 2414 | u32 err_xfer = 0; | ||
| 2415 | u32 err_tm = 0; | ||
| 2416 | int err = 0; | ||
| 2417 | int tag; | ||
| 2418 | |||
| 2419 | hba = container_of(work, struct ufs_hba, eh_work); | ||
| 2432 | 2420 | ||
| 2433 | pm_runtime_get_sync(hba->dev); | 2421 | pm_runtime_get_sync(hba->dev); |
| 2434 | /* check if reset is already in progress */ | 2422 | |
| 2435 | if (hba->ufshcd_state != UFSHCD_STATE_RESET) | 2423 | spin_lock_irqsave(hba->host->host_lock, flags); |
| 2436 | ufshcd_do_reset(hba); | 2424 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) { |
| 2425 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2426 | goto out; | ||
| 2427 | } | ||
| 2428 | |||
| 2429 | hba->ufshcd_state = UFSHCD_STATE_RESET; | ||
| 2430 | ufshcd_set_eh_in_progress(hba); | ||
| 2431 | |||
| 2432 | /* Complete requests that have door-bell cleared by h/w */ | ||
| 2433 | ufshcd_transfer_req_compl(hba); | ||
| 2434 | ufshcd_tmc_handler(hba); | ||
| 2435 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2436 | |||
| 2437 | /* Clear pending transfer requests */ | ||
| 2438 | for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) | ||
| 2439 | if (ufshcd_clear_cmd(hba, tag)) | ||
| 2440 | err_xfer |= 1 << tag; | ||
| 2441 | |||
| 2442 | /* Clear pending task management requests */ | ||
| 2443 | for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) | ||
| 2444 | if (ufshcd_clear_tm_cmd(hba, tag)) | ||
| 2445 | err_tm |= 1 << tag; | ||
| 2446 | |||
| 2447 | /* Complete the requests that are cleared by s/w */ | ||
| 2448 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 2449 | ufshcd_transfer_req_compl(hba); | ||
| 2450 | ufshcd_tmc_handler(hba); | ||
| 2451 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2452 | |||
| 2453 | /* Fatal errors need reset */ | ||
| 2454 | if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || | ||
| 2455 | ((hba->saved_err & UIC_ERROR) && | ||
| 2456 | (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { | ||
| 2457 | err = ufshcd_reset_and_restore(hba); | ||
| 2458 | if (err) { | ||
| 2459 | dev_err(hba->dev, "%s: reset and restore failed\n", | ||
| 2460 | __func__); | ||
| 2461 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | ||
| 2462 | } | ||
| 2463 | /* | ||
| 2464 | * Inform scsi mid-layer that we did reset and allow to handle | ||
| 2465 | * Unit Attention properly. | ||
| 2466 | */ | ||
| 2467 | scsi_report_bus_reset(hba->host, 0); | ||
| 2468 | hba->saved_err = 0; | ||
| 2469 | hba->saved_uic_err = 0; | ||
| 2470 | } | ||
| 2471 | ufshcd_clear_eh_in_progress(hba); | ||
| 2472 | |||
| 2473 | out: | ||
| 2474 | scsi_unblock_requests(hba->host); | ||
| 2437 | pm_runtime_put_sync(hba->dev); | 2475 | pm_runtime_put_sync(hba->dev); |
| 2438 | } | 2476 | } |
| 2439 | 2477 | ||
| 2440 | /** | 2478 | /** |
| 2441 | * ufshcd_err_handler - Check for fatal errors | 2479 | * ufshcd_update_uic_error - check and set fatal UIC error flags. |
| 2442 | * @work: pointer to a work queue structure | 2480 | * @hba: per-adapter instance |
| 2443 | */ | 2481 | */ |
| 2444 | static void ufshcd_err_handler(struct ufs_hba *hba) | 2482 | static void ufshcd_update_uic_error(struct ufs_hba *hba) |
| 2445 | { | 2483 | { |
| 2446 | u32 reg; | 2484 | u32 reg; |
| 2447 | 2485 | ||
| 2486 | /* PA_INIT_ERROR is fatal and needs UIC reset */ | ||
| 2487 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); | ||
| 2488 | if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) | ||
| 2489 | hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; | ||
| 2490 | |||
| 2491 | /* UIC NL/TL/DME errors need software retry */ | ||
| 2492 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); | ||
| 2493 | if (reg) | ||
| 2494 | hba->uic_error |= UFSHCD_UIC_NL_ERROR; | ||
| 2495 | |||
| 2496 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); | ||
| 2497 | if (reg) | ||
| 2498 | hba->uic_error |= UFSHCD_UIC_TL_ERROR; | ||
| 2499 | |||
| 2500 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); | ||
| 2501 | if (reg) | ||
| 2502 | hba->uic_error |= UFSHCD_UIC_DME_ERROR; | ||
| 2503 | |||
| 2504 | dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", | ||
| 2505 | __func__, hba->uic_error); | ||
| 2506 | } | ||
| 2507 | |||
| 2508 | /** | ||
| 2509 | * ufshcd_check_errors - Check for errors that need s/w attention | ||
| 2510 | * @hba: per-adapter instance | ||
| 2511 | */ | ||
| 2512 | static void ufshcd_check_errors(struct ufs_hba *hba) | ||
| 2513 | { | ||
| 2514 | bool queue_eh_work = false; | ||
| 2515 | |||
| 2448 | if (hba->errors & INT_FATAL_ERRORS) | 2516 | if (hba->errors & INT_FATAL_ERRORS) |
| 2449 | goto fatal_eh; | 2517 | queue_eh_work = true; |
| 2450 | 2518 | ||
| 2451 | if (hba->errors & UIC_ERROR) { | 2519 | if (hba->errors & UIC_ERROR) { |
| 2452 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); | 2520 | hba->uic_error = 0; |
| 2453 | if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) | 2521 | ufshcd_update_uic_error(hba); |
| 2454 | goto fatal_eh; | 2522 | if (hba->uic_error) |
| 2523 | queue_eh_work = true; | ||
| 2455 | } | 2524 | } |
| 2456 | return; | 2525 | |
| 2457 | fatal_eh: | 2526 | if (queue_eh_work) { |
| 2458 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | 2527 | /* handle fatal errors only when link is functional */ |
| 2459 | schedule_work(&hba->feh_workq); | 2528 | if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { |
| 2529 | /* block commands from scsi mid-layer */ | ||
| 2530 | scsi_block_requests(hba->host); | ||
| 2531 | |||
| 2532 | /* transfer error masks to sticky bits */ | ||
| 2533 | hba->saved_err |= hba->errors; | ||
| 2534 | hba->saved_uic_err |= hba->uic_error; | ||
| 2535 | |||
| 2536 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | ||
| 2537 | schedule_work(&hba->eh_work); | ||
| 2538 | } | ||
| 2539 | } | ||
| 2540 | /* | ||
| 2541 | * if (!queue_eh_work) - | ||
| 2542 | * Other errors are either non-fatal where host recovers | ||
| 2543 | * itself without s/w intervention or errors that will be | ||
| 2544 | * handled by the SCSI core layer. | ||
| 2545 | */ | ||
| 2460 | } | 2546 | } |
| 2461 | 2547 | ||
| 2462 | /** | 2548 | /** |
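The split above moves everything that can sleep out of the interrupt path: ufshcd_check_errors() only latches the cause and schedules eh_work, and ufshcd_err_handler() later clears door-bell bits and resets if the cause was fatal. A minimal sketch of that handoff, reusing the names from this patch with locking and register accesses elided (not the driver code itself):

    /* interrupt context: latch the cause and defer the heavy work */
    static void check_errors_sketch(struct ufs_hba *hba)
    {
    	if (!(hba->errors & INT_FATAL_ERRORS) && !hba->uic_error)
    		return;                          /* nothing needs s/w attention */
    	scsi_block_requests(hba->host);          /* stop new SCSI commands */
    	hba->saved_err |= hba->errors;           /* sticky copy for the worker */
    	hba->saved_uic_err |= hba->uic_error;
    	schedule_work(&hba->eh_work);            /* ufshcd_err_handler() runs later */
    }

    /* process context: safe to sleep while clearing door-bells and resetting */
    static void err_handler_sketch(struct work_struct *work)
    {
    	struct ufs_hba *hba = container_of(work, struct ufs_hba, eh_work);

    	/* clear any requests/tasks still pending in the door-bell registers,
    	 * then reset only if a fatal cause was latched by the ISR */
    	if (hba->saved_err & INT_FATAL_ERRORS)
    		ufshcd_reset_and_restore(hba);
    	scsi_unblock_requests(hba->host);
    }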
| @@ -2469,7 +2555,7 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba) | |||
| 2469 | 2555 | ||
| 2470 | tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); | 2556 | tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); |
| 2471 | hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; | 2557 | hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; |
| 2472 | wake_up_interruptible(&hba->ufshcd_tm_wait_queue); | 2558 | wake_up(&hba->tm_wq); |
| 2473 | } | 2559 | } |
| 2474 | 2560 | ||
| 2475 | /** | 2561 | /** |
| @@ -2481,7 +2567,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) | |||
| 2481 | { | 2567 | { |
| 2482 | hba->errors = UFSHCD_ERROR_MASK & intr_status; | 2568 | hba->errors = UFSHCD_ERROR_MASK & intr_status; |
| 2483 | if (hba->errors) | 2569 | if (hba->errors) |
| 2484 | ufshcd_err_handler(hba); | 2570 | ufshcd_check_errors(hba); |
| 2485 | 2571 | ||
| 2486 | if (intr_status & UFSHCD_UIC_MASK) | 2572 | if (intr_status & UFSHCD_UIC_MASK) |
| 2487 | ufshcd_uic_cmd_compl(hba, intr_status); | 2573 | ufshcd_uic_cmd_compl(hba, intr_status); |
| @@ -2519,38 +2605,58 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) | |||
| 2519 | return retval; | 2605 | return retval; |
| 2520 | } | 2606 | } |
| 2521 | 2607 | ||
| 2608 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) | ||
| 2609 | { | ||
| 2610 | int err = 0; | ||
| 2611 | u32 mask = 1 << tag; | ||
| 2612 | unsigned long flags; | ||
| 2613 | |||
| 2614 | if (!test_bit(tag, &hba->outstanding_tasks)) | ||
| 2615 | goto out; | ||
| 2616 | |||
| 2617 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 2618 | ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); | ||
| 2619 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2620 | |||
| 2621 | /* poll for max. 1 sec for the door bell register to be cleared by h/w */ | ||
| 2622 | err = ufshcd_wait_for_register(hba, | ||
| 2623 | REG_UTP_TASK_REQ_DOOR_BELL, | ||
| 2624 | mask, 0, 1000, 1000); | ||
| 2625 | out: | ||
| 2626 | return err; | ||
| 2627 | } | ||
| 2628 | |||
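ufshcd_clear_tm_cmd() leans on the driver's ufshcd_wait_for_register() helper; the call above passes mask = 1 << tag, expected value = 0, a 1000 us polling interval and a 1000 ms timeout. The helper itself is defined elsewhere in ufshcd.c, so the following is only a hedged sketch of the polling idiom with assumed parameter names:

    /* poll until (reg & mask) == val, checking every interval_us,
     * giving up after timeout_ms; returns 0 or -ETIMEDOUT */
    static int wait_for_register_sketch(struct ufs_hba *hba, u32 reg, u32 mask,
    					u32 val, unsigned long interval_us,
    					unsigned long timeout_ms)
    {
    	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

    	while ((ufshcd_readl(hba, reg) & mask) != val) {
    		if (time_after(jiffies, deadline))
    			return -ETIMEDOUT;
    		usleep_range(interval_us, interval_us + 50);
    	}
    	return 0;
    }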
| 2522 | /** | 2629 | /** |
| 2523 | * ufshcd_issue_tm_cmd - issues task management commands to controller | 2630 | * ufshcd_issue_tm_cmd - issues task management commands to controller |
| 2524 | * @hba: per adapter instance | 2631 | * @hba: per adapter instance |
| 2525 | * @lrbp: pointer to local reference block | 2632 | * @lun_id: LUN ID to which TM command is sent |
| 2633 | * @task_id: task ID to which the TM command is applicable | ||
| 2634 | * @tm_function: task management function opcode | ||
| 2635 | * @tm_response: task management service response return value | ||
| 2526 | * | 2636 | * |
| 2527 | * Returns SUCCESS/FAILED | 2637 | * Returns non-zero value on error, zero on success. |
| 2528 | */ | 2638 | */ |
| 2529 | static int | 2639 | static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, |
| 2530 | ufshcd_issue_tm_cmd(struct ufs_hba *hba, | 2640 | u8 tm_function, u8 *tm_response) |
| 2531 | struct ufshcd_lrb *lrbp, | ||
| 2532 | u8 tm_function) | ||
| 2533 | { | 2641 | { |
| 2534 | struct utp_task_req_desc *task_req_descp; | 2642 | struct utp_task_req_desc *task_req_descp; |
| 2535 | struct utp_upiu_task_req *task_req_upiup; | 2643 | struct utp_upiu_task_req *task_req_upiup; |
| 2536 | struct Scsi_Host *host; | 2644 | struct Scsi_Host *host; |
| 2537 | unsigned long flags; | 2645 | unsigned long flags; |
| 2538 | int free_slot = 0; | 2646 | int free_slot; |
| 2539 | int err; | 2647 | int err; |
| 2648 | int task_tag; | ||
| 2540 | 2649 | ||
| 2541 | host = hba->host; | 2650 | host = hba->host; |
| 2542 | 2651 | ||
| 2543 | spin_lock_irqsave(host->host_lock, flags); | 2652 | /* |
| 2544 | 2653 | * Get free slot, sleep if slots are unavailable. | |
| 2545 | /* If task management queue is full */ | 2654 | * Even though we use wait_event() which sleeps indefinitely, |
| 2546 | free_slot = ufshcd_get_tm_free_slot(hba); | 2655 | * the maximum wait time is bounded by %TM_CMD_TIMEOUT. |
| 2547 | if (free_slot >= hba->nutmrs) { | 2656 | */ |
| 2548 | spin_unlock_irqrestore(host->host_lock, flags); | 2657 | wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); |
| 2549 | dev_err(hba->dev, "Task management queue full\n"); | ||
| 2550 | err = FAILED; | ||
| 2551 | goto out; | ||
| 2552 | } | ||
| 2553 | 2658 | ||
| 2659 | spin_lock_irqsave(host->host_lock, flags); | ||
| 2554 | task_req_descp = hba->utmrdl_base_addr; | 2660 | task_req_descp = hba->utmrdl_base_addr; |
| 2555 | task_req_descp += free_slot; | 2661 | task_req_descp += free_slot; |
| 2556 | 2662 | ||
| @@ -2562,18 +2668,15 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba, | |||
| 2562 | /* Configure task request UPIU */ | 2668 | /* Configure task request UPIU */ |
| 2563 | task_req_upiup = | 2669 | task_req_upiup = |
| 2564 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; | 2670 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; |
| 2671 | task_tag = hba->nutrs + free_slot; | ||
| 2565 | task_req_upiup->header.dword_0 = | 2672 | task_req_upiup->header.dword_0 = |
| 2566 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, | 2673 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, |
| 2567 | lrbp->lun, lrbp->task_tag); | 2674 | lun_id, task_tag); |
| 2568 | task_req_upiup->header.dword_1 = | 2675 | task_req_upiup->header.dword_1 = |
| 2569 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); | 2676 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); |
| 2570 | 2677 | ||
| 2571 | task_req_upiup->input_param1 = lrbp->lun; | 2678 | task_req_upiup->input_param1 = cpu_to_be32(lun_id); |
| 2572 | task_req_upiup->input_param1 = | 2679 | task_req_upiup->input_param2 = cpu_to_be32(task_id); |
| 2573 | cpu_to_be32(task_req_upiup->input_param1); | ||
| 2574 | task_req_upiup->input_param2 = lrbp->task_tag; | ||
| 2575 | task_req_upiup->input_param2 = | ||
| 2576 | cpu_to_be32(task_req_upiup->input_param2); | ||
| 2577 | 2680 | ||
| 2578 | /* send command to the controller */ | 2681 | /* send command to the controller */ |
| 2579 | __set_bit(free_slot, &hba->outstanding_tasks); | 2682 | __set_bit(free_slot, &hba->outstanding_tasks); |
| @@ -2582,91 +2685,88 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba, | |||
| 2582 | spin_unlock_irqrestore(host->host_lock, flags); | 2685 | spin_unlock_irqrestore(host->host_lock, flags); |
| 2583 | 2686 | ||
| 2584 | /* wait until the task management command is completed */ | 2687 | /* wait until the task management command is completed */ |
| 2585 | err = | 2688 | err = wait_event_timeout(hba->tm_wq, |
| 2586 | wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, | 2689 | test_bit(free_slot, &hba->tm_condition), |
| 2587 | (test_bit(free_slot, | 2690 | msecs_to_jiffies(TM_CMD_TIMEOUT)); |
| 2588 | &hba->tm_condition) != 0), | ||
| 2589 | 60 * HZ); | ||
| 2590 | if (!err) { | 2691 | if (!err) { |
| 2591 | dev_err(hba->dev, | 2692 | dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", |
| 2592 | "Task management command timed-out\n"); | 2693 | __func__, tm_function); |
| 2593 | err = FAILED; | 2694 | if (ufshcd_clear_tm_cmd(hba, free_slot)) |
| 2594 | goto out; | 2695 | dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", |
| 2696 | __func__, free_slot); | ||
| 2697 | err = -ETIMEDOUT; | ||
| 2698 | } else { | ||
| 2699 | err = ufshcd_task_req_compl(hba, free_slot, tm_response); | ||
| 2595 | } | 2700 | } |
| 2701 | |||
| 2596 | clear_bit(free_slot, &hba->tm_condition); | 2702 | clear_bit(free_slot, &hba->tm_condition); |
| 2597 | err = ufshcd_task_req_compl(hba, free_slot); | 2703 | ufshcd_put_tm_slot(hba, free_slot); |
| 2598 | out: | 2704 | wake_up(&hba->tm_tag_wq); |
| 2705 | |||
| 2599 | return err; | 2706 | return err; |
| 2600 | } | 2707 | } |
| 2601 | 2708 | ||
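Slot management for task management requests now pairs the tm_slots_in_use bitmap (added to struct ufs_hba below) with the tm_tag_wq wait queue: wait_event() above sleeps until ufshcd_get_tm_free_slot() claims a bit, and ufshcd_put_tm_slot() releases it before the next waiter is woken. Those helpers are outside this hunk, so this is only a plausible sketch and the exact semantics are assumptions:

    /* claim a free TM slot; returns true and fills *slot on success */
    static bool get_tm_free_slot_sketch(struct ufs_hba *hba, int *slot)
    {
    	int tag;

    	do {
    		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
    		if (tag >= hba->nutmrs)
    			return false;	/* caller keeps sleeping on tm_tag_wq */
    	} while (test_and_set_bit(tag, &hba->tm_slots_in_use));

    	*slot = tag;
    	return true;
    }

    static void put_tm_slot_sketch(struct ufs_hba *hba, int slot)
    {
    	clear_bit(slot, &hba->tm_slots_in_use);
    	/* the caller then does wake_up(&hba->tm_tag_wq), as above */
    }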
| 2602 | /** | 2709 | /** |
| 2603 | * ufshcd_device_reset - reset device and abort all the pending commands | 2710 | * ufshcd_eh_device_reset_handler - device reset handler registered to |
| 2711 | * scsi layer. | ||
| 2604 | * @cmd: SCSI command pointer | 2712 | * @cmd: SCSI command pointer |
| 2605 | * | 2713 | * |
| 2606 | * Returns SUCCESS/FAILED | 2714 | * Returns SUCCESS/FAILED |
| 2607 | */ | 2715 | */ |
| 2608 | static int ufshcd_device_reset(struct scsi_cmnd *cmd) | 2716 | static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) |
| 2609 | { | 2717 | { |
| 2610 | struct Scsi_Host *host; | 2718 | struct Scsi_Host *host; |
| 2611 | struct ufs_hba *hba; | 2719 | struct ufs_hba *hba; |
| 2612 | unsigned int tag; | 2720 | unsigned int tag; |
| 2613 | u32 pos; | 2721 | u32 pos; |
| 2614 | int err; | 2722 | int err; |
| 2723 | u8 resp = 0xF; | ||
| 2724 | struct ufshcd_lrb *lrbp; | ||
| 2725 | unsigned long flags; | ||
| 2615 | 2726 | ||
| 2616 | host = cmd->device->host; | 2727 | host = cmd->device->host; |
| 2617 | hba = shost_priv(host); | 2728 | hba = shost_priv(host); |
| 2618 | tag = cmd->request->tag; | 2729 | tag = cmd->request->tag; |
| 2619 | 2730 | ||
| 2620 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); | 2731 | lrbp = &hba->lrb[tag]; |
| 2621 | if (err == FAILED) | 2732 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); |
| 2733 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | ||
| 2734 | if (!err) | ||
| 2735 | err = resp; | ||
| 2622 | goto out; | 2736 | goto out; |
| 2737 | } | ||
| 2623 | 2738 | ||
| 2624 | for (pos = 0; pos < hba->nutrs; pos++) { | 2739 | /* clear the commands that were pending for corresponding LUN */ |
| 2625 | if (test_bit(pos, &hba->outstanding_reqs) && | 2740 | for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { |
| 2626 | (hba->lrb[tag].lun == hba->lrb[pos].lun)) { | 2741 | if (hba->lrb[pos].lun == lrbp->lun) { |
| 2627 | 2742 | err = ufshcd_clear_cmd(hba, pos); | |
| 2628 | /* clear the respective UTRLCLR register bit */ | 2743 | if (err) |
| 2629 | ufshcd_utrl_clear(hba, pos); | 2744 | break; |
| 2630 | |||
| 2631 | clear_bit(pos, &hba->outstanding_reqs); | ||
| 2632 | |||
| 2633 | if (hba->lrb[pos].cmd) { | ||
| 2634 | scsi_dma_unmap(hba->lrb[pos].cmd); | ||
| 2635 | hba->lrb[pos].cmd->result = | ||
| 2636 | DID_ABORT << 16; | ||
| 2637 | hba->lrb[pos].cmd->scsi_done(cmd); | ||
| 2638 | hba->lrb[pos].cmd = NULL; | ||
| 2639 | clear_bit_unlock(pos, &hba->lrb_in_use); | ||
| 2640 | wake_up(&hba->dev_cmd.tag_wq); | ||
| 2641 | } | ||
| 2642 | } | 2745 | } |
| 2643 | } /* end of for */ | 2746 | } |
| 2747 | spin_lock_irqsave(host->host_lock, flags); | ||
| 2748 | ufshcd_transfer_req_compl(hba); | ||
| 2749 | spin_unlock_irqrestore(host->host_lock, flags); | ||
| 2644 | out: | 2750 | out: |
| 2751 | if (!err) { | ||
| 2752 | err = SUCCESS; | ||
| 2753 | } else { | ||
| 2754 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | ||
| 2755 | err = FAILED; | ||
| 2756 | } | ||
| 2645 | return err; | 2757 | return err; |
| 2646 | } | 2758 | } |
| 2647 | 2759 | ||
| 2648 | /** | 2760 | /** |
| 2649 | * ufshcd_host_reset - Main reset function registered with scsi layer | ||
| 2650 | * @cmd: SCSI command pointer | ||
| 2651 | * | ||
| 2652 | * Returns SUCCESS/FAILED | ||
| 2653 | */ | ||
| 2654 | static int ufshcd_host_reset(struct scsi_cmnd *cmd) | ||
| 2655 | { | ||
| 2656 | struct ufs_hba *hba; | ||
| 2657 | |||
| 2658 | hba = shost_priv(cmd->device->host); | ||
| 2659 | |||
| 2660 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) | ||
| 2661 | return SUCCESS; | ||
| 2662 | |||
| 2663 | return ufshcd_do_reset(hba); | ||
| 2664 | } | ||
| 2665 | |||
| 2666 | /** | ||
| 2667 | * ufshcd_abort - abort a specific command | 2761 | * ufshcd_abort - abort a specific command |
| 2668 | * @cmd: SCSI command pointer | 2762 | * @cmd: SCSI command pointer |
| 2669 | * | 2763 | * |
| 2764 | * Abort the pending command in device by sending UFS_ABORT_TASK task management | ||
| 2765 | * command, and in host controller by clearing the door-bell register. There can | ||
| 2766 | * be a race where the controller sends the command to the device while the abort | ||
| 2767 | * is issued. To avoid that, first issue UFS_QUERY_TASK to check if the | ||
| 2768 | * command was really issued and only then try to abort it. | ||
| 2769 | * | ||
| 2670 | * Returns SUCCESS/FAILED | 2770 | * Returns SUCCESS/FAILED |
| 2671 | */ | 2771 | */ |
| 2672 | static int ufshcd_abort(struct scsi_cmnd *cmd) | 2772 | static int ufshcd_abort(struct scsi_cmnd *cmd) |
| @@ -2675,33 +2775,68 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) | |||
| 2675 | struct ufs_hba *hba; | 2775 | struct ufs_hba *hba; |
| 2676 | unsigned long flags; | 2776 | unsigned long flags; |
| 2677 | unsigned int tag; | 2777 | unsigned int tag; |
| 2678 | int err; | 2778 | int err = 0; |
| 2779 | int poll_cnt; | ||
| 2780 | u8 resp = 0xF; | ||
| 2781 | struct ufshcd_lrb *lrbp; | ||
| 2679 | 2782 | ||
| 2680 | host = cmd->device->host; | 2783 | host = cmd->device->host; |
| 2681 | hba = shost_priv(host); | 2784 | hba = shost_priv(host); |
| 2682 | tag = cmd->request->tag; | 2785 | tag = cmd->request->tag; |
| 2683 | 2786 | ||
| 2684 | spin_lock_irqsave(host->host_lock, flags); | 2787 | /* If command is already aborted/completed, return SUCCESS */ |
| 2788 | if (!(test_bit(tag, &hba->outstanding_reqs))) | ||
| 2789 | goto out; | ||
| 2685 | 2790 | ||
| 2686 | /* check if command is still pending */ | 2791 | lrbp = &hba->lrb[tag]; |
| 2687 | if (!(test_bit(tag, &hba->outstanding_reqs))) { | 2792 | for (poll_cnt = 100; poll_cnt; poll_cnt--) { |
| 2688 | err = FAILED; | 2793 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, |
| 2689 | spin_unlock_irqrestore(host->host_lock, flags); | 2794 | UFS_QUERY_TASK, &resp); |
| 2795 | if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { | ||
| 2796 | /* cmd pending in the device */ | ||
| 2797 | break; | ||
| 2798 | } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | ||
| 2799 | u32 reg; | ||
| 2800 | |||
| 2801 | /* | ||
| 2802 | * cmd not pending in the device, check if it is | ||
| 2803 | * in transition. | ||
| 2804 | */ | ||
| 2805 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); | ||
| 2806 | if (reg & (1 << tag)) { | ||
| 2807 | /* sleep for max. 200us to stabilize */ | ||
| 2808 | usleep_range(100, 200); | ||
| 2809 | continue; | ||
| 2810 | } | ||
| 2811 | /* command completed already */ | ||
| 2812 | goto out; | ||
| 2813 | } else { | ||
| 2814 | if (!err) | ||
| 2815 | err = resp; /* service response error */ | ||
| 2816 | goto out; | ||
| 2817 | } | ||
| 2818 | } | ||
| 2819 | |||
| 2820 | if (!poll_cnt) { | ||
| 2821 | err = -EBUSY; | ||
| 2690 | goto out; | 2822 | goto out; |
| 2691 | } | 2823 | } |
| 2692 | spin_unlock_irqrestore(host->host_lock, flags); | ||
| 2693 | 2824 | ||
| 2694 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); | 2825 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, |
| 2695 | if (err == FAILED) | 2826 | UFS_ABORT_TASK, &resp); |
| 2827 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | ||
| 2828 | if (!err) | ||
| 2829 | err = resp; /* service response error */ | ||
| 2830 | goto out; | ||
| 2831 | } | ||
| 2832 | |||
| 2833 | err = ufshcd_clear_cmd(hba, tag); | ||
| 2834 | if (err) | ||
| 2696 | goto out; | 2835 | goto out; |
| 2697 | 2836 | ||
| 2698 | scsi_dma_unmap(cmd); | 2837 | scsi_dma_unmap(cmd); |
| 2699 | 2838 | ||
| 2700 | spin_lock_irqsave(host->host_lock, flags); | 2839 | spin_lock_irqsave(host->host_lock, flags); |
| 2701 | |||
| 2702 | /* clear the respective UTRLCLR register bit */ | ||
| 2703 | ufshcd_utrl_clear(hba, tag); | ||
| 2704 | |||
| 2705 | __clear_bit(tag, &hba->outstanding_reqs); | 2840 | __clear_bit(tag, &hba->outstanding_reqs); |
| 2706 | hba->lrb[tag].cmd = NULL; | 2841 | hba->lrb[tag].cmd = NULL; |
| 2707 | spin_unlock_irqrestore(host->host_lock, flags); | 2842 | spin_unlock_irqrestore(host->host_lock, flags); |
| @@ -2709,6 +2844,129 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) | |||
| 2709 | clear_bit_unlock(tag, &hba->lrb_in_use); | 2844 | clear_bit_unlock(tag, &hba->lrb_in_use); |
| 2710 | wake_up(&hba->dev_cmd.tag_wq); | 2845 | wake_up(&hba->dev_cmd.tag_wq); |
| 2711 | out: | 2846 | out: |
| 2847 | if (!err) { | ||
| 2848 | err = SUCCESS; | ||
| 2849 | } else { | ||
| 2850 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | ||
| 2851 | err = FAILED; | ||
| 2852 | } | ||
| 2853 | |||
| 2854 | return err; | ||
| 2855 | } | ||
| 2856 | |||
| 2857 | /** | ||
| 2858 | * ufshcd_host_reset_and_restore - reset and restore host controller | ||
| 2859 | * @hba: per-adapter instance | ||
| 2860 | * | ||
| 2861 | * Note that host controller reset may issue DME_RESET to | ||
| 2862 | * local and remote (device) Uni-Pro stack and the attributes | ||
| 2863 | * are reset to default state. | ||
| 2864 | * | ||
| 2865 | * Returns zero on success, non-zero on failure | ||
| 2866 | */ | ||
| 2867 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) | ||
| 2868 | { | ||
| 2869 | int err; | ||
| 2870 | async_cookie_t cookie; | ||
| 2871 | unsigned long flags; | ||
| 2872 | |||
| 2873 | /* Reset the host controller */ | ||
| 2874 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 2875 | ufshcd_hba_stop(hba); | ||
| 2876 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2877 | |||
| 2878 | err = ufshcd_hba_enable(hba); | ||
| 2879 | if (err) | ||
| 2880 | goto out; | ||
| 2881 | |||
| 2882 | /* Establish the link again and restore the device */ | ||
| 2883 | cookie = async_schedule(ufshcd_async_scan, hba); | ||
| 2884 | /* wait for async scan to be completed */ | ||
| 2885 | async_synchronize_cookie(++cookie); | ||
| 2886 | if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) | ||
| 2887 | err = -EIO; | ||
| 2888 | out: | ||
| 2889 | if (err) | ||
| 2890 | dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); | ||
| 2891 | |||
| 2892 | return err; | ||
| 2893 | } | ||
| 2894 | |||
| 2895 | /** | ||
| 2896 | * ufshcd_reset_and_restore - reset and re-initialize host/device | ||
| 2897 | * @hba: per-adapter instance | ||
| 2898 | * | ||
| 2899 | * Reset and recover device, host and re-establish link. This | ||
| 2900 | * is helpful to recover the communication in fatal error conditions. | ||
| 2901 | * | ||
| 2902 | * Returns zero on success, non-zero on failure | ||
| 2903 | */ | ||
| 2904 | static int ufshcd_reset_and_restore(struct ufs_hba *hba) | ||
| 2905 | { | ||
| 2906 | int err = 0; | ||
| 2907 | unsigned long flags; | ||
| 2908 | |||
| 2909 | err = ufshcd_host_reset_and_restore(hba); | ||
| 2910 | |||
| 2911 | /* | ||
| 2912 | * After reset the door-bell might be cleared, complete | ||
| 2913 | * outstanding requests in s/w here. | ||
| 2914 | */ | ||
| 2915 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 2916 | ufshcd_transfer_req_compl(hba); | ||
| 2917 | ufshcd_tmc_handler(hba); | ||
| 2918 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2919 | |||
| 2920 | return err; | ||
| 2921 | } | ||
| 2922 | |||
| 2923 | /** | ||
| 2924 | * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer | ||
| 2925 | * @cmd: SCSI command pointer | ||
| 2926 | * | ||
| 2927 | * Returns SUCCESS/FAILED | ||
| 2928 | */ | ||
| 2929 | static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) | ||
| 2930 | { | ||
| 2931 | int err; | ||
| 2932 | unsigned long flags; | ||
| 2933 | struct ufs_hba *hba; | ||
| 2934 | |||
| 2935 | hba = shost_priv(cmd->device->host); | ||
| 2936 | |||
| 2937 | /* | ||
| 2938 | * Check if there is any race with fatal error handling. | ||
| 2939 | * If so, wait for it to complete. Even though fatal error | ||
| 2940 | * handling does reset and restore in some cases, don't assume | ||
| 2941 | * anything out of it. We are just avoiding a race here. | ||
| 2942 | */ | ||
| 2943 | do { | ||
| 2944 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 2945 | if (!(work_pending(&hba->eh_work) || | ||
| 2946 | hba->ufshcd_state == UFSHCD_STATE_RESET)) | ||
| 2947 | break; | ||
| 2948 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2949 | dev_dbg(hba->dev, "%s: reset in progress\n", __func__); | ||
| 2950 | flush_work(&hba->eh_work); | ||
| 2951 | } while (1); | ||
| 2952 | |||
| 2953 | hba->ufshcd_state = UFSHCD_STATE_RESET; | ||
| 2954 | ufshcd_set_eh_in_progress(hba); | ||
| 2955 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2956 | |||
| 2957 | err = ufshcd_reset_and_restore(hba); | ||
| 2958 | |||
| 2959 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
| 2960 | if (!err) { | ||
| 2961 | err = SUCCESS; | ||
| 2962 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | ||
| 2963 | } else { | ||
| 2964 | err = FAILED; | ||
| 2965 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | ||
| 2966 | } | ||
| 2967 | ufshcd_clear_eh_in_progress(hba); | ||
| 2968 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
| 2969 | |||
| 2712 | return err; | 2970 | return err; |
| 2713 | } | 2971 | } |
| 2714 | 2972 | ||
| @@ -2737,8 +2995,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) | |||
| 2737 | goto out; | 2995 | goto out; |
| 2738 | 2996 | ||
| 2739 | ufshcd_force_reset_auto_bkops(hba); | 2997 | ufshcd_force_reset_auto_bkops(hba); |
| 2740 | scsi_scan_host(hba->host); | 2998 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; |
| 2741 | pm_runtime_put_sync(hba->dev); | 2999 | |
| 3000 | /* If we are in error handling context no need to scan the host */ | ||
| 3001 | if (!ufshcd_eh_in_progress(hba)) { | ||
| 3002 | scsi_scan_host(hba->host); | ||
| 3003 | pm_runtime_put_sync(hba->dev); | ||
| 3004 | } | ||
| 2742 | out: | 3005 | out: |
| 2743 | return; | 3006 | return; |
| 2744 | } | 3007 | } |
| @@ -2751,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = { | |||
| 2751 | .slave_alloc = ufshcd_slave_alloc, | 3014 | .slave_alloc = ufshcd_slave_alloc, |
| 2752 | .slave_destroy = ufshcd_slave_destroy, | 3015 | .slave_destroy = ufshcd_slave_destroy, |
| 2753 | .eh_abort_handler = ufshcd_abort, | 3016 | .eh_abort_handler = ufshcd_abort, |
| 2754 | .eh_device_reset_handler = ufshcd_device_reset, | 3017 | .eh_device_reset_handler = ufshcd_eh_device_reset_handler, |
| 2755 | .eh_host_reset_handler = ufshcd_host_reset, | 3018 | .eh_host_reset_handler = ufshcd_eh_host_reset_handler, |
| 2756 | .this_id = -1, | 3019 | .this_id = -1, |
| 2757 | .sg_tablesize = SG_ALL, | 3020 | .sg_tablesize = SG_ALL, |
| 2758 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, | 3021 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, |
| @@ -2916,10 +3179,11 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | |||
| 2916 | host->max_cmd_len = MAX_CDB_SIZE; | 3179 | host->max_cmd_len = MAX_CDB_SIZE; |
| 2917 | 3180 | ||
| 2918 | /* Initialize wait queue for task management */ | 3181 | /* Initialize wait queue for task management */ |
| 2919 | init_waitqueue_head(&hba->ufshcd_tm_wait_queue); | 3182 | init_waitqueue_head(&hba->tm_wq); |
| 3183 | init_waitqueue_head(&hba->tm_tag_wq); | ||
| 2920 | 3184 | ||
| 2921 | /* Initialize work queues */ | 3185 | /* Initialize work queues */ |
| 2922 | INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); | 3186 | INIT_WORK(&hba->eh_work, ufshcd_err_handler); |
| 2923 | INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); | 3187 | INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); |
| 2924 | 3188 | ||
| 2925 | /* Initialize UIC command mutex */ | 3189 | /* Initialize UIC command mutex */ |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 577679a2d189..acf318e338ed 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h | |||
| @@ -174,15 +174,21 @@ struct ufs_dev_cmd { | |||
| 174 | * @irq: Irq number of the controller | 174 | * @irq: Irq number of the controller |
| 175 | * @active_uic_cmd: handle of active UIC command | 175 | * @active_uic_cmd: handle of active UIC command |
| 176 | * @uic_cmd_mutex: mutex for uic command | 176 | * @uic_cmd_mutex: mutex for uic command |
| 177 | * @ufshcd_tm_wait_queue: wait queue for task management | 177 | * @tm_wq: wait queue for task management |
| 178 | * @tm_tag_wq: wait queue for free task management slots | ||
| 179 | * @tm_slots_in_use: bit map of task management request slots in use | ||
| 178 | * @pwr_done: completion for power mode change | 180 | * @pwr_done: completion for power mode change |
| 179 | * @tm_condition: condition variable for task management | 181 | * @tm_condition: condition variable for task management |
| 180 | * @ufshcd_state: UFSHCD states | 182 | * @ufshcd_state: UFSHCD states |
| 183 | * @eh_flags: Error handling flags | ||
| 181 | * @intr_mask: Interrupt Mask Bits | 184 | * @intr_mask: Interrupt Mask Bits |
| 182 | * @ee_ctrl_mask: Exception event control mask | 185 | * @ee_ctrl_mask: Exception event control mask |
| 183 | * @feh_workq: Work queue for fatal controller error handling | 186 | * @eh_work: Worker to handle UFS errors that require s/w attention |
| 184 | * @eeh_work: Worker to handle exception events | 187 | * @eeh_work: Worker to handle exception events |
| 185 | * @errors: HBA errors | 188 | * @errors: HBA errors |
| 189 | * @uic_error: UFS interconnect layer error status | ||
| 190 | * @saved_err: sticky error mask | ||
| 191 | * @saved_uic_err: sticky UIC error mask | ||
| 186 | * @dev_cmd: ufs device management command information | 192 | * @dev_cmd: ufs device management command information |
| 187 | * @auto_bkops_enabled: to track whether bkops is enabled in device | 193 | * @auto_bkops_enabled: to track whether bkops is enabled in device |
| 188 | */ | 194 | */ |
| @@ -217,21 +223,27 @@ struct ufs_hba { | |||
| 217 | struct uic_command *active_uic_cmd; | 223 | struct uic_command *active_uic_cmd; |
| 218 | struct mutex uic_cmd_mutex; | 224 | struct mutex uic_cmd_mutex; |
| 219 | 225 | ||
| 220 | wait_queue_head_t ufshcd_tm_wait_queue; | 226 | wait_queue_head_t tm_wq; |
| 227 | wait_queue_head_t tm_tag_wq; | ||
| 221 | unsigned long tm_condition; | 228 | unsigned long tm_condition; |
| 229 | unsigned long tm_slots_in_use; | ||
| 222 | 230 | ||
| 223 | struct completion *pwr_done; | 231 | struct completion *pwr_done; |
| 224 | 232 | ||
| 225 | u32 ufshcd_state; | 233 | u32 ufshcd_state; |
| 234 | u32 eh_flags; | ||
| 226 | u32 intr_mask; | 235 | u32 intr_mask; |
| 227 | u16 ee_ctrl_mask; | 236 | u16 ee_ctrl_mask; |
| 228 | 237 | ||
| 229 | /* Work Queues */ | 238 | /* Work Queues */ |
| 230 | struct work_struct feh_workq; | 239 | struct work_struct eh_work; |
| 231 | struct work_struct eeh_work; | 240 | struct work_struct eeh_work; |
| 232 | 241 | ||
| 233 | /* HBA Errors */ | 242 | /* HBA Errors */ |
| 234 | u32 errors; | 243 | u32 errors; |
| 244 | u32 uic_error; | ||
| 245 | u32 saved_err; | ||
| 246 | u32 saved_uic_err; | ||
| 235 | 247 | ||
| 236 | /* Device management request data */ | 248 | /* Device management request data */ |
| 237 | struct ufs_dev_cmd dev_cmd; | 249 | struct ufs_dev_cmd dev_cmd; |
| @@ -263,6 +275,8 @@ static inline void check_upiu_size(void) | |||
| 263 | GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); | 275 | GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); |
| 264 | } | 276 | } |
| 265 | 277 | ||
| 278 | extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state); | ||
| 279 | extern int ufshcd_resume(struct ufs_hba *hba); | ||
| 266 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); | 280 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); |
| 267 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); | 281 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); |
| 268 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); | 282 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); |
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 0475c6619a68..9abc7e32b43d 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h | |||
| @@ -304,10 +304,10 @@ enum { | |||
| 304 | * @size: size of physical segment DW-3 | 304 | * @size: size of physical segment DW-3 |
| 305 | */ | 305 | */ |
| 306 | struct ufshcd_sg_entry { | 306 | struct ufshcd_sg_entry { |
| 307 | u32 base_addr; | 307 | __le32 base_addr; |
| 308 | u32 upper_addr; | 308 | __le32 upper_addr; |
| 309 | u32 reserved; | 309 | __le32 reserved; |
| 310 | u32 size; | 310 | __le32 size; |
| 311 | }; | 311 | }; |
| 312 | 312 | ||
| 313 | /** | 313 | /** |
| @@ -330,10 +330,10 @@ struct utp_transfer_cmd_desc { | |||
| 330 | * @dword3: Descriptor Header DW3 | 330 | * @dword3: Descriptor Header DW3 |
| 331 | */ | 331 | */ |
| 332 | struct request_desc_header { | 332 | struct request_desc_header { |
| 333 | u32 dword_0; | 333 | __le32 dword_0; |
| 334 | u32 dword_1; | 334 | __le32 dword_1; |
| 335 | u32 dword_2; | 335 | __le32 dword_2; |
| 336 | u32 dword_3; | 336 | __le32 dword_3; |
| 337 | }; | 337 | }; |
| 338 | 338 | ||
| 339 | /** | 339 | /** |
| @@ -352,16 +352,16 @@ struct utp_transfer_req_desc { | |||
| 352 | struct request_desc_header header; | 352 | struct request_desc_header header; |
| 353 | 353 | ||
| 354 | /* DW 4-5*/ | 354 | /* DW 4-5*/ |
| 355 | u32 command_desc_base_addr_lo; | 355 | __le32 command_desc_base_addr_lo; |
| 356 | u32 command_desc_base_addr_hi; | 356 | __le32 command_desc_base_addr_hi; |
| 357 | 357 | ||
| 358 | /* DW 6 */ | 358 | /* DW 6 */ |
| 359 | u16 response_upiu_length; | 359 | __le16 response_upiu_length; |
| 360 | u16 response_upiu_offset; | 360 | __le16 response_upiu_offset; |
| 361 | 361 | ||
| 362 | /* DW 7 */ | 362 | /* DW 7 */ |
| 363 | u16 prd_table_length; | 363 | __le16 prd_table_length; |
| 364 | u16 prd_table_offset; | 364 | __le16 prd_table_offset; |
| 365 | }; | 365 | }; |
| 366 | 366 | ||
| 367 | /** | 367 | /** |
| @@ -376,10 +376,10 @@ struct utp_task_req_desc { | |||
| 376 | struct request_desc_header header; | 376 | struct request_desc_header header; |
| 377 | 377 | ||
| 378 | /* DW 4-11 */ | 378 | /* DW 4-11 */ |
| 379 | u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; | 379 | __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; |
| 380 | 380 | ||
| 381 | /* DW 12-19 */ | 381 | /* DW 12-19 */ |
| 382 | u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; | 382 | __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; |
| 383 | }; | 383 | }; |
| 384 | 384 | ||
| 385 | #endif /* End of Header */ | 385 | #endif /* End of Header */ |
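Changing these descriptor fields from u32/u16 to __le32/__le16 documents that the host-memory descriptors are little-endian by specification, independent of CPU byte order, and lets sparse flag any access that skips the conversion helpers. A hedged illustration of the pattern this implies when filling a PRDT entry (the scatterlist-derived values are assumptions, not code from this patch):

    /* fill one PRDT entry from a DMA-mapped scatterlist element */
    static void fill_prd_sketch(struct ufshcd_sg_entry *prd, struct scatterlist *sg)
    {
    	dma_addr_t addr = sg_dma_address(sg);

    	prd->base_addr  = cpu_to_le32(lower_32_bits(addr));
    	prd->upper_addr = cpu_to_le32(upper_32_bits(addr));
    	prd->size       = cpu_to_le32(sg_dma_len(sg) - 1); /* assumed: PRDT stores byte count minus 1 */
    }

Reads go the other way, e.g. le16_to_cpu(req_desc->prd_table_length).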
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index db3b494e5926..d4727b339474 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
| @@ -73,17 +73,12 @@ struct virtio_scsi_vq { | |||
| 73 | * queue, and also lets the driver optimize the IRQ affinity for the virtqueues | 73 | * queue, and also lets the driver optimize the IRQ affinity for the virtqueues |
| 74 | * (each virtqueue's affinity is set to the CPU that "owns" the queue). | 74 | * (each virtqueue's affinity is set to the CPU that "owns" the queue). |
| 75 | * | 75 | * |
| 76 | * An interesting effect of this policy is that only writes to req_vq need to | 76 | * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq |
| 77 | * take the tgt_lock. Read can be done outside the lock because: | 77 | * could be done locklessly, but we do not do it yet. |
| 78 | * | 78 | * |
| 79 | * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1. | 79 | * Decrements of reqs are never concurrent with writes of req_vq: before the |
| 80 | * In that case, no other CPU is reading req_vq: even if they were in | 80 | * decrement reqs will be != 0; after the decrement the virtqueue completion |
| 81 | * virtscsi_queuecommand_multi, they would be spinning on tgt_lock. | 81 | * routine will not use the req_vq so it can be changed by a new request. |
| 82 | * | ||
| 83 | * - reads of req_vq only occur when the target is not idle (reqs != 0). | ||
| 84 | * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq. | ||
| 85 | * | ||
| 86 | * Similarly, decrements of reqs are never concurrent with writes of req_vq. | ||
| 87 | * Thus they can happen outside the tgt_lock, provided of course we make reqs | 82 | * Thus they can happen outside the tgt_lock, provided of course we make reqs |
| 88 | * an atomic_t. | 83 | * an atomic_t. |
| 89 | */ | 84 | */ |
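Spelled out, the protocol the rewritten comment describes has a single writer of req_vq (the request that takes tgt->reqs from 0 to 1) and readers that only run while reqs is non-zero. A hedged sketch of the submission side, simplified from virtscsi_pick_vq() further down (the queue_num wrap-around and error paths are elided):

    static struct virtio_scsi_vq *pick_vq_sketch(struct virtio_scsi *vscsi,
    					struct virtio_scsi_target_state *tgt)
    {
    	struct virtio_scsi_vq *vq;
    	unsigned long flags;

    	spin_lock_irqsave(&tgt->tgt_lock, flags);
    	if (atomic_inc_return(&tgt->reqs) > 1)
    		vq = tgt->req_vq;	/* target already owned by some queue */
    	else {
    		/* no request in flight: this CPU may claim the target */
    		vq = &vscsi->req_vqs[smp_processor_id() % vscsi->num_queues];
    		tgt->req_vq = vq;
    	}
    	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
    	return vq;
    }

The completion path only does atomic_dec(&tgt->reqs) after it is done with the virtqueue, so by the time reqs can drop back to zero nothing is still reading req_vq.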
| @@ -204,7 +199,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) | |||
| 204 | set_driver_byte(sc, DRIVER_SENSE); | 199 | set_driver_byte(sc, DRIVER_SENSE); |
| 205 | } | 200 | } |
| 206 | 201 | ||
| 207 | mempool_free(cmd, virtscsi_cmd_pool); | ||
| 208 | sc->scsi_done(sc); | 202 | sc->scsi_done(sc); |
| 209 | 203 | ||
| 210 | atomic_dec(&tgt->reqs); | 204 | atomic_dec(&tgt->reqs); |
| @@ -238,38 +232,6 @@ static void virtscsi_req_done(struct virtqueue *vq) | |||
| 238 | int index = vq->index - VIRTIO_SCSI_VQ_BASE; | 232 | int index = vq->index - VIRTIO_SCSI_VQ_BASE; |
| 239 | struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; | 233 | struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; |
| 240 | 234 | ||
| 241 | /* | ||
| 242 | * Read req_vq before decrementing the reqs field in | ||
| 243 | * virtscsi_complete_cmd. | ||
| 244 | * | ||
| 245 | * With barriers: | ||
| 246 | * | ||
| 247 | * CPU #0 virtscsi_queuecommand_multi (CPU #1) | ||
| 248 | * ------------------------------------------------------------ | ||
| 249 | * lock vq_lock | ||
| 250 | * read req_vq | ||
| 251 | * read reqs (reqs = 1) | ||
| 252 | * write reqs (reqs = 0) | ||
| 253 | * increment reqs (reqs = 1) | ||
| 254 | * write req_vq | ||
| 255 | * | ||
| 256 | * Possible reordering without barriers: | ||
| 257 | * | ||
| 258 | * CPU #0 virtscsi_queuecommand_multi (CPU #1) | ||
| 259 | * ------------------------------------------------------------ | ||
| 260 | * lock vq_lock | ||
| 261 | * read reqs (reqs = 1) | ||
| 262 | * write reqs (reqs = 0) | ||
| 263 | * increment reqs (reqs = 1) | ||
| 264 | * write req_vq | ||
| 265 | * read (wrong) req_vq | ||
| 266 | * | ||
| 267 | * We do not need a full smp_rmb, because req_vq is required to get | ||
| 268 | * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored | ||
| 269 | * in the virtqueue as the user token. | ||
| 270 | */ | ||
| 271 | smp_read_barrier_depends(); | ||
| 272 | |||
| 273 | virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); | 235 | virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); |
| 274 | }; | 236 | }; |
| 275 | 237 | ||
| @@ -279,8 +241,6 @@ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) | |||
| 279 | 241 | ||
| 280 | if (cmd->comp) | 242 | if (cmd->comp) |
| 281 | complete_all(cmd->comp); | 243 | complete_all(cmd->comp); |
| 282 | else | ||
| 283 | mempool_free(cmd, virtscsi_cmd_pool); | ||
| 284 | } | 244 | } |
| 285 | 245 | ||
| 286 | static void virtscsi_ctrl_done(struct virtqueue *vq) | 246 | static void virtscsi_ctrl_done(struct virtqueue *vq) |
| @@ -496,10 +456,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, | |||
| 496 | struct virtio_scsi_vq *req_vq, | 456 | struct virtio_scsi_vq *req_vq, |
| 497 | struct scsi_cmnd *sc) | 457 | struct scsi_cmnd *sc) |
| 498 | { | 458 | { |
| 499 | struct virtio_scsi_cmd *cmd; | ||
| 500 | int ret; | ||
| 501 | |||
| 502 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); | 459 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); |
| 460 | struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); | ||
| 461 | |||
| 503 | BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); | 462 | BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); |
| 504 | 463 | ||
| 505 | /* TODO: check feature bit and fail if unsupported? */ | 464 | /* TODO: check feature bit and fail if unsupported? */ |
| @@ -508,11 +467,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, | |||
| 508 | dev_dbg(&sc->device->sdev_gendev, | 467 | dev_dbg(&sc->device->sdev_gendev, |
| 509 | "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); | 468 | "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); |
| 510 | 469 | ||
| 511 | ret = SCSI_MLQUEUE_HOST_BUSY; | ||
| 512 | cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC); | ||
| 513 | if (!cmd) | ||
| 514 | goto out; | ||
| 515 | |||
| 516 | memset(cmd, 0, sizeof(*cmd)); | 470 | memset(cmd, 0, sizeof(*cmd)); |
| 517 | cmd->sc = sc; | 471 | cmd->sc = sc; |
| 518 | cmd->req.cmd = (struct virtio_scsi_cmd_req){ | 472 | cmd->req.cmd = (struct virtio_scsi_cmd_req){ |
| @@ -531,13 +485,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, | |||
| 531 | 485 | ||
| 532 | if (virtscsi_kick_cmd(req_vq, cmd, | 486 | if (virtscsi_kick_cmd(req_vq, cmd, |
| 533 | sizeof cmd->req.cmd, sizeof cmd->resp.cmd, | 487 | sizeof cmd->req.cmd, sizeof cmd->resp.cmd, |
| 534 | GFP_ATOMIC) == 0) | 488 | GFP_ATOMIC) != 0) |
| 535 | ret = 0; | 489 | return SCSI_MLQUEUE_HOST_BUSY; |
| 536 | else | 490 | return 0; |
| 537 | mempool_free(cmd, virtscsi_cmd_pool); | ||
| 538 | |||
| 539 | out: | ||
| 540 | return ret; | ||
| 541 | } | 491 | } |
| 542 | 492 | ||
| 543 | static int virtscsi_queuecommand_single(struct Scsi_Host *sh, | 493 | static int virtscsi_queuecommand_single(struct Scsi_Host *sh, |
| @@ -560,12 +510,8 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, | |||
| 560 | 510 | ||
| 561 | spin_lock_irqsave(&tgt->tgt_lock, flags); | 511 | spin_lock_irqsave(&tgt->tgt_lock, flags); |
| 562 | 512 | ||
| 563 | /* | ||
| 564 | * The memory barrier after atomic_inc_return matches | ||
| 565 | * the smp_read_barrier_depends() in virtscsi_req_done. | ||
| 566 | */ | ||
| 567 | if (atomic_inc_return(&tgt->reqs) > 1) | 513 | if (atomic_inc_return(&tgt->reqs) > 1) |
| 568 | vq = ACCESS_ONCE(tgt->req_vq); | 514 | vq = tgt->req_vq; |
| 569 | else { | 515 | else { |
| 570 | queue_num = smp_processor_id(); | 516 | queue_num = smp_processor_id(); |
| 571 | while (unlikely(queue_num >= vscsi->num_queues)) | 517 | while (unlikely(queue_num >= vscsi->num_queues)) |
| @@ -683,6 +629,7 @@ static struct scsi_host_template virtscsi_host_template_single = { | |||
| 683 | .name = "Virtio SCSI HBA", | 629 | .name = "Virtio SCSI HBA", |
| 684 | .proc_name = "virtio_scsi", | 630 | .proc_name = "virtio_scsi", |
| 685 | .this_id = -1, | 631 | .this_id = -1, |
| 632 | .cmd_size = sizeof(struct virtio_scsi_cmd), | ||
| 686 | .queuecommand = virtscsi_queuecommand_single, | 633 | .queuecommand = virtscsi_queuecommand_single, |
| 687 | .eh_abort_handler = virtscsi_abort, | 634 | .eh_abort_handler = virtscsi_abort, |
| 688 | .eh_device_reset_handler = virtscsi_device_reset, | 635 | .eh_device_reset_handler = virtscsi_device_reset, |
| @@ -699,6 +646,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { | |||
| 699 | .name = "Virtio SCSI HBA", | 646 | .name = "Virtio SCSI HBA", |
| 700 | .proc_name = "virtio_scsi", | 647 | .proc_name = "virtio_scsi", |
| 701 | .this_id = -1, | 648 | .this_id = -1, |
| 649 | .cmd_size = sizeof(struct virtio_scsi_cmd), | ||
| 702 | .queuecommand = virtscsi_queuecommand_multi, | 650 | .queuecommand = virtscsi_queuecommand_multi, |
| 703 | .eh_abort_handler = virtscsi_abort, | 651 | .eh_abort_handler = virtscsi_abort, |
| 704 | .eh_device_reset_handler = virtscsi_device_reset, | 652 | .eh_device_reset_handler = virtscsi_device_reset, |
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h index 25ac6283b9c7..a2594afe05c7 100644 --- a/include/scsi/osd_protocol.h +++ b/include/scsi/osd_protocol.h | |||
| @@ -263,16 +263,16 @@ static inline struct osd_cdb_head *osd_cdb_head(struct osd_cdb *ocdb) | |||
| 263 | * Ex name = FORMAT_OSD we have OSD_ACT_FORMAT_OSD && OSDv1_ACT_FORMAT_OSD | 263 | * Ex name = FORMAT_OSD we have OSD_ACT_FORMAT_OSD && OSDv1_ACT_FORMAT_OSD |
| 264 | */ | 264 | */ |
| 265 | #define OSD_ACT___(Name, Num) \ | 265 | #define OSD_ACT___(Name, Num) \ |
| 266 | OSD_ACT_##Name = __constant_cpu_to_be16(0x8880 + Num), \ | 266 | OSD_ACT_##Name = cpu_to_be16(0x8880 + Num), \ |
| 267 | OSDv1_ACT_##Name = __constant_cpu_to_be16(0x8800 + Num), | 267 | OSDv1_ACT_##Name = cpu_to_be16(0x8800 + Num), |
| 268 | 268 | ||
| 269 | /* V2 only actions */ | 269 | /* V2 only actions */ |
| 270 | #define OSD_ACT_V2(Name, Num) \ | 270 | #define OSD_ACT_V2(Name, Num) \ |
| 271 | OSD_ACT_##Name = __constant_cpu_to_be16(0x8880 + Num), | 271 | OSD_ACT_##Name = cpu_to_be16(0x8880 + Num), |
| 272 | 272 | ||
| 273 | #define OSD_ACT_V1_V2(Name, Num1, Num2) \ | 273 | #define OSD_ACT_V1_V2(Name, Num1, Num2) \ |
| 274 | OSD_ACT_##Name = __constant_cpu_to_be16(Num2), \ | 274 | OSD_ACT_##Name = cpu_to_be16(Num2), \ |
| 275 | OSDv1_ACT_##Name = __constant_cpu_to_be16(Num1), | 275 | OSDv1_ACT_##Name = cpu_to_be16(Num1), |
| 276 | 276 | ||
| 277 | enum osd_service_actions { | 277 | enum osd_service_actions { |
| 278 | OSD_ACT_V2(OBJECT_STRUCTURE_CHECK, 0x00) | 278 | OSD_ACT_V2(OBJECT_STRUCTURE_CHECK, 0x00) |
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index dd7c998221b3..e016e2ac38df 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h | |||
| @@ -133,6 +133,15 @@ struct scsi_cmnd { | |||
| 133 | unsigned char tag; /* SCSI-II queued command tag */ | 133 | unsigned char tag; /* SCSI-II queued command tag */ |
| 134 | }; | 134 | }; |
| 135 | 135 | ||
| 136 | /* | ||
| 137 | * Return the driver private allocation behind the command. | ||
| 138 | * Only works if cmd_size is set in the host template. | ||
| 139 | */ | ||
| 140 | static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd) | ||
| 141 | { | ||
| 142 | return cmd + 1; | ||
| 143 | } | ||
| 144 | |||
| 136 | /* make sure not to use it with REQ_TYPE_BLOCK_PC commands */ | 145 | /* make sure not to use it with REQ_TYPE_BLOCK_PC commands */ |
| 137 | static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) | 146 | static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) |
| 138 | { | 147 | { |
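scsi_cmd_priv() pairs with the new cmd_size host-template field used by virtio_scsi above: the midlayer allocates the driver's per-command data directly behind the scsi_cmnd, so an LLD no longer needs its own mempool. A hedged sketch of the pattern with purely illustrative names (not a real driver):

    struct my_cmd_priv {
    	u32 tag;
    	/* ...whatever the LLD used to mempool_alloc() per command... */
    };

    static int my_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
    {
    	struct my_cmd_priv *priv = scsi_cmd_priv(sc);	/* memory sits right after sc */

    	memset(priv, 0, sizeof(*priv));
    	/* build and submit the request using priv... */
    	return 0;
    }

    static struct scsi_host_template my_template = {
    	.name		= "example",
    	.queuecommand	= my_queuecommand,
    	.cmd_size	= sizeof(struct my_cmd_priv),	/* midlayer allocates this per command */
    	.this_id	= -1,
    };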
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h index 20fdfc2526ad..36c4114ed9bc 100644 --- a/include/scsi/scsi_driver.h +++ b/include/scsi/scsi_driver.h | |||
| @@ -4,17 +4,17 @@ | |||
| 4 | #include <linux/device.h> | 4 | #include <linux/device.h> |
| 5 | 5 | ||
| 6 | struct module; | 6 | struct module; |
| 7 | struct request; | ||
| 7 | struct scsi_cmnd; | 8 | struct scsi_cmnd; |
| 8 | struct scsi_device; | 9 | struct scsi_device; |
| 9 | struct request; | ||
| 10 | struct request_queue; | ||
| 11 | |||
| 12 | 10 | ||
| 13 | struct scsi_driver { | 11 | struct scsi_driver { |
| 14 | struct module *owner; | 12 | struct module *owner; |
| 15 | struct device_driver gendrv; | 13 | struct device_driver gendrv; |
| 16 | 14 | ||
| 17 | void (*rescan)(struct device *); | 15 | void (*rescan)(struct device *); |
| 16 | int (*init_command)(struct scsi_cmnd *); | ||
| 17 | void (*uninit_command)(struct scsi_cmnd *); | ||
| 18 | int (*done)(struct scsi_cmnd *); | 18 | int (*done)(struct scsi_cmnd *); |
| 19 | int (*eh_action)(struct scsi_cmnd *, int); | 19 | int (*eh_action)(struct scsi_cmnd *, int); |
| 20 | }; | 20 | }; |
| @@ -31,8 +31,5 @@ extern int scsi_register_interface(struct class_interface *); | |||
| 31 | 31 | ||
| 32 | int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req); | 32 | int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req); |
| 33 | int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req); | 33 | int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req); |
| 34 | int scsi_prep_state_check(struct scsi_device *sdev, struct request *req); | ||
| 35 | int scsi_prep_return(struct request_queue *q, struct request *req, int ret); | ||
| 36 | int scsi_prep_fn(struct request_queue *, struct request *); | ||
| 37 | 34 | ||
| 38 | #endif /* _SCSI_SCSI_DRIVER_H */ | 35 | #endif /* _SCSI_SCSI_DRIVER_H */ |
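The new init_command/uninit_command hooks replace the removed scsi_prep_* entry points: instead of the midlayer exporting its prep helpers, an upper-level driver registers callbacks that build and tear down the command, presumably returning the usual BLKPREP_* codes (the conversion of sd/sr is not part of this excerpt). A purely illustrative sketch with hypothetical names:

    static int my_init_command(struct scsi_cmnd *cmd)
    {
    	/* translate the block request into a CDB, set transfer length, etc. */
    	return BLKPREP_OK;	/* assumed to follow the BLKPREP_* convention */
    }

    static void my_uninit_command(struct scsi_cmnd *cmd)
    {
    	/* release anything my_init_command() set up for this command */
    }

    static struct scsi_driver my_uld = {
    	.owner		= THIS_MODULE,
    	.gendrv		= { .name = "my_uld" },
    	.init_command	= my_init_command,
    	.uninit_command	= my_uninit_command,
    };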
