Diffstat (limited to 'drivers')
-rw-r--r--  drivers/message/fusion/mptbase.c | 5
-rw-r--r--  drivers/message/fusion/mptbase.h | 4
-rw-r--r--  drivers/message/fusion/mptctl.c | 4
-rw-r--r--  drivers/message/fusion/mptfc.c | 17
-rw-r--r--  drivers/message/fusion/mptsas.c | 211
-rw-r--r--  drivers/message/fusion/mptscsih.c | 9
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 90
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 11
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 34
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 114
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 36
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 163
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 50
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 109
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h | 183
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 38
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 37
-rw-r--r--  drivers/scsi/FlashPoint.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 21
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 88
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 14
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 136
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.h | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 488
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 27
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 139
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 5
-rw-r--r--  drivers/scsi/constants.c | 20
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 17
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c | 7
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.c | 6
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 2
-rw-r--r--  drivers/scsi/eata.c | 2
-rw-r--r--  drivers/scsi/esp_scsi.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 4
-rw-r--r--  drivers/scsi/fnic/vnic_devcmd.h | 2
-rw-r--r--  drivers/scsi/gdth.c | 430
-rw-r--r--  drivers/scsi/gdth.h | 952
-rw-r--r--  drivers/scsi/gdth_ioctl.h | 366
-rw-r--r--  drivers/scsi/gdth_proc.c | 42
-rw-r--r--  drivers/scsi/gdth_proc.h | 4
-rw-r--r--  drivers/scsi/hpsa.c | 793
-rw-r--r--  drivers/scsi/hpsa.h | 136
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 204
-rw-r--r--  drivers/scsi/ibmmca.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 8
-rw-r--r--  drivers/scsi/libiscsi.c | 53
-rw-r--r--  drivers/scsi/libsrp.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 118
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 2473
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 98
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 145
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 735
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 265
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 547
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 111
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 85
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 46
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 329
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 82
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 7
-rw-r--r--  drivers/scsi/mac_esp.c | 95
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 246
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 36
-rw-r--r--  drivers/scsi/mpt2sas/Kconfig | 1
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2.h | 16
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 25
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_history.txt | 93
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_init.h | 24
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 77
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 6
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 18
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 14
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 51
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 13
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 266
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 196
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 732
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 155
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 33
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 32
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 120
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 110
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 151
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 135
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 14
-rw-r--r--  drivers/scsi/raid_class.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 40
-rw-r--r--  drivers/scsi/scsi_lib.c | 10
-rw-r--r--  drivers/scsi/scsi_sas_internal.h | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 9
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 18
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 26
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 103
-rw-r--r--  drivers/scsi/sd.c | 54
-rw-r--r--  drivers/scsi/ses.c | 10
-rw-r--r--  drivers/scsi/u14-34f.c | 2
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 3
116 files changed, 9988 insertions(+), 3291 deletions(-)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 44d2037e9e56..5382b5a44aff 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -126,8 +126,6 @@ static int mfcounter = 0;
  *  Public data...
  */
 
-static struct proc_dir_entry *mpt_proc_root_dir;
-
 #define WHOINIT_UNKNOWN		0xAA
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -146,6 +144,9 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 static MPT_RESETHANDLER	 MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 static struct mpt_pci_driver 	*MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *mpt_proc_root_dir;
+#endif
 
 /*
  *  Driver Callback Index's
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b4948671eb92..9718c8f2e959 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.13"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.13"
+#define MPT_LINUX_VERSION_COMMON	"3.04.14"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.14"
 #define WHAT_MAGIC_STRING	"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 352acd05c46b..caa8f568a41c 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -360,8 +360,8 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 	u16		 iocstatus;
 
 	/* bus reset is only good for SCSI IO, RAID PASSTHRU */
-	if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ||
-	    (function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
+	if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+	      function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
 		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
 			"TaskMgmt, not SCSI_IO!!\n", ioc->name));
 		return -EPERM;
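
The rewritten condition above is a straight De Morgan fix: the old test `!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) || (function == MPI_FUNCTION_SCSI_IO_REQUEST)` is true for plain SCSI I/O requests, so exactly the function the comment says is allowed was rejected with -EPERM. A minimal standalone sketch of the two predicates (the numeric codes are illustrative stand-ins, not the real MPI constants):

#include <stdbool.h>
#include <stdio.h>

#define RAID_PASSTHROUGH 0x2a	/* stand-in for MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH */
#define SCSI_IO		 0x00	/* stand-in for MPI_FUNCTION_SCSI_IO_REQUEST */

static bool old_reject(int f)	/* pre-patch condition */
{
	return !(f == RAID_PASSTHROUGH) || (f == SCSI_IO);
}

static bool new_reject(int f)	/* post-patch condition */
{
	return !(f == RAID_PASSTHROUGH || f == SCSI_IO);
}

int main(void)
{
	int codes[] = { RAID_PASSTHROUGH, SCSI_IO, 0x0f /* anything else */ };

	for (int i = 0; i < 3; i++)
		printf("0x%02x: old_reject=%d new_reject=%d\n",
		       codes[i], old_reject(codes[i]), new_reject(codes[i]));
	/* old_reject wrongly yields 1 for SCSI_IO; new_reject yields 0 */
	return 0;
}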
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index ebf6ae024da4..612ab3c51a6b 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -195,29 +195,34 @@ mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
 	unsigned long	flags;
 	int		ready;
 	MPT_ADAPTER	*ioc;
+	int		loops = 40;	/* seconds */
 
 	hd = shost_priv(SCpnt->device->host);
 	ioc = hd->ioc;
 	spin_lock_irqsave(shost->host_lock, flags);
-	while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY) {
+	while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
+	 || (loops > 0 && ioc->active == 0)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
 			"mptfc_block_error_handler.%d: %d:%d, port status is "
-			"DID_IMM_RETRY, deferring %s recovery.\n",
+			"%x, active flag %d, deferring %s recovery.\n",
 			ioc->name, ioc->sh->host_no,
-			SCpnt->device->id, SCpnt->device->lun, caller));
+			SCpnt->device->id, SCpnt->device->lun,
+			ready, ioc->active, caller));
 		msleep(1000);
 		spin_lock_irqsave(shost->host_lock, flags);
+		loops --;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
-	if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata) {
+	if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
+	 || ioc->active == 0) {
 		dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
 			"%s.%d: %d:%d, failing recovery, "
-			"port state %d, vdevice %p.\n", caller,
+			"port state %x, active %d, vdevice %p.\n", caller,
 			ioc->name, ioc->sh->host_no,
 			SCpnt->device->id, SCpnt->device->lun, ready,
-			SCpnt->device->hostdata));
+			ioc->active, SCpnt->device->hostdata));
 		return FAILED;
 	}
 	dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 83873e3d0ce7..c20bbe45da82 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1075,6 +1075,19 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
 	return 0;
 }
 
+static void
+mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
+{
+	scsi_device_set_state(sdev, SDEV_BLOCK);
+}
+
+static void
+mptsas_block_io_starget(struct scsi_target *starget)
+{
+	if (starget)
+		starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
+}
+
 /**
  * mptsas_target_reset_queue
  *
@@ -1098,10 +1111,11 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
 	id = sas_event_data->TargetID;
 	channel = sas_event_data->Bus;
 
-	if (!(vtarget = mptsas_find_vtarget(ioc, channel, id)))
-		return;
-
-	vtarget->deleted = 1; /* block IO */
+	vtarget = mptsas_find_vtarget(ioc, channel, id);
+	if (vtarget) {
+		mptsas_block_io_starget(vtarget->starget);
+		vtarget->deleted = 1; /* block IO */
+	}
 
 	target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
 	    GFP_ATOMIC);
@@ -1868,7 +1882,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	if (ioc->sas_discovery_quiesce_io)
 		return SCSI_MLQUEUE_HOST_BUSY;
 
-//	scsi_print_command(SCpnt);
+	if (ioc->debug_level & MPT_DEBUG_SCSI)
+		scsi_print_command(SCpnt);
 
 	return mptscsih_qcmd(SCpnt,done);
 }
@@ -2686,6 +2701,187 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
 	return error;
 }
 
+struct rep_manu_request{
+	u8 smp_frame_type;
+	u8 function;
+	u8 reserved;
+	u8 request_length;
+};
+
+struct rep_manu_reply{
+	u8 smp_frame_type; /* 0x41 */
+	u8 function; /* 0x01 */
+	u8 function_result;
+	u8 response_length;
+	u16 expander_change_count;
+	u8 reserved0[2];
+	u8 sas_format:1;
+	u8 reserved1:7;
+	u8 reserved2[3];
+	u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+	u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+	u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+	u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+	u16 component_id;
+	u8 component_revision_id;
+	u8 reserved3;
+	u8 vendor_specific[8];
+};
+
+/**
+ * mptsas_exp_repmanufacture_info -
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
+	u64 sas_address, struct sas_expander_device *edev)
+{
+	MPT_FRAME_HDR *mf;
+	SmpPassthroughRequest_t *smpreq;
+	SmpPassthroughReply_t *smprep;
+	struct rep_manu_reply *manufacture_reply;
+	struct rep_manu_request *manufacture_request;
+	int ret;
+	int flagsLength;
+	unsigned long timeleft;
+	char *psge;
+	unsigned long flags;
+	void *data_out = NULL;
+	dma_addr_t data_out_dma = 0;
+	u32 sz;
+
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+	if (ioc->ioc_reset_in_progress) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
+			__func__, ioc->name);
+		return -EFAULT;
+	}
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+	ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
+	if (ret)
+		goto out;
+
+	mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
+	if (!mf) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	smpreq = (SmpPassthroughRequest_t *)mf;
+	memset(smpreq, 0, sizeof(*smpreq));
+
+	sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
+
+	data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
+	if (!data_out) {
+		printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		ret = -ENOMEM;
+		goto put_mf;
+	}
+
+	manufacture_request = data_out;
+	manufacture_request->smp_frame_type = 0x40;
+	manufacture_request->function = 1;
+	manufacture_request->reserved = 0;
+	manufacture_request->request_length = 0;
+
+	smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
+	smpreq->PhysicalPort = 0xFF;
+	*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
+	smpreq->RequestDataLength = sizeof(struct rep_manu_request);
+
+	psge = (char *)
+		(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
+
+	flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+		MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+		MPI_SGE_FLAGS_HOST_TO_IOC |
+		MPI_SGE_FLAGS_END_OF_BUFFER;
+	flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+	flagsLength |= sizeof(struct rep_manu_request);
+
+	ioc->add_sge(psge, flagsLength, data_out_dma);
+	psge += ioc->SGE_size;
+
+	flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+		MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+		MPI_SGE_FLAGS_IOC_TO_HOST |
+		MPI_SGE_FLAGS_END_OF_BUFFER;
+	flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+	flagsLength |= sizeof(struct rep_manu_reply);
+	ioc->add_sge(psge, flagsLength, data_out_dma +
+		sizeof(struct rep_manu_request));
+
+	INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
+	mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
+
+	timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
+	if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		ret = -ETIME;
+		mpt_free_msg_frame(ioc, mf);
+		mf = NULL;
+		if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out_free;
+		if (!timeleft)
+			mpt_HardResetHandler(ioc, CAN_SLEEP);
+		goto out_free;
+	}
+
+	mf = NULL;
+
+	if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
+		u8 *tmp;
+
+		smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
+		if (le16_to_cpu(smprep->ResponseDataLength) !=
+		    sizeof(struct rep_manu_reply))
+			goto out_free;
+
+		manufacture_reply = data_out + sizeof(struct rep_manu_request);
+		strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+			SAS_EXPANDER_VENDOR_ID_LEN);
+		strncpy(edev->product_id, manufacture_reply->product_id,
+			SAS_EXPANDER_PRODUCT_ID_LEN);
+		strncpy(edev->product_rev, manufacture_reply->product_rev,
+			SAS_EXPANDER_PRODUCT_REV_LEN);
+		edev->level = manufacture_reply->sas_format;
+		if (manufacture_reply->sas_format) {
+			strncpy(edev->component_vendor_id,
+				manufacture_reply->component_vendor_id,
+				SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+			tmp = (u8 *)&manufacture_reply->component_id;
+			edev->component_id = tmp[0] << 8 | tmp[1];
+			edev->component_revision_id =
+				manufacture_reply->component_revision_id;
+		}
+	} else {
+		printk(MYIOC_s_ERR_FMT
+			"%s: smp passthru reply failed to be returned\n",
+			ioc->name, __func__);
+		ret = -ENXIO;
+	}
+out_free:
+	if (data_out_dma)
+		pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
+put_mf:
+	if (mf)
+		mpt_free_msg_frame(ioc, mf);
+out_unlock:
+	CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
+	mutex_unlock(&ioc->sas_mgmt.mutex);
+out:
+	return ret;
+}
+
 static void
 mptsas_parse_device_info(struct sas_identify *identify,
 			 struct mptsas_devinfo *device_info)
@@ -2967,6 +3163,11 @@ static int mptsas_probe_one_phy(struct device *dev,
 			goto out;
 		}
 		mptsas_set_rphy(ioc, phy_info, rphy);
+		if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+		    identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
+			mptsas_exp_repmanufacture_info(ioc,
+				identify.sas_address,
+				rphy_to_expander_device(rphy));
 	}
 
  out:
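
The function added above implements SMP REPORT MANUFACTURER INFORMATION (request frame type 0x40, function 0x01) over the IOC's SMP passthrough and copies the fixed-width, non-NUL-terminated reply fields into the sas_expander_device with strncpy(). The one byte-order-sensitive field is component_id, which arrives big-endian on the SMP wire; hence the byte-wise extraction `tmp[0] << 8 | tmp[1]`. A sketch of the same extraction via the kernel's unaligned helper (not what the patch uses, merely equivalent):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Assumes struct rep_manu_reply from the hunk above is in scope. */
static u16 rep_manu_component_id(const struct rep_manu_reply *reply)
{
	/* reads the u16 in wire (big-endian) order, tolerating misalignment */
	return get_unaligned_be16(&reply->component_id);
}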
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 81279b3d694c..4a7d1afcb666 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1438,9 +1438,14 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	    && (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
 	    && (SCpnt->device->tagged_supported)) {
 		scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
-	} else {
+		if (SCpnt->request && SCpnt->request->ioprio) {
+			if (((SCpnt->request->ioprio & 0x7) == 1) ||
+			    !(SCpnt->request->ioprio & 0x7))
+				scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
+		}
+	} else
 		scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
-	}
+
 
 	/* Use the above information to set up the message frame
 	 */
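
The mptscsih hunk above wires block-layer I/O priorities into the SCSI tag message: on a tagged device, a request whose priority data (the low three bits of SCpnt->request->ioprio) is 0 or 1 is queued with a head-of-queue tag on top of the simple tag. A condensed restatement of that decision, with a hypothetical helper name and under the assumption that the low three bits carry the priority level (0 = highest):

/* Sketch only: mpt_choose_tag_flags() does not exist in the driver. */
static u32 mpt_choose_tag_flags(unsigned int ioprio)
{
	u32 scsictl = MPI_SCSIIO_CONTROL_SIMPLEQ;

	if (ioprio && (ioprio & 0x7) <= 1)	/* levels 0 and 1 jump the queue */
		scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
	return scsictl;
}

Roughly speaking, only the two highest priority levels within a scheduling class get the head-of-queue treatment; everything else keeps the ordinary simple-queue tag.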
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9d0c941b7d33..66d6c01fcf3e 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 /*
@@ -32,6 +32,7 @@
 #include <linux/seq_file.h>
 #include "zfcp_ext.h"
 #include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
 
 #define ZFCP_BUS_ID_SIZE	20
 
@@ -49,36 +50,6 @@ static struct kmem_cache *zfcp_cache_hw_align(const char *name,
 	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
 }
 
-static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
-{
-	int idx;
-
-	adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
-				    GFP_KERNEL);
-	if (!adapter->req_list)
-		return -ENOMEM;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
-		INIT_LIST_HEAD(&adapter->req_list[idx]);
-	return 0;
-}
-
-/**
- * zfcp_reqlist_isempty - is the request list empty
- * @adapter: pointer to struct zfcp_adapter
- *
- * Returns: true if list is empty, false otherwise
- */
-int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
-{
-	unsigned int idx;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
-		if (!list_empty(&adapter->req_list[idx]))
-			return 0;
-	return 1;
-}
-
 static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 {
 	struct ccw_device *cdev;
@@ -110,7 +81,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 	flush_work(&unit->scsi_work);
 
 out_unit:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 out_port:
 	zfcp_ccw_adapter_put(adapter);
 out_ccw_device:
@@ -255,7 +226,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
 	read_lock_irqsave(&port->unit_list_lock, flags);
 	list_for_each_entry(unit, &port->unit_list, list)
 		if (unit->fcp_lun == fcp_lun) {
-			if (!get_device(&unit->sysfs_device))
+			if (!get_device(&unit->dev))
 				unit = NULL;
 			read_unlock_irqrestore(&port->unit_list_lock, flags);
 			return unit;
@@ -280,7 +251,7 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->wwpn == wwpn) {
-			if (!get_device(&port->sysfs_device))
+			if (!get_device(&port->dev))
 				port = NULL;
 			read_unlock_irqrestore(&adapter->port_list_lock, flags);
 			return port;
@@ -298,10 +269,9 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
  */
 static void zfcp_unit_release(struct device *dev)
 {
-	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
-					      sysfs_device);
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
 
-	put_device(&unit->port->sysfs_device);
+	put_device(&unit->port->dev);
 	kfree(unit);
 }
 
@@ -318,11 +288,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 	struct zfcp_unit *unit;
 	int retval = -ENOMEM;
 
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 
 	unit = zfcp_get_unit_by_lun(port, fcp_lun);
 	if (unit) {
-		put_device(&unit->sysfs_device);
+		put_device(&unit->dev);
 		retval = -EEXIST;
 		goto err_out;
 	}
@@ -333,10 +303,10 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 
 	unit->port = port;
 	unit->fcp_lun = fcp_lun;
-	unit->sysfs_device.parent = &port->sysfs_device;
-	unit->sysfs_device.release = zfcp_unit_release;
+	unit->dev.parent = &port->dev;
+	unit->dev.release = zfcp_unit_release;
 
-	if (dev_set_name(&unit->sysfs_device, "0x%016llx",
+	if (dev_set_name(&unit->dev, "0x%016llx",
 			 (unsigned long long) fcp_lun)) {
 		kfree(unit);
 		goto err_out;
@@ -353,13 +323,12 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
 	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
 
-	if (device_register(&unit->sysfs_device)) {
-		put_device(&unit->sysfs_device);
+	if (device_register(&unit->dev)) {
+		put_device(&unit->dev);
 		goto err_out;
 	}
 
-	if (sysfs_create_group(&unit->sysfs_device.kobj,
-			       &zfcp_sysfs_unit_attrs))
+	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
 		goto err_out_put;
 
 	write_lock_irq(&port->unit_list_lock);
@@ -371,9 +340,9 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 	return unit;
 
 err_out_put:
-	device_unregister(&unit->sysfs_device);
+	device_unregister(&unit->dev);
 err_out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 	return ERR_PTR(retval);
 }
 
@@ -539,7 +508,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	if (zfcp_allocate_low_mem_buffers(adapter))
 		goto failed;
 
-	if (zfcp_reqlist_alloc(adapter))
+	adapter->req_list = zfcp_reqlist_alloc();
+	if (!adapter->req_list)
 		goto failed;
 
 	if (zfcp_dbf_adapter_register(adapter))
@@ -560,8 +530,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	INIT_LIST_HEAD(&adapter->erp_ready_head);
 	INIT_LIST_HEAD(&adapter->erp_running_head);
 
-	spin_lock_init(&adapter->req_list_lock);
-
 	rwlock_init(&adapter->erp_lock);
 	rwlock_init(&adapter->abort_lock);
 
@@ -640,8 +608,7 @@ void zfcp_device_unregister(struct device *dev,
 
 static void zfcp_port_release(struct device *dev)
 {
-	struct zfcp_port *port = container_of(dev, struct zfcp_port,
-					      sysfs_device);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 
 	zfcp_ccw_adapter_put(port->adapter);
 	kfree(port);
@@ -669,7 +636,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
 	if (port) {
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 		retval = -EEXIST;
 		goto err_out;
 	}
@@ -689,22 +656,21 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	port->d_id = d_id;
 	port->wwpn = wwpn;
 	port->rport_task = RPORT_NONE;
-	port->sysfs_device.parent = &adapter->ccw_device->dev;
-	port->sysfs_device.release = zfcp_port_release;
+	port->dev.parent = &adapter->ccw_device->dev;
+	port->dev.release = zfcp_port_release;
 
-	if (dev_set_name(&port->sysfs_device, "0x%016llx",
-			 (unsigned long long)wwpn)) {
+	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
 		kfree(port);
 		goto err_out;
 	}
 	retval = -EINVAL;
 
-	if (device_register(&port->sysfs_device)) {
-		put_device(&port->sysfs_device);
+	if (device_register(&port->dev)) {
+		put_device(&port->dev);
 		goto err_out;
 	}
 
-	if (sysfs_create_group(&port->sysfs_device.kobj,
+	if (sysfs_create_group(&port->dev.kobj,
 			       &zfcp_sysfs_port_attrs))
 		goto err_out_put;
 
@@ -717,7 +683,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	return port;
 
 err_out_put:
-	device_unregister(&port->sysfs_device);
+	device_unregister(&port->dev);
 err_out:
 	zfcp_ccw_adapter_put(adapter);
 	return ERR_PTR(retval);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index c22cb72a5ae8..ce1cc7a11fb4 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,13 +3,14 @@
  *
  * Registration and callback for the s390 common I/O layer.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
 
 #define ZFCP_MODEL_PRIV 0x4
 
@@ -122,12 +123,10 @@ static void zfcp_ccw_remove(struct ccw_device *cdev)
 	zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
 
 	list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
-		zfcp_device_unregister(&unit->sysfs_device,
-				       &zfcp_sysfs_unit_attrs);
+		zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
 
 	list_for_each_entry_safe(port, p, &port_remove_lh, list)
-		zfcp_device_unregister(&port->sysfs_device,
-				       &zfcp_sysfs_port_attrs);
+		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 
 	zfcp_adapter_unregister(adapter);
 }
@@ -162,7 +161,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
 	}
 
 	/* initialize request counter */
-	BUG_ON(!zfcp_reqlist_isempty(adapter));
+	BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
 	adapter->req_no = 0;
 
 	zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 7369c8911bcf..7a149fd85f6d 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -140,9 +140,9 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
 	memcpy(response->fsf_status_qual,
 	       fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
 	response->fsf_req_status = fsf_req->status;
-	response->sbal_first = fsf_req->queue_req.sbal_first;
-	response->sbal_last = fsf_req->queue_req.sbal_last;
-	response->sbal_response = fsf_req->queue_req.sbal_response;
+	response->sbal_first = fsf_req->qdio_req.sbal_first;
+	response->sbal_last = fsf_req->qdio_req.sbal_last;
+	response->sbal_response = fsf_req->qdio_req.sbal_response;
 	response->pool = fsf_req->pool != NULL;
 	response->erp_action = (unsigned long)fsf_req->erp_action;
 
@@ -576,7 +576,8 @@ void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
 	struct zfcp_adapter *adapter = dbf->adapter;
 
 	zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
-			    &adapter->erp_counter, 0, 0, 0);
+			    &adapter->erp_counter, 0, 0,
+			    ZFCP_DBF_INVALID_LUN);
 }
 
 /**
@@ -590,8 +591,8 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
 	struct zfcp_dbf *dbf = port->adapter->dbf;
 
 	zfcp_dbf_rec_target(id, ref, dbf, &port->status,
 			    &port->erp_counter, port->wwpn, port->d_id,
-			    0);
+			    ZFCP_DBF_INVALID_LUN);
 }
 
 /**
@@ -642,10 +643,9 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
 		r->u.trigger.ps = atomic_read(&port->status);
 		r->u.trigger.wwpn = port->wwpn;
 	}
-	if (unit) {
+	if (unit)
 		r->u.trigger.us = atomic_read(&unit->status);
-		r->u.trigger.fcp_lun = unit->fcp_lun;
-	}
+	r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
 	debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
@@ -668,7 +668,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
 	r->u.action.action = (unsigned long)erp_action;
 	r->u.action.status = erp_action->status;
 	r->u.action.step = erp_action->step;
-	r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
+	r->u.action.fsf_req = erp_action->fsf_req_id;
 	debug_event(dbf->rec, 5, r, sizeof(*r));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 8b7fd9a1033e..457e046f2d28 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -30,6 +30,8 @@
 #define ZFCP_DBF_TAG_SIZE	4
 #define ZFCP_DBF_ID_SIZE	7
 
+#define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
+
 struct zfcp_dbf_dump {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u32 total_size;		/* size of total dump data */
@@ -192,10 +194,10 @@ struct zfcp_dbf_san_record {
 		struct zfcp_dbf_san_record_ct_response ct_resp;
 		struct zfcp_dbf_san_record_els els;
 	} u;
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
-	u8 payload[32];
 } __attribute__ ((packed));
 
+#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+
 struct zfcp_dbf_scsi_record {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u8 tag2[ZFCP_DBF_TAG_SIZE];
@@ -301,17 +303,31 @@ void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
 
 /**
  * zfcp_dbf_scsi_result - trace event for SCSI command completion
- * @tag: tag indicating success or failure of SCSI command
- * @level: trace level applicable for this event
- * @adapter: adapter that has been used to issue the SCSI command
+ * @dbf: adapter dbf trace
+ * @scmd: SCSI command pointer
+ * @req: FSF request used to issue SCSI command
+ */
+static inline
+void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
+			  struct zfcp_fsf_req *req)
+{
+	if (scmd->result != 0)
+		zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
+	else if (scmd->retries > 0)
+		zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
+	else
+		zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
+}
+
+/**
+ * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
+ * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
- * @fsf_req: request used to issue SCSI command (might be NULL)
  */
 static inline
-void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
-			  struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
 {
-	zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
+	zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
 }
 
 /**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e1b5b88e2ddb..7131c7db1f04 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
  *
  * Global definitions for the zfcp device driver.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #ifndef ZFCP_DEF_H
@@ -33,15 +33,13 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_bsg_fc.h>
 #include <asm/ccwdev.h>
-#include <asm/qdio.h>
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
 #include "zfcp_fsf.h"
+#include "zfcp_qdio.h"
 
-/********************* GENERAL DEFINES *********************************/
-
-#define REQUEST_LIST_SIZE 128
+struct zfcp_reqlist;
 
 /********************* SCSI SPECIFIC DEFINES *********************************/
 #define ZFCP_SCSI_ER_TIMEOUT	(10*HZ)
@@ -129,12 +127,6 @@ struct zfcp_adapter_mempool {
 	mempool_t *qtcb_pool;
 };
 
-struct zfcp_qdio_queue {
-	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
-	u8		   first;	/* index of next free bfr in queue */
-	atomic_t	   count;	/* number of free buffers in queue */
-};
-
 struct zfcp_erp_action {
 	struct list_head list;
 	int action;	          /* requested action code */
@@ -143,8 +135,7 @@ struct zfcp_erp_action {
 	struct zfcp_unit *unit;
 	u32		status;	      /* recovery status */
 	u32		step;	      /* active step of this erp action */
-	struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
-					 for this action */
+	unsigned long	fsf_req_id;
 	struct timer_list timer;
 };
 
@@ -167,29 +158,6 @@ struct zfcp_latencies {
 	spinlock_t lock;
 };
 
-/** struct zfcp_qdio - basic QDIO data structure
- * @resp_q: response queue
- * @req_q: request queue
- * @stat_lock: lock to protect req_q_util and req_q_time
- * @req_q_lock; lock to serialize access to request queue
- * @req_q_time: time of last fill level change
- * @req_q_util: used for accounting
- * @req_q_full: queue full incidents
- * @req_q_wq: used to wait for SBAL availability
- * @adapter: adapter used in conjunction with this QDIO structure
- */
-struct zfcp_qdio {
-	struct zfcp_qdio_queue	resp_q;
-	struct zfcp_qdio_queue	req_q;
-	spinlock_t		stat_lock;
-	spinlock_t		req_q_lock;
-	unsigned long long	req_q_time;
-	u64			req_q_util;
-	atomic_t		req_q_full;
-	wait_queue_head_t	req_q_wq;
-	struct zfcp_adapter	*adapter;
-};
-
 struct zfcp_adapter {
 	struct kref		ref;
 	u64			peer_wwnn;	   /* P2P peer WWNN */
@@ -207,8 +175,7 @@ struct zfcp_adapter {
 	struct list_head	port_list;	   /* remote port list */
 	rwlock_t		port_list_lock;    /* port list lock */
 	unsigned long		req_no;		   /* unique FSF req number */
-	struct list_head	*req_list;	   /* list of pending reqs */
-	spinlock_t		req_list_lock;	   /* request list lock */
+	struct zfcp_reqlist	*req_list;
 	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
 	rwlock_t		abort_lock;        /* Protects against SCSI
 						      stack abort/command
@@ -241,7 +208,7 @@ struct zfcp_adapter {
 };
 
 struct zfcp_port {
-	struct device          sysfs_device;   /* sysfs device */
+	struct device          dev;
 	struct fc_rport        *rport;         /* rport of fc transport class */
 	struct list_head       list;	       /* list of remote ports */
 	struct zfcp_adapter    *adapter;       /* adapter used to access port */
@@ -263,7 +230,7 @@ struct zfcp_port {
 };
 
 struct zfcp_unit {
-	struct device          sysfs_device;   /* sysfs device */
+	struct device          dev;
 	struct list_head       list;	       /* list of logical units */
 	struct zfcp_port       *port;	       /* remote port of unit */
 	atomic_t	       status;	       /* status of this logical unit */
@@ -277,33 +244,11 @@ struct zfcp_unit {
 };
 
 /**
- * struct zfcp_queue_req - queue related values for a request
- * @sbal_number: number of free SBALs
- * @sbal_first: first SBAL for this request
- * @sbal_last: last SBAL for this request
- * @sbal_limit: last possible SBAL for this request
- * @sbale_curr: current SBALE at creation of this request
- * @sbal_response: SBAL used in interrupt
- * @qdio_outb_usage: usage of outbound queue
- * @qdio_inb_usage: usage of inbound queue
- */
-struct zfcp_queue_req {
-	u8		       sbal_number;
-	u8		       sbal_first;
-	u8		       sbal_last;
-	u8		       sbal_limit;
-	u8		       sbale_curr;
-	u8		       sbal_response;
-	u16		       qdio_outb_usage;
-	u16		       qdio_inb_usage;
-};
-
-/**
  * struct zfcp_fsf_req - basic FSF request structure
  * @list: list of FSF requests
  * @req_id: unique request ID
  * @adapter: adapter this request belongs to
- * @queue_req: queue related values
+ * @qdio_req: qdio queue related values
  * @completion: used to signal the completion of the request
  * @status: status of the request
  * @fsf_command: FSF command issued
@@ -321,7 +266,7 @@ struct zfcp_fsf_req {
 	struct list_head	list;
 	unsigned long		req_id;
 	struct zfcp_adapter	*adapter;
-	struct zfcp_queue_req	queue_req;
+	struct zfcp_qdio_req	qdio_req;
 	struct completion	completion;
 	u32			status;
 	u32			fsf_command;
@@ -352,45 +297,4 @@ struct zfcp_data {
 #define ZFCP_SET                0x00000100
 #define ZFCP_CLEAR              0x00000200
 
-/*
- * Helper functions for request ID management.
- */
-static inline int zfcp_reqlist_hash(unsigned long req_id)
-{
-	return req_id % REQUEST_LIST_SIZE;
-}
-
-static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
-				       struct zfcp_fsf_req *fsf_req)
-{
-	list_del(&fsf_req->list);
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
-{
-	struct zfcp_fsf_req *request;
-	unsigned int idx;
-
-	idx = zfcp_reqlist_hash(req_id);
-	list_for_each_entry(request, &adapter->req_list[idx], list)
-		if (request->req_id == req_id)
-			return request;
-	return NULL;
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
-{
-	struct zfcp_fsf_req *request;
-	unsigned int idx;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
-		list_for_each_entry(request, &adapter->req_list[idx], list)
-			if (request == req)
-				return request;
-	}
-	return NULL;
-}
-
 #endif /* ZFCP_DEF_H */
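
All of the open-coded request-list helpers deleted here (and the allocator removed from zfcp_aux.c earlier in this patch) move into the new drivers/s390/scsi/zfcp_reqlist.h, whose 183 lines are not part of this excerpt. The following is only a sketch of its likely shape, inferred from the call sites visible in this patch (zfcp_reqlist_alloc(), zfcp_reqlist_isempty(), _zfcp_reqlist_find(), zfcp_reqlist_move(), and the adapter->req_list->lock accesses in zfcp_erp.c); the bucket count and field names are assumptions:

#define ZFCP_REQ_LIST_BUCKETS 128	/* assumed; plays the role of REQUEST_LIST_SIZE */

struct zfcp_reqlist {
	spinlock_t lock;		/* replaces adapter->req_list_lock */
	struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
};

static inline size_t zfcp_reqlist_hash(unsigned long req_id)
{
	return req_id % ZFCP_REQ_LIST_BUCKETS;
}

/* lockless variant; the caller holds rl->lock, as zfcp_erp.c does below */
static inline struct zfcp_fsf_req *
_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
{
	struct zfcp_fsf_req *req;

	list_for_each_entry(req, &rl->buckets[zfcp_reqlist_hash(req_id)], list)
		if (req->req_id == req_id)
			return req;
	return NULL;
}

/* drain every bucket onto one list; used by zfcp_fsf_req_dismiss_all() */
static inline void
zfcp_reqlist_move(struct zfcp_reqlist *rl, struct list_head *list)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&rl->lock, flags);
	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
		list_splice_init(&rl->buckets[i], list);
	spin_unlock_irqrestore(&rl->lock, flags);
}

Encapsulating the table and its lock in one object is what lets the dismiss path in zfcp_fsf.c shrink to a single zfcp_reqlist_move() call, as the hunk further down shows.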
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index b51a11a82e63..0be5e7ea2828 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
 
 #include <linux/kthread.h>
 #include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
 
 #define ZFCP_MAX_ERPS                   3
 
@@ -174,7 +175,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 
 	switch (need) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		if (!get_device(&unit->sysfs_device))
+		if (!get_device(&unit->dev))
 			return NULL;
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
 		erp_action = &unit->erp_action;
@@ -184,7 +185,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		if (!get_device(&port->sysfs_device))
+		if (!get_device(&port->dev))
 			return NULL;
 		zfcp_erp_action_dismiss_port(port);
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
@@ -478,26 +479,27 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
 static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_fsf_req *req;
 
-	if (!act->fsf_req)
+	if (!act->fsf_req_id)
 		return;
 
-	spin_lock(&adapter->req_list_lock);
-	if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
-	    act->fsf_req->erp_action == act) {
+	spin_lock(&adapter->req_list->lock);
+	req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
+	if (req && req->erp_action == act) {
 		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
 				   ZFCP_STATUS_ERP_TIMEDOUT)) {
-			act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
 			zfcp_dbf_rec_action("erscf_1", act);
-			act->fsf_req->erp_action = NULL;
+			req->erp_action = NULL;
 		}
 		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
 			zfcp_dbf_rec_action("erscf_2", act);
-		if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
-			act->fsf_req = NULL;
+		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+			act->fsf_req_id = 0;
 	} else
-		act->fsf_req = NULL;
-	spin_unlock(&adapter->req_list_lock);
+		act->fsf_req_id = 0;
+	spin_unlock(&adapter->req_list->lock);
 }
 
 /**
@@ -1179,19 +1181,19 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
-			get_device(&unit->sysfs_device);
+			get_device(&unit->dev);
 			if (scsi_queue_work(unit->port->adapter->scsi_host,
 					    &unit->scsi_work) <= 0)
-				put_device(&unit->sysfs_device);
+				put_device(&unit->dev);
 		}
-		put_device(&unit->sysfs_device);
+		put_device(&unit->dev);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (result == ZFCP_ERP_SUCCEEDED)
 			zfcp_scsi_schedule_rport_register(port);
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 66bdb34143cb..8786a79c7f8f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -21,7 +21,6 @@ extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
 extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
 					   u32);
 extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
-extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
 extern void zfcp_sg_free_table(struct scatterlist *, int);
 extern int zfcp_sg_setup_table(struct scatterlist *, int);
 extern void zfcp_device_unregister(struct device *,
@@ -144,13 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
 /* zfcp_qdio.c */
 extern int zfcp_qdio_setup(struct zfcp_adapter *);
 extern void zfcp_qdio_destroy(struct zfcp_qdio *);
-extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
-	*zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
-	*zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
 extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
-				   struct zfcp_queue_req *, unsigned long,
+				   struct zfcp_qdio_req *, unsigned long,
 				   struct scatterlist *, int);
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 271399f62f1b..5219670f0c99 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
  *
  * Fibre Channel related functions for the zfcp device driver.
  *
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -316,7 +316,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 
 	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
 out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 }
 
 /**
@@ -325,9 +325,9 @@ out:
  */
 void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
 {
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 }
 
 /**
@@ -389,7 +389,7 @@ static void zfcp_fc_adisc_handler(void *data)
 		zfcp_scsi_schedule_rport_register(port);
  out:
 	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 	kmem_cache_free(zfcp_data.adisc_cache, adisc);
 }
 
@@ -436,7 +436,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 		container_of(work, struct zfcp_port, test_link_work);
 	int retval;
 
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	port->rport_task = RPORT_DEL;
 	zfcp_scsi_rport_work(&port->rport_work);
 
@@ -455,7 +455,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 		zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
 
 out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 }
 
 /**
@@ -468,9 +468,9 @@ out:
  */
 void zfcp_fc_test_link(struct zfcp_port *port)
 {
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 }
 
 static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
@@ -617,8 +617,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
 
 	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
 		zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
-		zfcp_device_unregister(&port->sysfs_device,
-				       &zfcp_sysfs_port_attrs);
+		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 	}
 
 	return ret;
@@ -731,7 +730,7 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
731 return -EINVAL; 730 return -EINVAL;
732 731
733 d_id = port->d_id; 732 d_id = port->d_id;
734 put_device(&port->sysfs_device); 733 put_device(&port->dev);
735 } else 734 } else
736 d_id = ntoh24(job->request->rqst_data.h_els.port_id); 735 d_id = ntoh24(job->request->rqst_data.h_els.port_id);
737 736
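
The sysfs_device -> dev rename above is mechanical, but the get_device/put_device pairs around queue_work follow a rule worth spelling out: the queued worker owns a device reference for as long as it may still run. A minimal sketch of the pattern used by zfcp_fc_trigger_did_lookup and zfcp_fc_test_link (names taken from the hunks above; error paths trimmed):

	/* Sketch: hand a port reference to a queued worker. */
	static void trigger_lookup_sketch(struct zfcp_port *port)
	{
		get_device(&port->dev);		/* reference for the worker */

		/* queue_work() returns false if this work item is already
		 * pending; no new reference is consumed in that case. */
		if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
			put_device(&port->dev);
	}

	/* The work function releases the reference when done; that is the
	 * put_device(&port->dev) at its out: label in the hunks above. */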
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e8fb4d9baa8b..6538742b421a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Implementation of FSF commands. 4 * Implementation of FSF commands.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,8 @@
14#include "zfcp_ext.h" 14#include "zfcp_ext.h"
15#include "zfcp_fc.h" 15#include "zfcp_fc.h"
16#include "zfcp_dbf.h" 16#include "zfcp_dbf.h"
17#include "zfcp_qdio.h"
18#include "zfcp_reqlist.h"
17 19
18static void zfcp_fsf_request_timeout_handler(unsigned long data) 20static void zfcp_fsf_request_timeout_handler(unsigned long data)
19{ 21{
@@ -393,7 +395,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
393 case FSF_PROT_LINK_DOWN: 395 case FSF_PROT_LINK_DOWN:
394 zfcp_fsf_link_down_info_eval(req, "fspse_5", 396 zfcp_fsf_link_down_info_eval(req, "fspse_5",
395 &psq->link_down_info); 397 &psq->link_down_info);
396 /* FIXME: reopening adapter now? better wait for link up */ 398 /* go through reopen to flush pending requests */
397 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); 399 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
398 break; 400 break;
399 case FSF_PROT_REEST_QUEUE: 401 case FSF_PROT_REEST_QUEUE:
@@ -457,15 +459,10 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
457void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 459void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
458{ 460{
459 struct zfcp_fsf_req *req, *tmp; 461 struct zfcp_fsf_req *req, *tmp;
460 unsigned long flags;
461 LIST_HEAD(remove_queue); 462 LIST_HEAD(remove_queue);
462 unsigned int i;
463 463
464 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); 464 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
465 spin_lock_irqsave(&adapter->req_list_lock, flags); 465 zfcp_reqlist_move(adapter->req_list, &remove_queue);
466 for (i = 0; i < REQUEST_LIST_SIZE; i++)
467 list_splice_init(&adapter->req_list[i], &remove_queue);
468 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
469 466
470 list_for_each_entry_safe(req, tmp, &remove_queue, list) { 467 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
471 list_del(&req->list); 468 list_del(&req->list);
@@ -495,8 +492,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
495 fc_host_port_id(shost) = ntoh24(bottom->s_id); 492 fc_host_port_id(shost) = ntoh24(bottom->s_id);
496 fc_host_speed(shost) = bottom->fc_link_speed; 493 fc_host_speed(shost) = bottom->fc_link_speed;
497 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 494 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
498 fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
499 fc_host_active_fc4s(shost)[2] = 1; /* FCP */
500 495
501 adapter->hydra_version = bottom->adapter_type; 496 adapter->hydra_version = bottom->adapter_type;
502 adapter->timer_ticks = bottom->timer_interval; 497 adapter->timer_ticks = bottom->timer_interval;
@@ -619,6 +614,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
619 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 614 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
620 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 615 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
621 fc_host_supported_speeds(shost) = bottom->supported_speed; 616 fc_host_supported_speeds(shost) = bottom->supported_speed;
617 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
618 FC_FC4_LIST_SIZE);
619 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
620 FC_FC4_LIST_SIZE);
622} 621}
623 622
624static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 623static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -725,12 +724,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
725 req->adapter = adapter; 724 req->adapter = adapter;
726 req->fsf_command = fsf_cmd; 725 req->fsf_command = fsf_cmd;
727 req->req_id = adapter->req_no; 726 req->req_id = adapter->req_no;
728 req->queue_req.sbal_number = 1; 727 req->qdio_req.sbal_number = 1;
729 req->queue_req.sbal_first = req_q->first; 728 req->qdio_req.sbal_first = req_q->first;
730 req->queue_req.sbal_last = req_q->first; 729 req->qdio_req.sbal_last = req_q->first;
731 req->queue_req.sbale_curr = 1; 730 req->qdio_req.sbale_curr = 1;
732 731
733 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 732 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
734 sbale[0].addr = (void *) req->req_id; 733 sbale[0].addr = (void *) req->req_id;
735 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 734 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
736 735
@@ -745,6 +744,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
745 return ERR_PTR(-ENOMEM); 744 return ERR_PTR(-ENOMEM);
746 } 745 }
747 746
747 req->seq_no = adapter->fsf_req_seq_no;
748 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 748 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
749 req->qtcb->prefix.req_id = req->req_id; 749 req->qtcb->prefix.req_id = req->req_id;
750 req->qtcb->prefix.ulp_info = 26; 750 req->qtcb->prefix.ulp_info = 26;
@@ -752,8 +752,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
752 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 752 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
753 req->qtcb->header.req_handle = req->req_id; 753 req->qtcb->header.req_handle = req->req_id;
754 req->qtcb->header.fsf_command = req->fsf_command; 754 req->qtcb->header.fsf_command = req->fsf_command;
755 req->seq_no = adapter->fsf_req_seq_no;
756 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
757 sbale[1].addr = (void *) req->qtcb; 755 sbale[1].addr = (void *) req->qtcb;
758 sbale[1].length = sizeof(struct fsf_qtcb); 756 sbale[1].length = sizeof(struct fsf_qtcb);
759 } 757 }
@@ -770,25 +768,17 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
770{ 768{
771 struct zfcp_adapter *adapter = req->adapter; 769 struct zfcp_adapter *adapter = req->adapter;
772 struct zfcp_qdio *qdio = adapter->qdio; 770 struct zfcp_qdio *qdio = adapter->qdio;
773 unsigned long flags; 771 int with_qtcb = (req->qtcb != NULL);
774 int idx; 772 int req_id = req->req_id;
775 int with_qtcb = (req->qtcb != NULL);
776 773
777 /* put allocated FSF request into hash table */ 774 zfcp_reqlist_add(adapter->req_list, req);
778 spin_lock_irqsave(&adapter->req_list_lock, flags);
779 idx = zfcp_reqlist_hash(req->req_id);
780 list_add_tail(&req->list, &adapter->req_list[idx]);
781 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
782 775
783 req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); 776 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
784 req->issued = get_clock(); 777 req->issued = get_clock();
785 if (zfcp_qdio_send(qdio, &req->queue_req)) { 778 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
786 del_timer(&req->timer); 779 del_timer(&req->timer);
787 spin_lock_irqsave(&adapter->req_list_lock, flags);
788 /* lookup request again, list might have changed */ 780 /* lookup request again, list might have changed */
789 if (zfcp_reqlist_find_safe(adapter, req)) 781 zfcp_reqlist_find_rm(adapter->req_list, req_id);
790 zfcp_reqlist_remove(adapter, req);
791 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
792 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); 782 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
793 return -EIO; 783 return -EIO;
794 } 784 }
@@ -826,9 +816,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
826 goto out; 816 goto out;
827 } 817 }
828 818
829 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 819 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
830 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; 820 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
831 req->queue_req.sbale_curr = 2; 821 req->qdio_req.sbale_curr = 2;
832 822
833 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); 823 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
834 if (!sr_buf) { 824 if (!sr_buf) {
@@ -837,7 +827,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
837 } 827 }
838 memset(sr_buf, 0, sizeof(*sr_buf)); 828 memset(sr_buf, 0, sizeof(*sr_buf));
839 req->data = sr_buf; 829 req->data = sr_buf;
840 sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req); 830 sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
841 sbale->addr = (void *) sr_buf; 831 sbale->addr = (void *) sr_buf;
842 sbale->length = sizeof(*sr_buf); 832 sbale->length = sizeof(*sr_buf);
843 833
@@ -934,7 +924,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
934 ZFCP_STATUS_COMMON_UNBLOCKED))) 924 ZFCP_STATUS_COMMON_UNBLOCKED)))
935 goto out_error_free; 925 goto out_error_free;
936 926
937 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 927 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
938 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 928 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
939 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 929 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
940 930
@@ -1029,7 +1019,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1029{ 1019{
1030 struct zfcp_adapter *adapter = req->adapter; 1020 struct zfcp_adapter *adapter = req->adapter;
1031 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, 1021 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
1032 &req->queue_req); 1022 &req->qdio_req);
1033 u32 feat = adapter->adapter_features; 1023 u32 feat = adapter->adapter_features;
1034 int bytes; 1024 int bytes;
1035 1025
@@ -1047,15 +1037,15 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1047 return 0; 1037 return 0;
1048 } 1038 }
1049 1039
1050 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, 1040 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
1051 SBAL_FLAGS0_TYPE_WRITE_READ, 1041 SBAL_FLAGS0_TYPE_WRITE_READ,
1052 sg_req, max_sbals); 1042 sg_req, max_sbals);
1053 if (bytes <= 0) 1043 if (bytes <= 0)
1054 return -EIO; 1044 return -EIO;
1055 req->qtcb->bottom.support.req_buf_length = bytes; 1045 req->qtcb->bottom.support.req_buf_length = bytes;
1056 req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; 1046 req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1057 1047
1058 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, 1048 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
1059 SBAL_FLAGS0_TYPE_WRITE_READ, 1049 SBAL_FLAGS0_TYPE_WRITE_READ,
1060 sg_resp, max_sbals); 1050 sg_resp, max_sbals);
1061 req->qtcb->bottom.support.resp_buf_length = bytes; 1051 req->qtcb->bottom.support.resp_buf_length = bytes;
@@ -1251,7 +1241,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1251 } 1241 }
1252 1242
1253 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1243 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1254 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1244 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1255 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1245 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1256 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1246 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1257 1247
@@ -1262,13 +1252,13 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1262 FSF_FEATURE_UPDATE_ALERT; 1252 FSF_FEATURE_UPDATE_ALERT;
1263 req->erp_action = erp_action; 1253 req->erp_action = erp_action;
1264 req->handler = zfcp_fsf_exchange_config_data_handler; 1254 req->handler = zfcp_fsf_exchange_config_data_handler;
1265 erp_action->fsf_req = req; 1255 erp_action->fsf_req_id = req->req_id;
1266 1256
1267 zfcp_fsf_start_erp_timer(req); 1257 zfcp_fsf_start_erp_timer(req);
1268 retval = zfcp_fsf_req_send(req); 1258 retval = zfcp_fsf_req_send(req);
1269 if (retval) { 1259 if (retval) {
1270 zfcp_fsf_req_free(req); 1260 zfcp_fsf_req_free(req);
1271 erp_action->fsf_req = NULL; 1261 erp_action->fsf_req_id = 0;
1272 } 1262 }
1273out: 1263out:
1274 spin_unlock_bh(&qdio->req_q_lock); 1264 spin_unlock_bh(&qdio->req_q_lock);
@@ -1293,7 +1283,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1293 goto out_unlock; 1283 goto out_unlock;
1294 } 1284 }
1295 1285
1296 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1286 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1297 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1287 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1298 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1288 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1299 req->handler = zfcp_fsf_exchange_config_data_handler; 1289 req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1349,19 +1339,19 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1349 } 1339 }
1350 1340
1351 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1341 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1352 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1342 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1353 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1343 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1354 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1344 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1355 1345
1356 req->handler = zfcp_fsf_exchange_port_data_handler; 1346 req->handler = zfcp_fsf_exchange_port_data_handler;
1357 req->erp_action = erp_action; 1347 req->erp_action = erp_action;
1358 erp_action->fsf_req = req; 1348 erp_action->fsf_req_id = req->req_id;
1359 1349
1360 zfcp_fsf_start_erp_timer(req); 1350 zfcp_fsf_start_erp_timer(req);
1361 retval = zfcp_fsf_req_send(req); 1351 retval = zfcp_fsf_req_send(req);
1362 if (retval) { 1352 if (retval) {
1363 zfcp_fsf_req_free(req); 1353 zfcp_fsf_req_free(req);
1364 erp_action->fsf_req = NULL; 1354 erp_action->fsf_req_id = 0;
1365 } 1355 }
1366out: 1356out:
1367 spin_unlock_bh(&qdio->req_q_lock); 1357 spin_unlock_bh(&qdio->req_q_lock);
@@ -1398,7 +1388,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1398 if (data) 1388 if (data)
1399 req->data = data; 1389 req->data = data;
1400 1390
1401 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1391 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1402 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1392 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1403 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1393 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1404 1394
@@ -1484,7 +1474,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1484 } 1474 }
1485 1475
1486out: 1476out:
1487 put_device(&port->sysfs_device); 1477 put_device(&port->dev);
1488} 1478}
1489 1479
1490/** 1480/**
@@ -1513,7 +1503,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1513 } 1503 }
1514 1504
1515 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1505 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1516 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1506 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1517 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1507 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1518 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1508 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1519 1509
@@ -1521,15 +1511,15 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1521 hton24(req->qtcb->bottom.support.d_id, port->d_id); 1511 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1522 req->data = port; 1512 req->data = port;
1523 req->erp_action = erp_action; 1513 req->erp_action = erp_action;
1524 erp_action->fsf_req = req; 1514 erp_action->fsf_req_id = req->req_id;
1525 get_device(&port->sysfs_device); 1515 get_device(&port->dev);
1526 1516
1527 zfcp_fsf_start_erp_timer(req); 1517 zfcp_fsf_start_erp_timer(req);
1528 retval = zfcp_fsf_req_send(req); 1518 retval = zfcp_fsf_req_send(req);
1529 if (retval) { 1519 if (retval) {
1530 zfcp_fsf_req_free(req); 1520 zfcp_fsf_req_free(req);
1531 erp_action->fsf_req = NULL; 1521 erp_action->fsf_req_id = 0;
1532 put_device(&port->sysfs_device); 1522 put_device(&port->dev);
1533 } 1523 }
1534out: 1524out:
1535 spin_unlock_bh(&qdio->req_q_lock); 1525 spin_unlock_bh(&qdio->req_q_lock);
@@ -1583,7 +1573,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1583 } 1573 }
1584 1574
1585 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1575 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1586 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1576 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1587 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1577 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1588 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1578 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1589 1579
@@ -1591,13 +1581,13 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1591 req->data = erp_action->port; 1581 req->data = erp_action->port;
1592 req->erp_action = erp_action; 1582 req->erp_action = erp_action;
1593 req->qtcb->header.port_handle = erp_action->port->handle; 1583 req->qtcb->header.port_handle = erp_action->port->handle;
1594 erp_action->fsf_req = req; 1584 erp_action->fsf_req_id = req->req_id;
1595 1585
1596 zfcp_fsf_start_erp_timer(req); 1586 zfcp_fsf_start_erp_timer(req);
1597 retval = zfcp_fsf_req_send(req); 1587 retval = zfcp_fsf_req_send(req);
1598 if (retval) { 1588 if (retval) {
1599 zfcp_fsf_req_free(req); 1589 zfcp_fsf_req_free(req);
1600 erp_action->fsf_req = NULL; 1590 erp_action->fsf_req_id = 0;
1601 } 1591 }
1602out: 1592out:
1603 spin_unlock_bh(&qdio->req_q_lock); 1593 spin_unlock_bh(&qdio->req_q_lock);
@@ -1660,7 +1650,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1660 } 1650 }
1661 1651
1662 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1652 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1663 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1653 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1664 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1654 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1665 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1655 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1666 1656
@@ -1715,7 +1705,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1715 } 1705 }
1716 1706
1717 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1707 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1718 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1708 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1719 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1709 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1720 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1710 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1721 1711
@@ -1809,7 +1799,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1809 } 1799 }
1810 1800
1811 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1801 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1812 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1802 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1813 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1803 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1814 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1804 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1815 1805
@@ -1817,13 +1807,13 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1817 req->qtcb->header.port_handle = erp_action->port->handle; 1807 req->qtcb->header.port_handle = erp_action->port->handle;
1818 req->erp_action = erp_action; 1808 req->erp_action = erp_action;
1819 req->handler = zfcp_fsf_close_physical_port_handler; 1809 req->handler = zfcp_fsf_close_physical_port_handler;
1820 erp_action->fsf_req = req; 1810 erp_action->fsf_req_id = req->req_id;
1821 1811
1822 zfcp_fsf_start_erp_timer(req); 1812 zfcp_fsf_start_erp_timer(req);
1823 retval = zfcp_fsf_req_send(req); 1813 retval = zfcp_fsf_req_send(req);
1824 if (retval) { 1814 if (retval) {
1825 zfcp_fsf_req_free(req); 1815 zfcp_fsf_req_free(req);
1826 erp_action->fsf_req = NULL; 1816 erp_action->fsf_req_id = 0;
1827 } 1817 }
1828out: 1818out:
1829 spin_unlock_bh(&qdio->req_q_lock); 1819 spin_unlock_bh(&qdio->req_q_lock);
@@ -1982,7 +1972,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1982 } 1972 }
1983 1973
1984 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1974 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1985 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1975 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1986 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1976 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1987 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1977 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1988 1978
@@ -1991,7 +1981,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1991 req->handler = zfcp_fsf_open_unit_handler; 1981 req->handler = zfcp_fsf_open_unit_handler;
1992 req->data = erp_action->unit; 1982 req->data = erp_action->unit;
1993 req->erp_action = erp_action; 1983 req->erp_action = erp_action;
1994 erp_action->fsf_req = req; 1984 erp_action->fsf_req_id = req->req_id;
1995 1985
1996 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1986 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1997 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 1987 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@@ -2000,7 +1990,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2000 retval = zfcp_fsf_req_send(req); 1990 retval = zfcp_fsf_req_send(req);
2001 if (retval) { 1991 if (retval) {
2002 zfcp_fsf_req_free(req); 1992 zfcp_fsf_req_free(req);
2003 erp_action->fsf_req = NULL; 1993 erp_action->fsf_req_id = 0;
2004 } 1994 }
2005out: 1995out:
2006 spin_unlock_bh(&qdio->req_q_lock); 1996 spin_unlock_bh(&qdio->req_q_lock);
@@ -2068,7 +2058,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2068 } 2058 }
2069 2059
2070 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2060 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2071 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2061 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2072 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2062 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2073 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2063 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2074 2064
@@ -2077,13 +2067,13 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2077 req->handler = zfcp_fsf_close_unit_handler; 2067 req->handler = zfcp_fsf_close_unit_handler;
2078 req->data = erp_action->unit; 2068 req->data = erp_action->unit;
2079 req->erp_action = erp_action; 2069 req->erp_action = erp_action;
2080 erp_action->fsf_req = req; 2070 erp_action->fsf_req_id = req->req_id;
2081 2071
2082 zfcp_fsf_start_erp_timer(req); 2072 zfcp_fsf_start_erp_timer(req);
2083 retval = zfcp_fsf_req_send(req); 2073 retval = zfcp_fsf_req_send(req);
2084 if (retval) { 2074 if (retval) {
2085 zfcp_fsf_req_free(req); 2075 zfcp_fsf_req_free(req);
2086 erp_action->fsf_req = NULL; 2076 erp_action->fsf_req_id = 0;
2087 } 2077 }
2088out: 2078out:
2089 spin_unlock_bh(&qdio->req_q_lock); 2079 spin_unlock_bh(&qdio->req_q_lock);
@@ -2111,8 +2101,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2111 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; 2101 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2112 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2102 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2113 blktrc.flags |= ZFCP_BLK_REQ_ERROR; 2103 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2114 blktrc.inb_usage = req->queue_req.qdio_inb_usage; 2104 blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
2115 blktrc.outb_usage = req->queue_req.qdio_outb_usage; 2105 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2116 2106
2117 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { 2107 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
2118 blktrc.flags |= ZFCP_BLK_LAT_VALID; 2108 blktrc.flags |= ZFCP_BLK_LAT_VALID;
@@ -2169,12 +2159,7 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2169 zfcp_fsf_req_trace(req, scpnt); 2159 zfcp_fsf_req_trace(req, scpnt);
2170 2160
2171skip_fsfstatus: 2161skip_fsfstatus:
2172 if (scpnt->result != 0) 2162 zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
2173 zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
2174 else if (scpnt->retries > 0)
2175 zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
2176 else
2177 zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
2178 2163
2179 scpnt->host_scribble = NULL; 2164 scpnt->host_scribble = NULL;
2180 (scpnt->scsi_done) (scpnt); 2165 (scpnt->scsi_done) (scpnt);
@@ -2274,7 +2259,7 @@ skip_fsfstatus:
2274 else { 2259 else {
2275 zfcp_fsf_send_fcp_command_task_handler(req); 2260 zfcp_fsf_send_fcp_command_task_handler(req);
2276 req->unit = NULL; 2261 req->unit = NULL;
2277 put_device(&unit->sysfs_device); 2262 put_device(&unit->dev);
2278 } 2263 }
2279} 2264}
2280 2265
@@ -2312,7 +2297,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2312 } 2297 }
2313 2298
2314 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2299 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2315 get_device(&unit->sysfs_device); 2300 get_device(&unit->dev);
2316 req->unit = unit; 2301 req->unit = unit;
2317 req->data = scsi_cmnd; 2302 req->data = scsi_cmnd;
2318 req->handler = zfcp_fsf_send_fcp_command_handler; 2303 req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2346,11 +2331,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2346 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2331 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2347 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2332 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2348 2333
2349 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, 2334 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
2350 scsi_sglist(scsi_cmnd), 2335 scsi_sglist(scsi_cmnd),
2351 FSF_MAX_SBALS_PER_REQ); 2336 FSF_MAX_SBALS_PER_REQ);
2352 if (unlikely(real_bytes < 0)) { 2337 if (unlikely(real_bytes < 0)) {
2353 if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { 2338 if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2354 dev_err(&adapter->ccw_device->dev, 2339 dev_err(&adapter->ccw_device->dev,
2355 "Oversize data package, unit 0x%016Lx " 2340 "Oversize data package, unit 0x%016Lx "
2356 "on port 0x%016Lx closed\n", 2341 "on port 0x%016Lx closed\n",
@@ -2369,7 +2354,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2369 goto out; 2354 goto out;
2370 2355
2371failed_scsi_cmnd: 2356failed_scsi_cmnd:
2372 put_device(&unit->sysfs_device); 2357 put_device(&unit->dev);
2373 zfcp_fsf_req_free(req); 2358 zfcp_fsf_req_free(req);
2374 scsi_cmnd->host_scribble = NULL; 2359 scsi_cmnd->host_scribble = NULL;
2375out: 2360out:
@@ -2415,7 +2400,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2415 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2400 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2416 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2401 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2417 2402
2418 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2403 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2419 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2404 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2420 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2405 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2421 2406
@@ -2478,14 +2463,14 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2478 2463
2479 req->handler = zfcp_fsf_control_file_handler; 2464 req->handler = zfcp_fsf_control_file_handler;
2480 2465
2481 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2466 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2482 sbale[0].flags |= direction; 2467 sbale[0].flags |= direction;
2483 2468
2484 bottom = &req->qtcb->bottom.support; 2469 bottom = &req->qtcb->bottom.support;
2485 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; 2470 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2486 bottom->option = fsf_cfdc->option; 2471 bottom->option = fsf_cfdc->option;
2487 2472
2488 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, 2473 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2489 direction, fsf_cfdc->sg, 2474 direction, fsf_cfdc->sg,
2490 FSF_MAX_SBALS_PER_REQ); 2475 FSF_MAX_SBALS_PER_REQ);
2491 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2476 if (bytes != ZFCP_CFDC_MAX_SIZE) {
@@ -2516,15 +2501,14 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2516 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; 2501 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
2517 struct qdio_buffer_element *sbale; 2502 struct qdio_buffer_element *sbale;
2518 struct zfcp_fsf_req *fsf_req; 2503 struct zfcp_fsf_req *fsf_req;
2519 unsigned long flags, req_id; 2504 unsigned long req_id;
2520 int idx; 2505 int idx;
2521 2506
2522 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { 2507 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2523 2508
2524 sbale = &sbal->element[idx]; 2509 sbale = &sbal->element[idx];
2525 req_id = (unsigned long) sbale->addr; 2510 req_id = (unsigned long) sbale->addr;
2526 spin_lock_irqsave(&adapter->req_list_lock, flags); 2511 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2527 fsf_req = zfcp_reqlist_find(adapter, req_id);
2528 2512
2529 if (!fsf_req) 2513 if (!fsf_req)
2530 /* 2514 /*
@@ -2534,11 +2518,8 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2534 panic("error: unknown req_id (%lx) on adapter %s.\n", 2518 panic("error: unknown req_id (%lx) on adapter %s.\n",
2535 req_id, dev_name(&adapter->ccw_device->dev)); 2519 req_id, dev_name(&adapter->ccw_device->dev));
2536 2520
2537 list_del(&fsf_req->list); 2521 fsf_req->qdio_req.sbal_response = sbal_idx;
2538 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 2522 fsf_req->qdio_req.qdio_inb_usage =
2539
2540 fsf_req->queue_req.sbal_response = sbal_idx;
2541 fsf_req->queue_req.qdio_inb_usage =
2542 atomic_read(&qdio->resp_q.count); 2523 atomic_read(&qdio->resp_q.count);
2543 zfcp_fsf_req_complete(fsf_req); 2524 zfcp_fsf_req_complete(fsf_req);
2544 2525
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6c5228b627fc..71b97ff77cf0 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -10,6 +10,7 @@
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13#include "zfcp_qdio.h"
13 14
14#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) 15#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
15 16
@@ -28,12 +29,6 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
28 return 0; 29 return 0;
29} 30}
30 31
31static struct qdio_buffer_element *
32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
33{
34 return &q->sbal[sbal_idx]->element[sbale_idx];
35}
36
37static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) 32static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
38{ 33{
39 struct zfcp_adapter *adapter = qdio->adapter; 34 struct zfcp_adapter *adapter = qdio->adapter;
@@ -106,7 +101,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
106 101
107 if (unlikely(retval)) { 102 if (unlikely(retval)) {
108 atomic_set(&queue->count, count); 103 atomic_set(&queue->count, count);
109 /* FIXME: Recover this with an adapter reopen? */ 104 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
110 } else { 105 } else {
111 queue->first += count; 106 queue->first += count;
112 queue->first %= QDIO_MAX_BUFFERS_PER_Q; 107 queue->first %= QDIO_MAX_BUFFERS_PER_Q;
@@ -145,32 +140,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
145 zfcp_qdio_resp_put_back(qdio, count); 140 zfcp_qdio_resp_put_back(qdio, count);
146} 141}
147 142
148/**
149 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
150 * @qdio: pointer to struct zfcp_qdio
151 * @q_rec: pointer to struct zfcp_queue_rec
152 * Returns: pointer to qdio_buffer_element (SBALE) structure
153 */
154struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
155 struct zfcp_queue_req *q_req)
156{
157 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
158}
159
160/**
161 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
162 * @fsf_req: pointer to struct fsf_req
163 * Returns: pointer to qdio_buffer_element (SBALE) structure
164 */
165struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
166 struct zfcp_queue_req *q_req)
167{
168 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
169 q_req->sbale_curr);
170}
171
172static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, 143static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
173 struct zfcp_queue_req *q_req, int max_sbals) 144 struct zfcp_qdio_req *q_req, int max_sbals)
174{ 145{
175 int count = atomic_read(&qdio->req_q.count); 146 int count = atomic_read(&qdio->req_q.count);
176 count = min(count, max_sbals); 147 count = min(count, max_sbals);
@@ -179,7 +150,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
179} 150}
180 151
181static struct qdio_buffer_element * 152static struct qdio_buffer_element *
182zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, 153zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
183 unsigned long sbtype) 154 unsigned long sbtype)
184{ 155{
185 struct qdio_buffer_element *sbale; 156 struct qdio_buffer_element *sbale;
@@ -214,7 +185,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
214} 185}
215 186
216static struct qdio_buffer_element * 187static struct qdio_buffer_element *
217zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, 188zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
218 unsigned int sbtype) 189 unsigned int sbtype)
219{ 190{
220 if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 191 if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -224,7 +195,7 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
224} 195}
225 196
226static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, 197static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
227 struct zfcp_queue_req *q_req) 198 struct zfcp_qdio_req *q_req)
228{ 199{
229 struct qdio_buffer **sbal = qdio->req_q.sbal; 200 struct qdio_buffer **sbal = qdio->req_q.sbal;
230 int first = q_req->sbal_first; 201 int first = q_req->sbal_first;
@@ -235,7 +206,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
235} 206}
236 207
237static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, 208static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
238 struct zfcp_queue_req *q_req, 209 struct zfcp_qdio_req *q_req,
239 unsigned int sbtype, void *start_addr, 210 unsigned int sbtype, void *start_addr,
240 unsigned int total_length) 211 unsigned int total_length)
241{ 212{
@@ -271,8 +242,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
271 * @max_sbals: upper bound for number of SBALs to be used 242 * @max_sbals: upper bound for number of SBALs to be used
272 * Returns: number of bytes, or error (negative) 243
273 */ 244 */
274int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, 245int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
275 struct zfcp_queue_req *q_req,
276 unsigned long sbtype, struct scatterlist *sg, 246 unsigned long sbtype, struct scatterlist *sg,
277 int max_sbals) 247 int max_sbals)
278{ 248{
@@ -304,10 +274,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
304/** 274/**
305 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO 275 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
306 * @qdio: pointer to struct zfcp_qdio 276 * @qdio: pointer to struct zfcp_qdio
307 * @q_req: pointer to struct zfcp_queue_req 277 * @q_req: pointer to struct zfcp_qdio_req
308 * Returns: 0 on success, error otherwise 278 * Returns: 0 on success, error otherwise
309 */ 279 */
310int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) 280int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
311{ 281{
312 struct zfcp_qdio_queue *req_q = &qdio->req_q; 282 struct zfcp_qdio_queue *req_q = &qdio->req_q;
313 int first = q_req->sbal_first; 283 int first = q_req->sbal_first;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 000000000000..8cca54631e1e
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,109 @@
1/*
2 * zfcp device driver
3 *
4 * Header file for zfcp qdio interface
5 *
6 * Copyright IBM Corporation 2010
7 */
8
9#ifndef ZFCP_QDIO_H
10#define ZFCP_QDIO_H
11
12#include <asm/qdio.h>
13
14/**
15 * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
16 * @sbal: qdio buffers
17 * @first: index of next free buffer in queue
18 * @count: number of free buffers in queue
19 */
20struct zfcp_qdio_queue {
21 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
22 u8 first;
23 atomic_t count;
24};
25
26/**
27 * struct zfcp_qdio - basic qdio data structure
28 * @resp_q: response queue
29 * @req_q: request queue
30 * @stat_lock: lock to protect req_q_util and req_q_time
31 * @req_q_lock: lock to serialize access to request queue
32 * @req_q_time: time of last fill level change
33 * @req_q_util: used for accounting
34 * @req_q_full: queue full incidents
35 * @req_q_wq: used to wait for SBAL availability
36 * @adapter: adapter used in conjunction with this qdio structure
37 */
38struct zfcp_qdio {
39 struct zfcp_qdio_queue resp_q;
40 struct zfcp_qdio_queue req_q;
41 spinlock_t stat_lock;
42 spinlock_t req_q_lock;
43 unsigned long long req_q_time;
44 u64 req_q_util;
45 atomic_t req_q_full;
46 wait_queue_head_t req_q_wq;
47 struct zfcp_adapter *adapter;
48};
49
50/**
51 * struct zfcp_qdio_req - qdio queue related values for a request
52 * @sbal_number: number of free sbals
53 * @sbal_first: first sbal for this request
54 * @sbal_last: last sbal for this request
55 * @sbal_limit: last possible sbal for this request
56 * @sbale_curr: current sbale at creation of this request
57 * @sbal_response: sbal used in interrupt
58 * @qdio_outb_usage: usage of outbound queue
59 * @qdio_inb_usage: usage of inbound queue
60 */
61struct zfcp_qdio_req {
62 u8 sbal_number;
63 u8 sbal_first;
64 u8 sbal_last;
65 u8 sbal_limit;
66 u8 sbale_curr;
67 u8 sbal_response;
68 u16 qdio_outb_usage;
69 u16 qdio_inb_usage;
70};
71
72/**
73 * zfcp_qdio_sbale - return pointer to sbale in qdio queue
74 * @q: queue where to find sbal
75 * @sbal_idx: sbal index in queue
76 * @sbale_idx: sbale index in sbal
77 */
78static inline struct qdio_buffer_element *
79zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
80{
81 return &q->sbal[sbal_idx]->element[sbale_idx];
82}
83
84/**
85 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
86 * @qdio: pointer to struct zfcp_qdio
87 * @q_req: pointer to struct zfcp_qdio_req
88 * Returns: pointer to qdio_buffer_element (sbale) structure
89 */
90static inline struct qdio_buffer_element *
91zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
92{
93 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
94}
95
96/**
97 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
98 * @qdio: pointer to struct zfcp_qdio
99 * @q_req: pointer to struct zfcp_qdio_req
100 * Returns: pointer to qdio_buffer_element (sbale) structure
101 */
102static inline struct qdio_buffer_element *
103zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
104{
105 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
106 q_req->sbale_curr);
107}
108
109#endif /* ZFCP_QDIO_H */
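
Since the inline helpers above are the only sanctioned way to reach an SBALE, callers never index qdio->req_q.sbal directly. A hedged sketch of how zfcp_fsf_req_create (see the zfcp_fsf.c hunks above) fills the first two entries of a new request:

	/* Sketch: set up the first SBAL entries of a fresh request. */
	struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
	sbale[0].addr = (void *) req->req_id;	/* request id as tag */
	sbale[0].flags |= SBAL_FLAGS0_COMMAND;

	sbale[1].addr = (void *) req->qtcb;	/* QTCB, if the request has one */
	sbale[1].length = sizeof(struct fsf_qtcb);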
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 000000000000..a72d1b730aba
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
1/*
2 * zfcp device driver
3 *
4 * Data structure and helper functions for tracking pending FSF
5 * requests.
6 *
7 * Copyright IBM Corporation 2009
8 */
9
10#ifndef ZFCP_REQLIST_H
11#define ZFCP_REQLIST_H
12
13/* number of hash buckets */
14#define ZFCP_REQ_LIST_BUCKETS 128
15
16/**
17 * struct zfcp_reqlist - Container for request list (reqlist)
18 * @lock: Spinlock for protecting the hash list
19 * @buckets: Array of hash buckets, each one a list of requests in this bucket
20 */
21struct zfcp_reqlist {
22 spinlock_t lock;
23 struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
24};
25
26static inline int zfcp_reqlist_hash(unsigned long req_id)
27{
28 return req_id % ZFCP_REQ_LIST_BUCKETS;
29}
30
31/**
32 * zfcp_reqlist_alloc - Allocate and initialize reqlist
33 *
34 * Returns pointer to allocated reqlist on success, or NULL on
35 * allocation failure.
36 */
37static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
38{
39 unsigned int i;
40 struct zfcp_reqlist *rl;
41
42 rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
43 if (!rl)
44 return NULL;
45
46 spin_lock_init(&rl->lock);
47
48 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
49 INIT_LIST_HEAD(&rl->buckets[i]);
50
51 return rl;
52}
53
54/**
55 * zfcp_reqlist_isempty - Check whether the request list is empty
56 * @rl: pointer to reqlist
57 *
58 * Returns: 1 if list is empty, 0 if not
59 */
60static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
61{
62 unsigned int i;
63
64 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
65 if (!list_empty(&rl->buckets[i]))
66 return 0;
67 return 1;
68}
69
70/**
71 * zfcp_reqlist_free - Free allocated memory for reqlist
72 * @rl: The reqlist where to free memory
73 */
74static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
75{
76 /* sanity check */
77 BUG_ON(!zfcp_reqlist_isempty(rl));
78
79 kfree(rl);
80}
81
82static inline struct zfcp_fsf_req *
83_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
84{
85 struct zfcp_fsf_req *req;
86 unsigned int i;
87
88 i = zfcp_reqlist_hash(req_id);
89 list_for_each_entry(req, &rl->buckets[i], list)
90 if (req->req_id == req_id)
91 return req;
92 return NULL;
93}
94
95/**
96 * zfcp_reqlist_find - Lookup FSF request by its request id
97 * @rl: The reqlist where to lookup the FSF request
98 * @req_id: The request id to look for
99 *
100 * Returns a pointer to the FSF request with the specified request id
101 * or NULL if there is no known FSF request with this id.
102 */
103static inline struct zfcp_fsf_req *
104zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
105{
106 unsigned long flags;
107 struct zfcp_fsf_req *req;
108
109 spin_lock_irqsave(&rl->lock, flags);
110 req = _zfcp_reqlist_find(rl, req_id);
111 spin_unlock_irqrestore(&rl->lock, flags);
112
113 return req;
114}
115
116/**
117 * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
118 * @rl: reqlist where to search and remove entry
119 * @req_id: The request id of the request to look for
120 *
121 * This function tries to find the FSF request with the specified
122 * id and then removes it from the reqlist. The reqlist lock is held
123 * during both steps of the operation.
124 *
125 * Returns: Pointer to the FSF request if the request has been found,
126 * NULL if it has not been found.
127 */
128static inline struct zfcp_fsf_req *
129zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
130{
131 unsigned long flags;
132 struct zfcp_fsf_req *req;
133
134 spin_lock_irqsave(&rl->lock, flags);
135 req = _zfcp_reqlist_find(rl, req_id);
136 if (req)
137 list_del(&req->list);
138 spin_unlock_irqrestore(&rl->lock, flags);
139
140 return req;
141}
142
143/**
144 * zfcp_reqlist_add - Add entry to reqlist
145 * @rl: reqlist where to add the entry
146 * @req: The entry to add
147 *
148 * The request id always increases. As an optimization, new requests
149 * are added here with list_add_tail at the end of the bucket lists
150 * while old requests are looked up starting at the beginning of the
151 * lists.
152 */
153static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
154 struct zfcp_fsf_req *req)
155{
156 unsigned int i;
157 unsigned long flags;
158
159 i = zfcp_reqlist_hash(req->req_id);
160
161 spin_lock_irqsave(&rl->lock, flags);
162 list_add_tail(&req->list, &rl->buckets[i]);
163 spin_unlock_irqrestore(&rl->lock, flags);
164}
165
166/**
167 * zfcp_reqlist_move - Move all entries from reqlist to simple list
168 * @rl: The zfcp_reqlist where to remove all entries
169 * @list: The list where to move all entries
170 */
171static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
172 struct list_head *list)
173{
174 unsigned int i;
175 unsigned long flags;
176
177 spin_lock_irqsave(&rl->lock, flags);
178 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
179 list_splice_init(&rl->buckets[i], list);
180 spin_unlock_irqrestore(&rl->lock, flags);
181}
182
183#endif /* ZFCP_REQLIST_H */
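
Taken together, the inline functions above cover the whole lifecycle of the per-adapter request list: allocate once, add and look up while the adapter runs, drain with zfcp_reqlist_move on shutdown, then free. A condensed sketch (the setup/teardown function names here are illustrative, not from the driver):

	/* Sketch: reqlist lifecycle around one adapter. */
	static int adapter_setup_sketch(struct zfcp_adapter *adapter)
	{
		adapter->req_list = zfcp_reqlist_alloc();
		if (!adapter->req_list)
			return -ENOMEM;
		return 0;
	}

	static void adapter_dismiss_sketch(struct zfcp_adapter *adapter)
	{
		LIST_HEAD(remove_queue);

		/* Drain every pending request in one locked sweep ... */
		zfcp_reqlist_move(adapter->req_list, &remove_queue);

		/* ... complete or fail each entry on remove_queue here,
		 * after which the list is empty and may be freed. */
		zfcp_reqlist_free(adapter->req_list);
	}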
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 8e6fc68d6bd4..c3c4178888af 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Interface to Linux SCSI midlayer. 4 * Interface to Linux SCSI midlayer.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -15,6 +15,7 @@
15#include "zfcp_ext.h" 15#include "zfcp_ext.h"
16#include "zfcp_dbf.h" 16#include "zfcp_dbf.h"
17#include "zfcp_fc.h" 17#include "zfcp_fc.h"
18#include "zfcp_reqlist.h"
18 19
19static unsigned int default_depth = 32; 20static unsigned int default_depth = 32;
20module_param_named(queue_depth, default_depth, uint, 0600); 21module_param_named(queue_depth, default_depth, uint, 0600);
@@ -43,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
43{ 44{
44 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 45 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
45 unit->device = NULL; 46 unit->device = NULL;
46 put_device(&unit->sysfs_device); 47 put_device(&unit->dev);
47} 48}
48 49
49static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 50static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -59,10 +60,9 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
59{ 60{
60 struct zfcp_adapter *adapter = 61 struct zfcp_adapter *adapter =
61 (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 62 (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
63
62 set_host_byte(scpnt, result); 64 set_host_byte(scpnt, result);
63 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 65 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
64 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
65 /* return directly */
66 scpnt->scsi_done(scpnt); 66 scpnt->scsi_done(scpnt);
67} 67}
68 68
@@ -86,18 +86,10 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
86 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 86 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
87 unit = scpnt->device->hostdata; 87 unit = scpnt->device->hostdata;
88 88
89 BUG_ON(!adapter || (adapter != unit->port->adapter));
90 BUG_ON(!scpnt->scsi_done);
91
92 if (unlikely(!unit)) {
93 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
94 return 0;
95 }
96
97 scsi_result = fc_remote_port_chkready(rport); 89 scsi_result = fc_remote_port_chkready(rport);
98 if (unlikely(scsi_result)) { 90 if (unlikely(scsi_result)) {
99 scpnt->result = scsi_result; 91 scpnt->result = scsi_result;
100 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); 92 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
101 scpnt->scsi_done(scpnt); 93 scpnt->scsi_done(scpnt);
102 return 0; 94 return 0;
103 } 95 }
@@ -189,9 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
189 /* avoid race condition between late normal completion and abort */ 181 /* avoid race condition between late normal completion and abort */
190 write_lock_irqsave(&adapter->abort_lock, flags); 182 write_lock_irqsave(&adapter->abort_lock, flags);
191 183
192 spin_lock(&adapter->req_list_lock); 184 old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
193 old_req = zfcp_reqlist_find(adapter, old_reqid);
194 spin_unlock(&adapter->req_list_lock);
195 if (!old_req) { 185 if (!old_req) {
196 write_unlock_irqrestore(&adapter->abort_lock, flags); 186 write_unlock_irqrestore(&adapter->abort_lock, flags);
197 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, 187 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
@@ -521,7 +511,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
521 511
522 if (port) { 512 if (port) {
523 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); 513 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
524 put_device(&port->sysfs_device); 514 put_device(&port->dev);
525 } 515 }
526} 516}
527 517
@@ -563,23 +553,23 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
563 553
564void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) 554void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
565{ 555{
566 get_device(&port->sysfs_device); 556 get_device(&port->dev);
567 port->rport_task = RPORT_ADD; 557 port->rport_task = RPORT_ADD;
568 558
569 if (!queue_work(port->adapter->work_queue, &port->rport_work)) 559 if (!queue_work(port->adapter->work_queue, &port->rport_work))
570 put_device(&port->sysfs_device); 560 put_device(&port->dev);
571} 561}
572 562
573void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) 563void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
574{ 564{
575 get_device(&port->sysfs_device); 565 get_device(&port->dev);
576 port->rport_task = RPORT_DEL; 566 port->rport_task = RPORT_DEL;
577 567
578 if (port->rport && queue_work(port->adapter->work_queue, 568 if (port->rport && queue_work(port->adapter->work_queue,
579 &port->rport_work)) 569 &port->rport_work))
580 return; 570 return;
581 571
582 put_device(&port->sysfs_device); 572 put_device(&port->dev);
583} 573}
584 574
585void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) 575void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@@ -608,7 +598,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
608 } 598 }
609 } 599 }
610 600
611 put_device(&port->sysfs_device); 601 put_device(&port->dev);
612} 602}
613 603
614 604
@@ -626,7 +616,7 @@ void zfcp_scsi_scan(struct work_struct *work)
626 scsilun_to_int((struct scsi_lun *) 616 scsilun_to_int((struct scsi_lun *)
627 &unit->fcp_lun), 0); 617 &unit->fcp_lun), 0);
628 618
629 put_device(&unit->sysfs_device); 619 put_device(&unit->dev);
630} 620}
631 621
632struct fc_function_template zfcp_transport_functions = { 622struct fc_function_template zfcp_transport_functions = {
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index f539e006683c..a43035d4bd70 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * sysfs attributes. 4 * sysfs attributes.
5 * 5 *
6 * Copyright IBM Corporation 2008, 2009 6 * Copyright IBM Corporation 2008, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -19,8 +19,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
19 struct device_attribute *at,\ 19 struct device_attribute *at,\
20 char *buf) \ 20 char *buf) \
21{ \ 21{ \
22 struct _feat_def *_feat = container_of(dev, struct _feat_def, \ 22 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
23 sysfs_device); \
24 \ 23 \
25 return sprintf(buf, _format, _value); \ 24 return sprintf(buf, _format, _value); \
26} \ 25} \
@@ -87,8 +86,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
87 struct device_attribute *attr, \ 86 struct device_attribute *attr, \
88 char *buf) \ 87 char *buf) \
89{ \ 88{ \
90 struct _feat_def *_feat = container_of(dev, struct _feat_def, \ 89 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
91 sysfs_device); \
92 \ 90 \
93 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ 91 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
94 return sprintf(buf, "1\n"); \ 92 return sprintf(buf, "1\n"); \
@@ -99,12 +97,11 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
99 struct device_attribute *attr,\ 97 struct device_attribute *attr,\
100 const char *buf, size_t count)\ 98 const char *buf, size_t count)\
101{ \ 99{ \
102 struct _feat_def *_feat = container_of(dev, struct _feat_def, \ 100 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
103 sysfs_device); \
104 unsigned long val; \ 101 unsigned long val; \
105 int retval = 0; \ 102 int retval = 0; \
106 \ 103 \
107 if (!(_feat && get_device(&_feat->sysfs_device))) \ 104 if (!(_feat && get_device(&_feat->dev))) \
108 return -EBUSY; \ 105 return -EBUSY; \
109 \ 106 \
110 if (strict_strtoul(buf, 0, &val) || val != 0) { \ 107 if (strict_strtoul(buf, 0, &val) || val != 0) { \
@@ -118,7 +115,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
118 _reopen_id, NULL); \ 115 _reopen_id, NULL); \
119 zfcp_erp_wait(_adapter); \ 116 zfcp_erp_wait(_adapter); \
120out: \ 117out: \
121 put_device(&_feat->sysfs_device); \ 118 put_device(&_feat->dev); \
122 return retval ? retval : (ssize_t) count; \ 119 return retval ? retval : (ssize_t) count; \
123} \ 120} \
124static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ 121static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
@@ -224,10 +221,10 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
224 list_del(&port->list); 221 list_del(&port->list);
225 write_unlock_irq(&adapter->port_list_lock); 222 write_unlock_irq(&adapter->port_list_lock);
226 223
227 put_device(&port->sysfs_device); 224 put_device(&port->dev);
228 225
229 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); 226 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
230 zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs); 227 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
231 out: 228 out:
232 zfcp_ccw_adapter_put(adapter); 229 zfcp_ccw_adapter_put(adapter);
233 return retval ? retval : (ssize_t) count; 230 return retval ? retval : (ssize_t) count;
@@ -258,13 +255,12 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
258 struct device_attribute *attr, 255 struct device_attribute *attr,
259 const char *buf, size_t count) 256 const char *buf, size_t count)
260{ 257{
261 struct zfcp_port *port = container_of(dev, struct zfcp_port, 258 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
262 sysfs_device);
263 struct zfcp_unit *unit; 259 struct zfcp_unit *unit;
264 u64 fcp_lun; 260 u64 fcp_lun;
265 int retval = -EINVAL; 261 int retval = -EINVAL;
266 262
267 if (!(port && get_device(&port->sysfs_device))) 263 if (!(port && get_device(&port->dev)))
268 return -EBUSY; 264 return -EBUSY;
269 265
270 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 266 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -280,7 +276,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
280 zfcp_erp_wait(unit->port->adapter); 276 zfcp_erp_wait(unit->port->adapter);
281 flush_work(&unit->scsi_work); 277 flush_work(&unit->scsi_work);
282out: 278out:
283 put_device(&port->sysfs_device); 279 put_device(&port->dev);
284 return retval ? retval : (ssize_t) count; 280 return retval ? retval : (ssize_t) count;
285} 281}
286static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); 282static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -289,13 +285,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
289 struct device_attribute *attr, 285 struct device_attribute *attr,
290 const char *buf, size_t count) 286 const char *buf, size_t count)
291{ 287{
292 struct zfcp_port *port = container_of(dev, struct zfcp_port, 288 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
293 sysfs_device);
294 struct zfcp_unit *unit; 289 struct zfcp_unit *unit;
295 u64 fcp_lun; 290 u64 fcp_lun;
296 int retval = -EINVAL; 291 int retval = -EINVAL;
297 292
298 if (!(port && get_device(&port->sysfs_device))) 293 if (!(port && get_device(&port->dev)))
299 return -EBUSY; 294 return -EBUSY;
300 295
301 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 296 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -314,12 +309,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
314 list_del(&unit->list); 309 list_del(&unit->list);
315 write_unlock_irq(&port->unit_list_lock); 310 write_unlock_irq(&port->unit_list_lock);
316 311
317 put_device(&unit->sysfs_device); 312 put_device(&unit->dev);
318 313
319 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); 314 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
320 zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs); 315 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
321out: 316out:
322 put_device(&port->sysfs_device); 317 put_device(&port->dev);
323 return retval ? retval : (ssize_t) count; 318 return retval ? retval : (ssize_t) count;
324} 319}
325static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 320static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b898d382b7b0..e40cdfb7541f 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -3924,7 +3924,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
3924{ 3924{
3925 struct sccb_mgr_tar_info *currTar_Info; 3925 struct sccb_mgr_tar_info *currTar_Info;
3926 3926
3927 if ((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN)) { 3927 if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) {
3928 return; 3928 return;
3929 } 3929 }
3930 currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; 3930 currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a93a5040f087..136b49cea791 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -24,6 +24,10 @@
24#define FW_VER_LEN 32 24#define FW_VER_LEN 32
25#define MCC_Q_LEN 128 25#define MCC_Q_LEN 128
26#define MCC_CQ_LEN 256 26#define MCC_CQ_LEN 256
27#define MAX_MCC_CMD 16
28/* BladeEngine Generation numbers */
29#define BE_GEN2 2
30#define BE_GEN3 3
27 31
28struct be_dma_mem { 32struct be_dma_mem {
29 void *va; 33 void *va;
@@ -57,6 +61,11 @@ static inline void *queue_head_node(struct be_queue_info *q)
57 return q->dma_mem.va + q->head * q->entry_size; 61 return q->dma_mem.va + q->head * q->entry_size;
58} 62}
59 63
64static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
65{
66 return q->dma_mem.va + wrb_num * q->entry_size;
67}
68
60static inline void *queue_tail_node(struct be_queue_info *q) 69static inline void *queue_tail_node(struct be_queue_info *q)
61{ 70{
62 return q->dma_mem.va + q->tail * q->entry_size; 71 return q->dma_mem.va + q->tail * q->entry_size;
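
The new queue_get_wrb() helper is plain pointer arithmetic into the queue's DMA region, base + wrb_num * entry_size, the same indexing queue_head_node() and queue_tail_node() already use. A minimal user-space model of that lookup (all names illustrative):

#include <stdio.h>

int main(void)
{
	char ring[4 * 32];                           /* 4 entries of 32 bytes each */
	unsigned int entry_size = 32;
	unsigned int wrb_num = 2;
	void *entry = ring + wrb_num * entry_size;   /* queue_get_wrb() shape */

	printf("entry %u sits at offset %ld\n", wrb_num,
	       (long)((char *)entry - ring));
	return 0;
}
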
@@ -104,15 +113,19 @@ struct be_ctrl_info {
104 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ 113 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
105 spinlock_t mcc_cq_lock; 114 spinlock_t mcc_cq_lock;
106 115
107 /* MCC Async callback */ 116 wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
108 void (*async_cb) (void *adapter, bool link_up); 117 unsigned int mcc_tag[MAX_MCC_CMD];
109 void *adapter_ctxt; 118 unsigned int mcc_numtag[MAX_MCC_CMD + 1];
119 unsigned short mcc_alloc_index;
120 unsigned short mcc_free_index;
121 unsigned int mcc_tag_available;
110}; 122};
111 123
112#include "be_cmds.h" 124#include "be_cmds.h"
113 125
114#define PAGE_SHIFT_4K 12 126#define PAGE_SHIFT_4K 12
115#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 127#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
128#define mcc_timeout 120000 /* 12s timeout: 120000 x udelay(100) */
116 129
117/* Returns number of pages spanned by the data starting at the given addr */ 130/* Returns number of pages spanned by the data starting at the given addr */
118#define PAGES_4K_SPANNED(_address, size) \ 131#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index f008708f1b08..67098578fba4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -19,7 +19,7 @@
19#include "be_mgmt.h" 19#include "be_mgmt.h"
20#include "be_main.h" 20#include "be_main.h"
21 21
22static void be_mcc_notify(struct beiscsi_hba *phba) 22void be_mcc_notify(struct beiscsi_hba *phba)
23{ 23{
24 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 24 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
25 u32 val = 0; 25 u32 val = 0;
@@ -29,6 +29,52 @@ static void be_mcc_notify(struct beiscsi_hba *phba)
29 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); 29 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
30} 30}
31 31
32unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
33{
34 unsigned int tag = 0;
35 unsigned int num = 0;
36
37mcc_tag_rdy:
38 if (phba->ctrl.mcc_tag_available) {
39 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
40 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
41 phba->ctrl.mcc_numtag[tag] = 0;
42 } else {
43 udelay(100);
44 num++;
45 if (num < mcc_timeout)
46 goto mcc_tag_rdy;
47 }
48 if (tag) {
49 phba->ctrl.mcc_tag_available--;
50 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
51 phba->ctrl.mcc_alloc_index = 0;
52 else
53 phba->ctrl.mcc_alloc_index++;
54 }
55 return tag;
56}
57
58void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
59{
60 spin_lock(&ctrl->mbox_lock);
61 tag = tag & 0x000000FF;
62 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
63 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
64 ctrl->mcc_free_index = 0;
65 else
66 ctrl->mcc_free_index++;
67 ctrl->mcc_tag_available++;
68 spin_unlock(&ctrl->mbox_lock);
69}
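
alloc_mcc_tag() and free_mcc_tag() above manage a fixed ring of MAX_MCC_CMD command tags, with tag 0 reserved as the failure value. A minimal user-space sketch of that ring follows (names illustrative; the driver additionally busy-waits with udelay() in the allocator and takes mbox_lock in the free path):

#include <stdio.h>

#define MAX_MCC_CMD 16

struct tag_pool {
	unsigned int tags[MAX_MCC_CMD];  /* ring of free tags, values 1..16 */
	unsigned short alloc_index;      /* next slot to hand out */
	unsigned short free_index;       /* next slot to refill */
	unsigned int available;
};

static void tag_pool_init(struct tag_pool *p)
{
	unsigned int i;

	for (i = 0; i < MAX_MCC_CMD; i++)
		p->tags[i] = i + 1;      /* 0 means "no tag available" */
	p->alloc_index = 0;
	p->free_index = 0;
	p->available = MAX_MCC_CMD;
}

static unsigned int tag_alloc(struct tag_pool *p)
{
	unsigned int tag;

	if (!p->available)
		return 0;
	tag = p->tags[p->alloc_index];
	p->available--;
	p->alloc_index = (p->alloc_index + 1) % MAX_MCC_CMD;
	return tag;
}

static void tag_free(struct tag_pool *p, unsigned int tag)
{
	p->tags[p->free_index] = tag & 0xFF;  /* low byte carries the tag */
	p->free_index = (p->free_index + 1) % MAX_MCC_CMD;
	p->available++;
}

int main(void)
{
	struct tag_pool pool;
	unsigned int t;

	tag_pool_init(&pool);
	t = tag_alloc(&pool);
	printf("allocated tag %u, %u left\n", t, pool.available);
	tag_free(&pool, t);
	printf("freed it, %u available\n", pool.available);
	return 0;
}
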
70
71bool is_link_state_evt(u32 trailer)
72{
73 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
74 ASYNC_TRAILER_EVENT_CODE_MASK) ==
75 ASYNC_EVENT_CODE_LINK_STATE);
76}
77
32static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 78static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
33{ 79{
34 if (compl->flags != 0) { 80 if (compl->flags != 0) {
@@ -64,12 +110,30 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
64 return 0; 110 return 0;
65} 111}
66 112
67 113int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
68static inline bool is_link_state_evt(u32 trailer) 114 struct be_mcc_compl *compl)
69{ 115{
70 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 116 u16 compl_status, extd_status;
71 ASYNC_TRAILER_EVENT_CODE_MASK) == 117 unsigned short tag;
72 ASYNC_EVENT_CODE_LINK_STATE); 118
119 be_dws_le_to_cpu(compl, 4);
120
121 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
122 CQE_STATUS_COMPL_MASK;
123 /* The ctrl.mcc_numtag[tag] is filled with
124 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
125 * [7:0] = compl_status
126 */
127 tag = (compl->tag0 & 0x000000FF);
128 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
129 CQE_STATUS_EXTD_MASK;
130
131 ctrl->mcc_numtag[tag] = 0x80000000;
132 ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
133 ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
134 ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
135 wake_up_interruptible(&ctrl->mcc_wait[tag]);
136 return 0;
73} 137}
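
be_mcc_compl_process_isr() packs everything the sleeping submitter needs into the single 32-bit word ctrl->mcc_numtag[tag]: bit 31 marks it valid, bits 23:16 carry the WRB index, 15:8 the extended status and 7:0 the completion status. A quick check of that layout (the helper name is illustrative, not part of the driver):

#include <stdio.h>

static unsigned int pack_numtag(unsigned int wrb, unsigned int extd,
				unsigned int status)
{
	return 0x80000000u |                /* valid bit: wakes the waiter */
	       ((wrb & 0xFF) << 16) |
	       ((extd & 0xFF) << 8) |
	       (status & 0xFF);
}

int main(void)
{
	unsigned int word = pack_numtag(5, 0, 2);

	printf("wrb=%u extd_status=%u compl_status=%u\n",
	       (word & 0x00FF0000) >> 16,
	       (word & 0x0000FF00) >> 8,
	       word & 0x000000FF);
	return 0;
}
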
74 138
75static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba) 139static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
@@ -89,7 +153,7 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
89 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); 153 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
90} 154}
91 155
92static void beiscsi_async_link_state_process(struct beiscsi_hba *phba, 156void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
93 struct be_async_event_link_state *evt) 157 struct be_async_event_link_state *evt)
94{ 158{
95 switch (evt->port_link_status) { 159 switch (evt->port_link_status) {
@@ -97,13 +161,13 @@ static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
97 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n", 161 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
98 evt->physical_port); 162 evt->physical_port);
99 phba->state |= BE_ADAPTER_LINK_DOWN; 163 phba->state |= BE_ADAPTER_LINK_DOWN;
164 iscsi_host_for_each_session(phba->shost,
165 be2iscsi_fail_session);
100 break; 166 break;
101 case ASYNC_EVENT_LINK_UP: 167 case ASYNC_EVENT_LINK_UP:
102 phba->state = BE_ADAPTER_UP; 168 phba->state = BE_ADAPTER_UP;
103 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n", 169 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
104 evt->physical_port); 170 evt->physical_port);
105 iscsi_host_for_each_session(phba->shost,
106 be2iscsi_fail_session);
107 break; 171 break;
108 default: 172 default:
109 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on" 173 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
@@ -162,7 +226,6 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
162/* Wait till no more pending mcc requests are present */ 226/* Wait till no more pending mcc requests are present */
163static int be_mcc_wait_compl(struct beiscsi_hba *phba) 227static int be_mcc_wait_compl(struct beiscsi_hba *phba)
164{ 228{
165#define mcc_timeout 120000 /* 5s timeout */
166 int i, status; 229 int i, status;
167 for (i = 0; i < mcc_timeout; i++) { 230 for (i = 0; i < mcc_timeout; i++) {
168 status = beiscsi_process_mcc(phba); 231 status = beiscsi_process_mcc(phba);
@@ -372,9 +435,10 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
372 435
373 BUG_ON(atomic_read(&mccq->used) >= mccq->len); 436 BUG_ON(atomic_read(&mccq->used) >= mccq->len);
374 wrb = queue_head_node(mccq); 437 wrb = queue_head_node(mccq);
438 memset(wrb, 0, sizeof(*wrb));
439 wrb->tag0 = (mccq->head & 0x000000FF) << 16;
375 queue_head_inc(mccq); 440 queue_head_inc(mccq);
376 atomic_inc(&mccq->used); 441 atomic_inc(&mccq->used);
377 memset(wrb, 0, sizeof(*wrb));
378 return wrb; 442 return wrb;
379} 443}
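
The two lines added to wrb_from_mccq() stash the ring slot in bits 23:16 of tag0, which the completion path above copies back into mcc_numtag[] so the submitter can locate its WRB again. Layout check (values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int head = 0x2A;                      /* current mccq head */
	unsigned int tag0 = (head & 0x000000FF) << 16; /* as in the driver */

	printf("tag0=0x%08x -> wrb index %u\n", tag0, (tag0 >> 16) & 0xFF);
	return 0;
}
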
380 444
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 5de8acb924cb..49fcc787ee8b 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -425,14 +425,20 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
425int be_poll_mcc(struct be_ctrl_info *ctrl); 425int be_poll_mcc(struct be_ctrl_info *ctrl);
426unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, 426unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
427 struct beiscsi_hba *phba); 427 struct beiscsi_hba *phba);
428int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr); 428unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
429 429void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
430/* iSCSI Functions */ 430/* iSCSI Functions */
431int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); 431int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
432 432
433struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); 433struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
434struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); 434struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
435int be_mcc_notify_wait(struct beiscsi_hba *phba); 435int be_mcc_notify_wait(struct beiscsi_hba *phba);
436void be_mcc_notify(struct beiscsi_hba *phba);
437unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
438void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
439 struct be_async_event_link_state *evt);
440int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
441 struct be_mcc_compl *compl);
436 442
437int be_mbox_notify(struct be_ctrl_info *ctrl); 443int be_mbox_notify(struct be_ctrl_info *ctrl);
438 444
@@ -448,6 +454,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
448int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, 454int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
449 struct be_queue_info *wrbq); 455 struct be_queue_info *wrbq);
450 456
457bool is_link_state_evt(u32 trailer);
458
451struct be_default_pdu_context { 459struct be_default_pdu_context {
452 u32 dw[4]; 460 u32 dw[4];
453} __packed; 461} __packed;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index d587b0362f18..29a3aaf35f9f 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -101,6 +101,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
101 struct iscsi_session *sess = cls_session->dd_data; 101 struct iscsi_session *sess = cls_session->dd_data;
102 struct beiscsi_session *beiscsi_sess = sess->dd_data; 102 struct beiscsi_session *beiscsi_sess = sess->dd_data;
103 103
104 SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
104 pci_pool_destroy(beiscsi_sess->bhs_pool); 105 pci_pool_destroy(beiscsi_sess->bhs_pool);
105 iscsi_session_teardown(cls_session); 106 iscsi_session_teardown(cls_session);
106} 107}
@@ -224,6 +225,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
224 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 225 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
225 int len = 0; 226 int len = 0;
226 227
228 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
227 beiscsi_ep = beiscsi_conn->ep; 229 beiscsi_ep = beiscsi_conn->ep;
228 if (!beiscsi_ep) { 230 if (!beiscsi_ep) {
229 SE_DEBUG(DBG_LVL_1, 231 SE_DEBUG(DBG_LVL_1,
@@ -254,6 +256,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
254 struct iscsi_session *session = conn->session; 256 struct iscsi_session *session = conn->session;
255 int ret; 257 int ret;
256 258
259 SE_DEBUG(DBG_LVL_8, "In beiscsi_set_param, param= %d\n", param);
257 ret = iscsi_set_param(cls_conn, param, buf, buflen); 260 ret = iscsi_set_param(cls_conn, param, buf, buflen);
258 if (ret) 261 if (ret)
259 return ret; 262 return ret;
@@ -271,8 +274,8 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
271 conn->max_recv_dlength = 65536; 274 conn->max_recv_dlength = 65536;
272 break; 275 break;
273 case ISCSI_PARAM_MAX_BURST: 276 case ISCSI_PARAM_MAX_BURST:
274 if (session->first_burst > 262144) 277 if (session->max_burst > 262144)
275 session->first_burst = 262144; 278 session->max_burst = 262144;
276 break; 279 break;
277 default: 280 default:
278 return 0; 281 return 0;
@@ -293,12 +296,41 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
293 enum iscsi_host_param param, char *buf) 296 enum iscsi_host_param param, char *buf)
294{ 297{
295 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); 298 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
299 struct be_cmd_resp_get_mac_addr *resp;
300 struct be_mcc_wrb *wrb;
301 unsigned int tag, wrb_num;
296 int len = 0; 302 int len = 0;
303 unsigned short status, extd_status;
304 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
297 305
306 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
298 switch (param) { 307 switch (param) {
299 case ISCSI_HOST_PARAM_HWADDRESS: 308 case ISCSI_HOST_PARAM_HWADDRESS:
300 be_cmd_get_mac_addr(phba, phba->mac_address); 309 tag = be_cmd_get_mac_addr(phba);
301 len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); 310 if (!tag) {
311 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n");
312 return -1;
313 } else
314 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
315 phba->ctrl.mcc_numtag[tag]);
316
317 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
318 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
319 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
320 if (status || extd_status) {
321 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
322 " status = %d extd_status = %d \n",
323 status, extd_status);
324 free_mcc_tag(&phba->ctrl, tag);
325 return -1;
326 } else {
327 wrb = queue_get_wrb(mccq, wrb_num);
328 free_mcc_tag(&phba->ctrl, tag);
329 resp = embedded_payload(wrb);
330 memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
331 len = sysfs_format_mac(buf, phba->mac_address,
332 ETH_ALEN);
333 }
302 break; 334 break;
303 default: 335 default:
304 return iscsi_host_get_param(shost, param, buf); 336 return iscsi_host_get_param(shost, param, buf);
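
This hunk shows the pattern every converted MCC caller now follows: fire the command, sleep on mcc_wait[tag] until the ISR fills mcc_numtag[tag], then split the packed word back into WRB index, extd_status and compl_status. A user-space analogue, with a pthread condition variable standing in for wait_event_interruptible()/wake_up_interruptible() (all names illustrative; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int numtag;              /* 0 until the "ISR" posts it */

static void *fake_isr(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	numtag = 0x80000000u | (3u << 16);   /* valid, wrb=3, status ok */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned int wrb, extd, status;

	pthread_create(&t, NULL, fake_isr, NULL);

	pthread_mutex_lock(&lock);
	while (!numtag)                      /* wait_event analogue */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	wrb = (numtag & 0x00FF0000) >> 16;
	extd = (numtag & 0x0000FF00) >> 8;
	status = numtag & 0x000000FF;
	printf("wrb=%u extd=%u status=%u -> %s\n", wrb, extd, status,
	       (status || extd) ? "failed" : "ok");
	return 0;
}
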
@@ -378,6 +410,7 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
378 struct beiscsi_endpoint *beiscsi_ep; 410 struct beiscsi_endpoint *beiscsi_ep;
379 struct beiscsi_offload_params params; 411 struct beiscsi_offload_params params;
380 412
413 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
381 memset(&params, 0, sizeof(struct beiscsi_offload_params)); 414 memset(&params, 0, sizeof(struct beiscsi_offload_params));
382 beiscsi_ep = beiscsi_conn->ep; 415 beiscsi_ep = beiscsi_conn->ep;
383 if (!beiscsi_ep) 416 if (!beiscsi_ep)
@@ -422,8 +455,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
422{ 455{
423 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 456 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
424 struct beiscsi_hba *phba = beiscsi_ep->phba; 457 struct beiscsi_hba *phba = beiscsi_ep->phba;
458 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
459 struct be_mcc_wrb *wrb;
460 struct tcp_connect_and_offload_out *ptcpcnct_out;
461 unsigned short status, extd_status;
462 unsigned int tag, wrb_num;
425 int ret = -1; 463 int ret = -1;
426 464
465 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
427 beiscsi_ep->ep_cid = beiscsi_get_cid(phba); 466 beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
428 if (beiscsi_ep->ep_cid == 0xFFFF) { 467 if (beiscsi_ep->ep_cid == 0xFFFF) {
429 SE_DEBUG(DBG_LVL_1, "No free cid available\n"); 468 SE_DEBUG(DBG_LVL_1, "No free cid available\n");
@@ -431,15 +470,44 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
431 } 470 }
432 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ", 471 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
433 beiscsi_ep->ep_cid); 472 beiscsi_ep->ep_cid);
434 phba->ep_array[beiscsi_ep->ep_cid] = ep; 473 phba->ep_array[beiscsi_ep->ep_cid -
435 if (beiscsi_ep->ep_cid > 474 phba->fw_config.iscsi_cid_start] = ep;
436 (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) { 475 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
476 phba->params.cxns_per_ctrl * 2)) {
437 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 477 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
438 return ret; 478 return ret;
439 } 479 }
440 480
441 beiscsi_ep->cid_vld = 0; 481 beiscsi_ep->cid_vld = 0;
442 return mgmt_open_connection(phba, dst_addr, beiscsi_ep); 482 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
483 if (!tag) {
 484 SE_DEBUG(DBG_LVL_1,
 485 "mgmt_open_connection Failed for cid=%d \n",
 486 beiscsi_ep->ep_cid);
 487 return -1;
 488 }
 489 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
 490 phba->ctrl.mcc_numtag[tag]);
491 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
492 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
493 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
494 if (status || extd_status) {
495 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
496 " status = %d extd_status = %d \n",
497 status, extd_status);
498 free_mcc_tag(&phba->ctrl, tag);
499 return -1;
500 } else {
501 wrb = queue_get_wrb(mccq, wrb_num);
502 free_mcc_tag(&phba->ctrl, tag);
503
504 ptcpcnct_out = embedded_payload(wrb);
505 beiscsi_ep = ep->dd_data;
506 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
507 beiscsi_ep->cid_vld = 1;
508 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
509 }
510 return 0;
443} 511}
444 512
445/** 513/**
@@ -459,14 +527,12 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
459 * beiscsi_free_ep - free endpoint 527 * beiscsi_free_ep - free endpoint
460 * @ep: pointer to iscsi endpoint structure 528 * @ep: pointer to iscsi endpoint structure
461 */ 529 */
462static void beiscsi_free_ep(struct iscsi_endpoint *ep) 530static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
463{ 531{
464 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
465 struct beiscsi_hba *phba = beiscsi_ep->phba; 532 struct beiscsi_hba *phba = beiscsi_ep->phba;
466 533
467 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 534 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
468 beiscsi_ep->phba = NULL; 535 beiscsi_ep->phba = NULL;
469 iscsi_destroy_endpoint(ep);
470} 536}
471 537
472/** 538/**
@@ -495,9 +561,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
495 return ERR_PTR(ret); 561 return ERR_PTR(ret);
496 } 562 }
497 563
498 if (phba->state) { 564 if (phba->state != BE_ADAPTER_UP) {
499 ret = -EBUSY; 565 ret = -EBUSY;
500 SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n"); 566 SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n");
501 return ERR_PTR(ret); 567 return ERR_PTR(ret);
502 } 568 }
503 569
@@ -509,9 +575,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
509 575
510 beiscsi_ep = ep->dd_data; 576 beiscsi_ep = ep->dd_data;
511 beiscsi_ep->phba = phba; 577 beiscsi_ep->phba = phba;
512 578 beiscsi_ep->openiscsi_ep = ep;
513 if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) { 579 if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
514 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 580 SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n");
515 ret = -ENOMEM; 581 ret = -ENOMEM;
516 goto free_ep; 582 goto free_ep;
517 } 583 }
@@ -519,7 +585,7 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
519 return ep; 585 return ep;
520 586
521free_ep: 587free_ep:
522 beiscsi_free_ep(ep); 588 beiscsi_free_ep(beiscsi_ep);
523 return ERR_PTR(ret); 589 return ERR_PTR(ret);
524} 590}
525 591
@@ -546,20 +612,22 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
546 * @ep: The iscsi endpoint 612 * @ep: The iscsi endpoint
547 * @flag: The type of connection closure 613 * @flag: The type of connection closure
548 */ 614 */
549static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag) 615static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
550{ 616{
551 int ret = 0; 617 int ret = 0;
552 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 618 unsigned int tag;
553 struct beiscsi_hba *phba = beiscsi_ep->phba; 619 struct beiscsi_hba *phba = beiscsi_ep->phba;
554 620
555 if (MGMT_STATUS_SUCCESS != 621 tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
556 mgmt_upload_connection(phba, beiscsi_ep->ep_cid, 622 if (!tag) {
557 CONNECTION_UPLOAD_GRACEFUL)) {
558 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x", 623 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
559 beiscsi_ep->ep_cid); 624 beiscsi_ep->ep_cid);
560 ret = -1; 625 ret = -1;
626 } else {
627 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
628 phba->ctrl.mcc_numtag[tag]);
629 free_mcc_tag(&phba->ctrl, tag);
561 } 630 }
562
563 return ret; 631 return ret;
564} 632}
565 633
@@ -574,19 +642,17 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
574 struct beiscsi_conn *beiscsi_conn; 642 struct beiscsi_conn *beiscsi_conn;
575 struct beiscsi_endpoint *beiscsi_ep; 643 struct beiscsi_endpoint *beiscsi_ep;
576 struct beiscsi_hba *phba; 644 struct beiscsi_hba *phba;
577 int flag = 0;
578 645
579 beiscsi_ep = ep->dd_data; 646 beiscsi_ep = ep->dd_data;
580 phba = beiscsi_ep->phba; 647 phba = beiscsi_ep->phba;
581 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n"); 648 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
649 beiscsi_ep->ep_cid);
582 650
583 if (beiscsi_ep->conn) { 651 if (beiscsi_ep->conn) {
584 beiscsi_conn = beiscsi_ep->conn; 652 beiscsi_conn = beiscsi_ep->conn;
585 iscsi_suspend_queue(beiscsi_conn->conn); 653 iscsi_suspend_queue(beiscsi_conn->conn);
586 beiscsi_close_conn(ep, flag);
587 } 654 }
588 655
589 beiscsi_free_ep(ep);
590} 656}
591 657
592/** 658/**
@@ -619,23 +685,31 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
619 struct iscsi_session *session = conn->session; 685 struct iscsi_session *session = conn->session;
620 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); 686 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
621 struct beiscsi_hba *phba = iscsi_host_priv(shost); 687 struct beiscsi_hba *phba = iscsi_host_priv(shost);
622 unsigned int status; 688 unsigned int tag;
623 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; 689 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
624 690
625 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
626 beiscsi_ep = beiscsi_conn->ep; 691 beiscsi_ep = beiscsi_conn->ep;
627 if (!beiscsi_ep) { 692 if (!beiscsi_ep) {
628 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n"); 693 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
629 return; 694 return;
630 } 695 }
631 status = mgmt_invalidate_connection(phba, beiscsi_ep, 696 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop ep_cid = %d\n",
697 beiscsi_ep->ep_cid);
698 tag = mgmt_invalidate_connection(phba, beiscsi_ep,
632 beiscsi_ep->ep_cid, 1, 699 beiscsi_ep->ep_cid, 1,
633 savecfg_flag); 700 savecfg_flag);
634 if (status != MGMT_STATUS_SUCCESS) { 701 if (!tag) {
635 SE_DEBUG(DBG_LVL_1, 702 SE_DEBUG(DBG_LVL_1,
636 "mgmt_invalidate_connection Failed for cid=%d \n", 703 "mgmt_invalidate_connection Failed for cid=%d \n",
637 beiscsi_ep->ep_cid); 704 beiscsi_ep->ep_cid);
705 } else {
706 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
707 phba->ctrl.mcc_numtag[tag]);
708 free_mcc_tag(&phba->ctrl, tag);
638 } 709 }
710 beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
711 beiscsi_free_ep(beiscsi_ep);
712 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
639 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); 713 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
640 iscsi_conn_stop(cls_conn, flag); 714 iscsi_conn_stop(cls_conn, flag);
641} 715}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index f92ffc5349fb..1f512c28cbf9 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1a557fa77888..7c22616ab141 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -40,7 +40,6 @@
40static unsigned int be_iopoll_budget = 10; 40static unsigned int be_iopoll_budget = 10;
41static unsigned int be_max_phys_size = 64; 41static unsigned int be_max_phys_size = 64;
42static unsigned int enable_msix = 1; 42static unsigned int enable_msix = 1;
43static unsigned int ring_mode;
44 43
45MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 44MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
46MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 45MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -62,10 +61,10 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
62/*------------------- PCI Driver operations and data ----------------- */ 61/*------------------- PCI Driver operations and data ----------------- */
63static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { 62static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
64 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 63 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
64 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
65 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 65 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 66 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, 67 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
69 { 0 } 68 { 0 }
70}; 69};
71MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 70MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
@@ -112,6 +111,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
112 memset(phba, 0, sizeof(*phba)); 111 memset(phba, 0, sizeof(*phba));
113 phba->shost = shost; 112 phba->shost = shost;
114 phba->pcidev = pci_dev_get(pcidev); 113 phba->pcidev = pci_dev_get(pcidev);
114 pci_set_drvdata(pcidev, phba);
115 115
116 if (iscsi_host_add(shost, &phba->pcidev->dev)) 116 if (iscsi_host_add(shost, &phba->pcidev->dev))
117 goto free_devices; 117 goto free_devices;
@@ -143,6 +143,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
143 struct pci_dev *pcidev) 143 struct pci_dev *pcidev)
144{ 144{
145 u8 __iomem *addr; 145 u8 __iomem *addr;
146 int pcicfg_reg;
146 147
147 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 148 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
148 pci_resource_len(pcidev, 2)); 149 pci_resource_len(pcidev, 2));
@@ -159,13 +160,19 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
159 phba->db_va = addr; 160 phba->db_va = addr;
160 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); 161 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
161 162
162 addr = ioremap_nocache(pci_resource_start(pcidev, 1), 163 if (phba->generation == BE_GEN2)
163 pci_resource_len(pcidev, 1)); 164 pcicfg_reg = 1;
165 else
166 pcicfg_reg = 0;
167
168 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
169 pci_resource_len(pcidev, pcicfg_reg));
170
164 if (addr == NULL) 171 if (addr == NULL)
165 goto pci_map_err; 172 goto pci_map_err;
166 phba->ctrl.pcicfg = addr; 173 phba->ctrl.pcicfg = addr;
167 phba->pci_va = addr; 174 phba->pci_va = addr;
168 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1); 175 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
169 return 0; 176 return 0;
170 177
171pci_map_err: 178pci_map_err:
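
The BAR choice above encodes a real hardware difference: on BE gen-2 parts the PCI config shadow sits behind BAR 1, on later generations behind BAR 0. A trivial restatement of the selection (values taken from the diff):

#include <stdio.h>

#define BE_GEN2 2
#define BE_GEN3 3

int main(void)
{
	int gen;

	for (gen = BE_GEN2; gen <= BE_GEN3; gen++)
		printf("generation %d -> pcicfg BAR %d\n",
		       gen, (gen == BE_GEN2) ? 1 : 0);
	return 0;
}
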
@@ -230,29 +237,27 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
230 237
231static void beiscsi_get_params(struct beiscsi_hba *phba) 238static void beiscsi_get_params(struct beiscsi_hba *phba)
232{ 239{
233 phba->params.ios_per_ctrl = BE2_IO_DEPTH; 240 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
234 phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS; 241 - (phba->fw_config.iscsi_cid_count
235 phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS; 242 + BE2_TMFS
236 phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2; 243 + BE2_NOPOUT_REQ));
244 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
245 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
246 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
237 phba->params.num_sge_per_io = BE2_SGE; 247 phba->params.num_sge_per_io = BE2_SGE;
238 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 248 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
239 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 249 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
240 phba->params.eq_timer = 64; 250 phba->params.eq_timer = 64;
241 phba->params.num_eq_entries = 251 phba->params.num_eq_entries =
242 (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) / 252 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
243 512) + 1) * 512; 253 + BE2_TMFS) / 512) + 1) * 512;
244 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) 254 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
245 ? 1024 : phba->params.num_eq_entries; 255 ? 1024 : phba->params.num_eq_entries;
246 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n", 256 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
247 phba->params.num_eq_entries); 257 phba->params.num_eq_entries);
248 phba->params.num_cq_entries = 258 phba->params.num_cq_entries =
249 (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) / 259 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
250 512) + 1) * 512; 260 + BE2_TMFS) / 512) + 1) * 512;
251 SE_DEBUG(DBG_LVL_8,
252 "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d"
253 "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
254 phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
255 BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
256 phba->params.wrbs_per_cxn = 256; 261 phba->params.wrbs_per_cxn = 256;
257} 262}
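
The rewritten sizing in beiscsi_get_params() derives every queue depth from the firmware-reported icd/cid counts instead of compile-time constants. The (((raw / 512) + 1) * 512) shape rounds a raw entry count up to the next multiple of 512 strictly above it, and the result is then clamped to at least 1024. Checking that arithmetic (counts illustrative):

#include <stdio.h>

static unsigned int round_up_512(unsigned int n)
{
	return ((n / 512) + 1) * 512;   /* same shape as the driver code */
}

int main(void)
{
	unsigned int raw = 2 * 128 + 2 * 448 + 16;  /* cmds, cids, TMFs */
	unsigned int entries = round_up_512(raw);

	if (entries < 1024)
		entries = 1024;
	printf("raw=%u -> entries=%u\n", raw, entries);
	return 0;
}
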
258 263
@@ -443,7 +448,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
443 if (phba->todo_mcc_cq) 448 if (phba->todo_mcc_cq)
444 queue_work(phba->wq, &phba->work_cqs); 449 queue_work(phba->wq, &phba->work_cqs);
445 450
446 if ((num_mcceq_processed) && (!num_ioeq_processed)) 451 if ((num_mcceq_processed) && (!num_ioeq_processed))
447 hwi_ring_eq_db(phba, eq->id, 0, 452 hwi_ring_eq_db(phba, eq->id, 0,
448 (num_ioeq_processed + 453 (num_ioeq_processed +
449 num_mcceq_processed) , 1, 1); 454 num_mcceq_processed) , 1, 1);
@@ -561,6 +566,7 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
561 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); 566 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
562 break; 567 break;
563 case ISCSI_OP_LOGIN_RSP: 568 case ISCSI_OP_LOGIN_RSP:
569 case ISCSI_OP_TEXT_RSP:
564 task = conn->login_task; 570 task = conn->login_task;
565 io_task = task->dd_data; 571 io_task = task->dd_data;
566 login_hdr = (struct iscsi_hdr *)ppdu; 572 login_hdr = (struct iscsi_hdr *)ppdu;
@@ -631,29 +637,29 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
631 * alloc_wrb_handle - To allocate a wrb handle 637 * alloc_wrb_handle - To allocate a wrb handle
632 * @phba: The hba pointer 638 * @phba: The hba pointer
633 * @cid: The cid to use for allocation 639 * @cid: The cid to use for allocation
634 * @index: index allocation and wrb index
635 * 640 *
636 * This happens under session_lock until submission to chip 641 * This happens under session_lock until submission to chip
637 */ 642 */
638struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, 643struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
639 int index)
640{ 644{
641 struct hwi_wrb_context *pwrb_context; 645 struct hwi_wrb_context *pwrb_context;
642 struct hwi_controller *phwi_ctrlr; 646 struct hwi_controller *phwi_ctrlr;
643 struct wrb_handle *pwrb_handle; 647 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
644 648
645 phwi_ctrlr = phba->phwi_ctrlr; 649 phwi_ctrlr = phba->phwi_ctrlr;
646 pwrb_context = &phwi_ctrlr->wrb_context[cid]; 650 pwrb_context = &phwi_ctrlr->wrb_context[cid];
647 if (pwrb_context->wrb_handles_available) { 651 if (pwrb_context->wrb_handles_available >= 2) {
648 pwrb_handle = pwrb_context->pwrb_handle_base[ 652 pwrb_handle = pwrb_context->pwrb_handle_base[
649 pwrb_context->alloc_index]; 653 pwrb_context->alloc_index];
650 pwrb_context->wrb_handles_available--; 654 pwrb_context->wrb_handles_available--;
651 pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
652 if (pwrb_context->alloc_index == 655 if (pwrb_context->alloc_index ==
653 (phba->params.wrbs_per_cxn - 1)) 656 (phba->params.wrbs_per_cxn - 1))
654 pwrb_context->alloc_index = 0; 657 pwrb_context->alloc_index = 0;
655 else 658 else
656 pwrb_context->alloc_index++; 659 pwrb_context->alloc_index++;
660 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
661 pwrb_context->alloc_index];
662 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
657 } else 663 } else
658 pwrb_handle = NULL; 664 pwrb_handle = NULL;
659 return pwrb_handle; 665 return pwrb_handle;
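
alloc_wrb_handle() now insists on at least two free handles because each handle it hands out records the index of the handle that will be allocated next (nxt_wrb_index), which lets the hardware chain WRBs. A small model of that pre-linking (illustrative, no locking):

#include <stdio.h>

#define WRBS 4

struct wrb_handle {
	unsigned int wrb_index;
	unsigned int nxt_wrb_index;
};

int main(void)
{
	struct wrb_handle ring[WRBS];
	unsigned int alloc_index = 0, available = WRBS, i;

	for (i = 0; i < WRBS; i++)
		ring[i].wrb_index = i;

	while (available >= 2) {             /* mirrors the >= 2 check */
		struct wrb_handle *h = &ring[alloc_index];

		available--;
		alloc_index = (alloc_index + 1) % WRBS;
		h->nxt_wrb_index = ring[alloc_index].wrb_index;
		printf("handed out wrb %u, next will be %u\n",
		       h->wrb_index, h->nxt_wrb_index);
	}
	return 0;
}
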
@@ -671,9 +677,7 @@ static void
671free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 677free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
672 struct wrb_handle *pwrb_handle) 678 struct wrb_handle *pwrb_handle)
673{ 679{
674 if (!ring_mode) 680 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
675 pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
676 pwrb_handle;
677 pwrb_context->wrb_handles_available++; 681 pwrb_context->wrb_handles_available++;
678 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) 682 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
679 pwrb_context->free_index = 0; 683 pwrb_context->free_index = 0;
@@ -790,6 +794,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
790 memcpy(task->sc->sense_buffer, sense, 794 memcpy(task->sc->sense_buffer, sense,
791 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 795 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
792 } 796 }
797
793 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) { 798 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
794 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] 799 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
795 & SOL_RES_CNT_MASK) 800 & SOL_RES_CNT_MASK)
@@ -811,6 +816,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
811 struct iscsi_conn *conn = beiscsi_conn->conn; 816 struct iscsi_conn *conn = beiscsi_conn->conn;
812 817
813 hdr = (struct iscsi_logout_rsp *)task->hdr; 818 hdr = (struct iscsi_logout_rsp *)task->hdr;
819 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
814 hdr->t2wait = 5; 820 hdr->t2wait = 5;
815 hdr->t2retain = 0; 821 hdr->t2retain = 0;
816 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 822 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -825,6 +831,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
825 & SOL_EXP_CMD_SN_MASK) + 831 & SOL_EXP_CMD_SN_MASK) +
826 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 832 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
827 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 833 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
834 hdr->dlength[0] = 0;
835 hdr->dlength[1] = 0;
836 hdr->dlength[2] = 0;
828 hdr->hlength = 0; 837 hdr->hlength = 0;
829 hdr->itt = io_task->libiscsi_itt; 838 hdr->itt = io_task->libiscsi_itt;
830 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 839 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -839,6 +848,7 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
839 struct beiscsi_io_task *io_task = task->dd_data; 848 struct beiscsi_io_task *io_task = task->dd_data;
840 849
841 hdr = (struct iscsi_tm_rsp *)task->hdr; 850 hdr = (struct iscsi_tm_rsp *)task->hdr;
851 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
842 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 852 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
843 & SOL_FLAGS_MASK) >> 24) | 0x80; 853 & SOL_FLAGS_MASK) >> 24) | 0x80;
844 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 854 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
@@ -859,7 +869,6 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
859{ 869{
860 struct hwi_wrb_context *pwrb_context; 870 struct hwi_wrb_context *pwrb_context;
861 struct wrb_handle *pwrb_handle = NULL; 871 struct wrb_handle *pwrb_handle = NULL;
862 struct sgl_handle *psgl_handle = NULL;
863 struct hwi_controller *phwi_ctrlr; 872 struct hwi_controller *phwi_ctrlr;
864 struct iscsi_task *task; 873 struct iscsi_task *task;
865 struct beiscsi_io_task *io_task; 874 struct beiscsi_io_task *io_task;
@@ -867,22 +876,14 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
867 struct iscsi_session *session = conn->session; 876 struct iscsi_session *session = conn->session;
868 877
869 phwi_ctrlr = phba->phwi_ctrlr; 878 phwi_ctrlr = phba->phwi_ctrlr;
870 if (ring_mode) { 879 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
871 psgl_handle = phba->sgl_hndl_array[((psol->
872 dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
873 32] & SOL_ICD_INDEX_MASK) >> 6)];
874 pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
875 task = psgl_handle->task;
876 pwrb_handle = NULL;
877 } else {
878 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
879 dw[offsetof(struct amap_sol_cqe, cid) / 32] & 880 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
880 SOL_CID_MASK) >> 6)]; 881 SOL_CID_MASK) >> 6) -
881 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 882 phba->fw_config.iscsi_cid_start];
883 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
882 dw[offsetof(struct amap_sol_cqe, wrb_index) / 884 dw[offsetof(struct amap_sol_cqe, wrb_index) /
883 32] & SOL_WRB_INDEX_MASK) >> 16)]; 885 32] & SOL_WRB_INDEX_MASK) >> 16)];
884 task = pwrb_handle->pio_handle; 886 task = pwrb_handle->pio_handle;
885 }
886 887
887 io_task = task->dd_data; 888 io_task = task->dd_data;
888 spin_lock(&phba->mgmt_sgl_lock); 889 spin_lock(&phba->mgmt_sgl_lock);
@@ -923,31 +924,23 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
923 struct iscsi_wrb *pwrb = NULL; 924 struct iscsi_wrb *pwrb = NULL;
924 struct hwi_controller *phwi_ctrlr; 925 struct hwi_controller *phwi_ctrlr;
925 struct iscsi_task *task; 926 struct iscsi_task *task;
926 struct sgl_handle *psgl_handle = NULL;
927 unsigned int type; 927 unsigned int type;
928 struct iscsi_conn *conn = beiscsi_conn->conn; 928 struct iscsi_conn *conn = beiscsi_conn->conn;
929 struct iscsi_session *session = conn->session; 929 struct iscsi_session *session = conn->session;
930 930
931 phwi_ctrlr = phba->phwi_ctrlr; 931 phwi_ctrlr = phba->phwi_ctrlr;
932 if (ring_mode) { 932 pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
933 psgl_handle = phba->sgl_hndl_array[((psol->
934 dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
935 32] & SOL_ICD_INDEX_MASK) >> 6)];
936 task = psgl_handle->task;
937 type = psgl_handle->type;
938 } else {
939 pwrb_context = &phwi_ctrlr->
940 wrb_context[((psol->dw[offsetof
941 (struct amap_sol_cqe, cid) / 32] 933 (struct amap_sol_cqe, cid) / 32]
942 & SOL_CID_MASK) >> 6)]; 934 & SOL_CID_MASK) >> 6) -
943 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 935 phba->fw_config.iscsi_cid_start];
936 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
944 dw[offsetof(struct amap_sol_cqe, wrb_index) / 937 dw[offsetof(struct amap_sol_cqe, wrb_index) /
945 32] & SOL_WRB_INDEX_MASK) >> 16)]; 938 32] & SOL_WRB_INDEX_MASK) >> 16)];
946 task = pwrb_handle->pio_handle; 939 task = pwrb_handle->pio_handle;
947 pwrb = pwrb_handle->pwrb; 940 pwrb = pwrb_handle->pwrb;
948 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & 941 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
949 WRB_TYPE_MASK) >> 28; 942 WRB_TYPE_MASK) >> 28;
950 } 943
951 spin_lock_bh(&session->lock); 944 spin_lock_bh(&session->lock);
952 switch (type) { 945 switch (type) {
953 case HWH_TYPE_IO: 946 case HWH_TYPE_IO:
@@ -978,15 +971,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
978 break; 971 break;
979 972
980 default: 973 default:
981 if (ring_mode) 974 shost_printk(KERN_WARNING, phba->shost,
982 shost_printk(KERN_WARNING, phba->shost,
983 "In hwi_complete_cmd, unknown type = %d"
984 "icd_index 0x%x CID 0x%x\n", type,
985 ((psol->dw[offsetof(struct amap_sol_cqe_ring,
986 icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
987 psgl_handle->cid);
988 else
989 shost_printk(KERN_WARNING, phba->shost,
990 "In hwi_complete_cmd, unknown type = %d" 975 "In hwi_complete_cmd, unknown type = %d"
991 "wrb_index 0x%x CID 0x%x\n", type, 976 "wrb_index 0x%x CID 0x%x\n", type,
992 ((psol->dw[offsetof(struct amap_iscsi_wrb, 977 ((psol->dw[offsetof(struct amap_iscsi_wrb,
@@ -1077,7 +1062,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1077 1062
1078 WARN_ON(!pasync_handle); 1063 WARN_ON(!pasync_handle);
1079 1064
1080 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid; 1065 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1066 phba->fw_config.iscsi_cid_start;
1081 pasync_handle->is_header = is_header; 1067 pasync_handle->is_header = is_header;
1082 pasync_handle->buffer_len = ((pdpdu_cqe-> 1068 pasync_handle->buffer_len = ((pdpdu_cqe->
1083 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] 1069 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
@@ -1327,9 +1313,10 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1327 } 1313 }
1328 1314
1329 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1315 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1330 beiscsi_conn->beiscsi_conn_cid, 1316 (beiscsi_conn->beiscsi_conn_cid -
1331 phdr, hdr_len, pfirst_buffer, 1317 phba->fw_config.iscsi_cid_start),
1332 buf_len); 1318 phdr, hdr_len, pfirst_buffer,
1319 buf_len);
1333 1320
1334 if (status == 0) 1321 if (status == 0)
1335 hwi_free_async_msg(phba, cri); 1322 hwi_free_async_msg(phba, cri);
@@ -1422,6 +1409,48 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1422 hwi_post_async_buffers(phba, pasync_handle->is_header); 1409 hwi_post_async_buffers(phba, pasync_handle->is_header);
1423} 1410}
1424 1411
1412static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1413{
1414 struct be_queue_info *mcc_cq;
1415 struct be_mcc_compl *mcc_compl;
1416 unsigned int num_processed = 0;
1417
1418 mcc_cq = &phba->ctrl.mcc_obj.cq;
1419 mcc_compl = queue_tail_node(mcc_cq);
1420 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1421 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1422
1423 if (num_processed >= 32) {
1424 hwi_ring_cq_db(phba, mcc_cq->id,
1425 num_processed, 0, 0);
1426 num_processed = 0;
1427 }
1428 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1429 /* Interpret flags as an async trailer */
1430 if (is_link_state_evt(mcc_compl->flags))
 1431 /* Interpret compl as an async link evt */
1432 beiscsi_async_link_state_process(phba,
1433 (struct be_async_event_link_state *) mcc_compl);
1434 else
1435 SE_DEBUG(DBG_LVL_1,
1436 " Unsupported Async Event, flags"
1437 " = 0x%08x \n", mcc_compl->flags);
1438 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1439 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1440 atomic_dec(&phba->ctrl.mcc_obj.q.used);
1441 }
1442
1443 mcc_compl->flags = 0;
1444 queue_tail_inc(mcc_cq);
1445 mcc_compl = queue_tail_node(mcc_cq);
1446 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1447 num_processed++;
1448 }
1449
1450 if (num_processed > 0)
1451 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1452
1453}
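
beiscsi_process_mcc_isr() drains the MCC completion queue with the usual batched-doorbell discipline: consume valid entries, return credit to the hardware every 32 completions without re-arming, and ring one final re-arming doorbell when the queue is empty. The control flow in isolation (ring_db() stands in for hwi_ring_cq_db(); everything illustrative):

#include <stdio.h>

static void ring_db(unsigned int consumed, int rearm)
{
	printf("doorbell: credit %u entries, rearm=%d\n", consumed, rearm);
}

int main(void)
{
	unsigned int pending = 70;       /* pretend 70 valid completions */
	unsigned int num_processed = 0;

	while (pending--) {
		if (num_processed >= 32) {
			ring_db(num_processed, 0);   /* credit only */
			num_processed = 0;
		}
		/* ...process one completion entry here... */
		num_processed++;
	}
	if (num_processed > 0)
		ring_db(num_processed, 1);           /* credit + re-arm */
	return 0;
}
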
1425 1454
1426static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 1455static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1427{ 1456{
@@ -1431,7 +1460,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1431 unsigned int num_processed = 0; 1460 unsigned int num_processed = 0;
1432 unsigned int tot_nump = 0; 1461 unsigned int tot_nump = 0;
1433 struct beiscsi_conn *beiscsi_conn; 1462 struct beiscsi_conn *beiscsi_conn;
1434 struct sgl_handle *psgl_handle = NULL; 1463 struct beiscsi_endpoint *beiscsi_ep;
1464 struct iscsi_endpoint *ep;
1435 struct beiscsi_hba *phba; 1465 struct beiscsi_hba *phba;
1436 1466
1437 cq = pbe_eq->cq; 1467 cq = pbe_eq->cq;
@@ -1442,32 +1472,13 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1442 CQE_VALID_MASK) { 1472 CQE_VALID_MASK) {
1443 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1473 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1444 1474
1445 if (ring_mode) { 1475 ep = phba->ep_array[(u32) ((sol->
1446 psgl_handle = phba->sgl_hndl_array[((sol-> 1476 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1447 dw[offsetof(struct amap_sol_cqe_ring, 1477 SOL_CID_MASK) >> 6) -
1448 icd_index) / 32] & SOL_ICD_INDEX_MASK) 1478 phba->fw_config.iscsi_cid_start];
1449 >> 6)];
1450 beiscsi_conn = phba->conn_table[psgl_handle->cid];
1451 if (!beiscsi_conn || !beiscsi_conn->ep) {
1452 shost_printk(KERN_WARNING, phba->shost,
1453 "Connection table empty for cid = %d\n",
1454 psgl_handle->cid);
1455 return 0;
1456 }
1457 1479
1458 } else { 1480 beiscsi_ep = ep->dd_data;
1459 beiscsi_conn = phba->conn_table[(u32) (sol-> 1481 beiscsi_conn = beiscsi_ep->conn;
1460 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1461 SOL_CID_MASK) >> 6];
1462
1463 if (!beiscsi_conn || !beiscsi_conn->ep) {
1464 shost_printk(KERN_WARNING, phba->shost,
1465 "Connection table empty for cid = %d\n",
1466 (u32)(sol->dw[offsetof(struct amap_sol_cqe,
1467 cid) / 32] & SOL_CID_MASK) >> 6);
1468 return 0;
1469 }
1470 }
1471 1482
1472 if (num_processed >= 32) { 1483 if (num_processed >= 32) {
1473 hwi_ring_cq_db(phba, cq->id, 1484 hwi_ring_cq_db(phba, cq->id,
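
The lookup change in this hunk is the heart of the cid_start rework: the CQE carries the firmware's absolute connection id, while ep_array[] is a zero-based table, so every access subtracts fw_config.iscsi_cid_start before indexing. A tiny model of that rebasing (names and sizes illustrative):

#include <stdio.h>

#define CID_START 64
#define MAX_CONNS 8

static const char *ep_array[MAX_CONNS] = { "ep0", "ep1", "ep2" };

static const char *lookup_ep(unsigned int fw_cid)
{
	unsigned int idx = fw_cid - CID_START;  /* rebase to array index */

	if (idx >= MAX_CONNS)   /* also catches fw_cid < CID_START (wraps) */
		return NULL;
	return ep_array[idx];
}

int main(void)
{
	printf("fw cid 66 -> %s\n", lookup_ep(66));  /* prints ep2 */
	return 0;
}
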
@@ -1511,21 +1522,13 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1511 case CMD_CXN_KILLED_ITT_INVALID: 1522 case CMD_CXN_KILLED_ITT_INVALID:
1512 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1523 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1513 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1524 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1514 if (ring_mode) { 1525 SE_DEBUG(DBG_LVL_1,
1515 SE_DEBUG(DBG_LVL_1,
1516 "CQ Error notification for cmd.. "
1517 "code %d cid 0x%x\n",
1518 sol->dw[offsetof(struct amap_sol_cqe, code) /
1519 32] & CQE_CODE_MASK, psgl_handle->cid);
1520 } else {
1521 SE_DEBUG(DBG_LVL_1,
1522 "CQ Error notification for cmd.. " 1526 "CQ Error notification for cmd.. "
1523 "code %d cid 0x%x\n", 1527 "code %d cid 0x%x\n",
1524 sol->dw[offsetof(struct amap_sol_cqe, code) / 1528 sol->dw[offsetof(struct amap_sol_cqe, code) /
1525 32] & CQE_CODE_MASK, 1529 32] & CQE_CODE_MASK,
1526 (sol->dw[offsetof(struct amap_sol_cqe, cid) / 1530 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1527 32] & SOL_CID_MASK)); 1531 32] & SOL_CID_MASK));
1528 }
1529 break; 1532 break;
1530 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1533 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1531 SE_DEBUG(DBG_LVL_1, 1534 SE_DEBUG(DBG_LVL_1,
@@ -1547,37 +1550,23 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1547 case CXN_KILLED_OVER_RUN_RESIDUAL: 1550 case CXN_KILLED_OVER_RUN_RESIDUAL:
1548 case CXN_KILLED_UNDER_RUN_RESIDUAL: 1551 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1549 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 1552 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1550 if (ring_mode) { 1553 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1551 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1552 "0x%x...\n",
1553 sol->dw[offsetof(struct amap_sol_cqe, code) /
1554 32] & CQE_CODE_MASK, psgl_handle->cid);
1555 } else {
1556 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1557 "0x%x...\n", 1554 "0x%x...\n",
1558 sol->dw[offsetof(struct amap_sol_cqe, code) / 1555 sol->dw[offsetof(struct amap_sol_cqe, code) /
1559 32] & CQE_CODE_MASK, 1556 32] & CQE_CODE_MASK,
1560 sol->dw[offsetof(struct amap_sol_cqe, cid) / 1557 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1561 32] & CQE_CID_MASK); 1558 32] & CQE_CID_MASK));
1562 }
1563 iscsi_conn_failure(beiscsi_conn->conn, 1559 iscsi_conn_failure(beiscsi_conn->conn,
1564 ISCSI_ERR_CONN_FAILED); 1560 ISCSI_ERR_CONN_FAILED);
1565 break; 1561 break;
1566 case CXN_KILLED_RST_SENT: 1562 case CXN_KILLED_RST_SENT:
1567 case CXN_KILLED_RST_RCVD: 1563 case CXN_KILLED_RST_RCVD:
1568 if (ring_mode) { 1564 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
1569 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
1570 "received/sent on CID 0x%x...\n",
1571 sol->dw[offsetof(struct amap_sol_cqe, code) /
1572 32] & CQE_CODE_MASK, psgl_handle->cid);
1573 } else {
1574 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
1575 "received/sent on CID 0x%x...\n", 1565 "received/sent on CID 0x%x...\n",
1576 sol->dw[offsetof(struct amap_sol_cqe, code) / 1566 sol->dw[offsetof(struct amap_sol_cqe, code) /
1577 32] & CQE_CODE_MASK, 1567 32] & CQE_CODE_MASK,
1578 sol->dw[offsetof(struct amap_sol_cqe, cid) / 1568 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1579 32] & CQE_CID_MASK); 1569 32] & CQE_CID_MASK));
1580 }
1581 iscsi_conn_failure(beiscsi_conn->conn, 1570 iscsi_conn_failure(beiscsi_conn->conn,
1582 ISCSI_ERR_CONN_FAILED); 1571 ISCSI_ERR_CONN_FAILED);
1583 break; 1572 break;
@@ -1586,8 +1575,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1586 "received on CID 0x%x...\n", 1575 "received on CID 0x%x...\n",
 			sol->dw[offsetof(struct amap_sol_cqe, code) /
 				32] & CQE_CODE_MASK,
-			sol->dw[offsetof(struct amap_sol_cqe, cid) /
-				32] & CQE_CID_MASK);
+			(sol->dw[offsetof(struct amap_sol_cqe, cid) /
+				32] & CQE_CID_MASK));
 			break;
 		}

@@ -1604,7 +1593,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
 	return tot_nump;
 }

-static void beiscsi_process_all_cqs(struct work_struct *work)
+void beiscsi_process_all_cqs(struct work_struct *work)
 {
 	unsigned long flags;
 	struct hwi_controller *phwi_ctrlr;
@@ -1624,6 +1613,7 @@ static void beiscsi_process_all_cqs(struct work_struct *work)
 		spin_lock_irqsave(&phba->isr_lock, flags);
 		phba->todo_mcc_cq = 0;
 		spin_unlock_irqrestore(&phba->isr_lock, flags);
+		beiscsi_process_mcc_isr(phba);
 	}

 	if (phba->todo_cq) {
@@ -1668,7 +1658,8 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
 		      io_task->bhs_pa.u.a32.address_hi);

 	l_sg = sg;
-	for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) {
+	for (index = 0; (index < num_sg) && (index < 2); index++,
+							 sg = sg_next(sg)) {
 		if (index == 0) {
 			sg_len = sg_dma_len(sg);
 			addr = (u64) sg_dma_address(sg);
@@ -1679,11 +1670,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
 				      sg_len);
 			sge_len = sg_len;
-			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
-				      1);
 		} else {
-			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
-				      0);
 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
 				      pwrb, sge_len);
 			sg_len = sg_dma_len(sg);
@@ -1706,13 +1693,27 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
 		      io_task->bhs_pa.u.a32.address_lo);

-	if (num_sg == 2)
-		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
+	if (num_sg == 1) {
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+			      1);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+			      0);
+	} else if (num_sg == 2) {
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+			      0);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+			      1);
+	} else {
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+			      0);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+			      0);
+	}
 	sg = l_sg;
 	psgl++;
 	psgl++;
 	offset = 0;
-	for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) {
+	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
 		sg_len = sg_dma_len(sg);
 		addr = (u64) sg_dma_address(sg);
 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
@@ -2048,10 +2049,9 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 	}
 	idx = 0;
 	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
-	num_cxn_wrb =
-	    ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) *
-	     phba->params.wrbs_per_cxn);
-
+	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+		      ((sizeof(struct iscsi_wrb) *
+			phba->params.wrbs_per_cxn));
 	for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
 		if (num_cxn_wrb) {
@@ -2064,9 +2064,9 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 		} else {
 			idx++;
 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
-			num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
-				       (sizeof(struct iscsi_wrb)) *
-				       phba->params.wrbs_per_cxn);
+			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+				      ((sizeof(struct iscsi_wrb) *
+					phba->params.wrbs_per_cxn));
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
 				pwrb_handle->pwrb = pwrb;
@@ -2383,7 +2383,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 					     &paddr);
 		if (!cq_vaddress)
 			goto create_cq_error;
-		ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
+		ret = be_fill_queue(cq, phba->params.num_cq_entries,
 				    sizeof(struct sol_cqe), cq_vaddress);
 		if (ret) {
 			shost_printk(KERN_ERR, phba->shost,
@@ -2634,7 +2634,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
 				     "wrbq create failed.");
 			return status;
 		}
-		phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
+		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
+								id;
 	}
 	kfree(pwrb_arr);
 	return 0;
@@ -2803,17 +2804,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 		goto error;
 	}

-	if (phba->fw_config.iscsi_features == 0x1)
-		ring_mode = 1;
-	else
-		ring_mode = 0;
-	status = mgmt_get_fw_config(ctrl, phba);
-	if (status != 0) {
-		shost_printk(KERN_ERR, phba->shost,
-			     "Error getting fw config\n");
-		goto error;
-	}
-
 	status = beiscsi_create_cqs(phba, phwi_context);
 	if (status != 0) {
 		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
@@ -2941,17 +2931,6 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 	phba->io_sgl_hndl_avbl = 0;
 	phba->eh_sgl_hndl_avbl = 0;

-	if (ring_mode) {
-		phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
-					       phba->params.icds_per_ctrl,
-					       GFP_KERNEL);
-		if (!phba->sgl_hndl_array) {
-			shost_printk(KERN_ERR, phba->shost,
-				     "Mem Alloc Failed. Failing to load\n");
-			return -ENOMEM;
-		}
-	}
-
 	mem_descr_sglh = phba->init_mem;
 	mem_descr_sglh += HWI_MEM_SGLH;
 	if (1 == mem_descr_sglh->num_elements) {
@@ -2959,8 +2938,6 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 						 phba->params.ios_per_ctrl,
 						 GFP_KERNEL);
 		if (!phba->io_sgl_hndl_base) {
-			if (ring_mode)
-				kfree(phba->sgl_hndl_array);
 			shost_printk(KERN_ERR, phba->shost,
 				     "Mem Alloc Failed. Failing to load\n");
 			return -ENOMEM;
@@ -3032,7 +3009,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
 			pfrag += phba->params.num_sge_per_io;
 			psgl_handle->sgl_index =
-				phba->fw_config.iscsi_cid_start + arr_index++;
+				phba->fw_config.iscsi_icd_start + arr_index++;
 		}
 		idx++;
 	}
@@ -3047,7 +3024,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
 	int i, new_cid;

-	phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
+	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
 				  GFP_KERNEL);
 	if (!phba->cid_array) {
 		shost_printk(KERN_ERR, phba->shost,
@@ -3055,7 +3032,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 			     "hba_setup_cid_tbls\n");
 		return -ENOMEM;
 	}
-	phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
+	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
 				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
 	if (!phba->ep_array) {
 		shost_printk(KERN_ERR, phba->shost,
@@ -3064,7 +3041,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 		kfree(phba->cid_array);
 		return -ENOMEM;
 	}
-	new_cid = phba->fw_config.iscsi_icd_start;
+	new_cid = phba->fw_config.iscsi_cid_start;
 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
 		phba->cid_array[i] = new_cid;
 		new_cid += 2;
@@ -3145,8 +3122,6 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
 	if (hba_setup_cid_tbls(phba)) {
 		shost_printk(KERN_ERR, phba->shost,
 			     "Failed in hba_setup_cid_tbls\n");
-		if (ring_mode)
-			kfree(phba->sgl_hndl_array);
 		kfree(phba->io_sgl_hndl_base);
 		kfree(phba->eh_sgl_hndl_base);
 		goto do_cleanup_ctrlr;
@@ -3166,6 +3141,7 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
 	struct be_queue_info *eq;
 	struct be_eq_entry *eqe = NULL;
 	int i, eq_msix;
+	unsigned int num_processed;

 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3177,13 +3153,17 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
 	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
 		eq = &phwi_context->be_eq[i].q;
 		eqe = queue_tail_node(eq);
-
+		num_processed = 0;
 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 					& EQE_VALID_MASK) {
 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 			queue_tail_inc(eq);
 			eqe = queue_tail_node(eq);
+			num_processed++;
 		}
+
+		if (num_processed)
+			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
 	}
 }

@@ -3195,10 +3175,9 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
 	if (mgmt_status)
 		shost_printk(KERN_WARNING, phba->shost,
 			     "mgmt_epfw_cleanup FAILED \n");
-	hwi_cleanup(phba);
+
 	hwi_purge_eq(phba);
-	if (ring_mode)
-		kfree(phba->sgl_hndl_array);
+	hwi_cleanup(phba);
 	kfree(phba->io_sgl_hndl_base);
 	kfree(phba->eh_sgl_hndl_base);
 	kfree(phba->cid_array);
@@ -3219,7 +3198,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 	 * We can always use 0 here because it is reserved by libiscsi for
 	 * login/startup related tasks.
 	 */
-	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
+	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
+				       phba->fw_config.iscsi_cid_start));
 	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
 	memset(pwrb, 0, sizeof(*pwrb));
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
@@ -3283,8 +3263,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
-	if (!ring_mode)
-		doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
+	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
 		     << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

@@ -3328,8 +3307,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	io_task->bhs_pa.u.a64.address = paddr;
 	io_task->libiscsi_itt = (itt_t)task->itt;
 	io_task->pwrb_handle = alloc_wrb_handle(phba,
-						beiscsi_conn->beiscsi_conn_cid,
-						task->itt);
+						beiscsi_conn->beiscsi_conn_cid -
+						phba->fw_config.iscsi_cid_start
+						);
 	io_task->conn = beiscsi_conn;

 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
@@ -3343,7 +3323,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 			goto free_hndls;
 	} else {
 		io_task->scsi_cmnd = NULL;
-		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
 			if (!beiscsi_conn->login_in_progress) {
 				spin_lock(&phba->mgmt_sgl_lock);
 				io_task->psgl_handle = (struct sgl_handle *)
@@ -3370,21 +3350,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
 				 wrb_index << 16) | (unsigned int)
 				 (io_task->psgl_handle->sgl_index));
-	if (ring_mode) {
-		phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
-				     phba->fw_config.iscsi_cid_start] =
-				io_task->psgl_handle;
-		io_task->psgl_handle->task = task;
-		io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid;
-	} else
-		io_task->pwrb_handle->pio_handle = task;
+	io_task->pwrb_handle->pio_handle = task;

 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
 	return 0;

 free_hndls:
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+	pwrb_context = &phwi_ctrlr->wrb_context[
+			beiscsi_conn->beiscsi_conn_cid -
+			phba->fw_config.iscsi_cid_start];
 	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 	io_task->pwrb_handle = NULL;
 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -3404,7 +3379,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 	struct hwi_controller *phwi_ctrlr;

 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
+			- phba->fw_config.iscsi_cid_start];
 	if (io_task->pwrb_handle) {
 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 		io_task->pwrb_handle = NULL;
@@ -3460,18 +3436,12 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 			      ISCSI_OPCODE_SCSI_DATA_OUT);
 		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
 			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_WR_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_WR_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_WR_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
 	} else {
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_RD_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_RD_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_RD_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
 	}
 	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
@@ -3496,8 +3466,7 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
-	if (!ring_mode)
-		doorbell |= (io_task->pwrb_handle->wrb_index &
+	doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

@@ -3519,49 +3488,46 @@ static int beiscsi_mtask(struct iscsi_task *task)
 	unsigned int doorbell = 0;
 	unsigned int i, cid;
 	struct iscsi_task *aborted_task;
+	unsigned int tag;

 	cid = beiscsi_conn->beiscsi_conn_cid;
 	pwrb = io_task->pwrb_handle->pwrb;
+	memset(pwrb, 0, sizeof(*pwrb));
 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
 		      be32_to_cpu(task->cmdsn));
 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
 		      io_task->pwrb_handle->wrb_index);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
 		      io_task->psgl_handle->sgl_index);
-
 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_LOGIN:
-		if (ring_mode)
-			io_task->psgl_handle->type = TGT_DM_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      TGT_DM_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      TGT_DM_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_RD_CMD;
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_RD_CMD);
+		if (task->hdr->ttt == ISCSI_RESERVED_TAG)
+			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_RD_CMD);
+			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_TEXT:
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_WR_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_WR_CMD);
-		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      TGT_DM_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
 		session = conn->session;
 		i = ((struct iscsi_tm *)task->hdr)->rtt;
 		phwi_ctrlr = phba->phwi_ctrlr;
-		pwrb_context = &phwi_ctrlr->wrb_context[cid];
+		pwrb_context = &phwi_ctrlr->wrb_context[cid -
+					phba->fw_config.iscsi_cid_start];
 		pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
 								>> 16];
 		aborted_task = pwrb_handle->pio_handle;
@@ -3572,22 +3538,25 @@ static int beiscsi_mtask(struct iscsi_task *task)
 		if (!aborted_io_task->scsi_cmnd)
 			return 0;

-		mgmt_invalidate_icds(phba,
+		tag = mgmt_invalidate_icds(phba,
 				     aborted_io_task->psgl_handle->sgl_index,
 				     cid);
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_TMF_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_TMF_CMD);
+		if (!tag) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "mgmt_invalidate_icds could not be"
+				     " submitted\n");
+		} else {
+			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+						 phba->ctrl.mcc_numtag[tag]);
+			free_mcc_tag(&phba->ctrl, tag);
+		}
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_TMF_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_LOGOUT:
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
-		if (ring_mode)
-			io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      HWH_TYPE_LOGOUT);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      HWH_TYPE_LOGOUT);
 		hwi_write_buffer(pwrb, task);
@@ -3600,14 +3569,13 @@ static int beiscsi_mtask(struct iscsi_task *task)
 	}

 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
-		      be32_to_cpu(task->data_count));
+		      task->data_count);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
 		      io_task->pwrb_handle->nxt_wrb_index);
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

 	doorbell |= cid & DB_WRB_POST_CID_MASK;
-	if (!ring_mode)
-		doorbell |= (io_task->pwrb_handle->wrb_index &
+	doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
@@ -3649,7 +3617,6 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
 }

-
 static void beiscsi_remove(struct pci_dev *pcidev)
 {
 	struct beiscsi_hba *phba = NULL;
@@ -3734,7 +3701,20 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 	}
 	SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);

-	pci_set_drvdata(pcidev, phba);
+	switch (pcidev->device) {
+	case BE_DEVICE_ID1:
+	case OC_DEVICE_ID1:
+	case OC_DEVICE_ID2:
+		phba->generation = BE_GEN2;
+		break;
+	case BE_DEVICE_ID2:
+	case OC_DEVICE_ID3:
+		phba->generation = BE_GEN3;
+		break;
+	default:
+		phba->generation = 0;
+	}
+
 	if (enable_msix)
 		num_cpus = find_num_cpus();
 	else
@@ -3754,7 +3734,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 	spin_lock_init(&phba->io_sgl_lock);
 	spin_lock_init(&phba->mgmt_sgl_lock);
 	spin_lock_init(&phba->isr_lock);
+	ret = mgmt_get_fw_config(&phba->ctrl, phba);
+	if (ret != 0) {
+		shost_printk(KERN_ERR, phba->shost,
+			     "Error getting fw config\n");
+		goto free_port;
+	}
+	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
 	beiscsi_get_params(phba);
+	phba->shost->can_queue = phba->params.ios_per_ctrl;
 	ret = beiscsi_init_port(phba);
 	if (ret < 0) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3762,6 +3750,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 		goto free_port;
 	}

+	for (i = 0; i < MAX_MCC_CMD ; i++) {
+		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+		phba->ctrl.mcc_tag[i] = i + 1;
+		phba->ctrl.mcc_numtag[i + 1] = 0;
+		phba->ctrl.mcc_tag_available++;
+	}
+
+	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
+
 	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
 		 phba->shost->host_no);
 	phba->wq = create_workqueue(phba->wq_name);
@@ -3836,7 +3833,7 @@ disable_pci:
 struct iscsi_transport beiscsi_iscsi_transport = {
 	.owner = THIS_MODULE,
 	.name = DRV_NAME,
-	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
+	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
 		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
 	.param_mask = ISCSI_MAX_RECV_DLENGTH |
 		ISCSI_MAX_XMIT_DLENGTH |
@@ -3859,7 +3856,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
 		ISCSI_USERNAME | ISCSI_PASSWORD |
 		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-		ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
+		ISCSI_LU_RESET_TMO |
 		ISCSI_PING_TMO | ISCSI_RECV_TMO |
 		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
@@ -3905,7 +3902,7 @@ static int __init beiscsi_module_init(void)
 		SE_DEBUG(DBG_LVL_1,
 			 "beiscsi_module_init - Unable to register beiscsi"
 			 "transport.\n");
-		ret = -ENOMEM;
+		return -ENOMEM;
 	}
 	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
 		 &beiscsi_iscsi_transport);
@@ -3917,7 +3914,6 @@ static int __init beiscsi_module_init(void)
 			 "beiscsi pci driver.\n");
 		goto unregister_iscsi_transport;
 	}
-	ring_mode = 0;
 	return 0;

 unregister_iscsi_transport:
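
Note on the be_main.c hunks above: beiscsi_mtask() and beiscsi_dev_probe() now drive management commands through a pool of MCC tags instead of blocking in be_mcc_notify_wait(). The submitter takes a tag, stamps it into the WRB, posts the command, and sleeps on mcc_wait[tag] until the completion path sets mcc_numtag[tag]. The helpers alloc_mcc_tag()/free_mcc_tag() are added elsewhere in this series, so the user-space model below is an inference from the initialization loop visible in the probe hunk; the names mirror the new be_ctrl_info fields, but none of this is kernel code.

    /*
     * Minimal user-space model of the MCC tag pool (illustration only;
     * the real alloc_mcc_tag()/free_mcc_tag() behavior is assumed, not
     * quoted). Tags 1..MAX_MCC_CMD circulate through a fixed-size ring.
     */
    #include <stdio.h>

    #define MAX_MCC_CMD 16

    static unsigned int mcc_tag[MAX_MCC_CMD];        /* ring of free tags */
    static unsigned int mcc_numtag[MAX_MCC_CMD + 1]; /* completion flags  */
    static unsigned int alloc_index, free_index, tags_available;

    static void tag_pool_init(void)
    {
        int i;

        /* mirrors the loop added to beiscsi_dev_probe() */
        for (i = 0; i < MAX_MCC_CMD; i++) {
            mcc_tag[i] = i + 1;
            mcc_numtag[i + 1] = 0;
            tags_available++;
        }
        alloc_index = free_index = 0;
    }

    static unsigned int alloc_tag(void)
    {
        unsigned int tag = 0;

        if (tags_available) {       /* returning 0 means "submit failed" */
            tag = mcc_tag[alloc_index];
            mcc_tag[alloc_index] = 0;
            alloc_index = (alloc_index + 1) % MAX_MCC_CMD;
            tags_available--;
        }
        return tag;
    }

    static void free_tag(unsigned int tag)
    {
        mcc_numtag[tag] = 0;        /* re-arm the completion flag */
        mcc_tag[free_index] = tag;
        free_index = (free_index + 1) % MAX_MCC_CMD;
        tags_available++;
    }

    int main(void)
    {
        unsigned int tag;

        tag_pool_init();
        tag = alloc_tag();          /* submit path: stamp tag into the WRB */
        printf("posted command with tag %u\n", tag);

        mcc_numtag[tag] = 1;        /* completion path marks the tag done */
        if (mcc_numtag[tag])        /* waiter's wake-up condition         */
            free_tag(tag);

        printf("tags available again: %u\n", tags_available);
        return 0;
    }

This is why beiscsi_mtask() treats a zero return from mgmt_invalidate_icds() as "never submitted" and otherwise waits on the tag before releasing it.
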
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 25e6b208b771..c53a80ab796c 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -40,31 +40,29 @@
 #define DRV_DESC		BE_NAME " " "Driver"

 #define BE_VENDOR_ID		0x19A2
+/* DEVICE ID's for BE2 */
 #define BE_DEVICE_ID1		0x212
 #define OC_DEVICE_ID1		0x702
 #define OC_DEVICE_ID2		0x703
+
+/* DEVICE ID's for BE3 */
+#define BE_DEVICE_ID2		0x222
 #define OC_DEVICE_ID3		0x712
-#define OC_DEVICE_ID4		0x222

-#define BE2_MAX_SESSIONS	64
+#define BE2_IO_DEPTH		1024
+#define BE2_MAX_SESSIONS	256
 #define BE2_CMDS_PER_CXN	128
-#define BE2_LOGOUTS		BE2_MAX_SESSIONS
 #define BE2_TMFS		16
 #define BE2_NOPOUT_REQ		16
-#define BE2_ASYNCPDUS		BE2_MAX_SESSIONS
-#define BE2_MAX_ICDS		2048
 #define BE2_SGE			32
 #define BE2_DEFPDU_HDR_SZ	64
 #define BE2_DEFPDU_DATA_SZ	8192
-#define BE2_IO_DEPTH \
-	(BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))

 #define MAX_CPUS		31
-#define BEISCSI_SGLIST_ELEMENTS	BE2_SGE
+#define BEISCSI_SGLIST_ELEMENTS	30

-#define BEISCSI_MAX_CMNDS	1024	/* Max IO's per Ctrlr sht->can_queue */
 #define BEISCSI_CMD_PER_LUN	128	/* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS	2048	/* scsi_host->max_sectors */
+#define BEISCSI_MAX_SECTORS	256	/* scsi_host->max_sectors */

 #define BEISCSI_MAX_CMD_LEN	16	/* scsi_host->max_cmd_len */
 #define BEISCSI_NUM_MAX_LUN	256	/* scsi_host->max_lun */
@@ -330,6 +328,7 @@ struct beiscsi_hba {
 	struct workqueue_struct *wq;	/* The actuak work queue */
 	struct work_struct work_cqs;	/* The work being queued */
 	struct be_ctrl_info ctrl;
+	unsigned int generation;
 };

 struct beiscsi_session {
@@ -656,11 +655,12 @@ struct amap_iscsi_wrb {

 } __packed;

-struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
-				    int index);
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
 void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);

+void beiscsi_process_all_cqs(struct work_struct *work);
+
 struct pdu_nop_out {
 	u32 dw[12];
 };
@@ -802,7 +802,6 @@ struct hwi_controller {
 	struct be_ring default_pdu_hdr;
 	struct be_ring default_pdu_data;
 	struct hwi_context_memory *phwi_ctxt;
-	unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
 };

 enum hwh_type_enum {
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 79c2bd525a84..317bcd042ced 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -48,6 +48,14 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
 					pfw_cfg->ulp[0].sq_base;
 		phba->fw_config.iscsi_cid_count =
 					pfw_cfg->ulp[0].sq_count;
+		if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
+			SE_DEBUG(DBG_LVL_8,
+				 "FW reported MAX CXNS as %d \t"
+				 "Max Supported = %d.\n",
+				 phba->fw_config.iscsi_cid_count,
+				 BE2_MAX_SESSIONS);
+			phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
+		}
 	} else {
 		shost_printk(KERN_WARNING, phba->shost,
 			     "Failed in mgmt_get_fw_config \n");
@@ -77,6 +85,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
 	}
 	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
 	req = nonemb_cmd.va;
+	memset(req, 0, sizeof(*req));
 	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
@@ -140,10 +149,17 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
 {
 	struct be_dma_mem nonemb_cmd;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct be_sge *sge = nonembedded_sgl(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_sge *sge;
 	struct invalidate_commands_params_in *req;
-	int status = 0;
+	unsigned int tag = 0;
+
+	spin_lock(&ctrl->mbox_lock);
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}

 	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
 				sizeof(struct invalidate_commands_params_in),
@@ -156,8 +172,10 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
 	}
 	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
 	req = nonemb_cmd.va;
-	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	memset(req, 0, sizeof(*req));
+	wrb = wrb_from_mccq(phba);
+	sge = nonembedded_sgl(wrb);
+	wrb->tag0 |= tag;

 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -172,14 +190,12 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
 	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
 	sge->len = cpu_to_le32(nonemb_cmd.size);

-	status = be_mcc_notify_wait(phba);
-	if (status)
-		SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 	if (nonemb_cmd.va)
 		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
-	return status;
+	return tag;
 }

 unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
@@ -189,13 +205,19 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
 					 unsigned short savecfg_flag)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct iscsi_invalidate_connection_params_in *req =
-						embedded_payload(wrb);
-	int status = 0;
+	struct be_mcc_wrb *wrb;
+	struct iscsi_invalidate_connection_params_in *req;
+	unsigned int tag = 0;

 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+	wrb = wrb_from_mccq(phba);
+	wrb->tag0 |= tag;
+	req = embedded_payload(wrb);

 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
@@ -208,35 +230,37 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
 	else
 		req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
 	req->save_cfg = savecfg_flag;
-	status = be_mcc_notify_wait(phba);
-	if (status)
-		SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
-
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }

 unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
 				unsigned short cid, unsigned int upload_flag)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct tcp_upload_params_in *req = embedded_payload(wrb);
-	int status = 0;
+	struct be_mcc_wrb *wrb;
+	struct tcp_upload_params_in *req;
+	unsigned int tag = 0;

 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;

 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
 			   OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
 	req->id = (unsigned short)cid;
 	req->upload_type = (unsigned char)upload_flag;
-	status = be_mcc_notify_wait(phba);
-	if (status)
-		SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }

 int mgmt_open_connection(struct beiscsi_hba *phba,
@@ -248,13 +272,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
 	struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct tcp_connect_and_offload_in *req;
 	unsigned short def_hdr_id;
 	unsigned short def_data_id;
 	struct phys_addr template_address = { 0, 0 };
 	struct phys_addr *ptemplate_address;
-	int status = 0;
+	unsigned int tag = 0;
 	unsigned int i;
 	unsigned short cid = beiscsi_ep->ep_cid;

@@ -266,7 +290,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	ptemplate_address = &template_address;
 	ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;

 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -311,46 +342,36 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	req->do_offload = 1;
 	req->dataout_template_pa.lo = ptemplate_address->lo;
 	req->dataout_template_pa.hi = ptemplate_address->hi;
-	status = be_mcc_notify_wait(phba);
-	if (!status) {
-		struct iscsi_endpoint *ep;
-		struct tcp_connect_and_offload_out *ptcpcnct_out =
-							embedded_payload(wrb);
-
-		ep = phba->ep_array[ptcpcnct_out->cid];
-		beiscsi_ep = ep->dd_data;
-		beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
-		beiscsi_ep->cid_vld = 1;
-		SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
-	} else
-		SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }

-int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr)
+unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
-	int status;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_mac_addr *req;
+	unsigned int tag = 0;

 	SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
 			   OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
 			   sizeof(*req));

-	status = be_mcc_notify_wait(phba);
-	if (!status) {
-		struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
-
-		memcpy(mac_addr, resp->mac_address, ETH_ALEN);
-	}
-
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }

diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 24eaff923f85..ecead6a5aa56 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -231,6 +231,7 @@ struct beiscsi_endpoint {
 	struct beiscsi_hba *phba;
 	struct beiscsi_sess *sess;
 	struct beiscsi_conn *conn;
+	struct iscsi_endpoint *openiscsi_ep;
 	unsigned short ip_type;
 	char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
 	unsigned long dst_addr;
@@ -249,7 +250,4 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
 					 unsigned short issue_reset,
 					 unsigned short savecfg_flag);

-unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl,
-			  struct beiscsi_hba *phba,
-			  char *buf, unsigned int len);
 #endif
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 33b2294625bb..1c4d1215769d 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1426,8 +1426,8 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
 		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
 		if (bnx2i_conn->ep)
-			len = sprintf(buf, NIPQUAD_FMT "\n",
-				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+			len = sprintf(buf, "%pI4\n",
+				      &bnx2i_conn->ep->cm_sk->dst_ip);
 		break;
 	default:
 		return iscsi_conn_get_param(cls_conn, param, buf);
@@ -1990,6 +1990,7 @@ static struct scsi_host_template bnx2i_host_template = {
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.change_queue_depth	= iscsi_change_queue_depth,
 	.can_queue		= 1024,
 	.max_sectors		= 127,
 	.cmd_per_lun		= 32,
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 9129bcf117cf..cd05e049d5f6 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -219,18 +219,15 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
 			break;
 		}
 		sa = (cdbp[8] << 8) + cdbp[9];
-		name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
-		if (name) {
+		name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, sa);
+		if (name)
 			printk("%s", name);
-			if ((cdb_len > 0) && (len != cdb_len))
-				printk(", in_cdb_len=%d, ext_len=%d",
-				       len, cdb_len);
-		} else {
+		else
 			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
-			if ((cdb_len > 0) && (len != cdb_len))
-				printk(", in_cdb_len=%d, ext_len=%d",
-				       len, cdb_len);
-		}
+
+		if ((cdb_len > 0) && (len != cdb_len))
+			printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
+
 		break;
 	case MAINTENANCE_IN:
 		sa = cdbp[1] & 0x1f;
@@ -349,6 +346,9 @@ void scsi_print_command(struct scsi_cmnd *cmd)
 {
 	int k;

+	if (cmd->cmnd == NULL)
+		return;
+
 	scmd_printk(KERN_INFO, cmd, "CDB: ");
 	print_opcode_name(cmd->cmnd, cmd->cmd_len);

diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 969c83162cc4..412853c65372 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -591,8 +591,7 @@ static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
 	cxgb3i_conn_max_recv_dlength(conn);

 	spin_lock_bh(&conn->session->lock);
-	sprintf(conn->portal_address, NIPQUAD_FMT,
-		NIPQUAD(c3cn->daddr.sin_addr.s_addr));
+	sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr);
 	conn->portal_port = ntohs(c3cn->daddr.sin_port);
 	spin_unlock_bh(&conn->session->lock);

@@ -709,6 +708,12 @@ static int cxgb3i_host_set_param(struct Scsi_Host *shost,
 {
 	struct cxgb3i_hba *hba = iscsi_host_priv(shost);

+	if (!hba->ndev) {
+		shost_printk(KERN_ERR, shost, "Could not set host param. "
+			     "Netdev for host not set.\n");
+		return -ENODEV;
+	}
+
 	cxgb3i_api_debug("param %d, buf %s.\n", param, buf);

 	switch (param) {
@@ -739,6 +744,12 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
 	struct cxgb3i_hba *hba = iscsi_host_priv(shost);
 	int len = 0;

+	if (!hba->ndev) {
+		shost_printk(KERN_ERR, shost, "Could not set host param. "
+			     "Netdev for host not set.\n");
+		return -ENODEV;
+	}
+
 	cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);

 	switch (param) {
@@ -753,7 +764,7 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
 		__be32 addr;

 		addr = cxgb3i_get_private_ipv4addr(hba->ndev);
-		len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
+		len = sprintf(buf, "%pI4", &addr);
 		break;
 	}
 	default:
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 15a00e8b7122..3e08c430ff29 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1675,10 +1675,11 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
 	} else
 		c3cn->saddr.sin_addr.s_addr = sipv4;

-	c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
-			c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
+	c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
+			c3cn,
+			&c3cn->saddr.sin_addr.s_addr,
 			ntohs(c3cn->saddr.sin_port),
-			NIPQUAD(c3cn->daddr.sin_addr.s_addr),
+			&c3cn->daddr.sin_addr.s_addr,
 			ntohs(c3cn->daddr.sin_port));

 	c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
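
The NIPQUAD_FMT/NIPQUAD() conversions in the bnx2i and cxgb3i hunks above switch to the kernel's %pI4 printk extension, which formats a dotted quad directly from a pointer to the network-byte-order address instead of expanding four bytes by hand. The closest user-space analogue is inet_ntop(); the hedged sketch below is illustration only, and the 192.168.0.1 constant is an arbitrary example value.

    /* User-space analogue of the kernel's %pI4 specifier: both print an
     * IPv4 address straight from its in-memory network-byte-order form,
     * so callers pass a pointer rather than four NIPQUAD() byte fields. */
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        struct in_addr addr = { .s_addr = htonl(0xC0A80001) }; /* 192.168.0.1 */
        char buf[INET_ADDRSTRLEN];

        /* inet_ntop() plays the role %pI4 plays inside printk() */
        printf("%s\n", inet_ntop(AF_INET, &addr, buf, sizeof(buf)));
        return 0;
    }
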
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 1fe3b0f1f3c9..9c38539557fc 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -461,10 +461,8 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
 		skb = skb_peek(&c3cn->receive_queue);
 	}
 	read_unlock(&c3cn->callback_lock);
-	if (c3cn) {
-		c3cn->copied_seq += read;
-		cxgb3i_c3cn_rx_credits(c3cn, read);
-	}
+	c3cn->copied_seq += read;
+	cxgb3i_c3cn_rx_credits(c3cn, read);
 	conn->rxdata_octets += read;

 	if (err) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4f0d0138f48b..bc9e94f5915e 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -717,6 +717,8 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
 	{"IBM", "2145" },
 	{"Pillar", "Axiom" },
 	{"Intel", "Multi-Flex"},
+	{"NETAPP", "LUN"},
+	{"AIX", "NVDISK"},
 	{NULL, NULL}
 };

diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index c7076ce25e21..3c5abf7cd762 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1509,7 +1509,7 @@ static int option_setup(char *str)
 	char *cur = str;
 	int i = 1;

-	while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) {
+	while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
 		ints[i++] = simple_strtoul(cur, NULL, 0);

 		if ((cur = strchr(cur, ',')) != NULL)
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index a680e18b5f3b..e2bc779f86c1 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1449,9 +1449,6 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
 	if (offset > 15)
 		goto do_reject;

-	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
-		offset = 0;
-
 	if (offset) {
 		int one_clock;

@@ -2405,12 +2402,6 @@ static int esp_slave_configure(struct scsi_device *dev)
 	struct esp_target_data *tp = &esp->target[dev->id];
 	int goal_tags, queue_depth;

-	if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
-		/* Bypass async domain validation */
-		dev->ppr = 0;
-		dev->sdtr = 0;
-	}
-
 	goal_tags = 0;

 	if (dev->tagged_supported) {
@@ -2660,7 +2651,10 @@ static void esp_set_offset(struct scsi_target *target, int offset)
 	struct esp *esp = shost_priv(host);
 	struct esp_target_data *tp = &esp->target[target->id];

-	tp->nego_goal_offset = offset;
+	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
+		tp->nego_goal_offset = 0;
+	else
+		tp->nego_goal_offset = offset;
 	tp->flags |= ESP_TGT_CHECK_NEGO;
 }

diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index bb208a6091e7..3966c71d0095 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -36,7 +36,7 @@

 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.0.0.1121"
+#define DRV_VERSION		"1.4.0.98"
 #define PFX			DRV_NAME ": "
 #define DFX			DRV_NAME "%d: "

diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fe1b1031f7ab..507e26c1c29f 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -620,6 +620,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
 		shost_printk(KERN_INFO, fnic->lport->host,
 			     "firmware supports FIP\n");
+		/* enable directed and multicast */
+		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
 		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
 		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
 	} else {
@@ -698,6 +700,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 		goto err_out_remove_scsi_host;
 	}

+	fc_lport_init_stats(lp);
+
 	fc_lport_config(lp);

 	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index d62b9061bf12..7c9ccbd4134b 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -94,7 +94,7 @@ enum vnic_devcmd_cmd {
 	CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),

 	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
-	CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+	CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),

 	/* hang detection notification */
 	CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 9e8fce0f0c1b..ba3c94c9c25f 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -140,40 +140,40 @@
 #include "gdth.h"
 
 static void gdth_delay(int milliseconds);
-static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs);
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
 static irqreturn_t gdth_interrupt(int irq, void *dev_id);
 static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
                                     int gdth_from_wait, int* pIndex);
-static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
                            Scsi_Cmnd *scp);
 static int gdth_async_event(gdth_ha_str *ha);
 static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
 
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority);
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
 static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b);
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
 static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
-                                      ushort idx, gdth_evt_data *evt);
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
+                                      u16 idx, gdth_evt_data *evt);
 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
-static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
+static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
                                gdth_evt_str *estr);
 static void gdth_clear_events(void);
 
 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
-                                    char *buffer, ushort count);
+                                    char *buffer, u16 count);
 static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
 
 static void gdth_enable_int(gdth_ha_str *ha);
 static int gdth_test_busy(gdth_ha_str *ha);
 static int gdth_get_cmd_index(gdth_ha_str *ha);
 static void gdth_release_event(gdth_ha_str *ha);
-static int gdth_wait(gdth_ha_str *ha, int index,ulong32 time);
-static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
-                             ulong32 p1, ulong64 p2,ulong64 p3);
+static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+                             u32 p1, u64 p2,u64 p3);
 static int gdth_search_drives(gdth_ha_str *ha);
-static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive);
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
 
 static const char *gdth_ctr_name(gdth_ha_str *ha);
 
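Note: every gdth hunk from here to the end of the file applies one mechanical substitution: the driver's private integer typedefs are replaced with the fixed-width kernel types from <linux/types.h>. The mapping, read directly off the hunks (a summary note, not new code):

    /* gdth type conversion applied throughout this diff:
     *
     *   unchar  -> u8             (8-bit unsigned)
     *   ushort  -> u16            (16-bit unsigned)
     *   ulong32 -> u32            (32-bit unsigned)
     *   ulong64 -> u64            (64-bit unsigned)
     *   ulong   -> unsigned long  (native word, e.g. irqsave flags)
     */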
@@ -189,7 +189,7 @@ static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
 static void gdth_scsi_done(struct scsi_cmnd *scp);
 
 #ifdef DEBUG_GDTH
-static unchar DebugState = DEBUG_GDTH;
+static u8 DebugState = DEBUG_GDTH;
 
 #ifdef __SERIAL__
 #define MAX_SERBUF 160
@@ -270,30 +270,30 @@ static int ser_printk(const char *fmt, ...)
 #endif
 
 #ifdef GDTH_STATISTICS
-static ulong32 max_rq=0, max_index=0, max_sg=0;
+static u32 max_rq=0, max_index=0, max_sg=0;
 #ifdef INT_COAL
-static ulong32 max_int_coal=0;
+static u32 max_int_coal=0;
 #endif
-static ulong32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
+static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
 static struct timer_list gdth_timer;
 #endif
 
-#define PTR2USHORT(a) (ushort)(ulong)(a)
+#define PTR2USHORT(a) (u16)(unsigned long)(a)
 #define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
 #define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
 
 #define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
 
 #ifdef CONFIG_ISA
-static unchar gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
+static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
 #endif
 #if defined(CONFIG_EISA) || defined(CONFIG_ISA)
-static unchar gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
+static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
 #endif
-static unchar gdth_polling; /* polling if TRUE */
+static u8 gdth_polling; /* polling if TRUE */
 static int gdth_ctr_count = 0; /* controller count */
 static LIST_HEAD(gdth_instances); /* controller list */
-static unchar gdth_write_through = FALSE; /* write through */
+static u8 gdth_write_through = FALSE; /* write through */
 static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
 static int elastidx;
 static int eoldidx;
@@ -303,7 +303,7 @@ static int major;
 #define DOU 2 /* OUT data direction */
 #define DNO DIN /* no data transfer */
 #define DUN DIN /* unknown data direction */
-static unchar gdth_direction_tab[0x100] = {
+static u8 gdth_direction_tab[0x100] = {
     DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
     DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
     DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
@@ -390,7 +390,7 @@ static gdth_ha_str *gdth_find_ha(int hanum)
 static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
 {
     struct gdth_cmndinfo *priv = NULL;
-    ulong flags;
+    unsigned long flags;
     int i;
 
     spin_lock_irqsave(&ha->smp_lock, flags);
@@ -493,7 +493,7 @@ int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
     return rval;
 }
 
-static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs)
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
 {
     *cyls = size /HEADS/SECS;
     if (*cyls <= MAXCYLS) {
@@ -514,9 +514,9 @@ static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs
 
 /* controller search and initialization functions */
 #ifdef CONFIG_EISA
-static int __init gdth_search_eisa(ushort eisa_adr)
+static int __init gdth_search_eisa(u16 eisa_adr)
 {
-    ulong32 id;
+    u32 id;
 
     TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
     id = inl(eisa_adr+ID0REG);
@@ -533,13 +533,13 @@ static int __init gdth_search_eisa(ushort eisa_adr)
 #endif /* CONFIG_EISA */
 
 #ifdef CONFIG_ISA
-static int __init gdth_search_isa(ulong32 bios_adr)
+static int __init gdth_search_isa(u32 bios_adr)
 {
     void __iomem *addr;
-    ulong32 id;
+    u32 id;
 
     TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
-    if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(ulong32))) != NULL) {
+    if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
         id = readl(addr);
         iounmap(addr);
         if (id == GDT2_ID) /* GDT2000 */
@@ -551,7 +551,7 @@ static int __init gdth_search_isa(ulong32 bios_adr)
 
 #ifdef CONFIG_PCI
 
-static bool gdth_search_vortex(ushort device)
+static bool gdth_search_vortex(u16 device)
 {
     if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
         return true;
@@ -603,9 +603,9 @@ static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
 static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
                                        const struct pci_device_id *ent)
 {
-    ushort vendor = pdev->vendor;
-    ushort device = pdev->device;
-    ulong base0, base1, base2;
+    u16 vendor = pdev->vendor;
+    u16 device = pdev->device;
+    unsigned long base0, base1, base2;
     int rc;
     gdth_pci_str gdth_pcistr;
     gdth_ha_str *ha = NULL;
@@ -658,10 +658,10 @@ static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_EISA
-static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
+static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
 {
-    ulong32 retries,id;
-    unchar prot_ver,eisacf,i,irq_found;
+    u32 retries,id;
+    u8 prot_ver,eisacf,i,irq_found;
 
     TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
 
@@ -688,7 +688,7 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
         return 0;
     }
     ha->bmic = eisa_adr;
-    ha->brd_phys = (ulong32)eisa_adr >> 12;
+    ha->brd_phys = (u32)eisa_adr >> 12;
 
     outl(0,eisa_adr+MAILBOXREG);
     outl(0,eisa_adr+MAILBOXREG+4);
@@ -752,12 +752,12 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
 #endif /* CONFIG_EISA */
 
 #ifdef CONFIG_ISA
-static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
+static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
 {
     register gdt2_dpram_str __iomem *dp2_ptr;
     int i;
-    unchar irq_drq,prot_ver;
-    ulong32 retries;
+    u8 irq_drq,prot_ver;
+    u32 retries;
 
     TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));
 
@@ -812,7 +812,7 @@ static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
         }
         gdth_delay(1);
     }
-    prot_ver = (unchar)readl(&dp2_ptr->u.ic.S_Info[0]);
+    prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
     writeb(0, &dp2_ptr->u.ic.Status);
     writeb(0xff, &dp2_ptr->io.irqdel);
     if (prot_ver != PROTOCOL_VERSION) {
@@ -859,9 +859,9 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
     register gdt6_dpram_str __iomem *dp6_ptr;
     register gdt6c_dpram_str __iomem *dp6c_ptr;
     register gdt6m_dpram_str __iomem *dp6m_ptr;
-    ulong32 retries;
-    unchar prot_ver;
-    ushort command;
+    u32 retries;
+    u8 prot_ver;
+    u16 command;
     int i, found = FALSE;
 
     TRACE(("gdth_init_pci()\n"));
@@ -871,7 +871,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
     else
         ha->oem_id = OEM_ID_ICP;
     ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
-    ha->stype = (ulong32)pdev->device;
+    ha->stype = (u32)pdev->device;
     ha->irq = pdev->irq;
     ha->pdev = pdev;
 
@@ -891,7 +891,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
         found = FALSE;
         for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
             iounmap(ha->brd);
-            ha->brd = ioremap(i, sizeof(ushort));
+            ha->brd = ioremap(i, sizeof(u16));
             if (ha->brd == NULL) {
                 printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                 return 0;
@@ -947,7 +947,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
         }
         gdth_delay(1);
     }
-    prot_ver = (unchar)readl(&dp6_ptr->u.ic.S_Info[0]);
+    prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
     writeb(0, &dp6_ptr->u.ic.S_Status);
     writeb(0xff, &dp6_ptr->io.irqdel);
     if (prot_ver != PROTOCOL_VERSION) {
@@ -1000,7 +1000,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
         found = FALSE;
         for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
             iounmap(ha->brd);
-            ha->brd = ioremap(i, sizeof(ushort));
+            ha->brd = ioremap(i, sizeof(u16));
             if (ha->brd == NULL) {
                 printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                 return 0;
@@ -1059,7 +1059,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
         }
         gdth_delay(1);
     }
-    prot_ver = (unchar)readl(&dp6c_ptr->u.ic.S_Info[0]);
+    prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
     writeb(0, &dp6c_ptr->u.ic.Status);
     if (prot_ver != PROTOCOL_VERSION) {
         printk("GDT-PCI: Illegal protocol version\n");
@@ -1128,7 +1128,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
         found = FALSE;
         for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
             iounmap(ha->brd);
-            ha->brd = ioremap(i, sizeof(ushort));
+            ha->brd = ioremap(i, sizeof(u16));
             if (ha->brd == NULL) {
                 printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                 return 0;
@@ -1180,7 +1180,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
         }
         gdth_delay(1);
     }
-    prot_ver = (unchar)readl(&dp6m_ptr->u.ic.S_Info[0]);
+    prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
     writeb(0, &dp6m_ptr->u.ic.S_Status);
     if (prot_ver != PROTOCOL_VERSION) {
         printk("GDT-PCI: Illegal protocol version\n");
@@ -1223,7 +1223,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
         }
         gdth_delay(1);
     }
-    prot_ver = (unchar)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
+    prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
     writeb(0, &dp6m_ptr->u.ic.S_Status);
     if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */
         ha->dma64_support = 0;
@@ -1239,7 +1239,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
 
 static void __devinit gdth_enable_int(gdth_ha_str *ha)
 {
-    ulong flags;
+    unsigned long flags;
     gdt2_dpram_str __iomem *dp2_ptr;
     gdt6_dpram_str __iomem *dp6_ptr;
     gdt6m_dpram_str __iomem *dp6m_ptr;
@@ -1274,14 +1274,14 @@ static void __devinit gdth_enable_int(gdth_ha_str *ha)
 }
 
 /* return IStatus if interrupt was from this card else 0 */
-static unchar gdth_get_status(gdth_ha_str *ha)
+static u8 gdth_get_status(gdth_ha_str *ha)
 {
-    unchar IStatus = 0;
+    u8 IStatus = 0;
 
     TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
 
     if (ha->type == GDT_EISA)
-        IStatus = inb((ushort)ha->bmic + EDOORREG);
+        IStatus = inb((u16)ha->bmic + EDOORREG);
     else if (ha->type == GDT_ISA)
         IStatus =
             readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
@@ -1329,7 +1329,7 @@ static int gdth_get_cmd_index(gdth_ha_str *ha)
         if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
             ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
             ha->cmd_tab[i].service = ha->pccb->Service;
-            ha->pccb->CommandIndex = (ulong32)i+2;
+            ha->pccb->CommandIndex = (u32)i+2;
             return (i+2);
         }
     }
@@ -1362,7 +1362,7 @@ static void gdth_copy_command(gdth_ha_str *ha)
     register gdt6c_dpram_str __iomem *dp6c_ptr;
     gdt6_dpram_str __iomem *dp6_ptr;
     gdt2_dpram_str __iomem *dp2_ptr;
-    ushort cp_count,dp_offset,cmd_no;
+    u16 cp_count,dp_offset,cmd_no;
 
     TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
 
@@ -1386,28 +1386,28 @@ static void gdth_copy_command(gdth_ha_str *ha)
         dp2_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     } else if (ha->type == GDT_PCI) {
         dp6_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     } else if (ha->type == GDT_PCINEW) {
         dp6c_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     } else if (ha->type == GDT_PCIMPR) {
         dp6m_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     }
@@ -1420,14 +1420,14 @@ static void gdth_release_event(gdth_ha_str *ha)
 
 #ifdef GDTH_STATISTICS
     {
-        ulong32 i,j;
+        u32 i,j;
         for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
             if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
                 ++i;
         }
         if (max_index < i) {
             max_index = i;
-            TRACE3(("GDT: max_index = %d\n",(ushort)i));
+            TRACE3(("GDT: max_index = %d\n",(u16)i));
         }
     }
 #endif
@@ -1450,7 +1450,7 @@ static void gdth_release_event(gdth_ha_str *ha)
     }
 }
 
-static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
+static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
 {
     int answer_found = FALSE;
     int wait_index = 0;
@@ -1476,8 +1476,8 @@ static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
 }
 
 
-static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
-                             ulong32 p1, ulong64 p2, ulong64 p3)
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+                             u32 p1, u64 p2, u64 p3)
 {
     register gdth_cmd_str *cmd_ptr;
     int retries,index;
@@ -1501,35 +1501,35 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
     if (service == CACHESERVICE) {
         if (opcode == GDT_IOCTL) {
             cmd_ptr->u.ioctl.subfunc = p1;
-            cmd_ptr->u.ioctl.channel = (ulong32)p2;
-            cmd_ptr->u.ioctl.param_size = (ushort)p3;
+            cmd_ptr->u.ioctl.channel = (u32)p2;
+            cmd_ptr->u.ioctl.param_size = (u16)p3;
             cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
         } else {
             if (ha->cache_feat & GDT_64BIT) {
-                cmd_ptr->u.cache64.DeviceNo = (ushort)p1;
+                cmd_ptr->u.cache64.DeviceNo = (u16)p1;
                 cmd_ptr->u.cache64.BlockNo = p2;
             } else {
-                cmd_ptr->u.cache.DeviceNo = (ushort)p1;
-                cmd_ptr->u.cache.BlockNo = (ulong32)p2;
+                cmd_ptr->u.cache.DeviceNo = (u16)p1;
+                cmd_ptr->u.cache.BlockNo = (u32)p2;
             }
         }
     } else if (service == SCSIRAWSERVICE) {
         if (ha->raw_feat & GDT_64BIT) {
             cmd_ptr->u.raw64.direction = p1;
-            cmd_ptr->u.raw64.bus = (unchar)p2;
-            cmd_ptr->u.raw64.target = (unchar)p3;
-            cmd_ptr->u.raw64.lun = (unchar)(p3 >> 8);
+            cmd_ptr->u.raw64.bus = (u8)p2;
+            cmd_ptr->u.raw64.target = (u8)p3;
+            cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
         } else {
             cmd_ptr->u.raw.direction = p1;
-            cmd_ptr->u.raw.bus = (unchar)p2;
-            cmd_ptr->u.raw.target = (unchar)p3;
-            cmd_ptr->u.raw.lun = (unchar)(p3 >> 8);
+            cmd_ptr->u.raw.bus = (u8)p2;
+            cmd_ptr->u.raw.target = (u8)p3;
+            cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
         }
     } else if (service == SCREENSERVICE) {
         if (opcode == GDT_REALTIME) {
-            *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1;
-            *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2;
-            *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3;
+            *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
+            *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
+            *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
         }
     }
     ha->cmd_len = sizeof(gdth_cmd_str);
@@ -1555,9 +1555,9 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
 
 static int __devinit gdth_search_drives(gdth_ha_str *ha)
 {
-    ushort cdev_cnt, i;
+    u16 cdev_cnt, i;
     int ok;
-    ulong32 bus_no, drv_cnt, drv_no, j;
+    u32 bus_no, drv_cnt, drv_no, j;
     gdth_getch_str *chn;
     gdth_drlist_str *drl;
     gdth_iochan_str *ioc;
@@ -1570,8 +1570,8 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
 #endif
 
 #ifdef GDTH_RTC
-    unchar rtc[12];
-    ulong flags;
+    u8 rtc[12];
+    unsigned long flags;
 #endif
 
     TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
@@ -1584,7 +1584,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         if (ok)
             ha->screen_feat = GDT_64BIT;
     }
-    if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
         ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
     if (!ok) {
         printk("GDT-HA %d: Initialization error screen service (code %d)\n",
@@ -1609,11 +1609,11 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         rtc[j] = CMOS_READ(j);
     } while (rtc[0] != CMOS_READ(0));
     spin_unlock_irqrestore(&rtc_lock, flags);
-    TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
-            *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
+    TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0],
+            *(u32 *)&rtc[4], *(u32 *)&rtc[8]));
     /* 3. send to controller firmware */
-    gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(ulong32 *)&rtc[0],
-                      *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]);
+    gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0],
+                      *(u32 *)&rtc[4], *(u32 *)&rtc[8]);
 #endif
 
     /* unfreeze all IOs */
@@ -1627,7 +1627,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         if (ok)
             ha->cache_feat = GDT_64BIT;
     }
-    if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
         ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
     if (!ok) {
         printk("GDT-HA %d: Initialization error cache service (code %d)\n",
@@ -1635,7 +1635,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         return 0;
     }
     TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
-    cdev_cnt = (ushort)ha->info;
+    cdev_cnt = (u16)ha->info;
     ha->fw_vers = ha->service;
 
 #ifdef INT_COAL
@@ -1644,7 +1644,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         pmod = (gdth_perf_modes *)ha->pscratch;
         pmod->version = 1;
         pmod->st_mode = 1; /* enable one status buffer */
-        *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
+        *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
         pmod->st_buff_indx1 = COALINDEX;
         pmod->st_buff_addr2 = 0;
         pmod->st_buff_u_addr2 = 0;
@@ -1705,7 +1705,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
             else
                 ha->bus_id[bus_no] = 0xff;
         }
-        ha->bus_cnt = (unchar)bus_no;
+        ha->bus_cnt = (u8)bus_no;
     }
     TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
 
@@ -1789,12 +1789,12 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
 
     /* logical drives */
     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
-                          INVALID_CHANNEL,sizeof(ulong32))) {
-        drv_cnt = *(ulong32 *)ha->pscratch;
+                          INVALID_CHANNEL,sizeof(u32))) {
+        drv_cnt = *(u32 *)ha->pscratch;
         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
-                              INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) {
+                              INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
             for (j = 0; j < drv_cnt; ++j) {
-                drv_no = ((ulong32 *)ha->pscratch)[j];
+                drv_no = ((u32 *)ha->pscratch)[j];
                 if (drv_no < MAX_LDRIVES) {
                     ha->hdr[drv_no].is_logdrv = TRUE;
                     TRACE2(("Drive %d is log. drive\n",drv_no));
@@ -1838,7 +1838,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         if (ok)
             ha->raw_feat = GDT_64BIT;
     }
-    if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
         ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
     if (!ok) {
         printk("GDT-HA %d: Initialization error raw service (code %d)\n",
@@ -1854,7 +1854,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
             TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
                     ha->info));
-            ha->raw_feat |= (ushort)ha->info;
+            ha->raw_feat |= (u16)ha->info;
         }
     }
 
@@ -1865,7 +1865,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
             TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
                     ha->info));
-            ha->cache_feat |= (ushort)ha->info;
+            ha->cache_feat |= (u16)ha->info;
         }
     }
 
@@ -1923,9 +1923,9 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
     return 1;
 }
 
-static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
 {
-    ulong32 drv_cyls;
+    u32 drv_cyls;
     int drv_hds, drv_secs;
 
     TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
@@ -1944,17 +1944,17 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
     } else {
         drv_hds = ha->info2 & 0xff;
         drv_secs = (ha->info2 >> 8) & 0xff;
-        drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs;
+        drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
     }
-    ha->hdr[hdrive].heads = (unchar)drv_hds;
-    ha->hdr[hdrive].secs = (unchar)drv_secs;
+    ha->hdr[hdrive].heads = (u8)drv_hds;
+    ha->hdr[hdrive].secs = (u8)drv_secs;
     /* round size */
     ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
 
     if (ha->cache_feat & GDT_64BIT) {
         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
             && ha->info2 != 0) {
-            ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info;
+            ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
         }
     }
     TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
@@ -1964,7 +1964,7 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
         TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
                 hdrive,ha->info));
-        ha->hdr[hdrive].devtype = (ushort)ha->info;
+        ha->hdr[hdrive].devtype = (u16)ha->info;
     }
 
     /* cluster info */
@@ -1972,14 +1972,14 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
         TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
                 hdrive,ha->info));
         if (!shared_access)
-            ha->hdr[hdrive].cluster_type = (unchar)ha->info;
+            ha->hdr[hdrive].cluster_type = (u8)ha->info;
     }
 
     /* R/W attributes */
     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
         TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
                 hdrive,ha->info));
-        ha->hdr[hdrive].rw_attribs = (unchar)ha->info;
+        ha->hdr[hdrive].rw_attribs = (u8)ha->info;
     }
 
     return 1;
@@ -1988,12 +1988,12 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
 
 /* command queueing/sending functions */
 
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
 {
     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
     register Scsi_Cmnd *pscp;
     register Scsi_Cmnd *nscp;
-    ulong flags;
+    unsigned long flags;
 
     TRACE(("gdth_putq() priority %d\n",priority));
     spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2023,7 +2023,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
     ++flags;
     if (max_rq < flags) {
         max_rq = flags;
-        TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq));
+        TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
     }
 #endif
 }
@@ -2032,9 +2032,9 @@ static void gdth_next(gdth_ha_str *ha)
 {
     register Scsi_Cmnd *pscp;
     register Scsi_Cmnd *nscp;
-    unchar b, t, l, firsttime;
-    unchar this_cmd, next_cmd;
-    ulong flags = 0;
+    u8 b, t, l, firsttime;
+    u8 this_cmd, next_cmd;
+    unsigned long flags = 0;
     int cmd_index;
 
     TRACE(("gdth_next() hanum %d\n", ha->hanum));
@@ -2282,20 +2282,20 @@ static void gdth_next(gdth_ha_str *ha)
  * buffers, kmap_atomic() as needed.
  */
 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
-                                    char *buffer, ushort count)
+                                    char *buffer, u16 count)
 {
-    ushort cpcount,i, max_sg = scsi_sg_count(scp);
-    ushort cpsum,cpnow;
+    u16 cpcount,i, max_sg = scsi_sg_count(scp);
+    u16 cpsum,cpnow;
     struct scatterlist *sl;
     char *address;
 
-    cpcount = min_t(ushort, count, scsi_bufflen(scp));
+    cpcount = min_t(u16, count, scsi_bufflen(scp));
 
     if (cpcount) {
         cpsum=0;
         scsi_for_each_sg(scp, sl, max_sg, i) {
             unsigned long flags;
-            cpnow = (ushort)sl->length;
+            cpnow = (u16)sl->length;
             TRACE(("copy_internal() now %d sum %d count %d %d\n",
                    cpnow, cpsum, cpcount, scsi_bufflen(scp)));
             if (cpsum+cpnow > cpcount)
@@ -2325,7 +2325,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
 
 static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
 {
-    unchar t;
+    u8 t;
     gdth_inq_data inq;
     gdth_rdcap_data rdc;
     gdth_sense_data sd;
@@ -2389,7 +2389,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
 
     case READ_CAPACITY:
         TRACE2(("Read capacity hdrive %d\n",t));
-        if (ha->hdr[t].size > (ulong64)0xffffffff)
+        if (ha->hdr[t].size > (u64)0xffffffff)
             rdc.last_block_no = 0xffffffff;
         else
             rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
@@ -2425,12 +2425,12 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
     return 0;
 }
 
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
 {
     register gdth_cmd_str *cmdp;
     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-    ulong32 cnt, blockcnt;
-    ulong64 no, blockno;
+    u32 cnt, blockcnt;
+    u64 no, blockno;
     int i, cmd_index, read_write, sgcnt, mode64;
 
     cmdp = ha->pccb;
@@ -2498,17 +2498,17 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
 
     if (read_write) {
         if (scp->cmd_len == 16) {
-            memcpy(&no, &scp->cmnd[2], sizeof(ulong64));
+            memcpy(&no, &scp->cmnd[2], sizeof(u64));
             blockno = be64_to_cpu(no);
-            memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32));
+            memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
             blockcnt = be32_to_cpu(cnt);
         } else if (scp->cmd_len == 10) {
-            memcpy(&no, &scp->cmnd[2], sizeof(ulong32));
+            memcpy(&no, &scp->cmnd[2], sizeof(u32));
             blockno = be32_to_cpu(no);
-            memcpy(&cnt, &scp->cmnd[7], sizeof(ushort));
+            memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
             blockcnt = be16_to_cpu(cnt);
         } else {
-            memcpy(&no, &scp->cmnd[0], sizeof(ulong32));
+            memcpy(&no, &scp->cmnd[0], sizeof(u32));
             blockno = be32_to_cpu(no) & 0x001fffffUL;
             blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
         }
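Note: the hunk above is the driver's READ/WRITE CDB decode restated with fixed-width types: a 16-byte CDB carries a big-endian 64-bit LBA at byte 2 and a 32-bit block count at byte 10; a 10-byte CDB a 32-bit LBA at byte 2 and a 16-bit count at byte 7; a 6-byte CDB packs a 21-bit LBA into its first four bytes with a one-byte count where 0 means 256 blocks. A standalone sketch of the same decode (decode_rw_cdb is a hypothetical helper, not part of the driver):

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    /* Hypothetical stand-alone restatement of the decode above;
     * cmnd points at the raw SCSI CDB. */
    static void decode_rw_cdb(const u8 *cmnd, int cmd_len,
                              u64 *blockno, u32 *blockcnt)
    {
        u64 no64;
        u32 no32, cnt32;
        u16 cnt16;

        if (cmd_len == 16) {                    /* READ/WRITE(16) */
            memcpy(&no64, &cmnd[2], sizeof(u64));
            *blockno = be64_to_cpu(no64);
            memcpy(&cnt32, &cmnd[10], sizeof(u32));
            *blockcnt = be32_to_cpu(cnt32);
        } else if (cmd_len == 10) {             /* READ/WRITE(10) */
            memcpy(&no32, &cmnd[2], sizeof(u32));
            *blockno = be32_to_cpu(no32);
            memcpy(&cnt16, &cmnd[7], sizeof(u16));
            *blockcnt = be16_to_cpu(cnt16);
        } else {                                /* READ/WRITE(6) */
            memcpy(&no32, &cmnd[0], sizeof(u32));
            *blockno = be32_to_cpu(no32) & 0x001fffffUL;
            *blockcnt = cmnd[4] == 0 ? 0x100 : cmnd[4];
        }
    }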
@@ -2516,7 +2516,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
         cmdp->u.cache64.BlockNo = blockno;
         cmdp->u.cache64.BlockCnt = blockcnt;
     } else {
-        cmdp->u.cache.BlockNo = (ulong32)blockno;
+        cmdp->u.cache.BlockNo = (u32)blockno;
         cmdp->u.cache.BlockCnt = blockcnt;
     }
 
@@ -2528,12 +2528,12 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
     if (mode64) {
         struct scatterlist *sl;
 
-        cmdp->u.cache64.DestAddr= (ulong64)-1;
+        cmdp->u.cache64.DestAddr= (u64)-1;
         cmdp->u.cache64.sg_canz = sgcnt;
         scsi_for_each_sg(scp, sl, sgcnt, i) {
             cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
-            if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
+            if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff)
                 ha->dma64_cnt++;
             else
                 ha->dma32_cnt++;
@@ -2555,8 +2555,8 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
     }
 
 #ifdef GDTH_STATISTICS
-    if (max_sg < (ulong32)sgcnt) {
-        max_sg = (ulong32)sgcnt;
+    if (max_sg < (u32)sgcnt) {
+        max_sg = (u32)sgcnt;
         TRACE3(("GDT: max_sg = %d\n",max_sg));
     }
 #endif
@@ -2572,7 +2572,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
         TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
                cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
-            (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
+            (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
     } else {
         TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
@@ -2581,7 +2581,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
         TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
                cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
-            (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
+            (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
     }
     if (ha->cmd_len & 3)
         ha->cmd_len += (4 - (ha->cmd_len & 3));
@@ -2600,15 +2600,15 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
     return cmd_index;
 }
 
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
 {
     register gdth_cmd_str *cmdp;
-    ushort i;
+    u16 i;
     dma_addr_t sense_paddr;
     int cmd_index, sgcnt, mode64;
-    unchar t,l;
+    u8 t,l;
     struct page *page;
-    ulong offset;
+    unsigned long offset;
     struct gdth_cmndinfo *cmndinfo;
 
     t = scp->device->id;
@@ -2654,7 +2654,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
 
     } else {
         page = virt_to_page(scp->sense_buffer);
-        offset = (ulong)scp->sense_buffer & ~PAGE_MASK;
+        offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK;
         sense_paddr = pci_map_page(ha->pdev,page,offset,
                                    16,PCI_DMA_FROMDEVICE);
 
@@ -2703,12 +2703,12 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
     if (mode64) {
         struct scatterlist *sl;
 
-        cmdp->u.raw64.sdata = (ulong64)-1;
+        cmdp->u.raw64.sdata = (u64)-1;
         cmdp->u.raw64.sg_ranz = sgcnt;
         scsi_for_each_sg(scp, sl, sgcnt, i) {
             cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
-            if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
+            if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff)
                 ha->dma64_cnt++;
             else
                 ha->dma32_cnt++;
@@ -2744,7 +2744,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
                cmdp->u.raw64.sg_lst[0].sg_len));
         /* evaluate command size */
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
-            (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
+            (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
     } else {
         TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
@@ -2752,7 +2752,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
                cmdp->u.raw.sg_lst[0].sg_len));
         /* evaluate command size */
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
-            (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
+            (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
         }
     }
     /* check space */
@@ -2802,7 +2802,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
     if (cmdp->OpCode == GDT_IOCTL) {
         TRACE2(("IOCTL\n"));
         ha->cmd_len =
-            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64);
+            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
     } else if (cmdp->Service == CACHESERVICE) {
         TRACE2(("cache command %d\n",cmdp->OpCode));
         if (ha->cache_feat & GDT_64BIT)
@@ -2840,8 +2840,8 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
 
 
 /* Controller event handling functions */
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
-                                      ushort idx, gdth_evt_data *evt)
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
+                                      u16 idx, gdth_evt_data *evt)
 {
     gdth_evt_str *e;
     struct timeval tv;
@@ -2890,7 +2890,7 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
 {
     gdth_evt_str *e;
     int eindex;
-    ulong flags;
+    unsigned long flags;
 
     TRACE2(("gdth_read_event() handle %d\n", handle));
     spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2919,12 +2919,12 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
 }
 
 static void gdth_readapp_event(gdth_ha_str *ha,
-                               unchar application, gdth_evt_str *estr)
+                               u8 application, gdth_evt_str *estr)
 {
     gdth_evt_str *e;
     int eindex;
-    ulong flags;
-    unchar found = FALSE;
+    unsigned long flags;
+    u8 found = FALSE;
 
     TRACE2(("gdth_readapp_event() app. %d\n", application));
     spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2969,9 +2969,9 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
     gdt2_dpram_str __iomem *dp2_ptr;
     Scsi_Cmnd *scp;
     int rval, i;
-    unchar IStatus;
-    ushort Service;
-    ulong flags = 0;
+    u8 IStatus;
+    u16 Service;
+    unsigned long flags = 0;
 #ifdef INT_COAL
     int coalesced = FALSE;
     int next = FALSE;
@@ -3018,7 +3018,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
         if (coalesced) {
             /* For coalesced requests all status
                information is found in the status buffer */
-            IStatus = (unchar)(pcs->status & 0xff);
+            IStatus = (u8)(pcs->status & 0xff);
         }
 #endif
 
@@ -3197,7 +3197,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
         ++act_int_coal;
         if (act_int_coal > max_int_coal) {
             max_int_coal = act_int_coal;
-            printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal);
+            printk("GDT: max_int_coal = %d\n",(u16)max_int_coal);
         }
 #endif
         /* see if there is another status */
@@ -3225,12 +3225,12 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id)
     return __gdth_interrupt(ha, false, NULL);
 }
 
-static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
                            Scsi_Cmnd *scp)
 {
     gdth_msg_str *msg;
     gdth_cmd_str *cmdp;
-    unchar b, t;
+    u8 b, t;
     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
 
     cmdp = ha->pccb;
@@ -3263,7 +3263,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
         cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
         ha->cmd_offs_dpmem = 0;
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
-            + sizeof(ulong64);
+            + sizeof(u64);
         ha->cmd_cnt = 0;
         gdth_copy_command(ha);
         gdth_release_event(ha);
@@ -3297,7 +3297,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
         cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
         ha->cmd_offs_dpmem = 0;
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
-            + sizeof(ulong64);
+            + sizeof(u64);
         ha->cmd_cnt = 0;
         gdth_copy_command(ha);
         gdth_release_event(ha);
@@ -3335,7 +3335,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
                     cmndinfo->OpCode));
             /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
             if (cmndinfo->OpCode == GDT_CLUST_INFO) {
-                ha->hdr[t].cluster_type = (unchar)ha->info;
+                ha->hdr[t].cluster_type = (u8)ha->info;
                 if (!(ha->hdr[t].cluster_type &
                     CLUSTER_MOUNTED)) {
                     /* NOT MOUNTED -> MOUNT */
@@ -3397,7 +3397,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
3397 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED; 3397 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
3398 } 3398 }
3399 memset((char*)scp->sense_buffer,0,16); 3399 memset((char*)scp->sense_buffer,0,16);
3400 if (ha->status == (ushort)S_CACHE_RESERV) { 3400 if (ha->status == (u16)S_CACHE_RESERV) {
3401 scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1); 3401 scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
3402 } else { 3402 } else {
3403 scp->sense_buffer[0] = 0x70; 3403 scp->sense_buffer[0] = 0x70;
@@ -3614,16 +3614,16 @@ static int gdth_async_event(gdth_ha_str *ha)
3614 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; 3614 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
3615 ha->cmd_offs_dpmem = 0; 3615 ha->cmd_offs_dpmem = 0;
3616 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 3616 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
3617 + sizeof(ulong64); 3617 + sizeof(u64);
3618 ha->cmd_cnt = 0; 3618 ha->cmd_cnt = 0;
3619 gdth_copy_command(ha); 3619 gdth_copy_command(ha);
3620 if (ha->type == GDT_EISA) 3620 if (ha->type == GDT_EISA)
3621 printk("[EISA slot %d] ",(ushort)ha->brd_phys); 3621 printk("[EISA slot %d] ",(u16)ha->brd_phys);
3622 else if (ha->type == GDT_ISA) 3622 else if (ha->type == GDT_ISA)
3623 printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys); 3623 printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
3624 else 3624 else
3625 printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8), 3625 printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
3626 (ushort)((ha->brd_phys>>3)&0x1f)); 3626 (u16)((ha->brd_phys>>3)&0x1f));
3627 gdth_release_event(ha); 3627 gdth_release_event(ha);
3628 } 3628 }
3629 3629
@@ -3640,7 +3640,7 @@ static int gdth_async_event(gdth_ha_str *ha)
3640 ha->dvr.eu.async.service = ha->service; 3640 ha->dvr.eu.async.service = ha->service;
3641 ha->dvr.eu.async.status = ha->status; 3641 ha->dvr.eu.async.status = ha->status;
3642 ha->dvr.eu.async.info = ha->info; 3642 ha->dvr.eu.async.info = ha->info;
3643 *(ulong32 *)ha->dvr.eu.async.scsi_coord = ha->info2; 3643 *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
3644 } 3644 }
3645 gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr ); 3645 gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
3646 gdth_log_event( &ha->dvr, NULL ); 3646 gdth_log_event( &ha->dvr, NULL );
@@ -3648,8 +3648,8 @@ static int gdth_async_event(gdth_ha_str *ha)
3648 /* new host drive from expand? */ 3648 /* new host drive from expand? */
3649 if (ha->service == CACHESERVICE && ha->status == 56) { 3649 if (ha->service == CACHESERVICE && ha->status == 56) {
3650 TRACE2(("gdth_async_event(): new host drive %d created\n", 3650 TRACE2(("gdth_async_event(): new host drive %d created\n",
3651 (ushort)ha->info)); 3651 (u16)ha->info));
3652 /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */ 3652 /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
3653 } 3653 }
3654 } 3654 }
3655 return 1; 3655 return 1;
@@ -3680,13 +3680,13 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
3680 for (j=0,i=1; i < f[0]; i+=2) { 3680 for (j=0,i=1; i < f[0]; i+=2) {
3681 switch (f[i+1]) { 3681 switch (f[i+1]) {
3682 case 4: 3682 case 4:
3683 stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]]; 3683 stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
3684 break; 3684 break;
3685 case 2: 3685 case 2:
3686 stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]]; 3686 stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
3687 break; 3687 break;
3688 case 1: 3688 case 1:
3689 stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]]; 3689 stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
3690 break; 3690 break;
3691 default: 3691 default:
3692 break; 3692 break;
@@ -3712,14 +3712,14 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
3712} 3712}
3713 3713
3714#ifdef GDTH_STATISTICS 3714#ifdef GDTH_STATISTICS
3715static unchar gdth_timer_running; 3715static u8 gdth_timer_running;
3716 3716
3717static void gdth_timeout(ulong data) 3717static void gdth_timeout(unsigned long data)
3718{ 3718{
3719 ulong32 i; 3719 u32 i;
3720 Scsi_Cmnd *nscp; 3720 Scsi_Cmnd *nscp;
3721 gdth_ha_str *ha; 3721 gdth_ha_str *ha;
3722 ulong flags; 3722 unsigned long flags;
3723 3723
3724 if(unlikely(list_empty(&gdth_instances))) { 3724 if(unlikely(list_empty(&gdth_instances))) {
3725 gdth_timer_running = 0; 3725 gdth_timer_running = 0;
@@ -3891,8 +3891,8 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
3891{ 3891{
3892 gdth_ha_str *ha = shost_priv(scp->device->host); 3892 gdth_ha_str *ha = shost_priv(scp->device->host);
3893 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); 3893 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3894 unchar b, t; 3894 u8 b, t;
3895 ulong flags; 3895 unsigned long flags;
3896 enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED; 3896 enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
3897 3897
3898    TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));	3898    TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
@@ -3924,9 +3924,9 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
3924{ 3924{
3925 gdth_ha_str *ha = shost_priv(scp->device->host); 3925 gdth_ha_str *ha = shost_priv(scp->device->host);
3926 int i; 3926 int i;
3927 ulong flags; 3927 unsigned long flags;
3928 Scsi_Cmnd *cmnd; 3928 Scsi_Cmnd *cmnd;
3929 unchar b; 3929 u8 b;
3930 3930
3931 TRACE2(("gdth_eh_bus_reset()\n")); 3931 TRACE2(("gdth_eh_bus_reset()\n"));
3932 3932
@@ -3974,7 +3974,7 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
3974 3974
3975static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) 3975static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
3976{ 3976{
3977 unchar b, t; 3977 u8 b, t;
3978 gdth_ha_str *ha = shost_priv(sdev->host); 3978 gdth_ha_str *ha = shost_priv(sdev->host);
3979 struct scsi_device *sd; 3979 struct scsi_device *sd;
3980 unsigned capacity; 3980 unsigned capacity;
@@ -4062,7 +4062,7 @@ static int ioc_event(void __user *arg)
4062{ 4062{
4063 gdth_ioctl_event evt; 4063 gdth_ioctl_event evt;
4064 gdth_ha_str *ha; 4064 gdth_ha_str *ha;
4065 ulong flags; 4065 unsigned long flags;
4066 4066
4067 if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event))) 4067 if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
4068 return -EFAULT; 4068 return -EFAULT;
@@ -4098,8 +4098,8 @@ static int ioc_event(void __user *arg)
4098static int ioc_lockdrv(void __user *arg) 4098static int ioc_lockdrv(void __user *arg)
4099{ 4099{
4100 gdth_ioctl_lockdrv ldrv; 4100 gdth_ioctl_lockdrv ldrv;
4101 unchar i, j; 4101 u8 i, j;
4102 ulong flags; 4102 unsigned long flags;
4103 gdth_ha_str *ha; 4103 gdth_ha_str *ha;
4104 4104
4105 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv))) 4105 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
@@ -4165,7 +4165,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4165{ 4165{
4166 gdth_ioctl_general gen; 4166 gdth_ioctl_general gen;
4167 char *buf = NULL; 4167 char *buf = NULL;
4168 ulong64 paddr; 4168 u64 paddr;
4169 gdth_ha_str *ha; 4169 gdth_ha_str *ha;
4170 int rval; 4170 int rval;
4171 4171
@@ -4194,7 +4194,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4194 gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo; 4194 gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
4195 /* addresses */ 4195 /* addresses */
4196 if (ha->cache_feat & SCATTER_GATHER) { 4196 if (ha->cache_feat & SCATTER_GATHER) {
4197 gen.command.u.cache64.DestAddr = (ulong64)-1; 4197 gen.command.u.cache64.DestAddr = (u64)-1;
4198 gen.command.u.cache64.sg_canz = 1; 4198 gen.command.u.cache64.sg_canz = 1;
4199 gen.command.u.cache64.sg_lst[0].sg_ptr = paddr; 4199 gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
4200 gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len; 4200 gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
@@ -4207,7 +4207,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4207 if (ha->cache_feat & SCATTER_GATHER) { 4207 if (ha->cache_feat & SCATTER_GATHER) {
4208 gen.command.u.cache.DestAddr = 0xffffffff; 4208 gen.command.u.cache.DestAddr = 0xffffffff;
4209 gen.command.u.cache.sg_canz = 1; 4209 gen.command.u.cache.sg_canz = 1;
4210 gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr; 4210 gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
4211 gen.command.u.cache.sg_lst[0].sg_len = gen.data_len; 4211 gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
4212 gen.command.u.cache.sg_lst[1].sg_len = 0; 4212 gen.command.u.cache.sg_lst[1].sg_len = 0;
4213 } else { 4213 } else {
@@ -4230,7 +4230,7 @@ static int ioc_general(void __user *arg, char *cmnd)
4230 gen.command.u.raw64.direction = gen.command.u.raw.direction; 4230 gen.command.u.raw64.direction = gen.command.u.raw.direction;
4231 /* addresses */ 4231 /* addresses */
4232 if (ha->raw_feat & SCATTER_GATHER) { 4232 if (ha->raw_feat & SCATTER_GATHER) {
4233 gen.command.u.raw64.sdata = (ulong64)-1; 4233 gen.command.u.raw64.sdata = (u64)-1;
4234 gen.command.u.raw64.sg_ranz = 1; 4234 gen.command.u.raw64.sg_ranz = 1;
4235 gen.command.u.raw64.sg_lst[0].sg_ptr = paddr; 4235 gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
4236 gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len; 4236 gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
@@ -4244,14 +4244,14 @@ static int ioc_general(void __user *arg, char *cmnd)
4244 if (ha->raw_feat & SCATTER_GATHER) { 4244 if (ha->raw_feat & SCATTER_GATHER) {
4245 gen.command.u.raw.sdata = 0xffffffff; 4245 gen.command.u.raw.sdata = 0xffffffff;
4246 gen.command.u.raw.sg_ranz = 1; 4246 gen.command.u.raw.sg_ranz = 1;
4247 gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr; 4247 gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
4248 gen.command.u.raw.sg_lst[0].sg_len = gen.data_len; 4248 gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
4249 gen.command.u.raw.sg_lst[1].sg_len = 0; 4249 gen.command.u.raw.sg_lst[1].sg_len = 0;
4250 } else { 4250 } else {
4251 gen.command.u.raw.sdata = paddr; 4251 gen.command.u.raw.sdata = paddr;
4252 gen.command.u.raw.sg_ranz = 0; 4252 gen.command.u.raw.sg_ranz = 0;
4253 } 4253 }
4254 gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len; 4254 gen.command.u.raw.sense_data = (u32)paddr + gen.data_len;
4255 } 4255 }
4256 } else { 4256 } else {
4257 gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); 4257 gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
@@ -4283,7 +4283,7 @@ static int ioc_hdrlist(void __user *arg, char *cmnd)
4283 gdth_ioctl_rescan *rsc; 4283 gdth_ioctl_rescan *rsc;
4284 gdth_cmd_str *cmd; 4284 gdth_cmd_str *cmd;
4285 gdth_ha_str *ha; 4285 gdth_ha_str *ha;
4286 unchar i; 4286 u8 i;
4287 int rc = -ENOMEM; 4287 int rc = -ENOMEM;
4288 u32 cluster_type = 0; 4288 u32 cluster_type = 0;
4289 4289
@@ -4335,11 +4335,11 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4335{ 4335{
4336 gdth_ioctl_rescan *rsc; 4336 gdth_ioctl_rescan *rsc;
4337 gdth_cmd_str *cmd; 4337 gdth_cmd_str *cmd;
4338 ushort i, status, hdr_cnt; 4338 u16 i, status, hdr_cnt;
4339 ulong32 info; 4339 u32 info;
4340 int cyls, hds, secs; 4340 int cyls, hds, secs;
4341 int rc = -ENOMEM; 4341 int rc = -ENOMEM;
4342 ulong flags; 4342 unsigned long flags;
4343 gdth_ha_str *ha; 4343 gdth_ha_str *ha;
4344 4344
4345 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); 4345 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
@@ -4367,7 +4367,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4367 4367
4368 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 4368 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
4369 i = 0; 4369 i = 0;
4370 hdr_cnt = (status == S_OK ? (ushort)info : 0); 4370 hdr_cnt = (status == S_OK ? (u16)info : 0);
4371 } else { 4371 } else {
4372 i = rsc->hdr_no; 4372 i = rsc->hdr_no;
4373 hdr_cnt = i + 1; 4373 hdr_cnt = i + 1;
@@ -4418,7 +4418,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4418 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 4418 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
4419 4419
4420 spin_lock_irqsave(&ha->smp_lock, flags); 4420 spin_lock_irqsave(&ha->smp_lock, flags);
4421 ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0); 4421 ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
4422 spin_unlock_irqrestore(&ha->smp_lock, flags); 4422 spin_unlock_irqrestore(&ha->smp_lock, flags);
4423 4423
4424 cmd->Service = CACHESERVICE; 4424 cmd->Service = CACHESERVICE;
@@ -4432,7 +4432,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4432 4432
4433 spin_lock_irqsave(&ha->smp_lock, flags); 4433 spin_lock_irqsave(&ha->smp_lock, flags);
4434 ha->hdr[i].cluster_type = 4434 ha->hdr[i].cluster_type =
4435 ((status == S_OK && !shared_access) ? (ushort)info : 0); 4435 ((status == S_OK && !shared_access) ? (u16)info : 0);
4436 spin_unlock_irqrestore(&ha->smp_lock, flags); 4436 spin_unlock_irqrestore(&ha->smp_lock, flags);
4437 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; 4437 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
4438 4438
@@ -4446,7 +4446,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
4446 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 4446 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
4447 4447
4448 spin_lock_irqsave(&ha->smp_lock, flags); 4448 spin_lock_irqsave(&ha->smp_lock, flags);
4449 ha->hdr[i].rw_attribs = (status == S_OK ? (ushort)info : 0); 4449 ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
4450 spin_unlock_irqrestore(&ha->smp_lock, flags); 4450 spin_unlock_irqrestore(&ha->smp_lock, flags);
4451 } 4451 }
4452 4452
@@ -4466,7 +4466,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4466{ 4466{
4467 gdth_ha_str *ha; 4467 gdth_ha_str *ha;
4468 Scsi_Cmnd *scp; 4468 Scsi_Cmnd *scp;
4469 ulong flags; 4469 unsigned long flags;
4470 char cmnd[MAX_COMMAND_SIZE]; 4470 char cmnd[MAX_COMMAND_SIZE];
4471 void __user *argp = (void __user *)arg; 4471 void __user *argp = (void __user *)arg;
4472 4472
@@ -4495,9 +4495,9 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4495 { 4495 {
4496 gdth_ioctl_osvers osv; 4496 gdth_ioctl_osvers osv;
4497 4497
4498 osv.version = (unchar)(LINUX_VERSION_CODE >> 16); 4498 osv.version = (u8)(LINUX_VERSION_CODE >> 16);
4499 osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8); 4499 osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
4500 osv.revision = (ushort)(LINUX_VERSION_CODE & 0xff); 4500 osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
4501 if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers))) 4501 if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
4502 return -EFAULT; 4502 return -EFAULT;
4503 break; 4503 break;
@@ -4512,10 +4512,10 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4512 return -EFAULT; 4512 return -EFAULT;
4513 4513
4514 if (ha->type == GDT_ISA || ha->type == GDT_EISA) { 4514 if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
4515 ctrt.type = (unchar)((ha->stype>>20) - 0x10); 4515 ctrt.type = (u8)((ha->stype>>20) - 0x10);
4516 } else { 4516 } else {
4517 if (ha->type != GDT_PCIMPR) { 4517 if (ha->type != GDT_PCIMPR) {
4518 ctrt.type = (unchar)((ha->stype<<4) + 6); 4518 ctrt.type = (u8)((ha->stype<<4) + 6);
4519 } else { 4519 } else {
4520 ctrt.type = 4520 ctrt.type =
4521 (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe); 4521 (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
@@ -4546,7 +4546,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4546 case GDTIOCTL_LOCKCHN: 4546 case GDTIOCTL_LOCKCHN:
4547 { 4547 {
4548 gdth_ioctl_lockchn lchn; 4548 gdth_ioctl_lockchn lchn;
4549 unchar i, j; 4549 u8 i, j;
4550 4550
4551 if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) || 4551 if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
4552 (NULL == (ha = gdth_find_ha(lchn.ionode)))) 4552 (NULL == (ha = gdth_find_ha(lchn.ionode))))
@@ -4670,7 +4670,7 @@ static struct scsi_host_template gdth_template = {
4670}; 4670};
4671 4671
4672#ifdef CONFIG_ISA 4672#ifdef CONFIG_ISA
4673static int __init gdth_isa_probe_one(ulong32 isa_bios) 4673static int __init gdth_isa_probe_one(u32 isa_bios)
4674{ 4674{
4675 struct Scsi_Host *shp; 4675 struct Scsi_Host *shp;
4676 gdth_ha_str *ha; 4676 gdth_ha_str *ha;
@@ -4802,7 +4802,7 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios)
4802#endif /* CONFIG_ISA */ 4802#endif /* CONFIG_ISA */
4803 4803
4804#ifdef CONFIG_EISA 4804#ifdef CONFIG_EISA
4805static int __init gdth_eisa_probe_one(ushort eisa_slot) 4805static int __init gdth_eisa_probe_one(u16 eisa_slot)
4806{ 4806{
4807 struct Scsi_Host *shp; 4807 struct Scsi_Host *shp;
4808 gdth_ha_str *ha; 4808 gdth_ha_str *ha;
@@ -5120,7 +5120,7 @@ static void gdth_remove_one(gdth_ha_str *ha)
5120 scsi_host_put(shp); 5120 scsi_host_put(shp);
5121} 5121}
5122 5122
5123static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) 5123static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
5124{ 5124{
5125 gdth_ha_str *ha; 5125 gdth_ha_str *ha;
5126 5126
@@ -5158,14 +5158,14 @@ static int __init gdth_init(void)
5158 if (probe_eisa_isa) { 5158 if (probe_eisa_isa) {
5159 /* scanning for controllers, at first: ISA controller */ 5159 /* scanning for controllers, at first: ISA controller */
5160#ifdef CONFIG_ISA 5160#ifdef CONFIG_ISA
5161 ulong32 isa_bios; 5161 u32 isa_bios;
5162 for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL; 5162 for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL;
5163 isa_bios += 0x8000UL) 5163 isa_bios += 0x8000UL)
5164 gdth_isa_probe_one(isa_bios); 5164 gdth_isa_probe_one(isa_bios);
5165#endif 5165#endif
5166#ifdef CONFIG_EISA 5166#ifdef CONFIG_EISA
5167 { 5167 {
5168 ushort eisa_slot; 5168 u16 eisa_slot;
5169 for (eisa_slot = 0x1000; eisa_slot <= 0x8000; 5169 for (eisa_slot = 0x1000; eisa_slot <= 0x8000;
5170 eisa_slot += 0x1000) 5170 eisa_slot += 0x1000)
5171 gdth_eisa_probe_one(eisa_slot); 5171 gdth_eisa_probe_one(eisa_slot);
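The gdth.c hunks above apply one mechanical rule: every driver-private integer typedef is replaced by the matching fixed-width kernel type (unchar -> u8, ushort -> u16, ulong32 -> u32, ulong64 -> u64), while the bare ulong becomes unsigned long. A minimal userspace sketch of why the fixed-width names are preferred — the typedefs below are stand-ins for the <linux/types.h> definitions, not code from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's <linux/types.h> definitions. */
    typedef uint8_t  u8;
    typedef uint16_t u16;
    typedef uint32_t u32;
    typedef uint64_t u64;

    int main(void)
    {
        /* The renamed types have the same size on every architecture;
         * plain 'unsigned long' (the old 'ulong') does not, which is
         * why it is kept only for flags and similar arch-sized values. */
        printf("u8=%zu u16=%zu u32=%zu u64=%zu ulong=%zu\n",
               sizeof(u8), sizeof(u16), sizeof(u32), sizeof(u64),
               sizeof(unsigned long));
        return 0;
    }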
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 1646444e9bd5..120a0625a7b5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -321,524 +321,524 @@
321 321
322/* screenservice message */ 322/* screenservice message */
323typedef struct { 323typedef struct {
324 ulong32 msg_handle; /* message handle */ 324 u32 msg_handle; /* message handle */
325 ulong32 msg_len; /* size of message */ 325 u32 msg_len; /* size of message */
326 ulong32 msg_alen; /* answer length */ 326 u32 msg_alen; /* answer length */
327 unchar msg_answer; /* answer flag */ 327 u8 msg_answer; /* answer flag */
328 unchar msg_ext; /* more messages */ 328 u8 msg_ext; /* more messages */
329 unchar msg_reserved[2]; 329 u8 msg_reserved[2];
330 char msg_text[MSGLEN+2]; /* the message text */ 330 char msg_text[MSGLEN+2]; /* the message text */
331} PACKED gdth_msg_str; 331} __attribute__((packed)) gdth_msg_str;
332 332
333 333
334/* IOCTL data structures */ 334/* IOCTL data structures */
335 335
336/* Status coalescing buffer for returning multiple requests per interrupt */ 336/* Status coalescing buffer for returning multiple requests per interrupt */
337typedef struct { 337typedef struct {
338 ulong32 status; 338 u32 status;
339 ulong32 ext_status; 339 u32 ext_status;
340 ulong32 info0; 340 u32 info0;
341 ulong32 info1; 341 u32 info1;
342} PACKED gdth_coal_status; 342} __attribute__((packed)) gdth_coal_status;
343 343
344/* performance mode data structure */ 344/* performance mode data structure */
345typedef struct { 345typedef struct {
346 ulong32 version; /* The version of this IOCTL structure. */ 346 u32 version; /* The version of this IOCTL structure. */
347    ulong32     st_mode;            /* 0=dis., 1=st_buff_addr1 valid, 2=both */	347    u32         st_mode;            /* 0=dis., 1=st_buff_addr1 valid, 2=both */
348 ulong32 st_buff_addr1; /* physical address of status buffer 1 */ 348 u32 st_buff_addr1; /* physical address of status buffer 1 */
349 ulong32 st_buff_u_addr1; /* reserved for 64 bit addressing */ 349 u32 st_buff_u_addr1; /* reserved for 64 bit addressing */
350 ulong32 st_buff_indx1; /* reserved command idx. for this buffer */ 350 u32 st_buff_indx1; /* reserved command idx. for this buffer */
351    ulong32     st_buff_addr2;      /* physical address of status buffer 2 */	351    u32         st_buff_addr2;      /* physical address of status buffer 2 */
352 ulong32 st_buff_u_addr2; /* reserved for 64 bit addressing */ 352 u32 st_buff_u_addr2; /* reserved for 64 bit addressing */
353 ulong32 st_buff_indx2; /* reserved command idx. for this buffer */ 353 u32 st_buff_indx2; /* reserved command idx. for this buffer */
354 ulong32 st_buff_size; /* size of each buffer in bytes */ 354 u32 st_buff_size; /* size of each buffer in bytes */
355 ulong32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */ 355 u32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */
356 ulong32 cmd_buff_addr1; /* physical address of cmd buffer 1 */ 356 u32 cmd_buff_addr1; /* physical address of cmd buffer 1 */
357 ulong32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */ 357 u32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */
358 ulong32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */ 358 u32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */
359    ulong32     cmd_buff_addr2;     /* physical address of cmd buffer 2 */	359    u32         cmd_buff_addr2;     /* physical address of cmd buffer 2 */
360 ulong32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */ 360 u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
361    ulong32     cmd_buff_indx2;     /* cmd buf addr2 unique identifier */	361    u32         cmd_buff_indx2;     /* cmd buf addr2 unique identifier */
362    ulong32     cmd_buff_size;      /* size of each cmd buffer in bytes */	362    u32         cmd_buff_size;      /* size of each cmd buffer in bytes */
363 ulong32 reserved1; 363 u32 reserved1;
364 ulong32 reserved2; 364 u32 reserved2;
365} PACKED gdth_perf_modes; 365} __attribute__((packed)) gdth_perf_modes;
366 366
367/* SCSI drive info */ 367/* SCSI drive info */
368typedef struct { 368typedef struct {
369 unchar vendor[8]; /* vendor string */ 369 u8 vendor[8]; /* vendor string */
370 unchar product[16]; /* product string */ 370 u8 product[16]; /* product string */
371 unchar revision[4]; /* revision */ 371 u8 revision[4]; /* revision */
372 ulong32 sy_rate; /* current rate for sync. tr. */ 372 u32 sy_rate; /* current rate for sync. tr. */
373 ulong32 sy_max_rate; /* max. rate for sync. tr. */ 373 u32 sy_max_rate; /* max. rate for sync. tr. */
374 ulong32 no_ldrive; /* belongs to this log. drv.*/ 374 u32 no_ldrive; /* belongs to this log. drv.*/
375 ulong32 blkcnt; /* number of blocks */ 375 u32 blkcnt; /* number of blocks */
376 ushort blksize; /* size of block in bytes */ 376 u16 blksize; /* size of block in bytes */
377 unchar available; /* flag: access is available */ 377 u8 available; /* flag: access is available */
378 unchar init; /* medium is initialized */ 378 u8 init; /* medium is initialized */
379 unchar devtype; /* SCSI devicetype */ 379 u8 devtype; /* SCSI devicetype */
380 unchar rm_medium; /* medium is removable */ 380 u8 rm_medium; /* medium is removable */
381 unchar wp_medium; /* medium is write protected */ 381 u8 wp_medium; /* medium is write protected */
382 unchar ansi; /* SCSI I/II or III? */ 382 u8 ansi; /* SCSI I/II or III? */
383 unchar protocol; /* same as ansi */ 383 u8 protocol; /* same as ansi */
384 unchar sync; /* flag: sync. transfer enab. */ 384 u8 sync; /* flag: sync. transfer enab. */
385 unchar disc; /* flag: disconnect enabled */ 385 u8 disc; /* flag: disconnect enabled */
386    unchar      queueing;           /* flag: command queuing enab. */	386    u8          queueing;           /* flag: command queuing enab. */
387 unchar cached; /* flag: caching enabled */ 387 u8 cached; /* flag: caching enabled */
388 unchar target_id; /* target ID of device */ 388 u8 target_id; /* target ID of device */
389 unchar lun; /* LUN id of device */ 389 u8 lun; /* LUN id of device */
390 unchar orphan; /* flag: drive fragment */ 390 u8 orphan; /* flag: drive fragment */
391 ulong32 last_error; /* sense key or drive state */ 391 u32 last_error; /* sense key or drive state */
392 ulong32 last_result; /* result of last command */ 392 u32 last_result; /* result of last command */
393 ulong32 check_errors; /* err. in last surface check */ 393 u32 check_errors; /* err. in last surface check */
394 unchar percent; /* progress for surface check */ 394 u8 percent; /* progress for surface check */
395 unchar last_check; /* IOCTRL operation */ 395 u8 last_check; /* IOCTRL operation */
396 unchar res[2]; 396 u8 res[2];
397 ulong32 flags; /* from 1.19/2.19: raw reserv.*/ 397 u32 flags; /* from 1.19/2.19: raw reserv.*/
398 unchar multi_bus; /* multi bus dev? (fibre ch.) */ 398 u8 multi_bus; /* multi bus dev? (fibre ch.) */
399 unchar mb_status; /* status: available? */ 399 u8 mb_status; /* status: available? */
400 unchar res2[2]; 400 u8 res2[2];
401 unchar mb_alt_status; /* status on second bus */ 401 u8 mb_alt_status; /* status on second bus */
402 unchar mb_alt_bid; /* number of second bus */ 402 u8 mb_alt_bid; /* number of second bus */
403 unchar mb_alt_tid; /* target id on second bus */ 403 u8 mb_alt_tid; /* target id on second bus */
404 unchar res3; 404 u8 res3;
405 unchar fc_flag; /* from 1.22/2.22: info valid?*/ 405 u8 fc_flag; /* from 1.22/2.22: info valid?*/
406 unchar res4; 406 u8 res4;
407 ushort fc_frame_size; /* frame size (bytes) */ 407 u16 fc_frame_size; /* frame size (bytes) */
408 char wwn[8]; /* world wide name */ 408 char wwn[8]; /* world wide name */
409} PACKED gdth_diskinfo_str; 409} __attribute__((packed)) gdth_diskinfo_str;
410 410
411/* get SCSI channel count */ 411/* get SCSI channel count */
412typedef struct { 412typedef struct {
413 ulong32 channel_no; /* number of channel */ 413 u32 channel_no; /* number of channel */
414 ulong32 drive_cnt; /* drive count */ 414 u32 drive_cnt; /* drive count */
415 unchar siop_id; /* SCSI processor ID */ 415 u8 siop_id; /* SCSI processor ID */
416 unchar siop_state; /* SCSI processor state */ 416 u8 siop_state; /* SCSI processor state */
417} PACKED gdth_getch_str; 417} __attribute__((packed)) gdth_getch_str;
418 418
419/* get SCSI drive numbers */ 419/* get SCSI drive numbers */
420typedef struct { 420typedef struct {
421 ulong32 sc_no; /* SCSI channel */ 421 u32 sc_no; /* SCSI channel */
422 ulong32 sc_cnt; /* sc_list[] elements */ 422 u32 sc_cnt; /* sc_list[] elements */
423 ulong32 sc_list[MAXID]; /* minor device numbers */ 423 u32 sc_list[MAXID]; /* minor device numbers */
424} PACKED gdth_drlist_str; 424} __attribute__((packed)) gdth_drlist_str;
425 425
426/* get grown/primary defect count */ 426/* get grown/primary defect count */
427typedef struct { 427typedef struct {
428 unchar sddc_type; /* 0x08: grown, 0x10: prim. */ 428 u8 sddc_type; /* 0x08: grown, 0x10: prim. */
429 unchar sddc_format; /* list entry format */ 429 u8 sddc_format; /* list entry format */
430 unchar sddc_len; /* list entry length */ 430 u8 sddc_len; /* list entry length */
431 unchar sddc_res; 431 u8 sddc_res;
432 ulong32 sddc_cnt; /* entry count */ 432 u32 sddc_cnt; /* entry count */
433} PACKED gdth_defcnt_str; 433} __attribute__((packed)) gdth_defcnt_str;
434 434
435/* disk statistics */ 435/* disk statistics */
436typedef struct { 436typedef struct {
437 ulong32 bid; /* SCSI channel */ 437 u32 bid; /* SCSI channel */
438 ulong32 first; /* first SCSI disk */ 438 u32 first; /* first SCSI disk */
439 ulong32 entries; /* number of elements */ 439 u32 entries; /* number of elements */
440 ulong32 count; /* (R) number of init. el. */ 440 u32 count; /* (R) number of init. el. */
441 ulong32 mon_time; /* time stamp */ 441 u32 mon_time; /* time stamp */
442 struct { 442 struct {
443 unchar tid; /* target ID */ 443 u8 tid; /* target ID */
444 unchar lun; /* LUN */ 444 u8 lun; /* LUN */
445 unchar res[2]; 445 u8 res[2];
446 ulong32 blk_size; /* block size in bytes */ 446 u32 blk_size; /* block size in bytes */
447 ulong32 rd_count; /* bytes read */ 447 u32 rd_count; /* bytes read */
448 ulong32 wr_count; /* bytes written */ 448 u32 wr_count; /* bytes written */
449 ulong32 rd_blk_count; /* blocks read */ 449 u32 rd_blk_count; /* blocks read */
450 ulong32 wr_blk_count; /* blocks written */ 450 u32 wr_blk_count; /* blocks written */
451 ulong32 retries; /* retries */ 451 u32 retries; /* retries */
452 ulong32 reassigns; /* reassigns */ 452 u32 reassigns; /* reassigns */
453 } PACKED list[1]; 453 } __attribute__((packed)) list[1];
454} PACKED gdth_dskstat_str; 454} __attribute__((packed)) gdth_dskstat_str;
455 455
456/* IO channel header */ 456/* IO channel header */
457typedef struct { 457typedef struct {
458 ulong32 version; /* version (-1UL: newest) */ 458 u32 version; /* version (-1UL: newest) */
459 unchar list_entries; /* list entry count */ 459 u8 list_entries; /* list entry count */
460 unchar first_chan; /* first channel number */ 460 u8 first_chan; /* first channel number */
461 unchar last_chan; /* last channel number */ 461 u8 last_chan; /* last channel number */
462 unchar chan_count; /* (R) channel count */ 462 u8 chan_count; /* (R) channel count */
463 ulong32 list_offset; /* offset of list[0] */ 463 u32 list_offset; /* offset of list[0] */
464} PACKED gdth_iochan_header; 464} __attribute__((packed)) gdth_iochan_header;
465 465
466/* get IO channel description */ 466/* get IO channel description */
467typedef struct { 467typedef struct {
468 gdth_iochan_header hdr; 468 gdth_iochan_header hdr;
469 struct { 469 struct {
470 ulong32 address; /* channel address */ 470 u32 address; /* channel address */
471 unchar type; /* type (SCSI, FCAL) */ 471 u8 type; /* type (SCSI, FCAL) */
472 unchar local_no; /* local number */ 472 u8 local_no; /* local number */
473 ushort features; /* channel features */ 473 u16 features; /* channel features */
474 } PACKED list[MAXBUS]; 474 } __attribute__((packed)) list[MAXBUS];
475} PACKED gdth_iochan_str; 475} __attribute__((packed)) gdth_iochan_str;
476 476
477/* get raw IO channel description */ 477/* get raw IO channel description */
478typedef struct { 478typedef struct {
479 gdth_iochan_header hdr; 479 gdth_iochan_header hdr;
480 struct { 480 struct {
481 unchar proc_id; /* processor id */ 481 u8 proc_id; /* processor id */
482 unchar proc_defect; /* defect ? */ 482 u8 proc_defect; /* defect ? */
483 unchar reserved[2]; 483 u8 reserved[2];
484 } PACKED list[MAXBUS]; 484 } __attribute__((packed)) list[MAXBUS];
485} PACKED gdth_raw_iochan_str; 485} __attribute__((packed)) gdth_raw_iochan_str;
486 486
487/* array drive component */ 487/* array drive component */
488typedef struct { 488typedef struct {
489 ulong32 al_controller; /* controller ID */ 489 u32 al_controller; /* controller ID */
490 unchar al_cache_drive; /* cache drive number */ 490 u8 al_cache_drive; /* cache drive number */
491 unchar al_status; /* cache drive state */ 491 u8 al_status; /* cache drive state */
492 unchar al_res[2]; 492 u8 al_res[2];
493} PACKED gdth_arraycomp_str; 493} __attribute__((packed)) gdth_arraycomp_str;
494 494
495/* array drive information */ 495/* array drive information */
496typedef struct { 496typedef struct {
497 unchar ai_type; /* array type (RAID0,4,5) */ 497 u8 ai_type; /* array type (RAID0,4,5) */
498 unchar ai_cache_drive_cnt; /* active cachedrives */ 498 u8 ai_cache_drive_cnt; /* active cachedrives */
499 unchar ai_state; /* array drive state */ 499 u8 ai_state; /* array drive state */
500 unchar ai_master_cd; /* master cachedrive */ 500 u8 ai_master_cd; /* master cachedrive */
501 ulong32 ai_master_controller; /* ID of master controller */ 501 u32 ai_master_controller; /* ID of master controller */
502 ulong32 ai_size; /* user capacity [sectors] */ 502 u32 ai_size; /* user capacity [sectors] */
503 ulong32 ai_striping_size; /* striping size [sectors] */ 503 u32 ai_striping_size; /* striping size [sectors] */
504 ulong32 ai_secsize; /* sector size [bytes] */ 504 u32 ai_secsize; /* sector size [bytes] */
505 ulong32 ai_err_info; /* failed cache drive */ 505 u32 ai_err_info; /* failed cache drive */
506 unchar ai_name[8]; /* name of the array drive */ 506 u8 ai_name[8]; /* name of the array drive */
507 unchar ai_controller_cnt; /* number of controllers */ 507 u8 ai_controller_cnt; /* number of controllers */
508 unchar ai_removable; /* flag: removable */ 508 u8 ai_removable; /* flag: removable */
509 unchar ai_write_protected; /* flag: write protected */ 509 u8 ai_write_protected; /* flag: write protected */
510 unchar ai_devtype; /* type: always direct access */ 510 u8 ai_devtype; /* type: always direct access */
511 gdth_arraycomp_str ai_drives[35]; /* drive components: */ 511 gdth_arraycomp_str ai_drives[35]; /* drive components: */
512 unchar ai_drive_entries; /* number of drive components */ 512 u8 ai_drive_entries; /* number of drive components */
513 unchar ai_protected; /* protection flag */ 513 u8 ai_protected; /* protection flag */
514 unchar ai_verify_state; /* state of a parity verify */ 514 u8 ai_verify_state; /* state of a parity verify */
515 unchar ai_ext_state; /* extended array drive state */ 515 u8 ai_ext_state; /* extended array drive state */
516 unchar ai_expand_state; /* array expand state (>=2.18)*/ 516 u8 ai_expand_state; /* array expand state (>=2.18)*/
517 unchar ai_reserved[3]; 517 u8 ai_reserved[3];
518} PACKED gdth_arrayinf_str; 518} __attribute__((packed)) gdth_arrayinf_str;
519 519
520/* get array drive list */ 520/* get array drive list */
521typedef struct { 521typedef struct {
522 ulong32 controller_no; /* controller no. */ 522 u32 controller_no; /* controller no. */
523 unchar cd_handle; /* master cachedrive */ 523 u8 cd_handle; /* master cachedrive */
524 unchar is_arrayd; /* Flag: is array drive? */ 524 u8 is_arrayd; /* Flag: is array drive? */
525 unchar is_master; /* Flag: is array master? */ 525 u8 is_master; /* Flag: is array master? */
526 unchar is_parity; /* Flag: is parity drive? */ 526 u8 is_parity; /* Flag: is parity drive? */
527 unchar is_hotfix; /* Flag: is hotfix drive? */ 527 u8 is_hotfix; /* Flag: is hotfix drive? */
528 unchar res[3]; 528 u8 res[3];
529} PACKED gdth_alist_str; 529} __attribute__((packed)) gdth_alist_str;
530 530
531typedef struct { 531typedef struct {
532 ulong32 entries_avail; /* allocated entries */ 532 u32 entries_avail; /* allocated entries */
533 ulong32 entries_init; /* returned entries */ 533 u32 entries_init; /* returned entries */
534 ulong32 first_entry; /* first entry number */ 534 u32 first_entry; /* first entry number */
535 ulong32 list_offset; /* offset of following list */ 535 u32 list_offset; /* offset of following list */
536 gdth_alist_str list[1]; /* list */ 536 gdth_alist_str list[1]; /* list */
537} PACKED gdth_arcdl_str; 537} __attribute__((packed)) gdth_arcdl_str;
538 538
539/* cache info/config IOCTL */ 539/* cache info/config IOCTL */
540typedef struct { 540typedef struct {
541 ulong32 version; /* firmware version */ 541 u32 version; /* firmware version */
542 ushort state; /* cache state (on/off) */ 542 u16 state; /* cache state (on/off) */
543 ushort strategy; /* cache strategy */ 543 u16 strategy; /* cache strategy */
544 ushort write_back; /* write back state (on/off) */ 544 u16 write_back; /* write back state (on/off) */
545 ushort block_size; /* cache block size */ 545 u16 block_size; /* cache block size */
546} PACKED gdth_cpar_str; 546} __attribute__((packed)) gdth_cpar_str;
547 547
548typedef struct { 548typedef struct {
549 ulong32 csize; /* cache size */ 549 u32 csize; /* cache size */
550 ulong32 read_cnt; /* read/write counter */ 550 u32 read_cnt; /* read/write counter */
551 ulong32 write_cnt; 551 u32 write_cnt;
552 ulong32 tr_hits; /* hits */ 552 u32 tr_hits; /* hits */
553 ulong32 sec_hits; 553 u32 sec_hits;
554 ulong32 sec_miss; /* misses */ 554 u32 sec_miss; /* misses */
555} PACKED gdth_cstat_str; 555} __attribute__((packed)) gdth_cstat_str;
556 556
557typedef struct { 557typedef struct {
558 gdth_cpar_str cpar; 558 gdth_cpar_str cpar;
559 gdth_cstat_str cstat; 559 gdth_cstat_str cstat;
560} PACKED gdth_cinfo_str; 560} __attribute__((packed)) gdth_cinfo_str;
561 561
562/* cache drive info */ 562/* cache drive info */
563typedef struct { 563typedef struct {
564 unchar cd_name[8]; /* cache drive name */ 564 u8 cd_name[8]; /* cache drive name */
565 ulong32 cd_devtype; /* SCSI devicetype */ 565 u32 cd_devtype; /* SCSI devicetype */
566 ulong32 cd_ldcnt; /* number of log. drives */ 566 u32 cd_ldcnt; /* number of log. drives */
567 ulong32 cd_last_error; /* last error */ 567 u32 cd_last_error; /* last error */
568 unchar cd_initialized; /* drive is initialized */ 568 u8 cd_initialized; /* drive is initialized */
569 unchar cd_removable; /* media is removable */ 569 u8 cd_removable; /* media is removable */
570 unchar cd_write_protected; /* write protected */ 570 u8 cd_write_protected; /* write protected */
571 unchar cd_flags; /* Pool Hot Fix? */ 571 u8 cd_flags; /* Pool Hot Fix? */
572 ulong32 ld_blkcnt; /* number of blocks */ 572 u32 ld_blkcnt; /* number of blocks */
573 ulong32 ld_blksize; /* blocksize */ 573 u32 ld_blksize; /* blocksize */
574 ulong32 ld_dcnt; /* number of disks */ 574 u32 ld_dcnt; /* number of disks */
575 ulong32 ld_slave; /* log. drive index */ 575 u32 ld_slave; /* log. drive index */
576 ulong32 ld_dtype; /* type of logical drive */ 576 u32 ld_dtype; /* type of logical drive */
577 ulong32 ld_last_error; /* last error */ 577 u32 ld_last_error; /* last error */
578 unchar ld_name[8]; /* log. drive name */ 578 u8 ld_name[8]; /* log. drive name */
579 unchar ld_error; /* error */ 579 u8 ld_error; /* error */
580} PACKED gdth_cdrinfo_str; 580} __attribute__((packed)) gdth_cdrinfo_str;
581 581
582/* OEM string */ 582/* OEM string */
583typedef struct { 583typedef struct {
584 ulong32 ctl_version; 584 u32 ctl_version;
585 ulong32 file_major_version; 585 u32 file_major_version;
586 ulong32 file_minor_version; 586 u32 file_minor_version;
587 ulong32 buffer_size; 587 u32 buffer_size;
588 ulong32 cpy_count; 588 u32 cpy_count;
589 ulong32 ext_error; 589 u32 ext_error;
590 ulong32 oem_id; 590 u32 oem_id;
591 ulong32 board_id; 591 u32 board_id;
592} PACKED gdth_oem_str_params; 592} __attribute__((packed)) gdth_oem_str_params;
593 593
594typedef struct { 594typedef struct {
595 unchar product_0_1_name[16]; 595 u8 product_0_1_name[16];
596 unchar product_4_5_name[16]; 596 u8 product_4_5_name[16];
597 unchar product_cluster_name[16]; 597 u8 product_cluster_name[16];
598 unchar product_reserved[16]; 598 u8 product_reserved[16];
599 unchar scsi_cluster_target_vendor_id[16]; 599 u8 scsi_cluster_target_vendor_id[16];
600 unchar cluster_raid_fw_name[16]; 600 u8 cluster_raid_fw_name[16];
601 unchar oem_brand_name[16]; 601 u8 oem_brand_name[16];
602 unchar oem_raid_type[16]; 602 u8 oem_raid_type[16];
603 unchar bios_type[13]; 603 u8 bios_type[13];
604 unchar bios_title[50]; 604 u8 bios_title[50];
605 unchar oem_company_name[37]; 605 u8 oem_company_name[37];
606 ulong32 pci_id_1; 606 u32 pci_id_1;
607 ulong32 pci_id_2; 607 u32 pci_id_2;
608 unchar validation_status[80]; 608 u8 validation_status[80];
609 unchar reserved_1[4]; 609 u8 reserved_1[4];
610 unchar scsi_host_drive_inquiry_vendor_id[16]; 610 u8 scsi_host_drive_inquiry_vendor_id[16];
611 unchar library_file_template[16]; 611 u8 library_file_template[16];
612 unchar reserved_2[16]; 612 u8 reserved_2[16];
613 unchar tool_name_1[32]; 613 u8 tool_name_1[32];
614 unchar tool_name_2[32]; 614 u8 tool_name_2[32];
615 unchar tool_name_3[32]; 615 u8 tool_name_3[32];
616 unchar oem_contact_1[84]; 616 u8 oem_contact_1[84];
617 unchar oem_contact_2[84]; 617 u8 oem_contact_2[84];
618 unchar oem_contact_3[84]; 618 u8 oem_contact_3[84];
619} PACKED gdth_oem_str; 619} __attribute__((packed)) gdth_oem_str;
620 620
621typedef struct { 621typedef struct {
622 gdth_oem_str_params params; 622 gdth_oem_str_params params;
623 gdth_oem_str text; 623 gdth_oem_str text;
624} PACKED gdth_oem_str_ioctl; 624} __attribute__((packed)) gdth_oem_str_ioctl;
625 625
626/* board features */ 626/* board features */
627typedef struct { 627typedef struct {
628 unchar chaining; /* Chaining supported */ 628 u8 chaining; /* Chaining supported */
629 unchar striping; /* Striping (RAID-0) supp. */ 629 u8 striping; /* Striping (RAID-0) supp. */
630 unchar mirroring; /* Mirroring (RAID-1) supp. */ 630 u8 mirroring; /* Mirroring (RAID-1) supp. */
631 unchar raid; /* RAID-4/5/10 supported */ 631 u8 raid; /* RAID-4/5/10 supported */
632} PACKED gdth_bfeat_str; 632} __attribute__((packed)) gdth_bfeat_str;
633 633
634/* board info IOCTL */ 634/* board info IOCTL */
635typedef struct { 635typedef struct {
636 ulong32 ser_no; /* serial no. */ 636 u32 ser_no; /* serial no. */
637 unchar oem_id[2]; /* OEM ID */ 637 u8 oem_id[2]; /* OEM ID */
638 ushort ep_flags; /* eprom flags */ 638 u16 ep_flags; /* eprom flags */
639 ulong32 proc_id; /* processor ID */ 639 u32 proc_id; /* processor ID */
640 ulong32 memsize; /* memory size (bytes) */ 640 u32 memsize; /* memory size (bytes) */
641 unchar mem_banks; /* memory banks */ 641 u8 mem_banks; /* memory banks */
642 unchar chan_type; /* channel type */ 642 u8 chan_type; /* channel type */
643 unchar chan_count; /* channel count */ 643 u8 chan_count; /* channel count */
644 unchar rdongle_pres; /* dongle present? */ 644 u8 rdongle_pres; /* dongle present? */
645 ulong32 epr_fw_ver; /* (eprom) firmware version */ 645 u32 epr_fw_ver; /* (eprom) firmware version */
646 ulong32 upd_fw_ver; /* (update) firmware version */ 646 u32 upd_fw_ver; /* (update) firmware version */
647 ulong32 upd_revision; /* update revision */ 647 u32 upd_revision; /* update revision */
648 char type_string[16]; /* controller name */ 648 char type_string[16]; /* controller name */
649 char raid_string[16]; /* RAID firmware name */ 649 char raid_string[16]; /* RAID firmware name */
650 unchar update_pres; /* update present? */ 650 u8 update_pres; /* update present? */
651 unchar xor_pres; /* XOR engine present? */ 651 u8 xor_pres; /* XOR engine present? */
652 unchar prom_type; /* ROM type (eprom/flash) */ 652 u8 prom_type; /* ROM type (eprom/flash) */
653 unchar prom_count; /* number of ROM devices */ 653 u8 prom_count; /* number of ROM devices */
654 ulong32 dup_pres; /* duplexing module present? */ 654 u32 dup_pres; /* duplexing module present? */
655 ulong32 chan_pres; /* number of expansion chn. */ 655 u32 chan_pres; /* number of expansion chn. */
656 ulong32 mem_pres; /* memory expansion inst. ? */ 656 u32 mem_pres; /* memory expansion inst. ? */
657 unchar ft_bus_system; /* fault bus supported? */ 657 u8 ft_bus_system; /* fault bus supported? */
658 unchar subtype_valid; /* board_subtype valid? */ 658 u8 subtype_valid; /* board_subtype valid? */
659 unchar board_subtype; /* subtype/hardware level */ 659 u8 board_subtype; /* subtype/hardware level */
660 unchar ramparity_pres; /* RAM parity check hardware? */ 660 u8 ramparity_pres; /* RAM parity check hardware? */
661} PACKED gdth_binfo_str; 661} __attribute__((packed)) gdth_binfo_str;
662 662
663/* get host drive info */ 663/* get host drive info */
664typedef struct { 664typedef struct {
665 char name[8]; /* host drive name */ 665 char name[8]; /* host drive name */
666 ulong32 size; /* size (sectors) */ 666 u32 size; /* size (sectors) */
667 unchar host_drive; /* host drive number */ 667 u8 host_drive; /* host drive number */
668 unchar log_drive; /* log. drive (master) */ 668 u8 log_drive; /* log. drive (master) */
669 unchar reserved; 669 u8 reserved;
670 unchar rw_attribs; /* r/w attribs */ 670 u8 rw_attribs; /* r/w attribs */
671 ulong32 start_sec; /* start sector */ 671 u32 start_sec; /* start sector */
672} PACKED gdth_hentry_str; 672} __attribute__((packed)) gdth_hentry_str;
673 673
674typedef struct { 674typedef struct {
675 ulong32 entries; /* entry count */ 675 u32 entries; /* entry count */
676 ulong32 offset; /* offset of entries */ 676 u32 offset; /* offset of entries */
677 unchar secs_p_head; /* sectors/head */ 677 u8 secs_p_head; /* sectors/head */
678 unchar heads_p_cyl; /* heads/cylinder */ 678 u8 heads_p_cyl; /* heads/cylinder */
679 unchar reserved; 679 u8 reserved;
680 unchar clust_drvtype; /* cluster drive type */ 680 u8 clust_drvtype; /* cluster drive type */
681 ulong32 location; /* controller number */ 681 u32 location; /* controller number */
682 gdth_hentry_str entry[MAX_HDRIVES]; /* entries */ 682 gdth_hentry_str entry[MAX_HDRIVES]; /* entries */
683} PACKED gdth_hget_str; 683} __attribute__((packed)) gdth_hget_str;
684 684
685 685
686/* DPRAM structures */ 686/* DPRAM structures */
687 687
688/* interface area ISA/PCI */ 688/* interface area ISA/PCI */
689typedef struct { 689typedef struct {
690 unchar S_Cmd_Indx; /* special command */ 690 u8 S_Cmd_Indx; /* special command */
691 unchar volatile S_Status; /* status special command */ 691 u8 volatile S_Status; /* status special command */
692 ushort reserved1; 692 u16 reserved1;
693 ulong32 S_Info[4]; /* add. info special command */ 693 u32 S_Info[4]; /* add. info special command */
694 unchar volatile Sema0; /* command semaphore */ 694 u8 volatile Sema0; /* command semaphore */
695 unchar reserved2[3]; 695 u8 reserved2[3];
696 unchar Cmd_Index; /* command number */ 696 u8 Cmd_Index; /* command number */
697 unchar reserved3[3]; 697 u8 reserved3[3];
698 ushort volatile Status; /* command status */ 698 u16 volatile Status; /* command status */
699 ushort Service; /* service(for async.events) */ 699 u16 Service; /* service(for async.events) */
700 ulong32 Info[2]; /* additional info */ 700 u32 Info[2]; /* additional info */
701 struct { 701 struct {
702 ushort offset; /* command offs. in the DPRAM*/ 702 u16 offset; /* command offs. in the DPRAM*/
703 ushort serv_id; /* service */ 703 u16 serv_id; /* service */
704 } PACKED comm_queue[MAXOFFSETS]; /* command queue */ 704 } __attribute__((packed)) comm_queue[MAXOFFSETS]; /* command queue */
705 ulong32 bios_reserved[2]; 705 u32 bios_reserved[2];
706 unchar gdt_dpr_cmd[1]; /* commands */ 706 u8 gdt_dpr_cmd[1]; /* commands */
707} PACKED gdt_dpr_if; 707} __attribute__((packed)) gdt_dpr_if;
708 708
709/* SRAM structure PCI controllers */ 709/* SRAM structure PCI controllers */
710typedef struct { 710typedef struct {
711 ulong32 magic; /* controller ID from BIOS */ 711 u32 magic; /* controller ID from BIOS */
712 ushort need_deinit; /* switch betw. BIOS/driver */ 712 u16 need_deinit; /* switch betw. BIOS/driver */
713 unchar switch_support; /* see need_deinit */ 713 u8 switch_support; /* see need_deinit */
714 unchar padding[9]; 714 u8 padding[9];
715 unchar os_used[16]; /* OS code per service */ 715 u8 os_used[16]; /* OS code per service */
716 unchar unused[28]; 716 u8 unused[28];
717 unchar fw_magic; /* contr. ID from firmware */ 717 u8 fw_magic; /* contr. ID from firmware */
718} PACKED gdt_pci_sram; 718} __attribute__((packed)) gdt_pci_sram;
719 719
720/* SRAM structure EISA controllers (but NOT GDT3000/3020) */ 720/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
721typedef struct { 721typedef struct {
722 unchar os_used[16]; /* OS code per service */ 722 u8 os_used[16]; /* OS code per service */
723 ushort need_deinit; /* switch betw. BIOS/driver */ 723 u16 need_deinit; /* switch betw. BIOS/driver */
724 unchar switch_support; /* see need_deinit */ 724 u8 switch_support; /* see need_deinit */
725 unchar padding; 725 u8 padding;
726} PACKED gdt_eisa_sram; 726} __attribute__((packed)) gdt_eisa_sram;
727 727
728 728
729/* DPRAM ISA controllers */ 729/* DPRAM ISA controllers */
730typedef struct { 730typedef struct {
731 union { 731 union {
732 struct { 732 struct {
733 unchar bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */ 733 u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
734 ulong32 magic; /* controller (EISA) ID */ 734 u32 magic; /* controller (EISA) ID */
735 ushort need_deinit; /* switch betw. BIOS/driver */ 735 u16 need_deinit; /* switch betw. BIOS/driver */
736 unchar switch_support; /* see need_deinit */ 736 u8 switch_support; /* see need_deinit */
737 unchar padding[9]; 737 u8 padding[9];
738 unchar os_used[16]; /* OS code per service */ 738 u8 os_used[16]; /* OS code per service */
739 } PACKED dp_sram; 739 } __attribute__((packed)) dp_sram;
740 unchar bios_area[0x4000]; /* 16KB reserved for BIOS */ 740 u8 bios_area[0x4000]; /* 16KB reserved for BIOS */
741 } bu; 741 } bu;
742 union { 742 union {
743 gdt_dpr_if ic; /* interface area */ 743 gdt_dpr_if ic; /* interface area */
744 unchar if_area[0x3000]; /* 12KB for interface */ 744 u8 if_area[0x3000]; /* 12KB for interface */
745 } u; 745 } u;
746 struct { 746 struct {
747 unchar memlock; /* write protection DPRAM */ 747 u8 memlock; /* write protection DPRAM */
748 unchar event; /* release event */ 748 u8 event; /* release event */
749 unchar irqen; /* board interrupts enable */ 749 u8 irqen; /* board interrupts enable */
750 unchar irqdel; /* acknowledge board int. */ 750 u8 irqdel; /* acknowledge board int. */
751 unchar volatile Sema1; /* status semaphore */ 751 u8 volatile Sema1; /* status semaphore */
752 unchar rq; /* IRQ/DRQ configuration */ 752 u8 rq; /* IRQ/DRQ configuration */
753 } PACKED io; 753 } __attribute__((packed)) io;
754} PACKED gdt2_dpram_str; 754} __attribute__((packed)) gdt2_dpram_str;
755 755
756/* DPRAM PCI controllers */ 756/* DPRAM PCI controllers */
757typedef struct { 757typedef struct {
758 union { 758 union {
759 gdt_dpr_if ic; /* interface area */ 759 gdt_dpr_if ic; /* interface area */
760 unchar if_area[0xff0-sizeof(gdt_pci_sram)]; 760 u8 if_area[0xff0-sizeof(gdt_pci_sram)];
761 } u; 761 } u;
762 gdt_pci_sram gdt6sr; /* SRAM structure */ 762 gdt_pci_sram gdt6sr; /* SRAM structure */
763 struct { 763 struct {
764 unchar unused0[1]; 764 u8 unused0[1];
765 unchar volatile Sema1; /* command semaphore */ 765 u8 volatile Sema1; /* command semaphore */
766 unchar unused1[3]; 766 u8 unused1[3];
767 unchar irqen; /* board interrupts enable */ 767 u8 irqen; /* board interrupts enable */
768 unchar unused2[2]; 768 u8 unused2[2];
769 unchar event; /* release event */ 769 u8 event; /* release event */
770 unchar unused3[3]; 770 u8 unused3[3];
771 unchar irqdel; /* acknowledge board int. */ 771 u8 irqdel; /* acknowledge board int. */
772 unchar unused4[3]; 772 u8 unused4[3];
773 } PACKED io; 773 } __attribute__((packed)) io;
774} PACKED gdt6_dpram_str; 774} __attribute__((packed)) gdt6_dpram_str;
775 775
776/* PLX register structure (new PCI controllers) */ 776/* PLX register structure (new PCI controllers) */
777typedef struct { 777typedef struct {
778 unchar cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/ 778 u8 cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
779 unchar unused1[0x3f]; 779 u8 unused1[0x3f];
780 unchar volatile sema0_reg; /* command semaphore */ 780 u8 volatile sema0_reg; /* command semaphore */
781 unchar volatile sema1_reg; /* status semaphore */ 781 u8 volatile sema1_reg; /* status semaphore */
782 unchar unused2[2]; 782 u8 unused2[2];
783 ushort volatile status; /* command status */ 783 u16 volatile status; /* command status */
784 ushort service; /* service */ 784 u16 service; /* service */
785 ulong32 info[2]; /* additional info */ 785 u32 info[2]; /* additional info */
786 unchar unused3[0x10]; 786 u8 unused3[0x10];
787 unchar ldoor_reg; /* PCI to local doorbell */ 787 u8 ldoor_reg; /* PCI to local doorbell */
788 unchar unused4[3]; 788 u8 unused4[3];
789 unchar volatile edoor_reg; /* local to PCI doorbell */ 789 u8 volatile edoor_reg; /* local to PCI doorbell */
790 unchar unused5[3]; 790 u8 unused5[3];
791 unchar control0; /* control0 register(unused) */ 791 u8 control0; /* control0 register(unused) */
792 unchar control1; /* board interrupts enable */ 792 u8 control1; /* board interrupts enable */
793 unchar unused6[0x16]; 793 u8 unused6[0x16];
794} PACKED gdt6c_plx_regs; 794} __attribute__((packed)) gdt6c_plx_regs;
795 795
796/* DPRAM new PCI controllers */ 796/* DPRAM new PCI controllers */
797typedef struct { 797typedef struct {
798 union { 798 union {
799 gdt_dpr_if ic; /* interface area */ 799 gdt_dpr_if ic; /* interface area */
800 unchar if_area[0x4000-sizeof(gdt_pci_sram)]; 800 u8 if_area[0x4000-sizeof(gdt_pci_sram)];
801 } u; 801 } u;
802 gdt_pci_sram gdt6sr; /* SRAM structure */ 802 gdt_pci_sram gdt6sr; /* SRAM structure */
803} PACKED gdt6c_dpram_str; 803} __attribute__((packed)) gdt6c_dpram_str;
804 804
805/* i960 register structure (PCI MPR controllers) */ 805/* i960 register structure (PCI MPR controllers) */
806typedef struct { 806typedef struct {
807 unchar unused1[16]; 807 u8 unused1[16];
808 unchar volatile sema0_reg; /* command semaphore */ 808 u8 volatile sema0_reg; /* command semaphore */
809 unchar unused2; 809 u8 unused2;
810 unchar volatile sema1_reg; /* status semaphore */ 810 u8 volatile sema1_reg; /* status semaphore */
811 unchar unused3; 811 u8 unused3;
812 ushort volatile status; /* command status */ 812 u16 volatile status; /* command status */
813 ushort service; /* service */ 813 u16 service; /* service */
814 ulong32 info[2]; /* additional info */ 814 u32 info[2]; /* additional info */
815 unchar ldoor_reg; /* PCI to local doorbell */ 815 u8 ldoor_reg; /* PCI to local doorbell */
816 unchar unused4[11]; 816 u8 unused4[11];
817 unchar volatile edoor_reg; /* local to PCI doorbell */ 817 u8 volatile edoor_reg; /* local to PCI doorbell */
818 unchar unused5[7]; 818 u8 unused5[7];
819 unchar edoor_en_reg; /* board interrupts enable */ 819 u8 edoor_en_reg; /* board interrupts enable */
820 unchar unused6[27]; 820 u8 unused6[27];
821 ulong32 unused7[939]; 821 u32 unused7[939];
822 ulong32 severity; 822 u32 severity;
823 char evt_str[256]; /* event string */ 823 char evt_str[256]; /* event string */
824} PACKED gdt6m_i960_regs; 824} __attribute__((packed)) gdt6m_i960_regs;
825 825
826/* DPRAM PCI MPR controllers */ 826/* DPRAM PCI MPR controllers */
827typedef struct { 827typedef struct {
828 gdt6m_i960_regs i960r; /* 4KB i960 registers */ 828 gdt6m_i960_regs i960r; /* 4KB i960 registers */
829 union { 829 union {
830 gdt_dpr_if ic; /* interface area */ 830 gdt_dpr_if ic; /* interface area */
831 unchar if_area[0x3000-sizeof(gdt_pci_sram)]; 831 u8 if_area[0x3000-sizeof(gdt_pci_sram)];
832 } u; 832 } u;
833 gdt_pci_sram gdt6sr; /* SRAM structure */ 833 gdt_pci_sram gdt6sr; /* SRAM structure */
834} PACKED gdt6m_dpram_str; 834} __attribute__((packed)) gdt6m_dpram_str;
835 835
836 836
837/* PCI resources */ 837/* PCI resources */
838typedef struct { 838typedef struct {
839 struct pci_dev *pdev; 839 struct pci_dev *pdev;
840 ulong dpmem; /* DPRAM address */ 840 unsigned long dpmem; /* DPRAM address */
841 ulong io; /* IO address */ 841 unsigned long io; /* IO address */
842} gdth_pci_str; 842} gdth_pci_str;
843 843
844 844
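Every DPRAM and IOCTL structure above carries the packed attribute, now spelled __attribute__((packed)) instead of the old PACKED macro. Packing matters here because the controller firmware consumes these layouts byte for byte. A small userspace sketch of the effect (sizes assume a typical ABI with 4-byte int alignment; the struct names are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* Without the attribute the compiler may pad 'b' to its natural
     * alignment; with it, the struct matches the wire layout exactly. */
    struct with_padding { uint8_t a; uint32_t b; };                          /* usually 8 bytes */
    struct no_padding   { uint8_t a; uint32_t b; } __attribute__((packed)); /* exactly 5 bytes */

    int main(void)
    {
        printf("padded=%zu packed=%zu\n",
               sizeof(struct with_padding), sizeof(struct no_padding));
        return 0;
    }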
@@ -846,93 +846,93 @@ typedef struct {
846typedef struct { 846typedef struct {
847 struct Scsi_Host *shost; 847 struct Scsi_Host *shost;
848 struct list_head list; 848 struct list_head list;
849 ushort hanum; 849 u16 hanum;
850 ushort oem_id; /* OEM */ 850 u16 oem_id; /* OEM */
851 ushort type; /* controller class */ 851 u16 type; /* controller class */
852 ulong32 stype; /* subtype (PCI: device ID) */ 852 u32 stype; /* subtype (PCI: device ID) */
853 ushort fw_vers; /* firmware version */ 853 u16 fw_vers; /* firmware version */
854 ushort cache_feat; /* feat. cache serv. (s/g,..)*/ 854 u16 cache_feat; /* feat. cache serv. (s/g,..)*/
855 ushort raw_feat; /* feat. raw service (s/g,..)*/ 855 u16 raw_feat; /* feat. raw service (s/g,..)*/
856    ushort      screen_feat;        /* feat. screen serv. (s/g,..)*/	856    u16         screen_feat;        /* feat. screen serv. (s/g,..)*/
857 ushort bmic; /* BMIC address (EISA) */ 857 u16 bmic; /* BMIC address (EISA) */
858 void __iomem *brd; /* DPRAM address */ 858 void __iomem *brd; /* DPRAM address */
859 ulong32 brd_phys; /* slot number/BIOS address */ 859 u32 brd_phys; /* slot number/BIOS address */
860 gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */ 860 gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
861 gdth_cmd_str cmdext; 861 gdth_cmd_str cmdext;
862 gdth_cmd_str *pccb; /* address command structure */ 862 gdth_cmd_str *pccb; /* address command structure */
863 ulong32 ccb_phys; /* phys. address */ 863 u32 ccb_phys; /* phys. address */
864#ifdef INT_COAL 864#ifdef INT_COAL
865 gdth_coal_status *coal_stat; /* buffer for coalescing int.*/ 865 gdth_coal_status *coal_stat; /* buffer for coalescing int.*/
866 ulong64 coal_stat_phys; /* phys. address */ 866 u64 coal_stat_phys; /* phys. address */
867#endif 867#endif
868 char *pscratch; /* scratch (DMA) buffer */ 868 char *pscratch; /* scratch (DMA) buffer */
869 ulong64 scratch_phys; /* phys. address */ 869 u64 scratch_phys; /* phys. address */
870 unchar scratch_busy; /* in use? */ 870 u8 scratch_busy; /* in use? */
871 unchar dma64_support; /* 64-bit DMA supported? */ 871 u8 dma64_support; /* 64-bit DMA supported? */
872 gdth_msg_str *pmsg; /* message buffer */ 872 gdth_msg_str *pmsg; /* message buffer */
873 ulong64 msg_phys; /* phys. address */ 873 u64 msg_phys; /* phys. address */
874 unchar scan_mode; /* current scan mode */ 874 u8 scan_mode; /* current scan mode */
875 unchar irq; /* IRQ */ 875 u8 irq; /* IRQ */
876 unchar drq; /* DRQ (ISA controllers) */ 876 u8 drq; /* DRQ (ISA controllers) */
877 ushort status; /* command status */ 877 u16 status; /* command status */
878 ushort service; /* service/firmware ver./.. */ 878 u16 service; /* service/firmware ver./.. */
879 ulong32 info; 879 u32 info;
880 ulong32 info2; /* additional info */ 880 u32 info2; /* additional info */
881 Scsi_Cmnd *req_first; /* top of request queue */ 881 Scsi_Cmnd *req_first; /* top of request queue */
882 struct { 882 struct {
883 unchar present; /* Flag: host drive present? */ 883 u8 present; /* Flag: host drive present? */
884 unchar is_logdrv; /* Flag: log. drive (master)? */ 884 u8 is_logdrv; /* Flag: log. drive (master)? */
885 unchar is_arraydrv; /* Flag: array drive? */ 885 u8 is_arraydrv; /* Flag: array drive? */
886 unchar is_master; /* Flag: array drive master? */ 886 u8 is_master; /* Flag: array drive master? */
887 unchar is_parity; /* Flag: parity drive? */ 887 u8 is_parity; /* Flag: parity drive? */
888 unchar is_hotfix; /* Flag: hotfix drive? */ 888 u8 is_hotfix; /* Flag: hotfix drive? */
889 unchar master_no; /* number of master drive */ 889 u8 master_no; /* number of master drive */
890 unchar lock; /* drive locked? (hot plug) */ 890 u8 lock; /* drive locked? (hot plug) */
891 unchar heads; /* mapping */ 891 u8 heads; /* mapping */
892 unchar secs; 892 u8 secs;
893 ushort devtype; /* further information */ 893 u16 devtype; /* further information */
894 ulong64 size; /* capacity */ 894 u64 size; /* capacity */
895 unchar ldr_no; /* log. drive no. */ 895 u8 ldr_no; /* log. drive no. */
896 unchar rw_attribs; /* r/w attributes */ 896 u8 rw_attribs; /* r/w attributes */
897 unchar cluster_type; /* cluster properties */ 897 u8 cluster_type; /* cluster properties */
898 unchar media_changed; /* Flag:MOUNT/UNMOUNT occurred */ 898 u8 media_changed; /* Flag:MOUNT/UNMOUNT occurred */
899 ulong32 start_sec; /* start sector */ 899 u32 start_sec; /* start sector */
900 } hdr[MAX_LDRIVES]; /* host drives */ 900 } hdr[MAX_LDRIVES]; /* host drives */
901 struct { 901 struct {
902 unchar lock; /* channel locked? (hot plug) */ 902 u8 lock; /* channel locked? (hot plug) */
903 unchar pdev_cnt; /* physical device count */ 903 u8 pdev_cnt; /* physical device count */
904 unchar local_no; /* local channel number */ 904 u8 local_no; /* local channel number */
905 unchar io_cnt[MAXID]; /* current IO count */ 905 u8 io_cnt[MAXID]; /* current IO count */
906 ulong32 address; /* channel address */ 906 u32 address; /* channel address */
907 ulong32 id_list[MAXID]; /* IDs of the phys. devices */ 907 u32 id_list[MAXID]; /* IDs of the phys. devices */
908 } raw[MAXBUS]; /* SCSI channels */ 908 } raw[MAXBUS]; /* SCSI channels */
909 struct { 909 struct {
910 Scsi_Cmnd *cmnd; /* pending request */ 910 Scsi_Cmnd *cmnd; /* pending request */
911 ushort service; /* service */ 911 u16 service; /* service */
912 } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */ 912 } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
913 struct gdth_cmndinfo { /* per-command private info */ 913 struct gdth_cmndinfo { /* per-command private info */
914 int index; 914 int index;
915 int internal_command; /* don't call scsi_done */ 915 int internal_command; /* don't call scsi_done */
916 gdth_cmd_str *internal_cmd_str; /* carrier for internal messages*/ 916 gdth_cmd_str *internal_cmd_str; /* carrier for internal messages*/
917 dma_addr_t sense_paddr; /* sense dma-addr */ 917 dma_addr_t sense_paddr; /* sense dma-addr */
918 unchar priority; 918 u8 priority;
919 int timeout_count; /* # of timeout calls */ 919 int timeout_count; /* # of timeout calls */
920 volatile int wait_for_completion; 920 volatile int wait_for_completion;
921 ushort status; 921 u16 status;
922 ulong32 info; 922 u32 info;
923 enum dma_data_direction dma_dir; 923 enum dma_data_direction dma_dir;
924 int phase; /* ???? */ 924 int phase; /* ???? */
925 int OpCode; 925 int OpCode;
926 } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */ 926 } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */
927 unchar bus_cnt; /* SCSI bus count */ 927 u8 bus_cnt; /* SCSI bus count */
928 unchar tid_cnt; /* Target ID count */ 928 u8 tid_cnt; /* Target ID count */
929 unchar bus_id[MAXBUS]; /* IOP IDs */ 929 u8 bus_id[MAXBUS]; /* IOP IDs */
930 unchar virt_bus; /* number of virtual bus */ 930 u8 virt_bus; /* number of virtual bus */
931 unchar more_proc; /* more /proc info supported */ 931 u8 more_proc; /* more /proc info supported */
932 ushort cmd_cnt; /* command count in DPRAM */ 932 u16 cmd_cnt; /* command count in DPRAM */
933 ushort cmd_len; /* length of actual command */ 933 u16 cmd_len; /* length of actual command */
934 ushort cmd_offs_dpmem; /* actual offset in DPRAM */ 934 u16 cmd_offs_dpmem; /* actual offset in DPRAM */
935 ushort ic_all_size; /* sizeof DPRAM interf. area */ 935 u16 ic_all_size; /* sizeof DPRAM interf. area */
936 gdth_cpar_str cpar; /* controller cache par. */ 936 gdth_cpar_str cpar; /* controller cache par. */
937 gdth_bfeat_str bfeat; /* controller features */ 937 gdth_bfeat_str bfeat; /* controller features */
938 gdth_binfo_str binfo; /* controller info */ 938 gdth_binfo_str binfo; /* controller info */
@@ -941,7 +941,7 @@ typedef struct {
941 struct pci_dev *pdev; 941 struct pci_dev *pdev;
942 char oem_name[8]; 942 char oem_name[8];
943#ifdef GDTH_DMA_STATISTICS 943#ifdef GDTH_DMA_STATISTICS
944 ulong dma32_cnt, dma64_cnt; /* statistics: DMA buffer */ 944 unsigned long dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
945#endif 945#endif
946 struct scsi_device *sdev; 946 struct scsi_device *sdev;
947} gdth_ha_str; 947} gdth_ha_str;
@@ -953,65 +953,65 @@ static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd* cmd)
953 953
954/* INQUIRY data format */ 954/* INQUIRY data format */
955typedef struct { 955typedef struct {
956 unchar type_qual; 956 u8 type_qual;
957 unchar modif_rmb; 957 u8 modif_rmb;
958 unchar version; 958 u8 version;
959 unchar resp_aenc; 959 u8 resp_aenc;
960 unchar add_length; 960 u8 add_length;
961 unchar reserved1; 961 u8 reserved1;
962 unchar reserved2; 962 u8 reserved2;
963 unchar misc; 963 u8 misc;
964 unchar vendor[8]; 964 u8 vendor[8];
965 unchar product[16]; 965 u8 product[16];
966 unchar revision[4]; 966 u8 revision[4];
967} PACKED gdth_inq_data; 967} __attribute__((packed)) gdth_inq_data;
968 968
969/* READ_CAPACITY data format */ 969/* READ_CAPACITY data format */
970typedef struct { 970typedef struct {
971 ulong32 last_block_no; 971 u32 last_block_no;
972 ulong32 block_length; 972 u32 block_length;
973} PACKED gdth_rdcap_data; 973} __attribute__((packed)) gdth_rdcap_data;
974 974
975/* READ_CAPACITY (16) data format */ 975/* READ_CAPACITY (16) data format */
976typedef struct { 976typedef struct {
977 ulong64 last_block_no; 977 u64 last_block_no;
978 ulong32 block_length; 978 u32 block_length;
979} PACKED gdth_rdcap16_data; 979} __attribute__((packed)) gdth_rdcap16_data;
980 980
981/* REQUEST_SENSE data format */ 981/* REQUEST_SENSE data format */
982typedef struct { 982typedef struct {
983 unchar errorcode; 983 u8 errorcode;
984 unchar segno; 984 u8 segno;
985 unchar key; 985 u8 key;
986 ulong32 info; 986 u32 info;
987 unchar add_length; 987 u8 add_length;
988 ulong32 cmd_info; 988 u32 cmd_info;
989 unchar adsc; 989 u8 adsc;
990 unchar adsq; 990 u8 adsq;
991 unchar fruc; 991 u8 fruc;
992 unchar key_spec[3]; 992 u8 key_spec[3];
993} PACKED gdth_sense_data; 993} __attribute__((packed)) gdth_sense_data;
994 994
995/* MODE_SENSE data format */ 995/* MODE_SENSE data format */
996typedef struct { 996typedef struct {
997 struct { 997 struct {
998 unchar data_length; 998 u8 data_length;
999 unchar med_type; 999 u8 med_type;
1000 unchar dev_par; 1000 u8 dev_par;
1001 unchar bd_length; 1001 u8 bd_length;
1002 } PACKED hd; 1002 } __attribute__((packed)) hd;
1003 struct { 1003 struct {
1004 unchar dens_code; 1004 u8 dens_code;
1005 unchar block_count[3]; 1005 u8 block_count[3];
1006 unchar reserved; 1006 u8 reserved;
1007 unchar block_length[3]; 1007 u8 block_length[3];
1008 } PACKED bd; 1008 } __attribute__((packed)) bd;
1009} PACKED gdth_modep_data; 1009} __attribute__((packed)) gdth_modep_data;
1010 1010
1011/* stack frame */ 1011/* stack frame */
1012typedef struct { 1012typedef struct {
1013 ulong b[10]; /* 32/64 bit compiler ! */ 1013 unsigned long b[10]; /* 32/64 bit compiler ! */
1014} PACKED gdth_stackframe; 1014} __attribute__((packed)) gdth_stackframe;
1015 1015
1016 1016
1017/* function prototyping */ 1017/* function prototyping */
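
The gdth.h hunks above are a mechanical type conversion: the driver-private integer typedefs and the PACKED macro give way to the kernel's fixed-width types and the plain GCC attribute. A minimal sketch of the mapping, assuming only what the hunks themselves show:

    #include <linux/types.h>

    /* unchar  -> u8          ushort  -> u16
     * ulong32 -> u32         ulong64 -> u64
     * ulong   -> unsigned long
     * PACKED  -> __attribute__((packed))
     */
    typedef struct {
            u32 sg_ptr;             /* address */
            u32 sg_len;             /* length  */
    } __attribute__((packed)) example_sg_str;  /* illustrative name */
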
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
index 783fae737f17..b004c6165887 100644
--- a/drivers/scsi/gdth_ioctl.h
+++ b/drivers/scsi/gdth_ioctl.h
@@ -32,109 +32,101 @@
32#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */ 32#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
33#endif 33#endif
34 34
35/* typedefs */
36#ifdef __KERNEL__
37typedef u32 ulong32;
38typedef u64 ulong64;
39#endif
40
41#define PACKED __attribute__((packed))
42
43/* scatter/gather element */ 35/* scatter/gather element */
44typedef struct { 36typedef struct {
45 ulong32 sg_ptr; /* address */ 37 u32 sg_ptr; /* address */
46 ulong32 sg_len; /* length */ 38 u32 sg_len; /* length */
47} PACKED gdth_sg_str; 39} __attribute__((packed)) gdth_sg_str;
48 40
49/* scatter/gather element - 64bit addresses */ 41/* scatter/gather element - 64bit addresses */
50typedef struct { 42typedef struct {
51 ulong64 sg_ptr; /* address */ 43 u64 sg_ptr; /* address */
52 ulong32 sg_len; /* length */ 44 u32 sg_len; /* length */
53} PACKED gdth_sg64_str; 45} __attribute__((packed)) gdth_sg64_str;
54 46
55/* command structure */ 47/* command structure */
56typedef struct { 48typedef struct {
57 ulong32 BoardNode; /* board node (always 0) */ 49 u32 BoardNode; /* board node (always 0) */
58 ulong32 CommandIndex; /* command number */ 50 u32 CommandIndex; /* command number */
59 ushort OpCode; /* the command (READ,..) */ 51 u16 OpCode; /* the command (READ,..) */
60 union { 52 union {
61 struct { 53 struct {
62 ushort DeviceNo; /* number of cache drive */ 54 u16 DeviceNo; /* number of cache drive */
63 ulong32 BlockNo; /* block number */ 55 u32 BlockNo; /* block number */
64 ulong32 BlockCnt; /* block count */ 56 u32 BlockCnt; /* block count */
65 ulong32 DestAddr; /* dest. addr. (if s/g: -1) */ 57 u32 DestAddr; /* dest. addr. (if s/g: -1) */
66 ulong32 sg_canz; /* s/g element count */ 58 u32 sg_canz; /* s/g element count */
67 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ 59 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
68 } PACKED cache; /* cache service cmd. str. */ 60 } __attribute__((packed)) cache; /* cache service cmd. str. */
69 struct { 61 struct {
70 ushort DeviceNo; /* number of cache drive */ 62 u16 DeviceNo; /* number of cache drive */
71 ulong64 BlockNo; /* block number */ 63 u64 BlockNo; /* block number */
72 ulong32 BlockCnt; /* block count */ 64 u32 BlockCnt; /* block count */
73 ulong64 DestAddr; /* dest. addr. (if s/g: -1) */ 65 u64 DestAddr; /* dest. addr. (if s/g: -1) */
74 ulong32 sg_canz; /* s/g element count */ 66 u32 sg_canz; /* s/g element count */
75 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ 67 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
76 } PACKED cache64; /* cache service cmd. str. */ 68 } __attribute__((packed)) cache64; /* cache service cmd. str. */
77 struct { 69 struct {
78 ushort param_size; /* size of p_param buffer */ 70 u16 param_size; /* size of p_param buffer */
79 ulong32 subfunc; /* IOCTL function */ 71 u32 subfunc; /* IOCTL function */
80 ulong32 channel; /* device */ 72 u32 channel; /* device */
81 ulong64 p_param; /* buffer */ 73 u64 p_param; /* buffer */
82 } PACKED ioctl; /* IOCTL command structure */ 74 } __attribute__((packed)) ioctl; /* IOCTL command structure */
83 struct { 75 struct {
84 ushort reserved; 76 u16 reserved;
85 union { 77 union {
86 struct { 78 struct {
87 ulong32 msg_handle; /* message handle */ 79 u32 msg_handle; /* message handle */
88 ulong64 msg_addr; /* message buffer address */ 80 u64 msg_addr; /* message buffer address */
89 } PACKED msg; 81 } __attribute__((packed)) msg;
90 unchar data[12]; /* buffer for rtc data, ... */ 82 u8 data[12]; /* buffer for rtc data, ... */
91 } su; 83 } su;
92 } PACKED screen; /* screen service cmd. str. */ 84 } __attribute__((packed)) screen; /* screen service cmd. str. */
93 struct { 85 struct {
94 ushort reserved; 86 u16 reserved;
95 ulong32 direction; /* data direction */ 87 u32 direction; /* data direction */
96 ulong32 mdisc_time; /* disc. time (0: no timeout)*/ 88 u32 mdisc_time; /* disc. time (0: no timeout)*/
97 ulong32 mcon_time; /* connect time(0: no to.) */ 89 u32 mcon_time; /* connect time(0: no to.) */
98 ulong32 sdata; /* dest. addr. (if s/g: -1) */ 90 u32 sdata; /* dest. addr. (if s/g: -1) */
99 ulong32 sdlen; /* data length (bytes) */ 91 u32 sdlen; /* data length (bytes) */
100 ulong32 clen; /* SCSI cmd. length(6,10,12) */ 92 u32 clen; /* SCSI cmd. length(6,10,12) */
101 unchar cmd[12]; /* SCSI command */ 93 u8 cmd[12]; /* SCSI command */
102 unchar target; /* target ID */ 94 u8 target; /* target ID */
103 unchar lun; /* LUN */ 95 u8 lun; /* LUN */
104 unchar bus; /* SCSI bus number */ 96 u8 bus; /* SCSI bus number */
105 unchar priority; /* only 0 used */ 97 u8 priority; /* only 0 used */
106 ulong32 sense_len; /* sense data length */ 98 u32 sense_len; /* sense data length */
107 ulong32 sense_data; /* sense data addr. */ 99 u32 sense_data; /* sense data addr. */
108 ulong32 link_p; /* linked cmds (not supp.) */ 100 u32 link_p; /* linked cmds (not supp.) */
109 ulong32 sg_ranz; /* s/g element count */ 101 u32 sg_ranz; /* s/g element count */
110 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ 102 gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
111 } PACKED raw; /* raw service cmd. struct. */ 103 } __attribute__((packed)) raw; /* raw service cmd. struct. */
112 struct { 104 struct {
113 ushort reserved; 105 u16 reserved;
114 ulong32 direction; /* data direction */ 106 u32 direction; /* data direction */
115 ulong32 mdisc_time; /* disc. time (0: no timeout)*/ 107 u32 mdisc_time; /* disc. time (0: no timeout)*/
116 ulong32 mcon_time; /* connect time(0: no to.) */ 108 u32 mcon_time; /* connect time(0: no to.) */
117 ulong64 sdata; /* dest. addr. (if s/g: -1) */ 109 u64 sdata; /* dest. addr. (if s/g: -1) */
118 ulong32 sdlen; /* data length (bytes) */ 110 u32 sdlen; /* data length (bytes) */
119 ulong32 clen; /* SCSI cmd. length(6,..,16) */ 111 u32 clen; /* SCSI cmd. length(6,..,16) */
120 unchar cmd[16]; /* SCSI command */ 112 u8 cmd[16]; /* SCSI command */
121 unchar target; /* target ID */ 113 u8 target; /* target ID */
122 unchar lun; /* LUN */ 114 u8 lun; /* LUN */
123 unchar bus; /* SCSI bus number */ 115 u8 bus; /* SCSI bus number */
124 unchar priority; /* only 0 used */ 116 u8 priority; /* only 0 used */
125 ulong32 sense_len; /* sense data length */ 117 u32 sense_len; /* sense data length */
126 ulong64 sense_data; /* sense data addr. */ 118 u64 sense_data; /* sense data addr. */
127 ulong32 sg_ranz; /* s/g element count */ 119 u32 sg_ranz; /* s/g element count */
128 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ 120 gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
129 } PACKED raw64; /* raw service cmd. struct. */ 121 } __attribute__((packed)) raw64; /* raw service cmd. struct. */
130 } u; 122 } u;
131 /* additional variables */ 123 /* additional variables */
132 unchar Service; /* controller service */ 124 u8 Service; /* controller service */
133 unchar reserved; 125 u8 reserved;
134 ushort Status; /* command result */ 126 u16 Status; /* command result */
135 ulong32 Info; /* additional information */ 127 u32 Info; /* additional information */
136 void *RequestBuffer; /* request buffer */ 128 void *RequestBuffer; /* request buffer */
137} PACKED gdth_cmd_str; 129} __attribute__((packed)) gdth_cmd_str;
138 130
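
gdth_cmd_str above is the wire format for every controller request; which union member is live depends on OpCode. A hypothetical sketch of filling the 64-bit cache-service variant — the opcode constant and the lba/nblocks/dma_addr values are placeholders, not part of this patch:

    gdth_cmd_str cmd = {0};

    cmd.BoardNode = 0;                    /* always 0, per the comment   */
    cmd.OpCode    = GDT_READ;             /* placeholder opcode          */
    cmd.u.cache64.DeviceNo = 0;           /* first cache drive           */
    cmd.u.cache64.BlockNo  = lba;
    cmd.u.cache64.BlockCnt = nblocks;
    cmd.u.cache64.DestAddr = (u64)-1;     /* -1 means "use the s/g list" */
    cmd.u.cache64.sg_canz  = 1;           /* one s/g element             */
    cmd.u.cache64.sg_lst[0].sg_ptr = dma_addr;
    cmd.u.cache64.sg_lst[0].sg_len = nblocks * 512;
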
139/* controller event structure */ 131/* controller event structure */
140#define ES_ASYNC 1 132#define ES_ASYNC 1
@@ -142,129 +134,129 @@ typedef struct {
142#define ES_TEST 3 134#define ES_TEST 3
143#define ES_SYNC 4 135#define ES_SYNC 4
144typedef struct { 136typedef struct {
145 ushort size; /* size of structure */ 137 u16 size; /* size of structure */
146 union { 138 union {
147 char stream[16]; 139 char stream[16];
148 struct { 140 struct {
149 ushort ionode; 141 u16 ionode;
150 ushort service; 142 u16 service;
151 ulong32 index; 143 u32 index;
152 } PACKED driver; 144 } __attribute__((packed)) driver;
153 struct { 145 struct {
154 ushort ionode; 146 u16 ionode;
155 ushort service; 147 u16 service;
156 ushort status; 148 u16 status;
157 ulong32 info; 149 u32 info;
158 unchar scsi_coord[3]; 150 u8 scsi_coord[3];
159 } PACKED async; 151 } __attribute__((packed)) async;
160 struct { 152 struct {
161 ushort ionode; 153 u16 ionode;
162 ushort service; 154 u16 service;
163 ushort status; 155 u16 status;
164 ulong32 info; 156 u32 info;
165 ushort hostdrive; 157 u16 hostdrive;
166 unchar scsi_coord[3]; 158 u8 scsi_coord[3];
167 unchar sense_key; 159 u8 sense_key;
168 } PACKED sync; 160 } __attribute__((packed)) sync;
169 struct { 161 struct {
170 ulong32 l1, l2, l3, l4; 162 u32 l1, l2, l3, l4;
171 } PACKED test; 163 } __attribute__((packed)) test;
172 } eu; 164 } eu;
173 ulong32 severity; 165 u32 severity;
174 unchar event_string[256]; 166 u8 event_string[256];
175} PACKED gdth_evt_data; 167} __attribute__((packed)) gdth_evt_data;
176 168
177typedef struct { 169typedef struct {
178 ulong32 first_stamp; 170 u32 first_stamp;
179 ulong32 last_stamp; 171 u32 last_stamp;
180 ushort same_count; 172 u16 same_count;
181 ushort event_source; 173 u16 event_source;
182 ushort event_idx; 174 u16 event_idx;
183 unchar application; 175 u8 application;
184 unchar reserved; 176 u8 reserved;
185 gdth_evt_data event_data; 177 gdth_evt_data event_data;
186} PACKED gdth_evt_str; 178} __attribute__((packed)) gdth_evt_str;
187 179
188 180
189#ifdef GDTH_IOCTL_PROC 181#ifdef GDTH_IOCTL_PROC
190/* IOCTL structure (write) */ 182/* IOCTL structure (write) */
191typedef struct { 183typedef struct {
192 ulong32 magic; /* IOCTL magic */ 184 u32 magic; /* IOCTL magic */
193 ushort ioctl; /* IOCTL */ 185 u16 ioctl; /* IOCTL */
194 ushort ionode; /* controller number */ 186 u16 ionode; /* controller number */
195 ushort service; /* controller service */ 187 u16 service; /* controller service */
196 ushort timeout; /* timeout */ 188 u16 timeout; /* timeout */
197 union { 189 union {
198 struct { 190 struct {
199 unchar command[512]; /* controller command */ 191 u8 command[512]; /* controller command */
200 unchar data[1]; /* add. data */ 192 u8 data[1]; /* add. data */
201 } general; 193 } general;
202 struct { 194 struct {
203 unchar lock; /* lock/unlock */ 195 u8 lock; /* lock/unlock */
204 unchar drive_cnt; /* drive count */ 196 u8 drive_cnt; /* drive count */
205 ushort drives[MAX_HDRIVES];/* drives */ 197 u16 drives[MAX_HDRIVES];/* drives */
206 } lockdrv; 198 } lockdrv;
207 struct { 199 struct {
208 unchar lock; /* lock/unlock */ 200 u8 lock; /* lock/unlock */
209 unchar channel; /* channel */ 201 u8 channel; /* channel */
210 } lockchn; 202 } lockchn;
211 struct { 203 struct {
212 int erase; /* erase event ? */ 204 int erase; /* erase event ? */
213 int handle; 205 int handle;
214 unchar evt[EVENT_SIZE]; /* event structure */ 206 u8 evt[EVENT_SIZE]; /* event structure */
215 } event; 207 } event;
216 struct { 208 struct {
217 unchar bus; /* SCSI bus */ 209 u8 bus; /* SCSI bus */
218 unchar target; /* target ID */ 210 u8 target; /* target ID */
219 unchar lun; /* LUN */ 211 u8 lun; /* LUN */
220 unchar cmd_len; /* command length */ 212 u8 cmd_len; /* command length */
221 unchar cmd[12]; /* SCSI command */ 213 u8 cmd[12]; /* SCSI command */
222 } scsi; 214 } scsi;
223 struct { 215 struct {
224 ushort hdr_no; /* host drive number */ 216 u16 hdr_no; /* host drive number */
225 unchar flag; /* old meth./add/remove */ 217 u8 flag; /* old meth./add/remove */
226 } rescan; 218 } rescan;
227 } iu; 219 } iu;
228} gdth_iowr_str; 220} gdth_iowr_str;
229 221
230/* IOCTL structure (read) */ 222/* IOCTL structure (read) */
231typedef struct { 223typedef struct {
232 ulong32 size; /* buffer size */ 224 u32 size; /* buffer size */
233 ulong32 status; /* IOCTL error code */ 225 u32 status; /* IOCTL error code */
234 union { 226 union {
235 struct { 227 struct {
236 unchar data[1]; /* data */ 228 u8 data[1]; /* data */
237 } general; 229 } general;
238 struct { 230 struct {
239 ushort version; /* driver version */ 231 u16 version; /* driver version */
240 } drvers; 232 } drvers;
241 struct { 233 struct {
242 unchar type; /* controller type */ 234 u8 type; /* controller type */
243 ushort info; /* slot etc. */ 235 u16 info; /* slot etc. */
244 ushort oem_id; /* OEM ID */ 236 u16 oem_id; /* OEM ID */
245 ushort bios_ver; /* not used */ 237 u16 bios_ver; /* not used */
246 ushort access; /* not used */ 238 u16 access; /* not used */
247 ushort ext_type; /* extended type */ 239 u16 ext_type; /* extended type */
248 ushort device_id; /* device ID */ 240 u16 device_id; /* device ID */
249 ushort sub_device_id; /* sub device ID */ 241 u16 sub_device_id; /* sub device ID */
250 } ctrtype; 242 } ctrtype;
251 struct { 243 struct {
252 unchar version; /* OS version */ 244 u8 version; /* OS version */
253 unchar subversion; /* OS subversion */ 245 u8 subversion; /* OS subversion */
254 ushort revision; /* revision */ 246 u16 revision; /* revision */
255 } osvers; 247 } osvers;
256 struct { 248 struct {
257 ushort count; /* controller count */ 249 u16 count; /* controller count */
258 } ctrcnt; 250 } ctrcnt;
259 struct { 251 struct {
260 int handle; 252 int handle;
261 unchar evt[EVENT_SIZE]; /* event structure */ 253 u8 evt[EVENT_SIZE]; /* event structure */
262 } event; 254 } event;
263 struct { 255 struct {
264 unchar bus; /* SCSI bus, 0xff: invalid */ 256 u8 bus; /* SCSI bus, 0xff: invalid */
265 unchar target; /* target ID */ 257 u8 target; /* target ID */
266 unchar lun; /* LUN */ 258 u8 lun; /* LUN */
267 unchar cluster_type; /* cluster properties */ 259 u8 cluster_type; /* cluster properties */
268 } hdr_list[MAX_HDRIVES]; /* index is host drive number */ 260 } hdr_list[MAX_HDRIVES]; /* index is host drive number */
269 } iu; 261 } iu;
270} gdth_iord_str; 262} gdth_iord_str;
@@ -272,53 +264,53 @@ typedef struct {
272 264
273/* GDTIOCTL_GENERAL */ 265/* GDTIOCTL_GENERAL */
274typedef struct { 266typedef struct {
275 ushort ionode; /* controller number */ 267 u16 ionode; /* controller number */
276 ushort timeout; /* timeout */ 268 u16 timeout; /* timeout */
277 ulong32 info; /* error info */ 269 u32 info; /* error info */
278 ushort status; /* status */ 270 u16 status; /* status */
279 ulong data_len; /* data buffer size */ 271 unsigned long data_len; /* data buffer size */
280 ulong sense_len; /* sense buffer size */ 272 unsigned long sense_len; /* sense buffer size */
281 gdth_cmd_str command; /* command */ 273 gdth_cmd_str command; /* command */
282} gdth_ioctl_general; 274} gdth_ioctl_general;
283 275
284/* GDTIOCTL_LOCKDRV */ 276/* GDTIOCTL_LOCKDRV */
285typedef struct { 277typedef struct {
286 ushort ionode; /* controller number */ 278 u16 ionode; /* controller number */
287 unchar lock; /* lock/unlock */ 279 u8 lock; /* lock/unlock */
288 unchar drive_cnt; /* drive count */ 280 u8 drive_cnt; /* drive count */
289 ushort drives[MAX_HDRIVES]; /* drives */ 281 u16 drives[MAX_HDRIVES]; /* drives */
290} gdth_ioctl_lockdrv; 282} gdth_ioctl_lockdrv;
291 283
292/* GDTIOCTL_LOCKCHN */ 284/* GDTIOCTL_LOCKCHN */
293typedef struct { 285typedef struct {
294 ushort ionode; /* controller number */ 286 u16 ionode; /* controller number */
295 unchar lock; /* lock/unlock */ 287 u8 lock; /* lock/unlock */
296 unchar channel; /* channel */ 288 u8 channel; /* channel */
297} gdth_ioctl_lockchn; 289} gdth_ioctl_lockchn;
298 290
299/* GDTIOCTL_OSVERS */ 291/* GDTIOCTL_OSVERS */
300typedef struct { 292typedef struct {
301 unchar version; /* OS version */ 293 u8 version; /* OS version */
302 unchar subversion; /* OS subversion */ 294 u8 subversion; /* OS subversion */
303 ushort revision; /* revision */ 295 u16 revision; /* revision */
304} gdth_ioctl_osvers; 296} gdth_ioctl_osvers;
305 297
306/* GDTIOCTL_CTRTYPE */ 298/* GDTIOCTL_CTRTYPE */
307typedef struct { 299typedef struct {
308 ushort ionode; /* controller number */ 300 u16 ionode; /* controller number */
309 unchar type; /* controller type */ 301 u8 type; /* controller type */
310 ushort info; /* slot etc. */ 302 u16 info; /* slot etc. */
311 ushort oem_id; /* OEM ID */ 303 u16 oem_id; /* OEM ID */
312 ushort bios_ver; /* not used */ 304 u16 bios_ver; /* not used */
313 ushort access; /* not used */ 305 u16 access; /* not used */
314 ushort ext_type; /* extended type */ 306 u16 ext_type; /* extended type */
315 ushort device_id; /* device ID */ 307 u16 device_id; /* device ID */
316 ushort sub_device_id; /* sub device ID */ 308 u16 sub_device_id; /* sub device ID */
317} gdth_ioctl_ctrtype; 309} gdth_ioctl_ctrtype;
318 310
319/* GDTIOCTL_EVENT */ 311/* GDTIOCTL_EVENT */
320typedef struct { 312typedef struct {
321 ushort ionode; 313 u16 ionode;
322 int erase; /* erase event? */ 314 int erase; /* erase event? */
323 int handle; /* event handle */ 315 int handle; /* event handle */
324 gdth_evt_str event; 316 gdth_evt_str event;
@@ -326,22 +318,22 @@ typedef struct {
326 318
327/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */ 319/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
328typedef struct { 320typedef struct {
329 ushort ionode; /* controller number */ 321 u16 ionode; /* controller number */
330 unchar flag; /* add/remove */ 322 u8 flag; /* add/remove */
331 ushort hdr_no; /* drive no. */ 323 u16 hdr_no; /* drive no. */
332 struct { 324 struct {
333 unchar bus; /* SCSI bus */ 325 u8 bus; /* SCSI bus */
334 unchar target; /* target ID */ 326 u8 target; /* target ID */
335 unchar lun; /* LUN */ 327 u8 lun; /* LUN */
336 unchar cluster_type; /* cluster properties */ 328 u8 cluster_type; /* cluster properties */
337 } hdr_list[MAX_HDRIVES]; /* index is host drive number */ 329 } hdr_list[MAX_HDRIVES]; /* index is host drive number */
338} gdth_ioctl_rescan; 330} gdth_ioctl_rescan;
339 331
340/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */ 332/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
341typedef struct { 333typedef struct {
342 ushort ionode; /* controller number */ 334 u16 ionode; /* controller number */
343 ushort number; /* bus/host drive number */ 335 u16 number; /* bus/host drive number */
344 ushort status; /* status */ 336 u16 status; /* status */
345} gdth_ioctl_reset; 337} gdth_ioctl_reset;
346 338
347#endif 339#endif
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 1258da34fbc2..ffb2b21992ba 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -43,7 +43,7 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
43 int i, found; 43 int i, found;
44 gdth_cmd_str gdtcmd; 44 gdth_cmd_str gdtcmd;
45 gdth_cpar_str *pcpar; 45 gdth_cpar_str *pcpar;
46 ulong64 paddr; 46 u64 paddr;
47 47
48 char cmnd[MAX_COMMAND_SIZE]; 48 char cmnd[MAX_COMMAND_SIZE];
49 memset(cmnd, 0xff, 12); 49 memset(cmnd, 0xff, 12);
@@ -156,8 +156,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
156 off_t begin = 0,pos = 0; 156 off_t begin = 0,pos = 0;
157 int id, i, j, k, sec, flag; 157 int id, i, j, k, sec, flag;
158 int no_mdrv = 0, drv_no, is_mirr; 158 int no_mdrv = 0, drv_no, is_mirr;
159 ulong32 cnt; 159 u32 cnt;
160 ulong64 paddr; 160 u64 paddr;
161 int rc = -ENOMEM; 161 int rc = -ENOMEM;
162 162
163 gdth_cmd_str *gdtcmd; 163 gdth_cmd_str *gdtcmd;
@@ -220,14 +220,14 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
220 220
221 if (ha->more_proc) 221 if (ha->more_proc)
222 sprintf(hrec, "%d.%02d.%02d-%c%03X", 222 sprintf(hrec, "%d.%02d.%02d-%c%03X",
223 (unchar)(ha->binfo.upd_fw_ver>>24), 223 (u8)(ha->binfo.upd_fw_ver>>24),
224 (unchar)(ha->binfo.upd_fw_ver>>16), 224 (u8)(ha->binfo.upd_fw_ver>>16),
225 (unchar)(ha->binfo.upd_fw_ver), 225 (u8)(ha->binfo.upd_fw_ver),
226 ha->bfeat.raid ? 'R':'N', 226 ha->bfeat.raid ? 'R':'N',
227 ha->binfo.upd_revision); 227 ha->binfo.upd_revision);
228 else 228 else
229 sprintf(hrec, "%d.%02d", (unchar)(ha->cpar.version>>8), 229 sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8),
230 (unchar)(ha->cpar.version)); 230 (u8)(ha->cpar.version));
231 231
232 size = sprintf(buffer+len, 232 size = sprintf(buffer+len,
233 " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n", 233 " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n",
@@ -281,7 +281,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
281 pds->bid = ha->raw[i].local_no; 281 pds->bid = ha->raw[i].local_no;
282 pds->first = 0; 282 pds->first = 0;
283 pds->entries = ha->raw[i].pdev_cnt; 283 pds->entries = ha->raw[i].pdev_cnt;
284 cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(ulong32)) / 284 cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
285 sizeof(pds->list[0]); 285 sizeof(pds->list[0]);
286 if (pds->entries > cnt) 286 if (pds->entries > cnt)
287 pds->entries = cnt; 287 pds->entries = cnt;
@@ -604,7 +604,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
604 604
605 size = sprintf(buffer+len, 605 size = sprintf(buffer+len,
606 " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n", 606 " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
607 (ulong32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec); 607 (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
608 len += size; pos = begin + len; 608 len += size; pos = begin + len;
609 if (pos < offset) { 609 if (pos < offset) {
610 len = 0; 610 len = 0;
@@ -664,9 +664,9 @@ free_fail:
664} 664}
665 665
666static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, 666static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
667 ulong64 *paddr) 667 u64 *paddr)
668{ 668{
669 ulong flags; 669 unsigned long flags;
670 char *ret_val; 670 char *ret_val;
671 671
672 if (size == 0) 672 if (size == 0)
@@ -691,9 +691,9 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
691 return ret_val; 691 return ret_val;
692} 692}
693 693
694static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr) 694static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
695{ 695{
696 ulong flags; 696 unsigned long flags;
697 697
698 if (buf == ha->pscratch) { 698 if (buf == ha->pscratch) {
699 spin_lock_irqsave(&ha->smp_lock, flags); 699 spin_lock_irqsave(&ha->smp_lock, flags);
@@ -705,16 +705,16 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
705} 705}
706 706
707#ifdef GDTH_IOCTL_PROC 707#ifdef GDTH_IOCTL_PROC
708static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size) 708static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
709{ 709{
710 ulong flags; 710 unsigned long flags;
711 int ret_val; 711 int ret_val;
712 712
713 spin_lock_irqsave(&ha->smp_lock, flags); 713 spin_lock_irqsave(&ha->smp_lock, flags);
714 714
715 ret_val = FALSE; 715 ret_val = FALSE;
716 if (ha->scratch_busy) { 716 if (ha->scratch_busy) {
717 if (((gdth_iord_str *)ha->pscratch)->size == (ulong32)size) 717 if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
718 ret_val = TRUE; 718 ret_val = TRUE;
719 } 719 }
720 spin_unlock_irqrestore(&ha->smp_lock, flags); 720 spin_unlock_irqrestore(&ha->smp_lock, flags);
@@ -724,11 +724,11 @@ static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size)
724 724
725static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) 725static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
726{ 726{
727 ulong flags; 727 unsigned long flags;
728 int i; 728 int i;
729 Scsi_Cmnd *scp; 729 Scsi_Cmnd *scp;
730 struct gdth_cmndinfo *cmndinfo; 730 struct gdth_cmndinfo *cmndinfo;
731 unchar b, t; 731 u8 b, t;
732 732
733 spin_lock_irqsave(&ha->smp_lock, flags); 733 spin_lock_irqsave(&ha->smp_lock, flags);
734 734
@@ -738,8 +738,8 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
738 738
739 b = scp->device->channel; 739 b = scp->device->channel;
740 t = scp->device->id; 740 t = scp->device->id;
741 if (!SPECIAL_SCP(scp) && t == (unchar)id && 741 if (!SPECIAL_SCP(scp) && t == (u8)id &&
742 b == (unchar)busnum) { 742 b == (u8)busnum) {
743 cmndinfo->wait_for_completion = 0; 743 cmndinfo->wait_for_completion = 0;
744 spin_unlock_irqrestore(&ha->smp_lock, flags); 744 spin_unlock_irqrestore(&ha->smp_lock, flags);
745 while (!cmndinfo->wait_for_completion) 745 while (!cmndinfo->wait_for_completion)
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 9b900cc9ebe8..dab15f59f2cc 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -17,8 +17,8 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
17 int length, gdth_ha_str *ha); 17 int length, gdth_ha_str *ha);
18 18
19static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, 19static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
20 ulong64 *paddr); 20 u64 *paddr);
21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); 21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); 22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
23 23
24#endif 24#endif
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index bb96fdd58e23..03697ba94251 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -52,7 +52,7 @@
52#include "hpsa.h" 52#include "hpsa.h"
53 53
54/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 54/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55#define HPSA_DRIVER_VERSION "1.0.0" 55#define HPSA_DRIVER_VERSION "2.0.1-3"
56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
57 57
58/* How long to wait (in milliseconds) for board to go into simple mode */ 58/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -77,9 +77,6 @@ MODULE_PARM_DESC(hpsa_allow_any,
77 77
78/* define the PCI info for the cards we can control */ 78/* define the PCI info for the cards we can control */
79static const struct pci_device_id hpsa_pci_device_id[] = { 79static const struct pci_device_id hpsa_pci_device_id[] = {
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, 80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, 81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, 82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
@@ -87,6 +84,9 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, 85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, 86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
88#define PCI_DEVICE_ID_HP_CISSF 0x333f
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
90 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 90 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
91 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 91 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
92 {0,} 92 {0,}
@@ -99,9 +99,6 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
99 * access = Address of the struct of function pointers 99 * access = Address of the struct of function pointers
100 */ 100 */
101static struct board_type products[] = { 101static struct board_type products[] = {
102 {0x3223103C, "Smart Array P800", &SA5_access},
103 {0x3234103C, "Smart Array P400", &SA5_access},
104 {0x323d103c, "Smart Array P700M", &SA5_access},
105 {0x3241103C, "Smart Array P212", &SA5_access}, 102 {0x3241103C, "Smart Array P212", &SA5_access},
106 {0x3243103C, "Smart Array P410", &SA5_access}, 103 {0x3243103C, "Smart Array P410", &SA5_access},
107 {0x3245103C, "Smart Array P410i", &SA5_access}, 104 {0x3245103C, "Smart Array P410i", &SA5_access},
@@ -109,6 +106,8 @@ static struct board_type products[] = {
109 {0x3249103C, "Smart Array P812", &SA5_access}, 106 {0x3249103C, "Smart Array P812", &SA5_access},
110 {0x324a103C, "Smart Array P712m", &SA5_access}, 107 {0x324a103C, "Smart Array P712m", &SA5_access},
111 {0x324b103C, "Smart Array P711m", &SA5_access}, 108 {0x324b103C, "Smart Array P711m", &SA5_access},
109 {0x3233103C, "StorageWorks P1210m", &SA5_access},
110 {0x333F103C, "StorageWorks P1210m", &SA5_access},
112 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 111 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
113}; 112};
114 113
@@ -126,12 +125,15 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
126static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); 125static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
127static struct CommandList *cmd_alloc(struct ctlr_info *h); 126static struct CommandList *cmd_alloc(struct ctlr_info *h);
128static struct CommandList *cmd_special_alloc(struct ctlr_info *h); 127static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
129static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h, 128static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
130 void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, 129 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
131 int cmd_type); 130 int cmd_type);
132 131
133static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, 132static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
134 void (*done)(struct scsi_cmnd *)); 133 void (*done)(struct scsi_cmnd *));
134static void hpsa_scan_start(struct Scsi_Host *);
135static int hpsa_scan_finished(struct Scsi_Host *sh,
136 unsigned long elapsed_time);
135 137
136static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 138static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
137static int hpsa_slave_alloc(struct scsi_device *sdev); 139static int hpsa_slave_alloc(struct scsi_device *sdev);
@@ -150,6 +152,11 @@ static int check_for_unit_attention(struct ctlr_info *h,
150 struct CommandList *c); 152 struct CommandList *c);
151static void check_ioctl_unit_attention(struct ctlr_info *h, 153static void check_ioctl_unit_attention(struct ctlr_info *h,
152 struct CommandList *c); 154 struct CommandList *c);
155/* performant mode helper functions */
156static void calc_bucket_map(int *bucket, int num_buckets,
157 int nsgs, int *bucket_map);
158static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
159static inline u32 next_command(struct ctlr_info *h);
153 160
154static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 161static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
155static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 162static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
@@ -173,10 +180,10 @@ static struct scsi_host_template hpsa_driver_template = {
173 .name = "hpsa", 180 .name = "hpsa",
174 .proc_name = "hpsa", 181 .proc_name = "hpsa",
175 .queuecommand = hpsa_scsi_queue_command, 182 .queuecommand = hpsa_scsi_queue_command,
176 .can_queue = 512, 183 .scan_start = hpsa_scan_start,
184 .scan_finished = hpsa_scan_finished,
177 .this_id = -1, 185 .this_id = -1,
178 .sg_tablesize = MAXSGENTRIES, 186 .sg_tablesize = MAXSGENTRIES,
179 .cmd_per_lun = 512,
180 .use_clustering = ENABLE_CLUSTERING, 187 .use_clustering = ENABLE_CLUSTERING,
181 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 188 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
182 .ioctl = hpsa_ioctl, 189 .ioctl = hpsa_ioctl,
@@ -195,6 +202,12 @@ static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
195 return (struct ctlr_info *) *priv; 202 return (struct ctlr_info *) *priv;
196} 203}
197 204
205static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
206{
207 unsigned long *priv = shost_priv(sh);
208 return (struct ctlr_info *) *priv;
209}
210
198static struct task_struct *hpsa_scan_thread; 211static struct task_struct *hpsa_scan_thread;
199static DEFINE_MUTEX(hpsa_scan_mutex); 212static DEFINE_MUTEX(hpsa_scan_mutex);
200static LIST_HEAD(hpsa_scan_q); 213static LIST_HEAD(hpsa_scan_q);
@@ -312,7 +325,7 @@ static int hpsa_scan_func(__attribute__((unused)) void *data)
312 h->busy_scanning = 1; 325 h->busy_scanning = 1;
313 mutex_unlock(&hpsa_scan_mutex); 326 mutex_unlock(&hpsa_scan_mutex);
314 host_no = h->scsi_host ? h->scsi_host->host_no : -1; 327 host_no = h->scsi_host ? h->scsi_host->host_no : -1;
315 hpsa_update_scsi_devices(h, host_no); 328 hpsa_scan_start(h->scsi_host);
316 complete_all(&h->scan_wait); 329 complete_all(&h->scan_wait);
317 mutex_lock(&hpsa_scan_mutex); 330 mutex_lock(&hpsa_scan_mutex);
318 h->busy_scanning = 0; 331 h->busy_scanning = 0;
@@ -379,8 +392,7 @@ static ssize_t host_store_rescan(struct device *dev,
379{ 392{
380 struct ctlr_info *h; 393 struct ctlr_info *h;
381 struct Scsi_Host *shost = class_to_shost(dev); 394 struct Scsi_Host *shost = class_to_shost(dev);
382 unsigned long *priv = shost_priv(shost); 395 h = shost_to_hba(shost);
383 h = (struct ctlr_info *) *priv;
384 if (add_to_scan_list(h)) { 396 if (add_to_scan_list(h)) {
385 wake_up_process(hpsa_scan_thread); 397 wake_up_process(hpsa_scan_thread);
386 wait_for_completion_interruptible(&h->scan_wait); 398 wait_for_completion_interruptible(&h->scan_wait);
@@ -394,10 +406,44 @@ static inline void addQ(struct hlist_head *list, struct CommandList *c)
394 hlist_add_head(&c->list, list); 406 hlist_add_head(&c->list, list);
395} 407}
396 408
409static inline u32 next_command(struct ctlr_info *h)
410{
411 u32 a;
412
413 if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
414 return h->access.command_completed(h);
415
416 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
417 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
418 (h->reply_pool_head)++;
419 h->commands_outstanding--;
420 } else {
421 a = FIFO_EMPTY;
422 }
423 /* Check for wraparound */
424 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
425 h->reply_pool_head = h->reply_pool;
426 h->reply_pool_wraparound ^= 1;
427 }
428 return a;
429}
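
next_command() reads the performant-mode reply ring: an entry is valid only when its low bit matches the current wraparound flag, and the flag flips each time the head pointer wraps. A self-contained sketch of the same pattern (struct and function names are illustrative, RING_EMPTY stands in for FIFO_EMPTY):

    #include <linux/types.h>

    #define RING_EMPTY ((u32)-1)

    struct reply_ring {
            u32 *slots;     /* tags; bit 0 is the producer's cycle bit */
            u32 size;
            u32 head;
            u32 cycle;      /* expected cycle bit, flips on wrap */
    };

    static u32 ring_pop(struct reply_ring *r)
    {
            u32 tag = RING_EMPTY;

            /* Valid only if the slot's cycle bit matches ours;
             * otherwise the producer has not filled it yet. */
            if ((r->slots[r->head] & 1) == r->cycle) {
                    tag = r->slots[r->head];
                    if (++r->head == r->size) {
                            r->head = 0;
                            r->cycle ^= 1;
                    }
            }
            return tag;
    }
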
430
431/* set_performant_mode: Modify the tag for cciss performant
432 * set bit 0 for pull model, bits 3-1 for block fetch
433 * register number
434 */
435static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
436{
437 if (likely(h->transMethod == CFGTBL_Trans_Performant))
438 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
439}
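
set_performant_mode() packs routing hints into the command's bus address: bit 0 selects the pull (performant) model and bits 3-1 carry the block-fetch register index, looked up from the command's SG count. Spelled out as a helper (name is illustrative; the low address bits are assumed free because command structures are aligned):

    static inline u32 make_perf_tag(u32 busaddr, u32 fetch_index)
    {
            return busaddr | 1 | (fetch_index << 1);  /* bit 0 + bits 3:1 */
    }
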
440
397static void enqueue_cmd_and_start_io(struct ctlr_info *h, 441static void enqueue_cmd_and_start_io(struct ctlr_info *h,
398 struct CommandList *c) 442 struct CommandList *c)
399{ 443{
400 unsigned long flags; 444 unsigned long flags;
445
446 set_performant_mode(h, c);
401 spin_lock_irqsave(&h->lock, flags); 447 spin_lock_irqsave(&h->lock, flags);
402 addQ(&h->reqQ, c); 448 addQ(&h->reqQ, c);
403 h->Qdepth++; 449 h->Qdepth++;
@@ -422,6 +468,15 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
422 return (scsi3addr[3] & 0xC0) == 0x40; 468 return (scsi3addr[3] & 0xC0) == 0x40;
423} 469}
424 470
471static inline int is_scsi_rev_5(struct ctlr_info *h)
472{
473 if (!h->hba_inquiry_data)
474 return 0;
475 if ((h->hba_inquiry_data[2] & 0x07) == 5)
476 return 1;
477 return 0;
478}
479
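
is_scsi_rev_5() keys off byte 2 of standard INQUIRY data; the driver masks its low three bits, which carry the ANSI-approved version (5 = SPC-3), and that is how the P1210m class of controller is told apart in the LUN-mapping code below. Isolated as a helper for clarity (name is illustrative):

    /* Standard INQUIRY: byte 2, bits 2:0 = ANSI version (5 => SPC-3). */
    static inline int inquiry_ansi_version(const unsigned char *inq)
    {
            return inq[2] & 0x07;
    }
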
425static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 480static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
426 "UNKNOWN" 481 "UNKNOWN"
427}; 482};
@@ -431,7 +486,7 @@ static ssize_t raid_level_show(struct device *dev,
431 struct device_attribute *attr, char *buf) 486 struct device_attribute *attr, char *buf)
432{ 487{
433 ssize_t l = 0; 488 ssize_t l = 0;
434 int rlevel; 489 unsigned char rlevel;
435 struct ctlr_info *h; 490 struct ctlr_info *h;
436 struct scsi_device *sdev; 491 struct scsi_device *sdev;
437 struct hpsa_scsi_dev_t *hdev; 492 struct hpsa_scsi_dev_t *hdev;
@@ -455,7 +510,7 @@ static ssize_t raid_level_show(struct device *dev,
455 510
456 rlevel = hdev->raid_level; 511 rlevel = hdev->raid_level;
457 spin_unlock_irqrestore(&h->lock, flags); 512 spin_unlock_irqrestore(&h->lock, flags);
458 if (rlevel < 0 || rlevel > RAID_UNKNOWN) 513 if (rlevel > RAID_UNKNOWN)
459 rlevel = RAID_UNKNOWN; 514 rlevel = RAID_UNKNOWN;
460 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); 515 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
461 return l; 516 return l;
@@ -620,6 +675,24 @@ lun_assigned:
620 return 0; 675 return 0;
621} 676}
622 677
678/* Replace an entry from h->dev[] array. */
679static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
680 int entry, struct hpsa_scsi_dev_t *new_entry,
681 struct hpsa_scsi_dev_t *added[], int *nadded,
682 struct hpsa_scsi_dev_t *removed[], int *nremoved)
683{
684 /* assumes h->devlock is held */
685 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
686 removed[*nremoved] = h->dev[entry];
687 (*nremoved)++;
688 h->dev[entry] = new_entry;
689 added[*nadded] = new_entry;
690 (*nadded)++;
691 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
692 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
693 new_entry->target, new_entry->lun);
694}
695
623/* Remove an entry from h->dev[] array. */ 696/* Remove an entry from h->dev[] array. */
624static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, 697static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
625 struct hpsa_scsi_dev_t *removed[], int *nremoved) 698 struct hpsa_scsi_dev_t *removed[], int *nremoved)
@@ -628,8 +701,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
628 int i; 701 int i;
629 struct hpsa_scsi_dev_t *sd; 702 struct hpsa_scsi_dev_t *sd;
630 703
631 if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA) 704 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
632 BUG();
633 705
634 sd = h->dev[entry]; 706 sd = h->dev[entry];
635 removed[*nremoved] = h->dev[entry]; 707 removed[*nremoved] = h->dev[entry];
@@ -722,6 +794,8 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
722#define DEVICE_CHANGED 1 794#define DEVICE_CHANGED 1
723#define DEVICE_SAME 2 795#define DEVICE_SAME 2
724 for (i = 0; i < haystack_size; i++) { 796 for (i = 0; i < haystack_size; i++) {
797 if (haystack[i] == NULL) /* previously removed. */
798 continue;
725 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { 799 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
726 *index = i; 800 *index = i;
727 if (device_is_the_same(needle, haystack[i])) 801 if (device_is_the_same(needle, haystack[i]))
@@ -734,7 +808,7 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
734 return DEVICE_NOT_FOUND; 808 return DEVICE_NOT_FOUND;
735} 809}
736 810
737static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, 811static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
738 struct hpsa_scsi_dev_t *sd[], int nsds) 812 struct hpsa_scsi_dev_t *sd[], int nsds)
739{ 813{
740 /* sd contains scsi3 addresses and devtypes, and inquiry 814 /* sd contains scsi3 addresses and devtypes, and inquiry
@@ -779,12 +853,12 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
779 continue; /* remove ^^^, hence i not incremented */ 853 continue; /* remove ^^^, hence i not incremented */
780 } else if (device_change == DEVICE_CHANGED) { 854 } else if (device_change == DEVICE_CHANGED) {
781 changes++; 855 changes++;
782 hpsa_scsi_remove_entry(h, hostno, i, 856 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
783 removed, &nremoved); 857 added, &nadded, removed, &nremoved);
784 (void) hpsa_scsi_add_entry(h, hostno, sd[entry], 858 /* Set it to NULL to prevent it from being freed
785 added, &nadded); 859 * at the bottom of hpsa_update_scsi_devices()
786 /* add can't fail, we just removed one. */ 860 */
787 sd[entry] = NULL; /* prevent it from being freed */ 861 sd[entry] = NULL;
788 } 862 }
789 i++; 863 i++;
790 } 864 }
@@ -860,7 +934,6 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
860free_and_out: 934free_and_out:
861 kfree(added); 935 kfree(added);
862 kfree(removed); 936 kfree(removed);
863 return 0;
864} 937}
865 938
866/* 939/*
@@ -900,7 +973,7 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
900 973
901static void hpsa_slave_destroy(struct scsi_device *sdev) 974static void hpsa_slave_destroy(struct scsi_device *sdev)
902{ 975{
903 return; /* nothing to do. */ 976 /* nothing to do. */
904} 977}
905 978
906static void hpsa_scsi_setup(struct ctlr_info *h) 979static void hpsa_scsi_setup(struct ctlr_info *h)
@@ -908,11 +981,10 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
908 h->ndevices = 0; 981 h->ndevices = 0;
909 h->scsi_host = NULL; 982 h->scsi_host = NULL;
910 spin_lock_init(&h->devlock); 983 spin_lock_init(&h->devlock);
911 return;
912} 984}
913 985
914static void complete_scsi_command(struct CommandList *cp, 986static void complete_scsi_command(struct CommandList *cp,
915 int timeout, __u32 tag) 987 int timeout, u32 tag)
916{ 988{
917 struct scsi_cmnd *cmd; 989 struct scsi_cmnd *cmd;
918 struct ctlr_info *h; 990 struct ctlr_info *h;
@@ -987,7 +1059,6 @@ static void complete_scsi_command(struct CommandList *cp,
987 * required 1059 * required
988 */ 1060 */
989 if ((asc == 0x04) && (ascq == 0x03)) { 1061 if ((asc == 0x04) && (ascq == 0x03)) {
990 cmd->result = DID_NO_CONNECT << 16;
991 dev_warn(&h->pdev->dev, "cp %p " 1062 dev_warn(&h->pdev->dev, "cp %p "
992 "has check condition: unit " 1063 "has check condition: unit "
993 "not ready, manual " 1064 "not ready, manual "
@@ -995,14 +1066,22 @@ static void complete_scsi_command(struct CommandList *cp,
995 break; 1066 break;
996 } 1067 }
997 } 1068 }
998 1069 if (sense_key == ABORTED_COMMAND) {
999 1070 /* Aborted command is retryable */
1071 dev_warn(&h->pdev->dev, "cp %p "
1072 "has check condition: aborted command: "
1073 "ASC: 0x%x, ASCQ: 0x%x\n",
1074 cp, asc, ascq);
1075 cmd->result = DID_SOFT_ERROR << 16;
1076 break;
1077 }
1000 /* Must be some other type of check condition */ 1078 /* Must be some other type of check condition */
1001 dev_warn(&h->pdev->dev, "cp %p has check condition: " 1079 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1002 "unknown type: " 1080 "unknown type: "
1003 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1081 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1004 "Returning result: 0x%x, " 1082 "Returning result: 0x%x, "
1005 "cmd=[%02x %02x %02x %02x %02x " 1083 "cmd=[%02x %02x %02x %02x %02x "
1084 "%02x %02x %02x %02x %02x %02x "
1006 "%02x %02x %02x %02x %02x]\n", 1085 "%02x %02x %02x %02x %02x]\n",
1007 cp, sense_key, asc, ascq, 1086 cp, sense_key, asc, ascq,
1008 cmd->result, 1087 cmd->result,
@@ -1010,7 +1089,10 @@ static void complete_scsi_command(struct CommandList *cp,
1010 cmd->cmnd[2], cmd->cmnd[3], 1089 cmd->cmnd[2], cmd->cmnd[3],
1011 cmd->cmnd[4], cmd->cmnd[5], 1090 cmd->cmnd[4], cmd->cmnd[5],
1012 cmd->cmnd[6], cmd->cmnd[7], 1091 cmd->cmnd[6], cmd->cmnd[7],
1013 cmd->cmnd[8], cmd->cmnd[9]); 1092 cmd->cmnd[8], cmd->cmnd[9],
1093 cmd->cmnd[10], cmd->cmnd[11],
1094 cmd->cmnd[12], cmd->cmnd[13],
1095 cmd->cmnd[14], cmd->cmnd[15]);
1014 break; 1096 break;
1015 } 1097 }
1016 1098
@@ -1086,7 +1168,7 @@ static void complete_scsi_command(struct CommandList *cp,
1086 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1168 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1087 break; 1169 break;
1088 case CMD_UNSOLICITED_ABORT: 1170 case CMD_UNSOLICITED_ABORT:
1089 cmd->result = DID_ABORT << 16; 1171 cmd->result = DID_RESET << 16;
1090 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " 1172 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1091 "abort\n", cp); 1173 "abort\n", cp);
1092 break; 1174 break;
@@ -1119,9 +1201,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
1119 sh->max_cmd_len = MAX_COMMAND_SIZE; 1201 sh->max_cmd_len = MAX_COMMAND_SIZE;
1120 sh->max_lun = HPSA_MAX_LUN; 1202 sh->max_lun = HPSA_MAX_LUN;
1121 sh->max_id = HPSA_MAX_LUN; 1203 sh->max_id = HPSA_MAX_LUN;
1204 sh->can_queue = h->nr_cmds;
1205 sh->cmd_per_lun = h->nr_cmds;
1122 h->scsi_host = sh; 1206 h->scsi_host = sh;
1123 sh->hostdata[0] = (unsigned long) h; 1207 sh->hostdata[0] = (unsigned long) h;
1124 sh->irq = h->intr[SIMPLE_MODE_INT]; 1208 sh->irq = h->intr[PERF_MODE_INT];
1125 sh->unique_id = sh->irq; 1209 sh->unique_id = sh->irq;
1126 error = scsi_add_host(sh, &h->pdev->dev); 1210 error = scsi_add_host(sh, &h->pdev->dev);
1127 if (error) 1211 if (error)
@@ -1133,11 +1217,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
1133 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" 1217 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1134 " failed for controller %d\n", h->ctlr); 1218 " failed for controller %d\n", h->ctlr);
1135 scsi_host_put(sh); 1219 scsi_host_put(sh);
1136 return -1; 1220 return error;
1137 fail: 1221 fail:
1138 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" 1222 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1139 " failed for controller %d\n", h->ctlr); 1223 " failed for controller %d\n", h->ctlr);
1140 return -1; 1224 return -ENOMEM;
1141} 1225}
1142 1226
1143static void hpsa_pci_unmap(struct pci_dev *pdev, 1227static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -1160,7 +1244,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
1160 size_t buflen, 1244 size_t buflen,
1161 int data_direction) 1245 int data_direction)
1162{ 1246{
1163 __u64 addr64; 1247 u64 addr64;
1164 1248
1165 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1249 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1166 cp->Header.SGList = 0; 1250 cp->Header.SGList = 0;
@@ -1168,14 +1252,14 @@ static void hpsa_map_one(struct pci_dev *pdev,
1168 return; 1252 return;
1169 } 1253 }
1170 1254
1171 addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); 1255 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1172 cp->SG[0].Addr.lower = 1256 cp->SG[0].Addr.lower =
1173 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); 1257 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1174 cp->SG[0].Addr.upper = 1258 cp->SG[0].Addr.upper =
1175 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); 1259 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1176 cp->SG[0].Len = buflen; 1260 cp->SG[0].Len = buflen;
1177 cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ 1261 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1178 cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ 1262 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1179} 1263}
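
hpsa_map_one() splits the 64-bit DMA address across the descriptor's two 32-bit halves. The same split, isolated (the lower/upper layout follows the hpsa SG descriptor shown above; the helper name is illustrative):

    static inline void sg_set_addr64(u32 *lower, u32 *upper, u64 addr64)
    {
            *lower = (u32)(addr64 & 0x00000000FFFFFFFFULL);
            *upper = (u32)(addr64 >> 32);
    }
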
1180 1264
1181static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 1265static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1274,7 +1358,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1274 1358
1275 if (c == NULL) { /* trouble... */ 1359 if (c == NULL) { /* trouble... */
1276 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1360 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1277 return -1; 1361 return -ENOMEM;
1278 } 1362 }
1279 1363
1280 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); 1364 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
@@ -1366,9 +1450,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1366 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1450 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1367 return -1; 1451 return -1;
1368 } 1452 }
1369 1453 /* address the controller */
1370 memset(&scsi3addr[0], 0, 8); /* address the controller */ 1454 memset(scsi3addr, 0, sizeof(scsi3addr));
1371
1372 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 1455 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1373 buf, bufsize, 0, scsi3addr, TYPE_CMD); 1456 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1374 if (extended_response) 1457 if (extended_response)
@@ -1409,13 +1492,12 @@ static int hpsa_update_device_info(struct ctlr_info *h,
1409 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) 1492 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1410{ 1493{
1411#define OBDR_TAPE_INQ_SIZE 49 1494#define OBDR_TAPE_INQ_SIZE 49
1412 unsigned char *inq_buff = NULL; 1495 unsigned char *inq_buff;
1413 1496
1414 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1497 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1415 if (!inq_buff) 1498 if (!inq_buff)
1416 goto bail_out; 1499 goto bail_out;
1417 1500
1418 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
1419 /* Do an inquiry to the device to see what it is. */ 1501 /* Do an inquiry to the device to see what it is. */
1420 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 1502 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1421 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 1503 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
@@ -1485,32 +1567,51 @@ static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1485 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 1567 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1486 */ 1568 */
1487static void figure_bus_target_lun(struct ctlr_info *h, 1569static void figure_bus_target_lun(struct ctlr_info *h,
1488 __u8 *lunaddrbytes, int *bus, int *target, int *lun, 1570 u8 *lunaddrbytes, int *bus, int *target, int *lun,
1489 struct hpsa_scsi_dev_t *device) 1571 struct hpsa_scsi_dev_t *device)
1490{ 1572{
1491 1573 u32 lunid;
1492 __u32 lunid;
1493 1574
1494 if (is_logical_dev_addr_mode(lunaddrbytes)) { 1575 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1495 /* logical device */ 1576 /* logical device */
1496 memcpy(&lunid, lunaddrbytes, sizeof(lunid)); 1577 if (unlikely(is_scsi_rev_5(h))) {
1497 lunid = le32_to_cpu(lunid); 1578 /* p1210m, logical drives lun assignments
1498 1579 * match SCSI REPORT LUNS data.
1499 if (is_msa2xxx(h, device)) { 1580 */
1500 *bus = 1; 1581 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1501 *target = (lunid >> 16) & 0x3fff;
1502 *lun = lunid & 0x00ff;
1503 } else {
1504 *bus = 0; 1582 *bus = 0;
1505 *lun = 0; 1583 *target = 0;
1506 *target = lunid & 0x3fff; 1584 *lun = (lunid & 0x3fff) + 1;
1585 } else {
1586 /* not p1210m... */
1587 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1588 if (is_msa2xxx(h, device)) {
1589 /* msa2xxx way, put logicals on bus 1
1590 * and match target/lun numbers box
1591 * reports.
1592 */
1593 *bus = 1;
1594 *target = (lunid >> 16) & 0x3fff;
1595 *lun = lunid & 0x00ff;
1596 } else {
1597 /* Traditional smart array way. */
1598 *bus = 0;
1599 *lun = 0;
1600 *target = lunid & 0x3fff;
1601 }
1507 } 1602 }
1508 } else { 1603 } else {
1509 /* physical device */ 1604 /* physical device */
1510 if (is_hba_lunid(lunaddrbytes)) 1605 if (is_hba_lunid(lunaddrbytes))
1511 *bus = 3; 1606 if (unlikely(is_scsi_rev_5(h))) {
1607 *bus = 0; /* put p1210m ctlr at 0,0,0 */
1608 *target = 0;
1609 *lun = 0;
1610 return;
1611 } else
1612 *bus = 3; /* traditional smartarray */
1512 else 1613 else
1513 *bus = 2; 1614 *bus = 2; /* physical disk */
1514 *target = -1; 1615 *target = -1;
1515 *lun = -1; /* we will fill these in later. */ 1616 *lun = -1; /* we will fill these in later. */
1516 } 1617 }
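
The three decodings above (P1210m, MSA2xxx, traditional Smart Array) can be exercised by hand; a sketch with an invented lunid, assuming the bytes have already been swapped with le32_to_cpu():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lunid = 0x00020005;	/* fabricated logical-drive lunid */

	/* MSA2xxx box: bus 1, target/lun taken from the lunid fields */
	printf("msa2xxx:    bus=1 target=%u lun=%u\n",
	       (lunid >> 16) & 0x3fff, lunid & 0x00ff);
	/* traditional Smart Array: bus 0, lun 0, target from low bits */
	printf("smartarray: bus=0 target=%u lun=0\n", lunid & 0x3fff);
	/* P1210m (SCSI rev 5): lun tracks REPORT LUNS, offset by one */
	printf("p1210m:     bus=0 target=0 lun=%u\n", (lunid & 0x3fff) + 1);
	return 0;
}
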
@@ -1529,7 +1630,7 @@ static void figure_bus_target_lun(struct ctlr_info *h,
1529 */ 1630 */
1530static int add_msa2xxx_enclosure_device(struct ctlr_info *h, 1631static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1531 struct hpsa_scsi_dev_t *tmpdevice, 1632 struct hpsa_scsi_dev_t *tmpdevice,
1532 struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes, 1633 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1533 int bus, int target, int lun, unsigned long lunzerobits[], 1634 int bus, int target, int lun, unsigned long lunzerobits[],
1534 int *nmsa2xxx_enclosures) 1635 int *nmsa2xxx_enclosures)
1535{ 1636{
@@ -1550,6 +1651,9 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1550 if (is_hba_lunid(scsi3addr)) 1651 if (is_hba_lunid(scsi3addr))
1551 return 0; /* Don't add the RAID controller here. */ 1652 return 0; /* Don't add the RAID controller here. */
1552 1653
1654 if (is_scsi_rev_5(h))
1655 return 0; /* p1210m doesn't need to do this. */
1656
1553#define MAX_MSA2XXX_ENCLOSURES 32 1657#define MAX_MSA2XXX_ENCLOSURES 32
1554 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { 1658 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1555 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " 1659 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
@@ -1576,18 +1680,14 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1576 */ 1680 */
1577static int hpsa_gather_lun_info(struct ctlr_info *h, 1681static int hpsa_gather_lun_info(struct ctlr_info *h,
1578 int reportlunsize, 1682 int reportlunsize,
1579 struct ReportLUNdata *physdev, __u32 *nphysicals, 1683 struct ReportLUNdata *physdev, u32 *nphysicals,
1580 struct ReportLUNdata *logdev, __u32 *nlogicals) 1684 struct ReportLUNdata *logdev, u32 *nlogicals)
1581{ 1685{
1582 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { 1686 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1583 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 1687 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1584 return -1; 1688 return -1;
1585 } 1689 }
1586 memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals)); 1690 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1587 *nphysicals = be32_to_cpu(*nphysicals) / 8;
1588#ifdef DEBUG
1589 dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
1590#endif
1591 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 1691 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1592 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 1692 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1593 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1693 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1598,11 +1698,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
1598 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 1698 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1599 return -1; 1699 return -1;
1600 } 1700 }
1601 memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals)); 1701 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1602 *nlogicals = be32_to_cpu(*nlogicals) / 8;
1603#ifdef DEBUG
1604 dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
1605#endif
1606 /* Reject Logicals in excess of our max capability. */ 1702 /* Reject Logicals in excess of our max capability. */
1607 if (*nlogicals > HPSA_MAX_LUN) { 1703 if (*nlogicals > HPSA_MAX_LUN) {
1608 dev_warn(&h->pdev->dev, 1704 dev_warn(&h->pdev->dev,
@@ -1621,6 +1717,31 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
1621 return 0; 1717 return 0;
1622} 1718}
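
LUNListLength is a big-endian byte count and each LUN entry is 8 bytes, which is what the be32_to_cpu(...) / 8 expressions above compute. The same parse in a standalone program, with fabricated buffer contents:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* fabricated REPORT LUNS header: list length = 24 bytes */
	uint8_t LUNListLength[4] = { 0x00, 0x00, 0x00, 0x18 };
	uint32_t be;

	memcpy(&be, LUNListLength, sizeof(be));
	printf("%u LUNs\n", ntohl(be) / 8);	/* prints "3 LUNs" */
	return 0;
}
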
1623 1719
1720u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1721 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1722 struct ReportLUNdata *logdev_list)
1723{
1724 /* Helper function, figure out where the LUN ID info is coming from
1725 * given index i, lists of physical and logical devices, where in
1726 * the list the raid controller is supposed to appear (first or last)
1727 */
1728
1729 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1730 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1731
1732 if (i == raid_ctlr_position)
1733 return RAID_CTLR_LUNID;
1734
1735 if (i < logicals_start)
1736 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1737
1738 if (i < last_device)
1739 return &logdev_list->LUN[i - nphysicals -
1740 (raid_ctlr_position == 0)][0];
1741 BUG();
1742 return NULL;
1743}
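
With raid_ctlr_position == 0 (the SCSI rev 5 case), the controller occupies slot 0 and every physical and logical entry shifts up by one; a standalone walk of the same arithmetic, with invented device counts:

#include <stdio.h>

int main(void)
{
	int nphysicals = 2, nlogicals = 3, raid_ctlr_position = 0;
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
	int i;

	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		if (i == raid_ctlr_position)
			printf("%d: RAID controller\n", i);
		else if (i < logicals_start)
			printf("%d: physical[%d]\n", i,
			       i - (raid_ctlr_position == 0));
		else if (i < last_device)
			printf("%d: logical[%d]\n", i,
			       i - nphysicals - (raid_ctlr_position == 0));
	}
	return 0;
}
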
1744
1624static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 1745static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1625{ 1746{
1626 /* the idea here is we could get notified 1747 /* the idea here is we could get notified
@@ -1636,14 +1757,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1636 struct ReportLUNdata *physdev_list = NULL; 1757 struct ReportLUNdata *physdev_list = NULL;
1637 struct ReportLUNdata *logdev_list = NULL; 1758 struct ReportLUNdata *logdev_list = NULL;
1638 unsigned char *inq_buff = NULL; 1759 unsigned char *inq_buff = NULL;
1639 __u32 nphysicals = 0; 1760 u32 nphysicals = 0;
1640 __u32 nlogicals = 0; 1761 u32 nlogicals = 0;
1641 __u32 ndev_allocated = 0; 1762 u32 ndev_allocated = 0;
1642 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 1763 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1643 int ncurrent = 0; 1764 int ncurrent = 0;
1644 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; 1765 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1645 int i, nmsa2xxx_enclosures, ndevs_to_allocate; 1766 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1646 int bus, target, lun; 1767 int bus, target, lun;
1768 int raid_ctlr_position;
1647 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); 1769 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1648 1770
1649 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, 1771 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
@@ -1681,23 +1803,22 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1681 ndev_allocated++; 1803 ndev_allocated++;
1682 } 1804 }
1683 1805
1806 if (unlikely(is_scsi_rev_5(h)))
1807 raid_ctlr_position = 0;
1808 else
1809 raid_ctlr_position = nphysicals + nlogicals;
1810
1684 /* adjust our table of devices */ 1811 /* adjust our table of devices */
1685 nmsa2xxx_enclosures = 0; 1812 nmsa2xxx_enclosures = 0;
1686 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1813 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1687 __u8 *lunaddrbytes; 1814 u8 *lunaddrbytes;
1688 1815
1689 /* Figure out where the LUN ID info is coming from */ 1816 /* Figure out where the LUN ID info is coming from */
1690 if (i < nphysicals) 1817 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1691 lunaddrbytes = &physdev_list->LUN[i][0]; 1818 i, nphysicals, nlogicals, physdev_list, logdev_list);
1692 else
1693 if (i < nphysicals + nlogicals)
1694 lunaddrbytes =
1695 &logdev_list->LUN[i-nphysicals][0];
1696 else /* jam in the RAID controller at the end */
1697 lunaddrbytes = RAID_CTLR_LUNID;
1698
1699 /* skip masked physical devices. */ 1819 /* skip masked physical devices. */
1700 if (lunaddrbytes[3] & 0xC0 && i < nphysicals) 1820 if (lunaddrbytes[3] & 0xC0 &&
1821 i < nphysicals + (raid_ctlr_position == 0))
1701 continue; 1822 continue;
1702 1823
1703 /* Get device type, vendor, model, device id */ 1824 /* Get device type, vendor, model, device id */
@@ -1777,7 +1898,6 @@ out:
1777 kfree(inq_buff); 1898 kfree(inq_buff);
1778 kfree(physdev_list); 1899 kfree(physdev_list);
1779 kfree(logdev_list); 1900 kfree(logdev_list);
1780 return;
1781} 1901}
1782 1902
1783/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1903/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
@@ -1790,7 +1910,7 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
1790{ 1910{
1791 unsigned int len; 1911 unsigned int len;
1792 struct scatterlist *sg; 1912 struct scatterlist *sg;
1793 __u64 addr64; 1913 u64 addr64;
1794 int use_sg, i; 1914 int use_sg, i;
1795 1915
1796 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); 1916 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
@@ -1803,20 +1923,20 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
1803 goto sglist_finished; 1923 goto sglist_finished;
1804 1924
1805 scsi_for_each_sg(cmd, sg, use_sg, i) { 1925 scsi_for_each_sg(cmd, sg, use_sg, i) {
1806 addr64 = (__u64) sg_dma_address(sg); 1926 addr64 = (u64) sg_dma_address(sg);
1807 len = sg_dma_len(sg); 1927 len = sg_dma_len(sg);
1808 cp->SG[i].Addr.lower = 1928 cp->SG[i].Addr.lower =
1809 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); 1929 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1810 cp->SG[i].Addr.upper = 1930 cp->SG[i].Addr.upper =
1811 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); 1931 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1812 cp->SG[i].Len = len; 1932 cp->SG[i].Len = len;
1813 cp->SG[i].Ext = 0; /* we are not chaining */ 1933 cp->SG[i].Ext = 0; /* we are not chaining */
1814 } 1934 }
1815 1935
1816sglist_finished: 1936sglist_finished:
1817 1937
1818 cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */ 1938 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
1819 cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */ 1939 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
1820 return 0; 1940 return 0;
1821} 1941}
1822 1942
@@ -1860,7 +1980,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1860 c->scsi_cmd = cmd; 1980 c->scsi_cmd = cmd;
1861 c->Header.ReplyQueue = 0; /* unused in simple mode */ 1981 c->Header.ReplyQueue = 0; /* unused in simple mode */
1862 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 1982 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1863 c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ 1983 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
1984 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
1864 1985
1865 /* Fill in the request block... */ 1986 /* Fill in the request block... */
1866 1987
@@ -1914,6 +2035,48 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1914 return 0; 2035 return 0;
1915} 2036}
1916 2037
2038static void hpsa_scan_start(struct Scsi_Host *sh)
2039{
2040 struct ctlr_info *h = shost_to_hba(sh);
2041 unsigned long flags;
2042
2043 /* wait until any scan already in progress is finished. */
2044 while (1) {
2045 spin_lock_irqsave(&h->scan_lock, flags);
2046 if (h->scan_finished)
2047 break;
2048 spin_unlock_irqrestore(&h->scan_lock, flags);
2049 wait_event(h->scan_wait_queue, h->scan_finished);
2050 /* Note: We don't need to worry about a race between this
2051 * thread and driver unload because the midlayer will
2052 * have incremented the reference count, so unload won't
2053 * happen if we're in here.
2054 */
2055 }
2056 h->scan_finished = 0; /* mark scan as in progress */
2057 spin_unlock_irqrestore(&h->scan_lock, flags);
2058
2059 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2060
2061 spin_lock_irqsave(&h->scan_lock, flags);
2062 h->scan_finished = 1; /* mark scan as finished. */
2063 wake_up_all(&h->scan_wait_queue);
2064 spin_unlock_irqrestore(&h->scan_lock, flags);
2065}
2066
2067static int hpsa_scan_finished(struct Scsi_Host *sh,
2068 unsigned long elapsed_time)
2069{
2070 struct ctlr_info *h = shost_to_hba(sh);
2071 unsigned long flags;
2072 int finished;
2073
2074 spin_lock_irqsave(&h->scan_lock, flags);
2075 finished = h->scan_finished;
2076 spin_unlock_irqrestore(&h->scan_lock, flags);
2077 return finished;
2078}
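
The scan_lock/scan_wait_queue pairing above is the usual protected-flag-plus-waitqueue handshake. A userspace analog, assuming a mutex and condition variable in place of the spinlock and wait queue (all names here are ours):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t scan_wait = PTHREAD_COND_INITIALIZER;
static int scan_finished = 1;	/* no scan currently in progress */

static void scan_start(void)
{
	pthread_mutex_lock(&scan_lock);
	while (!scan_finished)	/* wait out any scan in progress */
		pthread_cond_wait(&scan_wait, &scan_lock);
	scan_finished = 0;	/* mark scan as in progress */
	pthread_mutex_unlock(&scan_lock);

	/* ... rescan devices here ... */

	pthread_mutex_lock(&scan_lock);
	scan_finished = 1;	/* mark scan as finished */
	pthread_cond_broadcast(&scan_wait);
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	scan_start();
	printf("scan complete\n");
	return 0;
}
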
2079
1917static void hpsa_unregister_scsi(struct ctlr_info *h) 2080static void hpsa_unregister_scsi(struct ctlr_info *h)
1918{ 2081{
1919 /* we are being forcibly unloaded, and may not refuse. */ 2082 /* we are being forcibly unloaded, and may not refuse. */
@@ -1926,7 +2089,6 @@ static int hpsa_register_scsi(struct ctlr_info *h)
1926{ 2089{
1927 int rc; 2090 int rc;
1928 2091
1929 hpsa_update_scsi_devices(h, -1);
1930 rc = hpsa_scsi_detect(h); 2092 rc = hpsa_scsi_detect(h);
1931 if (rc != 0) 2093 if (rc != 0)
1932 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" 2094 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
@@ -2003,14 +2165,14 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2003 h = sdev_to_hba(scsicmd->device); 2165 h = sdev_to_hba(scsicmd->device);
2004 if (h == NULL) /* paranoia */ 2166 if (h == NULL) /* paranoia */
2005 return FAILED; 2167 return FAILED;
2006 dev_warn(&h->pdev->dev, "resetting drive\n");
2007
2008 dev = scsicmd->device->hostdata; 2168 dev = scsicmd->device->hostdata;
2009 if (!dev) { 2169 if (!dev) {
2010 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 2170 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2011 "device lookup failed.\n"); 2171 "device lookup failed.\n");
2012 return FAILED; 2172 return FAILED;
2013 } 2173 }
2174 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2175 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2014 /* send a reset to the SCSI LUN which the command was sent to */ 2176 /* send a reset to the SCSI LUN which the command was sent to */
2015 rc = hpsa_send_reset(h, dev->scsi3addr); 2177 rc = hpsa_send_reset(h, dev->scsi3addr);
2016 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 2178 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
@@ -2053,8 +2215,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
2053 c->cmdindex = i; 2215 c->cmdindex = i;
2054 2216
2055 INIT_HLIST_NODE(&c->list); 2217 INIT_HLIST_NODE(&c->list);
2056 c->busaddr = (__u32) cmd_dma_handle; 2218 c->busaddr = (u32) cmd_dma_handle;
2057 temp64.val = (__u64) err_dma_handle; 2219 temp64.val = (u64) err_dma_handle;
2058 c->ErrDesc.Addr.lower = temp64.val32.lower; 2220 c->ErrDesc.Addr.lower = temp64.val32.lower;
2059 c->ErrDesc.Addr.upper = temp64.val32.upper; 2221 c->ErrDesc.Addr.upper = temp64.val32.upper;
2060 c->ErrDesc.Len = sizeof(*c->err_info); 2222 c->ErrDesc.Len = sizeof(*c->err_info);
@@ -2091,8 +2253,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2091 memset(c->err_info, 0, sizeof(*c->err_info)); 2253 memset(c->err_info, 0, sizeof(*c->err_info));
2092 2254
2093 INIT_HLIST_NODE(&c->list); 2255 INIT_HLIST_NODE(&c->list);
2094 c->busaddr = (__u32) cmd_dma_handle; 2256 c->busaddr = (u32) cmd_dma_handle;
2095 temp64.val = (__u64) err_dma_handle; 2257 temp64.val = (u64) err_dma_handle;
2096 c->ErrDesc.Addr.lower = temp64.val32.lower; 2258 c->ErrDesc.Addr.lower = temp64.val32.lower;
2097 c->ErrDesc.Addr.upper = temp64.val32.upper; 2259 c->ErrDesc.Addr.upper = temp64.val32.upper;
2098 c->ErrDesc.Len = sizeof(*c->err_info); 2260 c->ErrDesc.Len = sizeof(*c->err_info);
@@ -2125,50 +2287,6 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2125 2287
2126#ifdef CONFIG_COMPAT 2288#ifdef CONFIG_COMPAT
2127 2289
2128static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
2129{
2130 int ret;
2131
2132 lock_kernel();
2133 ret = hpsa_ioctl(dev, cmd, arg);
2134 unlock_kernel();
2135 return ret;
2136}
2137
2138static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
2139static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2140 int cmd, void *arg);
2141
2142static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2143{
2144 switch (cmd) {
2145 case CCISS_GETPCIINFO:
2146 case CCISS_GETINTINFO:
2147 case CCISS_SETINTINFO:
2148 case CCISS_GETNODENAME:
2149 case CCISS_SETNODENAME:
2150 case CCISS_GETHEARTBEAT:
2151 case CCISS_GETBUSTYPES:
2152 case CCISS_GETFIRMVER:
2153 case CCISS_GETDRIVVER:
2154 case CCISS_REVALIDVOLS:
2155 case CCISS_DEREGDISK:
2156 case CCISS_REGNEWDISK:
2157 case CCISS_REGNEWD:
2158 case CCISS_RESCANDISK:
2159 case CCISS_GETLUNINFO:
2160 return do_ioctl(dev, cmd, arg);
2161
2162 case CCISS_PASSTHRU32:
2163 return hpsa_ioctl32_passthru(dev, cmd, arg);
2164 case CCISS_BIG_PASSTHRU32:
2165 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2166
2167 default:
2168 return -ENOIOCTLCMD;
2169 }
2170}
2171
2172static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 2290static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2173{ 2291{
2174 IOCTL32_Command_struct __user *arg32 = 2292 IOCTL32_Command_struct __user *arg32 =
@@ -2193,7 +2311,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2193 if (err) 2311 if (err)
2194 return -EFAULT; 2312 return -EFAULT;
2195 2313
2196 err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p); 2314 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2197 if (err) 2315 if (err)
2198 return err; 2316 return err;
2199 err |= copy_in_user(&arg32->error_info, &p->error_info, 2317 err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -2230,7 +2348,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2230 if (err) 2348 if (err)
2231 return -EFAULT; 2349 return -EFAULT;
2232 2350
2233 err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 2351 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2234 if (err) 2352 if (err)
2235 return err; 2353 return err;
2236 err |= copy_in_user(&arg32->error_info, &p->error_info, 2354 err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -2239,6 +2357,36 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2239 return -EFAULT; 2357 return -EFAULT;
2240 return err; 2358 return err;
2241} 2359}
2360
2361static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2362{
2363 switch (cmd) {
2364 case CCISS_GETPCIINFO:
2365 case CCISS_GETINTINFO:
2366 case CCISS_SETINTINFO:
2367 case CCISS_GETNODENAME:
2368 case CCISS_SETNODENAME:
2369 case CCISS_GETHEARTBEAT:
2370 case CCISS_GETBUSTYPES:
2371 case CCISS_GETFIRMVER:
2372 case CCISS_GETDRIVVER:
2373 case CCISS_REVALIDVOLS:
2374 case CCISS_DEREGDISK:
2375 case CCISS_REGNEWDISK:
2376 case CCISS_REGNEWD:
2377 case CCISS_RESCANDISK:
2378 case CCISS_GETLUNINFO:
2379 return hpsa_ioctl(dev, cmd, arg);
2380
2381 case CCISS_PASSTHRU32:
2382 return hpsa_ioctl32_passthru(dev, cmd, arg);
2383 case CCISS_BIG_PASSTHRU32:
2384 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2385
2386 default:
2387 return -ENOIOCTLCMD;
2388 }
2389}
2242#endif 2390#endif
2243 2391
2244static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 2392static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
@@ -2378,8 +2526,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2378 BYTE sg_used = 0; 2526 BYTE sg_used = 0;
2379 int status = 0; 2527 int status = 0;
2380 int i; 2528 int i;
2381 __u32 left; 2529 u32 left;
2382 __u32 sz; 2530 u32 sz;
2383 BYTE __user *data_ptr; 2531 BYTE __user *data_ptr;
2384 2532
2385 if (!argp) 2533 if (!argp)
@@ -2527,7 +2675,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2527 case CCISS_DEREGDISK: 2675 case CCISS_DEREGDISK:
2528 case CCISS_REGNEWDISK: 2676 case CCISS_REGNEWDISK:
2529 case CCISS_REGNEWD: 2677 case CCISS_REGNEWD:
2530 hpsa_update_scsi_devices(h, dev->host->host_no); 2678 hpsa_scan_start(h->scsi_host);
2531 return 0; 2679 return 0;
2532 case CCISS_GETPCIINFO: 2680 case CCISS_GETPCIINFO:
2533 return hpsa_getpciinfo_ioctl(h, argp); 2681 return hpsa_getpciinfo_ioctl(h, argp);
@@ -2542,8 +2690,8 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2542 } 2690 }
2543} 2691}
2544 2692
2545static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h, 2693static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2546 void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, 2694 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2547 int cmd_type) 2695 int cmd_type)
2548{ 2696{
2549 int pci_dir = XFER_NONE; 2697 int pci_dir = XFER_NONE;
@@ -2710,19 +2858,20 @@ static inline unsigned long get_next_completion(struct ctlr_info *h)
2710 return h->access.command_completed(h); 2858 return h->access.command_completed(h);
2711} 2859}
2712 2860
2713static inline int interrupt_pending(struct ctlr_info *h) 2861static inline bool interrupt_pending(struct ctlr_info *h)
2714{ 2862{
2715 return h->access.intr_pending(h); 2863 return h->access.intr_pending(h);
2716} 2864}
2717 2865
2718static inline long interrupt_not_for_us(struct ctlr_info *h) 2866static inline long interrupt_not_for_us(struct ctlr_info *h)
2719{ 2867{
2720 return ((h->access.intr_pending(h) == 0) || 2868 return !(h->msi_vector || h->msix_vector) &&
2721 (h->interrupts_enabled == 0)); 2869 ((h->access.intr_pending(h) == 0) ||
2870 (h->interrupts_enabled == 0));
2722} 2871}
2723 2872
2724static inline int bad_tag(struct ctlr_info *h, __u32 tag_index, 2873static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2725 __u32 raw_tag) 2874 u32 raw_tag)
2726{ 2875{
2727 if (unlikely(tag_index >= h->nr_cmds)) { 2876 if (unlikely(tag_index >= h->nr_cmds)) {
2728 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 2877 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
@@ -2731,7 +2880,7 @@ static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
2731 return 0; 2880 return 0;
2732} 2881}
2733 2882
2734static inline void finish_cmd(struct CommandList *c, __u32 raw_tag) 2883static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2735{ 2884{
2736 removeQ(c); 2885 removeQ(c);
2737 if (likely(c->cmd_type == CMD_SCSI)) 2886 if (likely(c->cmd_type == CMD_SCSI))
@@ -2740,42 +2889,79 @@ static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
2740 complete(c->waiting); 2889 complete(c->waiting);
2741} 2890}
2742 2891
2892static inline u32 hpsa_tag_contains_index(u32 tag)
2893{
2894#define DIRECT_LOOKUP_BIT 0x10
2895 return tag & DIRECT_LOOKUP_BIT;
2896}
2897
2898static inline u32 hpsa_tag_to_index(u32 tag)
2899{
2900#define DIRECT_LOOKUP_SHIFT 5
2901 return tag >> DIRECT_LOOKUP_SHIFT;
2902}
2903
2904static inline u32 hpsa_tag_discard_error_bits(u32 tag)
2905{
2906#define HPSA_ERROR_BITS 0x03
2907 return tag & ~HPSA_ERROR_BITS;
2908}
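
A quick round-trip of this tag encoding (the command index is invented): bits above bit 5 hold the index, bit 4 flags a direct-lookup tag, and the controller reports status in the low two bits.

#include <stdint.h>
#include <stdio.h>

#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
#define HPSA_ERROR_BITS 0x03

int main(void)
{
	uint32_t cmdindex = 42;	/* fabricated slot in h->cmd_pool */
	uint32_t tag = (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;

	printf("tag=0x%08x direct=%u index=%u errbits=0x%x\n",
	       tag, !!(tag & DIRECT_LOOKUP_BIT),
	       tag >> DIRECT_LOOKUP_SHIFT, tag & HPSA_ERROR_BITS);
	return 0;
}

Non-indexed completions are matched the slow way instead: process_nonindexed_cmd below compares c->busaddr with the low five bits masked off (0xFFFFFFE0).
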
2909
2910/* process completion of an indexed ("direct lookup") command */
2911static inline u32 process_indexed_cmd(struct ctlr_info *h,
2912 u32 raw_tag)
2913{
2914 u32 tag_index;
2915 struct CommandList *c;
2916
2917 tag_index = hpsa_tag_to_index(raw_tag);
2918 if (bad_tag(h, tag_index, raw_tag))
2919 return next_command(h);
2920 c = h->cmd_pool + tag_index;
2921 finish_cmd(c, raw_tag);
2922 return next_command(h);
2923}
2924
2925/* process completion of a non-indexed command */
2926static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2927 u32 raw_tag)
2928{
2929 u32 tag;
2930 struct CommandList *c = NULL;
2931 struct hlist_node *tmp;
2932
2933 tag = hpsa_tag_discard_error_bits(raw_tag);
2934 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2935 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2936 finish_cmd(c, raw_tag);
2937 return next_command(h);
2938 }
2939 }
2940 bad_tag(h, h->nr_cmds + 1, raw_tag);
2941 return next_command(h);
2942}
2943
2743static irqreturn_t do_hpsa_intr(int irq, void *dev_id) 2944static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2744{ 2945{
2745 struct ctlr_info *h = dev_id; 2946 struct ctlr_info *h = dev_id;
2746 struct CommandList *c;
2747 unsigned long flags; 2947 unsigned long flags;
2748 __u32 raw_tag, tag, tag_index; 2948 u32 raw_tag;
2749 struct hlist_node *tmp;
2750 2949
2751 if (interrupt_not_for_us(h)) 2950 if (interrupt_not_for_us(h))
2752 return IRQ_NONE; 2951 return IRQ_NONE;
2753 spin_lock_irqsave(&h->lock, flags); 2952 spin_lock_irqsave(&h->lock, flags);
2754 while (interrupt_pending(h)) { 2953 raw_tag = get_next_completion(h);
2755 while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) { 2954 while (raw_tag != FIFO_EMPTY) {
2756 if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) { 2955 if (hpsa_tag_contains_index(raw_tag))
2757 tag_index = HPSA_TAG_TO_INDEX(raw_tag); 2956 raw_tag = process_indexed_cmd(h, raw_tag);
2758 if (bad_tag(h, tag_index, raw_tag)) 2957 else
2759 return IRQ_HANDLED; 2958 raw_tag = process_nonindexed_cmd(h, raw_tag);
2760 c = h->cmd_pool + tag_index;
2761 finish_cmd(c, raw_tag);
2762 continue;
2763 }
2764 tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
2765 c = NULL;
2766 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2767 if (c->busaddr == tag) {
2768 finish_cmd(c, raw_tag);
2769 break;
2770 }
2771 }
2772 }
2773 } 2959 }
2774 spin_unlock_irqrestore(&h->lock, flags); 2960 spin_unlock_irqrestore(&h->lock, flags);
2775 return IRQ_HANDLED; 2961 return IRQ_HANDLED;
2776} 2962}
2777 2963
2778/* Send a message CDB to the firmware. */ 2964/* Send a message CDB to the firmware. */
2779static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 2965static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2780 unsigned char type) 2966 unsigned char type)
2781{ 2967{
@@ -2841,7 +3027,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2841 3027
2842 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 3028 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
2843 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 3029 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2844 if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32) 3030 if (hpsa_tag_discard_error_bits(tag) == paddr32)
2845 break; 3031 break;
2846 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 3032 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
2847 } 3033 }
@@ -3063,7 +3249,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3063 */ 3249 */
3064 3250
3065static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, 3251static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3066 struct pci_dev *pdev, __u32 board_id) 3252 struct pci_dev *pdev, u32 board_id)
3067{ 3253{
3068#ifdef CONFIG_PCI_MSI 3254#ifdef CONFIG_PCI_MSI
3069 int err; 3255 int err;
@@ -3107,22 +3293,22 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3107default_int_mode: 3293default_int_mode:
3108#endif /* CONFIG_PCI_MSI */ 3294#endif /* CONFIG_PCI_MSI */
3109 /* if we get here we're going to use the default interrupt mode */ 3295 /* if we get here we're going to use the default interrupt mode */
3110 h->intr[SIMPLE_MODE_INT] = pdev->irq; 3296 h->intr[PERF_MODE_INT] = pdev->irq;
3111 return;
3112} 3297}
3113 3298
3114static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) 3299static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3115{ 3300{
3116 ushort subsystem_vendor_id, subsystem_device_id, command; 3301 ushort subsystem_vendor_id, subsystem_device_id, command;
3117 __u32 board_id, scratchpad = 0; 3302 u32 board_id, scratchpad = 0;
3118 __u64 cfg_offset; 3303 u64 cfg_offset;
3119 __u32 cfg_base_addr; 3304 u32 cfg_base_addr;
3120 __u64 cfg_base_addr_index; 3305 u64 cfg_base_addr_index;
3306 u32 trans_offset;
3121 int i, prod_index, err; 3307 int i, prod_index, err;
3122 3308
3123 subsystem_vendor_id = pdev->subsystem_vendor; 3309 subsystem_vendor_id = pdev->subsystem_vendor;
3124 subsystem_device_id = pdev->subsystem_device; 3310 subsystem_device_id = pdev->subsystem_device;
3125 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | 3311 board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
3126 subsystem_vendor_id); 3312 subsystem_vendor_id);
3127 3313
3128 for (i = 0; i < ARRAY_SIZE(products); i++) 3314 for (i = 0; i < ARRAY_SIZE(products); i++)
@@ -3199,7 +3385,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3199 3385
3200 /* get the address index number */ 3386 /* get the address index number */
3201 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET); 3387 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
3202 cfg_base_addr &= (__u32) 0x0000ffff; 3388 cfg_base_addr &= (u32) 0x0000ffff;
3203 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); 3389 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3204 if (cfg_base_addr_index == -1) { 3390 if (cfg_base_addr_index == -1) {
3205 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 3391 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
@@ -3211,11 +3397,14 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3211 h->cfgtable = remap_pci_mem(pci_resource_start(pdev, 3397 h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3212 cfg_base_addr_index) + cfg_offset, 3398 cfg_base_addr_index) + cfg_offset,
3213 sizeof(h->cfgtable)); 3399 sizeof(h->cfgtable));
3214 h->board_id = board_id; 3400 /* Find performant mode table. */
3215 3401 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3216 /* Query controller for max supported commands: */ 3402 h->transtable = remap_pci_mem(pci_resource_start(pdev,
3217 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 3403 cfg_base_addr_index)+cfg_offset+trans_offset,
3404 sizeof(*h->transtable));
3218 3405
3406 h->board_id = board_id;
3407 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3219 h->product_name = products[prod_index].product_name; 3408 h->product_name = products[prod_index].product_name;
3220 h->access = *(products[prod_index].access); 3409 h->access = *(products[prod_index].access);
3221 /* Allow room for some ioctls */ 3410 /* Allow room for some ioctls */
@@ -3232,7 +3421,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3232#ifdef CONFIG_X86 3421#ifdef CONFIG_X86
3233 { 3422 {
3234 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 3423 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3235 __u32 prefetch; 3424 u32 prefetch;
3236 prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); 3425 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3237 prefetch |= 0x100; 3426 prefetch |= 0x100;
3238 writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); 3427 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
@@ -3244,7 +3433,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3244 * physical memory. 3433 * physical memory.
3245 */ 3434 */
3246 if (board_id == 0x3225103C) { 3435 if (board_id == 0x3225103C) {
3247 __u32 dma_prefetch; 3436 u32 dma_prefetch;
3248 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 3437 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3249 dma_prefetch |= 0x8000; 3438 dma_prefetch |= 0x8000;
3250 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 3439 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
@@ -3286,10 +3475,26 @@ err_out_free_res:
3286 return err; 3475 return err;
3287} 3476}
3288 3477
3478static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3479{
3480 int rc;
3481
3482#define HBA_INQUIRY_BYTE_COUNT 64
3483 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3484 if (!h->hba_inquiry_data)
3485 return;
3486 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3487 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3488 if (rc != 0) {
3489 kfree(h->hba_inquiry_data);
3490 h->hba_inquiry_data = NULL;
3491 }
3492}
3493
3289static int __devinit hpsa_init_one(struct pci_dev *pdev, 3494static int __devinit hpsa_init_one(struct pci_dev *pdev,
3290 const struct pci_device_id *ent) 3495 const struct pci_device_id *ent)
3291{ 3496{
3292 int i; 3497 int i, rc;
3293 int dac; 3498 int dac;
3294 struct ctlr_info *h; 3499 struct ctlr_info *h;
3295 3500
@@ -3314,17 +3519,23 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3314 } 3519 }
3315 } 3520 }
3316 3521
3317 BUILD_BUG_ON(sizeof(struct CommandList) % 8); 3522 /* Command structures must be aligned on a 32-byte boundary because
3523 * the 5 lower bits of the address are used by the hardware and by
3524 * the driver. See comments in hpsa.h for more info.
3525 */
3526#define COMMANDLIST_ALIGNMENT 32
3527 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
3318 h = kzalloc(sizeof(*h), GFP_KERNEL); 3528 h = kzalloc(sizeof(*h), GFP_KERNEL);
3319 if (!h) 3529 if (!h)
3320 return -1; 3530 return -ENOMEM;
3321 3531
3322 h->busy_initializing = 1; 3532 h->busy_initializing = 1;
3323 INIT_HLIST_HEAD(&h->cmpQ); 3533 INIT_HLIST_HEAD(&h->cmpQ);
3324 INIT_HLIST_HEAD(&h->reqQ); 3534 INIT_HLIST_HEAD(&h->reqQ);
3325 mutex_init(&h->busy_shutting_down); 3535 mutex_init(&h->busy_shutting_down);
3326 init_completion(&h->scan_wait); 3536 init_completion(&h->scan_wait);
3327 if (hpsa_pci_init(h, pdev) != 0) 3537 rc = hpsa_pci_init(h, pdev);
3538 if (rc != 0)
3328 goto clean1; 3539 goto clean1;
3329 3540
3330 sprintf(h->devname, "hpsa%d", number_of_controllers); 3541 sprintf(h->devname, "hpsa%d", number_of_controllers);
@@ -3333,27 +3544,32 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3333 h->pdev = pdev; 3544 h->pdev = pdev;
3334 3545
3335 /* configure PCI DMA stuff */ 3546 /* configure PCI DMA stuff */
3336 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 3547 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3548 if (rc == 0) {
3337 dac = 1; 3549 dac = 1;
3338 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) 3550 } else {
3339 dac = 0; 3551 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3340 else { 3552 if (rc == 0) {
3341 dev_err(&pdev->dev, "no suitable DMA available\n"); 3553 dac = 0;
3342 goto clean1; 3554 } else {
3555 dev_err(&pdev->dev, "no suitable DMA available\n");
3556 goto clean1;
3557 }
3343 } 3558 }
3344 3559
3345 /* make sure the board interrupts are off */ 3560 /* make sure the board interrupts are off */
3346 h->access.set_intr_mask(h, HPSA_INTR_OFF); 3561 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3347 if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr, 3562 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
3348 IRQF_DISABLED | IRQF_SHARED, h->devname, h)) { 3563 IRQF_DISABLED, h->devname, h);
3564 if (rc) {
3349 dev_err(&pdev->dev, "unable to get irq %d for %s\n", 3565 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3350 h->intr[SIMPLE_MODE_INT], h->devname); 3566 h->intr[PERF_MODE_INT], h->devname);
3351 goto clean2; 3567 goto clean2;
3352 } 3568 }
3353 3569
3354 dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", 3570 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3355 h->devname, pdev->device, pci_name(pdev), 3571 h->devname, pdev->device,
3356 h->intr[SIMPLE_MODE_INT], dac ? "" : " not"); 3572 h->intr[PERF_MODE_INT], dac ? "" : " not");
3357 3573
3358 h->cmd_pool_bits = 3574 h->cmd_pool_bits =
3359 kmalloc(((h->nr_cmds + BITS_PER_LONG - 3575 kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3368,9 +3584,13 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3368 || (h->cmd_pool == NULL) 3584 || (h->cmd_pool == NULL)
3369 || (h->errinfo_pool == NULL)) { 3585 || (h->errinfo_pool == NULL)) {
3370 dev_err(&pdev->dev, "out of memory"); 3586 dev_err(&pdev->dev, "out of memory");
3587 rc = -ENOMEM;
3371 goto clean4; 3588 goto clean4;
3372 } 3589 }
3373 spin_lock_init(&h->lock); 3590 spin_lock_init(&h->lock);
3591 spin_lock_init(&h->scan_lock);
3592 init_waitqueue_head(&h->scan_wait_queue);
3593 h->scan_finished = 1; /* no scan currently in progress */
3374 3594
3375 pci_set_drvdata(pdev, h); 3595 pci_set_drvdata(pdev, h);
3376 memset(h->cmd_pool_bits, 0, 3596 memset(h->cmd_pool_bits, 0,
@@ -3382,6 +3602,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3382 /* Turn the interrupts on so we can service requests */ 3602 /* Turn the interrupts on so we can service requests */
3383 h->access.set_intr_mask(h, HPSA_INTR_ON); 3603 h->access.set_intr_mask(h, HPSA_INTR_ON);
3384 3604
3605 hpsa_put_ctlr_into_performant_mode(h);
3606 hpsa_hba_inquiry(h);
3385 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 3607 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3386 h->busy_initializing = 0; 3608 h->busy_initializing = 0;
3387 return 1; 3609 return 1;
@@ -3397,12 +3619,12 @@ clean4:
3397 h->nr_cmds * sizeof(struct ErrorInfo), 3619 h->nr_cmds * sizeof(struct ErrorInfo),
3398 h->errinfo_pool, 3620 h->errinfo_pool,
3399 h->errinfo_pool_dhandle); 3621 h->errinfo_pool_dhandle);
3400 free_irq(h->intr[SIMPLE_MODE_INT], h); 3622 free_irq(h->intr[PERF_MODE_INT], h);
3401clean2: 3623clean2:
3402clean1: 3624clean1:
3403 h->busy_initializing = 0; 3625 h->busy_initializing = 0;
3404 kfree(h); 3626 kfree(h);
3405 return -1; 3627 return rc;
3406} 3628}
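
The DMA-mask setup in hpsa_init_one above follows the usual probe-time fallback: try 64-bit addressing, drop to 32-bit, and fail only if neither sticks. A minimal model, with a stand-in for pci_set_dma_mask() that pretends the platform is 32-bit only:

#include <stdint.h>
#include <stdio.h>

static int set_dma_mask(uint64_t mask)
{
	/* stand-in: pretend only 32-bit DMA addressing is available */
	return mask > 0xffffffffULL ? -1 : 0;
}

int main(void)
{
	int dac;

	if (set_dma_mask(~0ULL) == 0) {
		dac = 1;	/* 64-bit DAC cycles */
	} else if (set_dma_mask(0xffffffffULL) == 0) {
		dac = 0;	/* 32-bit addressing */
	} else {
		fprintf(stderr, "no suitable DMA available\n");
		return 1;
	}
	printf("%susing DAC\n", dac ? "" : "not ");
	return 0;
}
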
3407 3629
3408static void hpsa_flush_cache(struct ctlr_info *h) 3630static void hpsa_flush_cache(struct ctlr_info *h)
@@ -3441,7 +3663,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
3441 */ 3663 */
3442 hpsa_flush_cache(h); 3664 hpsa_flush_cache(h);
3443 h->access.set_intr_mask(h, HPSA_INTR_OFF); 3665 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3444 free_irq(h->intr[2], h); 3666 free_irq(h->intr[PERF_MODE_INT], h);
3445#ifdef CONFIG_PCI_MSI 3667#ifdef CONFIG_PCI_MSI
3446 if (h->msix_vector) 3668 if (h->msix_vector)
3447 pci_disable_msix(h->pdev); 3669 pci_disable_msix(h->pdev);
@@ -3470,7 +3692,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3470 pci_free_consistent(h->pdev, 3692 pci_free_consistent(h->pdev,
3471 h->nr_cmds * sizeof(struct ErrorInfo), 3693 h->nr_cmds * sizeof(struct ErrorInfo),
3472 h->errinfo_pool, h->errinfo_pool_dhandle); 3694 h->errinfo_pool, h->errinfo_pool_dhandle);
3695 pci_free_consistent(h->pdev, h->reply_pool_size,
3696 h->reply_pool, h->reply_pool_dhandle);
3473 kfree(h->cmd_pool_bits); 3697 kfree(h->cmd_pool_bits);
3698 kfree(h->blockFetchTable);
3699 kfree(h->hba_inquiry_data);
3474 /* 3700 /*
3475 * Deliberately omit pci_disable_device(): it does something nasty to 3701 * Deliberately omit pci_disable_device(): it does something nasty to
3476 * Smart Array controllers that pci_enable_device does not undo 3702 * Smart Array controllers that pci_enable_device does not undo
@@ -3502,6 +3728,129 @@ static struct pci_driver hpsa_pci_driver = {
3502 .resume = hpsa_resume, 3728 .resume = hpsa_resume,
3503}; 3729};
3504 3730
3731/* Fill in bucket_map[], given nsgs (the max number of
3732 * scatter gather elements supported) and bucket[],
3733 * which is an array of 8 integers. The bucket[] array
3734 * contains 8 different DMA transfer sizes (in 16
3735 * byte increments) which the controller uses to fetch
3736 * commands. This function fills in bucket_map[], which
3737 * maps a given number of scatter gather elements to one of
3738 * the 8 DMA transfer sizes. The point of it is to allow the
3739 * controller to only do as much DMA as needed to fetch the
3740 * command, with the DMA transfer size encoded in the lower
3741 * bits of the command address.
3742 */
3743static void calc_bucket_map(int bucket[], int num_buckets,
3744 int nsgs, int *bucket_map)
3745{
3746 int i, j, b, size;
3747
3748 /* even a command with 0 SGs requires 4 blocks */
3749#define MINIMUM_TRANSFER_BLOCKS 4
3750#define NUM_BUCKETS 8
3751 /* Note, bucket_map must have nsgs+1 entries. */
3752 for (i = 0; i <= nsgs; i++) {
3753 /* Compute size of a command with i SG entries */
3754 size = i + MINIMUM_TRANSFER_BLOCKS;
3755 b = num_buckets; /* Assume the biggest bucket */
3756 /* Find the bucket that is just big enough */
3757 for (j = 0; j < 8; j++) {
3758 if (bucket[j] >= size) {
3759 b = j;
3760 break;
3761 }
3762 }
3763 /* for a command with i SG entries, use bucket b. */
3764 bucket_map[i] = b;
3765 }
3766}
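
Running the map by hand, with bucket[7] already bumped to max_sg_entries + 4 (32 + 4) as the caller below does, shows which fetch size each SG count lands in:

#include <stdio.h>

int main(void)
{
	/* the driver's bft[], with bft[7] rewritten to 32 + 4 = 36 */
	int bucket[8] = { 5, 6, 8, 10, 12, 20, 28, 36 };
	int i, j, b, size;

	for (i = 0; i <= 32; i += 8) {
		size = i + 4;	/* MINIMUM_TRANSFER_BLOCKS */
		b = 7;		/* bucket[7] covers the largest command */
		for (j = 0; j < 8; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		printf("%2d SGs -> %2d blocks -> bucket %d (%d blocks)\n",
		       i, size, b, bucket[b]);
	}
	return 0;
}
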
3767
3768static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
3769{
3770 u32 trans_support;
3771 u64 trans_offset;
3772 /* 5 = 1 s/g entry or 4k
3773 * 6 = 2 s/g entry or 8k
3774 * 8 = 4 s/g entry or 16k
3775 * 10 = 6 s/g entry or 24k
3776 */
3777 int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
3778 int i = 0;
3779 int l = 0;
3780 unsigned long register_value;
3781
3782 trans_support = readl(&(h->cfgtable->TransportSupport));
3783 if (!(trans_support & PERFORMANT_MODE))
3784 return;
3785
3786 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3787 h->max_sg_entries = 32;
3788 /* Performant mode ring buffer and supporting data structures */
3789 h->reply_pool_size = h->max_commands * sizeof(u64);
3790 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
3791 &(h->reply_pool_dhandle));
3792
3793 /* Need a block fetch table for performant mode */
3794 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
3795 sizeof(u32)), GFP_KERNEL);
3796
3797 if ((h->reply_pool == NULL)
3798 || (h->blockFetchTable == NULL))
3799 goto clean_up;
3800
3801 h->reply_pool_wraparound = 1; /* spec: init to 1 */
3802
3803 /* Controller spec: zero out this buffer. */
3804 memset(h->reply_pool, 0, h->reply_pool_size);
3805 h->reply_pool_head = h->reply_pool;
3806
3807 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3808 bft[7] = h->max_sg_entries + 4;
3809 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
3810 for (i = 0; i < 8; i++)
3811 writel(bft[i], &h->transtable->BlockFetch[i]);
3812
3813 /* size of controller ring buffer */
3814 writel(h->max_commands, &h->transtable->RepQSize);
3815 writel(1, &h->transtable->RepQCount);
3816 writel(0, &h->transtable->RepQCtrAddrLow32);
3817 writel(0, &h->transtable->RepQCtrAddrHigh32);
3818 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
3819 writel(0, &h->transtable->RepQAddr0High32);
3820 writel(CFGTBL_Trans_Performant,
3821 &(h->cfgtable->HostWrite.TransportRequest));
3822 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3823 /* under certain very rare conditions, this can take a while.
3824 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3825 * as we enter this code.) */
3826 for (l = 0; l < MAX_CONFIG_WAIT; l++) {
3827 register_value = readl(h->vaddr + SA5_DOORBELL);
3828 if (!(register_value & CFGTBL_ChangeReq))
3829 break;
3830 /* delay and try again */
3831 set_current_state(TASK_INTERRUPTIBLE);
3832 schedule_timeout(10);
3833 }
3834 register_value = readl(&(h->cfgtable->TransportActive));
3835 if (!(register_value & CFGTBL_Trans_Performant)) {
3836 dev_warn(&h->pdev->dev, "unable to get board into"
3837 " performant mode\n");
3838 return;
3839 }
3840
3841 /* Change the access methods to the performant access methods */
3842 h->access = SA5_performant_access;
3843 h->transMethod = CFGTBL_Trans_Performant;
3844
3845 return;
3846
3847clean_up:
3848 if (h->reply_pool)
3849 pci_free_consistent(h->pdev, h->reply_pool_size,
3850 h->reply_pool, h->reply_pool_dhandle);
3851 kfree(h->blockFetchTable);
3852}
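
The doorbell handshake at the end of the function — post CFGTBL_ChangeReq, poll until the controller clears it, then check TransportActive — reduces to a request/acknowledge bit. A toy model with plain variables standing in for the MMIO registers (the bit value and tick function are ours):

#include <stdint.h>
#include <stdio.h>

#define CHANGE_REQ	0x00000001u	/* stand-in for CFGTBL_ChangeReq */
#define MAX_CONFIG_WAIT	1000

static uint32_t doorbell;
static int cycles;

static void controller_tick(void)
{
	/* pretend the controller acknowledges after a short delay */
	if (++cycles >= 3)
		doorbell &= ~CHANGE_REQ;
}

int main(void)
{
	int l;

	doorbell |= CHANGE_REQ;		/* writel(CFGTBL_ChangeReq, ...) */
	for (l = 0; l < MAX_CONFIG_WAIT; l++) {
		controller_tick();
		if (!(doorbell & CHANGE_REQ))
			break;		/* acknowledged */
	}
	printf("transport change %s after %d polls\n",
	       (doorbell & CHANGE_REQ) ? "timed out" : "acknowledged", l);
	return 0;
}
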
3853
3505/* 3854/*
3506 * This is it. Register the PCI driver information for the cards we control 3855 * This is it. Register the PCI driver information for the cards we control
3507 * the OS will call our registered routines when it finds one of our cards. 3856 * the OS will call our registered routines when it finds one of our cards.
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6bd1949144b5..a0502b3ac17e 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -33,7 +33,7 @@ struct access_method {
33 struct CommandList *c); 33 struct CommandList *c);
34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); 34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
35 unsigned long (*fifo_full)(struct ctlr_info *h); 35 unsigned long (*fifo_full)(struct ctlr_info *h);
36 unsigned long (*intr_pending)(struct ctlr_info *h); 36 bool (*intr_pending)(struct ctlr_info *h);
37 unsigned long (*command_completed)(struct ctlr_info *h); 37 unsigned long (*command_completed)(struct ctlr_info *h);
38}; 38};
39 39
@@ -55,19 +55,20 @@ struct ctlr_info {
55 char *product_name; 55 char *product_name;
56 char firm_ver[4]; /* Firmware version */ 56 char firm_ver[4]; /* Firmware version */
57 struct pci_dev *pdev; 57 struct pci_dev *pdev;
58 __u32 board_id; 58 u32 board_id;
59 void __iomem *vaddr; 59 void __iomem *vaddr;
60 unsigned long paddr; 60 unsigned long paddr;
61 int nr_cmds; /* Number of commands allowed on this controller */ 61 int nr_cmds; /* Number of commands allowed on this controller */
62 struct CfgTable __iomem *cfgtable; 62 struct CfgTable __iomem *cfgtable;
63 int max_sg_entries;
63 int interrupts_enabled; 64 int interrupts_enabled;
64 int major; 65 int major;
65 int max_commands; 66 int max_commands;
66 int commands_outstanding; 67 int commands_outstanding;
67 int max_outstanding; /* Debug */ 68 int max_outstanding; /* Debug */
68 int usage_count; /* number of opens on all minor devices */ 69 int usage_count; /* number of opens on all minor devices */
69# define DOORBELL_INT 0 70# define PERF_MODE_INT 0
70# define PERF_MODE_INT 1 71# define DOORBELL_INT 1
71# define SIMPLE_MODE_INT 2 72# define SIMPLE_MODE_INT 2
72# define MEMQ_MODE_INT 3 73# define MEMQ_MODE_INT 3
73 unsigned int intr[4]; 74 unsigned int intr[4];
@@ -93,6 +94,9 @@ struct ctlr_info {
93 int nr_frees; 94 int nr_frees;
94 int busy_initializing; 95 int busy_initializing;
95 int busy_scanning; 96 int busy_scanning;
97 int scan_finished;
98 spinlock_t scan_lock;
99 wait_queue_head_t scan_wait_queue;
96 struct mutex busy_shutting_down; 100 struct mutex busy_shutting_down;
97 struct list_head scan_list; 101 struct list_head scan_list;
98 struct completion scan_wait; 102 struct completion scan_wait;
@@ -102,6 +106,24 @@ struct ctlr_info {
102 int ndevices; /* number of used elements in .dev[] array. */ 106 int ndevices; /* number of used elements in .dev[] array. */
103#define HPSA_MAX_SCSI_DEVS_PER_HBA 256 107#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
104 struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA]; 108 struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
109 /*
110 * Performant mode tables.
111 */
112 u32 trans_support;
113 u32 trans_offset;
114 struct TransTable_struct *transtable;
115 unsigned long transMethod;
116
117 /*
118 * Performant mode completion buffer
119 */
120 u64 *reply_pool;
121 dma_addr_t reply_pool_dhandle;
122 u64 *reply_pool_head;
123 size_t reply_pool_size;
124 unsigned char reply_pool_wraparound;
125 u32 *blockFetchTable;
126 unsigned char *hba_inquiry_data;
105}; 127};
106#define HPSA_ABORT_MSG 0 128#define HPSA_ABORT_MSG 0
107#define HPSA_DEVICE_RESET_MSG 1 129#define HPSA_DEVICE_RESET_MSG 1
@@ -164,9 +186,16 @@ struct ctlr_info {
164#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ 186#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
165 187
166#define HPSA_ERROR_BIT 0x02 188#define HPSA_ERROR_BIT 0x02
167#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04) 189
168#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3) 190/* Performant mode flags */
169#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3) 191#define SA5_PERF_INTR_PENDING 0x04
192#define SA5_PERF_INTR_OFF 0x05
193#define SA5_OUTDB_STATUS_PERF_BIT 0x01
194#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
195#define SA5_OUTDB_CLEAR 0xA0
197#define SA5_OUTDB_STATUS 0x9C
198
170 199
171#define HPSA_INTR_ON 1 200#define HPSA_INTR_ON 1
172#define HPSA_INTR_OFF 0 201#define HPSA_INTR_OFF 0
@@ -176,10 +205,8 @@ struct ctlr_info {
176static void SA5_submit_command(struct ctlr_info *h, 205static void SA5_submit_command(struct ctlr_info *h,
177 struct CommandList *c) 206 struct CommandList *c)
178{ 207{
179#ifdef HPSA_DEBUG 208 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
180 printk(KERN_WARNING "hpsa: Sending %x - down to controller\n", 209 c->Header.Tag.lower);
181 c->busaddr);
182#endif /* HPSA_DEBUG */
183 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 210 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
184 h->commands_outstanding++; 211 h->commands_outstanding++;
185 if (h->commands_outstanding > h->max_outstanding) 212 if (h->commands_outstanding > h->max_outstanding)
@@ -202,6 +229,52 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
202 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 229 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
203 } 230 }
204} 231}
232
233static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
234{
235 if (val) { /* turn on interrupts */
236 h->interrupts_enabled = 1;
237 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
238 } else {
239 h->interrupts_enabled = 0;
240 writel(SA5_PERF_INTR_OFF,
241 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
242 }
243}
244
245static unsigned long SA5_performant_completed(struct ctlr_info *h)
246{
247 unsigned long register_value = FIFO_EMPTY;
248
249 /* flush the controller write of the reply queue by reading
250 * outbound doorbell status register.
251 */
252 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
253 /* msi auto clears the interrupt pending bit. */
254 if (!(h->msi_vector || h->msix_vector)) {
255 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
256 /* Do a read in order to flush the write to the controller
257 * (as per spec.)
258 */
259 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
260 }
261
262 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
263 register_value = *(h->reply_pool_head);
264 (h->reply_pool_head)++;
265 h->commands_outstanding--;
266 } else {
267 register_value = FIFO_EMPTY;
268 }
269 /* Check for wraparound */
270 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
271 h->reply_pool_head = h->reply_pool;
272 h->reply_pool_wraparound ^= 1;
273 }
274
275 return register_value;
276}
277
205/* 278/*
206 * Returns true if fifo is full. 279 * Returns true if fifo is full.
207 * 280 *
@@ -228,10 +301,10 @@ static unsigned long SA5_completed(struct ctlr_info *h)
228 301
229#ifdef HPSA_DEBUG 302#ifdef HPSA_DEBUG
230 if (register_value != FIFO_EMPTY) 303 if (register_value != FIFO_EMPTY)
231 printk(KERN_INFO "hpsa: Read %lx back from board\n", 304 dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
232 register_value); 305 register_value);
233 else 306 else
234 printk(KERN_INFO "hpsa: FIFO Empty read\n"); 307 dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
235#endif 308#endif
236 309
237 return register_value; 310 return register_value;
@@ -239,18 +312,28 @@ static unsigned long SA5_completed(struct ctlr_info *h)
239/* 312/*
240 * Returns true if an interrupt is pending.. 313 * Returns true if an interrupt is pending..
241 */ 314 */
242static unsigned long SA5_intr_pending(struct ctlr_info *h) 315static bool SA5_intr_pending(struct ctlr_info *h)
243{ 316{
244 unsigned long register_value = 317 unsigned long register_value =
245 readl(h->vaddr + SA5_INTR_STATUS); 318 readl(h->vaddr + SA5_INTR_STATUS);
246#ifdef HPSA_DEBUG 319 dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
247 printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value); 320 return register_value & SA5_INTR_PENDING;
248#endif /* HPSA_DEBUG */
249 if (register_value & SA5_INTR_PENDING)
250 return 1;
251 return 0 ;
252} 321}
253 322
323static bool SA5_performant_intr_pending(struct ctlr_info *h)
324{
325 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
326
327 if (!register_value)
328 return false;
329
330 if (h->msi_vector || h->msix_vector)
331 return true;
332
333 /* Read outbound doorbell to flush */
334 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
335 return register_value & SA5_OUTDB_STATUS_PERF_BIT;
336}
254 337
255static struct access_method SA5_access = { 338static struct access_method SA5_access = {
256 SA5_submit_command, 339 SA5_submit_command,
@@ -260,14 +343,19 @@ static struct access_method SA5_access = {
260 SA5_completed, 343 SA5_completed,
261}; 344};
262 345
346static struct access_method SA5_performant_access = {
347 SA5_submit_command,
348 SA5_performant_intr_mask,
349 SA5_fifo_full,
350 SA5_performant_intr_pending,
351 SA5_performant_completed,
352};
353
263struct board_type { 354struct board_type {
264 __u32 board_id; 355 u32 board_id;
265 char *product_name; 356 char *product_name;
266 struct access_method *access; 357 struct access_method *access;
267}; 358};
268 359
269
270/* end of old hpsa_scsi.h file */
271
272#endif /* HPSA_H */ 360#endif /* HPSA_H */
273 361
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 12d71387ed9a..3e0abdf76689 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -101,19 +101,20 @@
101#define CFGTBL_AccCmds 0x00000001l 101#define CFGTBL_AccCmds 0x00000001l
102 102
103#define CFGTBL_Trans_Simple 0x00000002l 103#define CFGTBL_Trans_Simple 0x00000002l
104#define CFGTBL_Trans_Performant 0x00000004l
104 105
105#define CFGTBL_BusType_Ultra2 0x00000001l 106#define CFGTBL_BusType_Ultra2 0x00000001l
106#define CFGTBL_BusType_Ultra3 0x00000002l 107#define CFGTBL_BusType_Ultra3 0x00000002l
107#define CFGTBL_BusType_Fibre1G 0x00000100l 108#define CFGTBL_BusType_Fibre1G 0x00000100l
108#define CFGTBL_BusType_Fibre2G 0x00000200l 109#define CFGTBL_BusType_Fibre2G 0x00000200l
109struct vals32 { 110struct vals32 {
110 __u32 lower; 111 u32 lower;
111 __u32 upper; 112 u32 upper;
112}; 113};
113 114
114union u64bit { 115union u64bit {
115 struct vals32 val32; 116 struct vals32 val32;
116 __u64 val; 117 u64 val;
117}; 118};
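
This union is how the driver assigns a 64-bit DMA handle once (temp64.val = ...) and reads back the two 32-bit halves for ErrDesc.Addr; note the aliasing relies on a little-endian host. A sketch with an invented handle:

#include <stdint.h>
#include <stdio.h>

struct vals32 {
	uint32_t lower;
	uint32_t upper;
};

union u64bit {
	struct vals32 val32;
	uint64_t val;
};

int main(void)
{
	union u64bit t;

	t.val = 0x0000000123456780ULL;	/* fabricated DMA handle */
	/* on a little-endian host, val32 aliases the two halves */
	printf("lower=0x%08x upper=0x%08x\n", t.val32.lower, t.val32.upper);
	return 0;
}
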
118 119
119/* FIXME this is a per controller value (barf!) */ 120/* FIXME this is a per controller value (barf!) */
@@ -126,34 +127,34 @@ union u64bit {
126 127
127#define HPSA_INQUIRY 0x12 128#define HPSA_INQUIRY 0x12
128struct InquiryData { 129struct InquiryData {
129 __u8 data_byte[36]; 130 u8 data_byte[36];
130}; 131};
131 132
132#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ 133#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
133#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ 134#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
134struct ReportLUNdata { 135struct ReportLUNdata {
135 __u8 LUNListLength[4]; 136 u8 LUNListLength[4];
136 __u32 reserved; 137 u32 reserved;
137 __u8 LUN[HPSA_MAX_LUN][8]; 138 u8 LUN[HPSA_MAX_LUN][8];
138}; 139};
139 140
140struct ReportExtendedLUNdata { 141struct ReportExtendedLUNdata {
141 __u8 LUNListLength[4]; 142 u8 LUNListLength[4];
142 __u8 extended_response_flag; 143 u8 extended_response_flag;
143 __u8 reserved[3]; 144 u8 reserved[3];
144 __u8 LUN[HPSA_MAX_LUN][24]; 145 u8 LUN[HPSA_MAX_LUN][24];
145}; 146};
146 147
147struct SenseSubsystem_info { 148struct SenseSubsystem_info {
148 __u8 reserved[36]; 149 u8 reserved[36];
149 __u8 portname[8]; 150 u8 portname[8];
150 __u8 reserved1[1108]; 151 u8 reserved1[1108];
151}; 152};
152 153
153#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */ 154#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
154struct ReadCapdata { 155struct ReadCapdata {
155 __u8 total_size[4]; /* Total size in blocks */ 156 u8 total_size[4]; /* Total size in blocks */
156 __u8 block_size[4]; /* Size of blocks in bytes */ 157 u8 block_size[4]; /* Size of blocks in bytes */
157}; 158};
158 159
159#if 0 160#if 0
@@ -174,112 +175,131 @@ struct ReadCapdata {
174/* Command List Structure */ 175/* Command List Structure */
175union SCSI3Addr { 176union SCSI3Addr {
176 struct { 177 struct {
177 __u8 Dev; 178 u8 Dev;
178 __u8 Bus:6; 179 u8 Bus:6;
179 __u8 Mode:2; /* b00 */ 180 u8 Mode:2; /* b00 */
180 } PeripDev; 181 } PeripDev;
181 struct { 182 struct {
182 __u8 DevLSB; 183 u8 DevLSB;
183 __u8 DevMSB:6; 184 u8 DevMSB:6;
184 __u8 Mode:2; /* b01 */ 185 u8 Mode:2; /* b01 */
185 } LogDev; 186 } LogDev;
186 struct { 187 struct {
187 __u8 Dev:5; 188 u8 Dev:5;
188 __u8 Bus:3; 189 u8 Bus:3;
189 __u8 Targ:6; 190 u8 Targ:6;
190 __u8 Mode:2; /* b10 */ 191 u8 Mode:2; /* b10 */
191 } LogUnit; 192 } LogUnit;
192}; 193};
193 194
194struct PhysDevAddr { 195struct PhysDevAddr {
195 __u32 TargetId:24; 196 u32 TargetId:24;
196 __u32 Bus:6; 197 u32 Bus:6;
197 __u32 Mode:2; 198 u32 Mode:2;
198 /* 2 level target device addr */ 199 /* 2 level target device addr */
199 union SCSI3Addr Target[2]; 200 union SCSI3Addr Target[2];
200}; 201};
201 202
202struct LogDevAddr { 203struct LogDevAddr {
203 __u32 VolId:30; 204 u32 VolId:30;
204 __u32 Mode:2; 205 u32 Mode:2;
205 __u8 reserved[4]; 206 u8 reserved[4];
206}; 207};
207 208
208union LUNAddr { 209union LUNAddr {
209 __u8 LunAddrBytes[8]; 210 u8 LunAddrBytes[8];
210 union SCSI3Addr SCSI3Lun[4]; 211 union SCSI3Addr SCSI3Lun[4];
211 struct PhysDevAddr PhysDev; 212 struct PhysDevAddr PhysDev;
212 struct LogDevAddr LogDev; 213 struct LogDevAddr LogDev;
213}; 214};
214 215
215struct CommandListHeader { 216struct CommandListHeader {
216 __u8 ReplyQueue; 217 u8 ReplyQueue;
217 __u8 SGList; 218 u8 SGList;
218 __u16 SGTotal; 219 u16 SGTotal;
219 struct vals32 Tag; 220 struct vals32 Tag;
220 union LUNAddr LUN; 221 union LUNAddr LUN;
221}; 222};
222 223
223struct RequestBlock { 224struct RequestBlock {
224 __u8 CDBLen; 225 u8 CDBLen;
225 struct { 226 struct {
226 __u8 Type:3; 227 u8 Type:3;
227 __u8 Attribute:3; 228 u8 Attribute:3;
228 __u8 Direction:2; 229 u8 Direction:2;
229 } Type; 230 } Type;
230 __u16 Timeout; 231 u16 Timeout;
231 __u8 CDB[16]; 232 u8 CDB[16];
232}; 233};
233 234
234struct ErrDescriptor { 235struct ErrDescriptor {
235 struct vals32 Addr; 236 struct vals32 Addr;
236 __u32 Len; 237 u32 Len;
237}; 238};
238 239
239struct SGDescriptor { 240struct SGDescriptor {
240 struct vals32 Addr; 241 struct vals32 Addr;
241 __u32 Len; 242 u32 Len;
242 __u32 Ext; 243 u32 Ext;
243}; 244};
244 245
245union MoreErrInfo { 246union MoreErrInfo {
246 struct { 247 struct {
247 __u8 Reserved[3]; 248 u8 Reserved[3];
248 __u8 Type; 249 u8 Type;
249 __u32 ErrorInfo; 250 u32 ErrorInfo;
250 } Common_Info; 251 } Common_Info;
251 struct { 252 struct {
252 __u8 Reserved[2]; 253 u8 Reserved[2];
253 __u8 offense_size; /* size of offending entry */ 254 u8 offense_size; /* size of offending entry */
254 __u8 offense_num; /* byte # of offense 0-base */ 255 u8 offense_num; /* byte # of offense 0-base */
255 __u32 offense_value; 256 u32 offense_value;
256 } Invalid_Cmd; 257 } Invalid_Cmd;
257}; 258};
258struct ErrorInfo { 259struct ErrorInfo {
259 __u8 ScsiStatus; 260 u8 ScsiStatus;
260 __u8 SenseLen; 261 u8 SenseLen;
261 __u16 CommandStatus; 262 u16 CommandStatus;
262 __u32 ResidualCnt; 263 u32 ResidualCnt;
263 union MoreErrInfo MoreErrInfo; 264 union MoreErrInfo MoreErrInfo;
264 __u8 SenseInfo[SENSEINFOBYTES]; 265 u8 SenseInfo[SENSEINFOBYTES];
265}; 266};
266/* Command types */ 267/* Command types */
267#define CMD_IOCTL_PEND 0x01 268#define CMD_IOCTL_PEND 0x01
268#define CMD_SCSI 0x03 269#define CMD_SCSI 0x03
269 270
 271/* The size of this structure needs to be divisible by 32
 272 * for the new indexing method and performant mode.
273 */
274#define PAD32 32
275#define PAD64DIFF 0
276#define USEEXTRA ((sizeof(void *) - 4)/4)
277#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
278
279#define DIRECT_LOOKUP_SHIFT 5
280#define DIRECT_LOOKUP_BIT 0x10
281
282#define HPSA_ERROR_BIT 0x02
270struct ctlr_info; /* defined in hpsa.h */ 283struct ctlr_info; /* defined in hpsa.h */
271/* The size of this structure needs to be divisible by 8 284/* The size of this structure needs to be divisible by 32
272 * od on all architectures, because the controller uses 2 285 * on all architectures because low 5 bits of the addresses
273 * lower bits of the address, and the driver uses 1 lower 286 * are used as follows:
274 * bit (3 bits total.) 287 *
288 * bit 0: to device, used to indicate "performant mode" command
 289 * from device, indicates error status.
290 * bit 1-3: to device, indicates block fetch table entry for
291 * reducing DMA in fetching commands from host memory.
292 * bit 4: used to indicate whether tag is "direct lookup" (index),
293 * or a bus address.
275 */ 294 */
295
276struct CommandList { 296struct CommandList {
277 struct CommandListHeader Header; 297 struct CommandListHeader Header;
278 struct RequestBlock Request; 298 struct RequestBlock Request;
279 struct ErrDescriptor ErrDesc; 299 struct ErrDescriptor ErrDesc;
280 struct SGDescriptor SG[MAXSGENTRIES]; 300 struct SGDescriptor SG[MAXSGENTRIES];
281 /* information associated with the command */ 301 /* information associated with the command */
282 __u32 busaddr; /* physical addr of this record */ 302 u32 busaddr; /* physical addr of this record */
283 struct ErrorInfo *err_info; /* pointer to the allocated mem */ 303 struct ErrorInfo *err_info; /* pointer to the allocated mem */
284 struct ctlr_info *h; 304 struct ctlr_info *h;
285 int cmd_type; 305 int cmd_type;
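[Note: a hedged sketch of the tag layout described in the comment above; the helper names are invented for illustration, only DIRECT_LOOKUP_SHIFT, DIRECT_LOOKUP_BIT and HPSA_ERROR_BIT come from the patch.]

	/* Compose an index-style tag: the command index lives above bit 5,
	 * and DIRECT_LOOKUP_BIT marks the tag as an index, not a bus address. */
	static u32 hpsa_make_index_tag(u32 cmd_index)
	{
		return (cmd_index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
	}

	/* On completion the controller returns the tag with status in the low
	 * bits; per the comment above, HPSA_ERROR_BIT flags an error. */
	static bool hpsa_tag_has_error(u32 raw_tag)
	{
		return raw_tag & HPSA_ERROR_BIT;
	}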
@@ -291,35 +311,63 @@ struct CommandList {
291 struct completion *waiting; 311 struct completion *waiting;
292 int retry_count; 312 int retry_count;
293 void *scsi_cmd; 313 void *scsi_cmd;
314
 315/* On 64-bit architectures it so happens we need no padding for
 316 * this to be 32-byte aligned; on 32-bit systems we need 8 bytes
 317 * of padding. This provides it.
318 */
319#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
320 u8 pad[COMMANDLIST_PAD];
321
294}; 322};
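[Note: a worked example of the COMMANDLIST_PAD arithmetic under the usual LP64/ILP32 models: with sizeof(long) == 8 the pad is ((8 - 8)/4 * 8) == 0 bytes, and with sizeof(long) == 4 it is ((8 - 4)/4 * 8) == 8 bytes, keeping the structure size a multiple of 32 either way. A build-time check along these lines (not in the patch; the function name is invented) would catch any violation:]

	static inline void hpsa_check_cmdlist_size(void)
	{
		/* Fails the build if the padded structure stops being a
		 * multiple of 32, which would break the tag scheme above. */
		BUILD_BUG_ON(sizeof(struct CommandList) % 32 != 0);
	}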
295 323
296/* Configuration Table Structure */ 324/* Configuration Table Structure */
297struct HostWrite { 325struct HostWrite {
298 __u32 TransportRequest; 326 u32 TransportRequest;
299 __u32 Reserved; 327 u32 Reserved;
300 __u32 CoalIntDelay; 328 u32 CoalIntDelay;
301 __u32 CoalIntCount; 329 u32 CoalIntCount;
302}; 330};
303 331
332#define SIMPLE_MODE 0x02
333#define PERFORMANT_MODE 0x04
334#define MEMQ_MODE 0x08
335
304struct CfgTable { 336struct CfgTable {
305 __u8 Signature[4]; 337 u8 Signature[4];
306 __u32 SpecValence; 338 u32 SpecValence;
307 __u32 TransportSupport; 339 u32 TransportSupport;
308 __u32 TransportActive; 340 u32 TransportActive;
309 struct HostWrite HostWrite; 341 struct HostWrite HostWrite;
310 __u32 CmdsOutMax; 342 u32 CmdsOutMax;
311 __u32 BusTypes; 343 u32 BusTypes;
312 __u32 Reserved; 344 u32 TransMethodOffset;
313 __u8 ServerName[16]; 345 u8 ServerName[16];
314 __u32 HeartBeat; 346 u32 HeartBeat;
315 __u32 SCSI_Prefetch; 347 u32 SCSI_Prefetch;
348 u32 MaxScatterGatherElements;
349 u32 MaxLogicalUnits;
350 u32 MaxPhysicalDevices;
351 u32 MaxPhysicalDrivesPerLogicalUnit;
352 u32 MaxPerformantModeCommands;
353};
354
355#define NUM_BLOCKFETCH_ENTRIES 8
356struct TransTable_struct {
357 u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
358 u32 RepQSize;
359 u32 RepQCount;
360 u32 RepQCtrAddrLow32;
361 u32 RepQCtrAddrHigh32;
362 u32 RepQAddr0Low32;
363 u32 RepQAddr0High32;
316}; 364};
317 365
318struct hpsa_pci_info { 366struct hpsa_pci_info {
319 unsigned char bus; 367 unsigned char bus;
320 unsigned char dev_fn; 368 unsigned char dev_fn;
321 unsigned short domain; 369 unsigned short domain;
322 __u32 board_id; 370 u32 board_id;
323}; 371};
324 372
325#pragma pack() 373#pragma pack()
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 9c1e6a5b5af0..9a4b69d4f4eb 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2336,7 +2336,7 @@ static int option_setup(char *str)
2336 char *cur = str; 2336 char *cur = str;
2337 int i = 1; 2337 int i = 1;
2338 2338
2339 while (cur && isdigit(*cur) && i <= IM_MAX_HOSTS) { 2339 while (cur && isdigit(*cur) && i < IM_MAX_HOSTS) {
2340 ints[i++] = simple_strtoul(cur, NULL, 0); 2340 ints[i++] = simple_strtoul(cur, NULL, 0);
2341 if ((cur = strchr(cur, ',')) != NULL) 2341 if ((cur = strchr(cur, ',')) != NULL)
2342 cur++; 2342 cur++;
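[Note: the old condition permitted one store past the end of the parameter array. A minimal sketch of the overflow, assuming ints[] holds IM_MAX_HOSTS elements:]

	int ints[IM_MAX_HOSTS];		/* assumed size: valid indices 0..IM_MAX_HOSTS-1 */
	int i = 1;

	while (i <= IM_MAX_HOSTS)	/* old test: the final pass has i == IM_MAX_HOSTS */
		ints[i++] = 0;		/* ...and writes ints[IM_MAX_HOSTS], one past the end */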
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e475b7957c2d..e3a18e0ef276 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -40,7 +40,7 @@
40 * (CRQ), which is just a buffer of 16 byte entries in the receiver's 40 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
41 * Senders cannot access the buffer directly, but send messages by 41 * Senders cannot access the buffer directly, but send messages by
42 * making a hypervisor call and passing in the 16 bytes. The hypervisor 42 * making a hypervisor call and passing in the 16 bytes. The hypervisor
43 * puts the message in the next 16 byte space in round-robbin fashion, 43 * puts the message in the next 16 byte space in round-robin fashion,
44 * turns on the high order bit of the message (the valid bit), and 44 * turns on the high order bit of the message (the valid bit), and
45 * generates an interrupt to the receiver (if interrupts are turned on.) 45 * generates an interrupt to the receiver (if interrupts are turned on.)
46 * The receiver just turns off the valid bit when they have copied out 46 * The receiver just turns off the valid bit when they have copied out
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 517da3fd89d3..8a89ba900588 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -584,9 +584,10 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
584 struct iscsi_conn *conn = cls_conn->dd_data; 584 struct iscsi_conn *conn = cls_conn->dd_data;
585 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 585 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
586 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 586 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
587 struct socket *sock = tcp_sw_conn->sock;
587 588
588 /* userspace may have goofed up and not bound us */ 589 /* userspace may have goofed up and not bound us */
589 if (!tcp_sw_conn->sock) 590 if (!sock)
590 return; 591 return;
591 /* 592 /*
592 * Make sure our recv side is stopped. 593 * Make sure our recv side is stopped.
@@ -597,6 +598,11 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
597 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 598 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
598 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); 599 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
599 600
601 if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
602 sock->sk->sk_err = EIO;
603 wake_up_interruptible(sock->sk->sk_sleep);
604 }
605
600 iscsi_conn_stop(cls_conn, flag); 606 iscsi_conn_stop(cls_conn, flag);
601 iscsi_sw_tcp_release_conn(conn); 607 iscsi_sw_tcp_release_conn(conn);
602} 608}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c28a712fd4db..703eb6a88790 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1919,10 +1919,11 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1919static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) 1919static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1920{ 1920{
1921 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; 1921 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1922 struct iscsi_task *task = NULL; 1922 struct iscsi_task *task = NULL, *running_task;
1923 struct iscsi_cls_session *cls_session; 1923 struct iscsi_cls_session *cls_session;
1924 struct iscsi_session *session; 1924 struct iscsi_session *session;
1925 struct iscsi_conn *conn; 1925 struct iscsi_conn *conn;
1926 int i;
1926 1927
1927 cls_session = starget_to_session(scsi_target(sc->device)); 1928 cls_session = starget_to_session(scsi_target(sc->device));
1928 session = cls_session->dd_data; 1929 session = cls_session->dd_data;
@@ -1947,8 +1948,15 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1947 } 1948 }
1948 1949
1949 task = (struct iscsi_task *)sc->SCp.ptr; 1950 task = (struct iscsi_task *)sc->SCp.ptr;
1950 if (!task) 1951 if (!task) {
1952 /*
1953 * Raced with completion. Just reset timer, and let it
1954 * complete normally
1955 */
1956 rc = BLK_EH_RESET_TIMER;
1951 goto done; 1957 goto done;
1958 }
1959
1952 /* 1960 /*
1953 * If we have sent (at least queued to the network layer) a pdu or 1961 * If we have sent (at least queued to the network layer) a pdu or
1954 * recvd one for the task since the last timeout ask for 1962 * recvd one for the task since the last timeout ask for
@@ -1956,10 +1964,10 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1956 * we can check if it is the task or connection when we send the 1964 * we can check if it is the task or connection when we send the
1957 * nop as a ping. 1965 * nop as a ping.
1958 */ 1966 */
1959 if (time_after_eq(task->last_xfer, task->last_timeout)) { 1967 if (time_after(task->last_xfer, task->last_timeout)) {
1960 ISCSI_DBG_EH(session, "Command making progress. Asking " 1968 ISCSI_DBG_EH(session, "Command making progress. Asking "
1961 "scsi-ml for more time to complete. " 1969 "scsi-ml for more time to complete. "
1962 "Last data recv at %lu. Last timeout was at " 1970 "Last data xfer at %lu. Last timeout was at "
1963 "%lu\n.", task->last_xfer, task->last_timeout); 1971 "%lu\n.", task->last_xfer, task->last_timeout);
1964 task->have_checked_conn = false; 1972 task->have_checked_conn = false;
1965 rc = BLK_EH_RESET_TIMER; 1973 rc = BLK_EH_RESET_TIMER;
@@ -1977,6 +1985,43 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1977 goto done; 1985 goto done;
1978 } 1986 }
1979 1987
1988 for (i = 0; i < conn->session->cmds_max; i++) {
1989 running_task = conn->session->cmds[i];
1990 if (!running_task->sc || running_task == task ||
1991 running_task->state != ISCSI_TASK_RUNNING)
1992 continue;
1993
1994 /*
1995 * Only check if cmds started before this one have made
1996 * progress, or this could never fail
1997 */
1998 if (time_after(running_task->sc->jiffies_at_alloc,
1999 task->sc->jiffies_at_alloc))
2000 continue;
2001
2002 if (time_after(running_task->last_xfer, task->last_timeout)) {
2003 /*
2004 * This task has not made progress, but a task
2005 * started before us has transferred data since
2006 * we started/last-checked. We could be queueing
2007 * too many tasks or the LU is bad.
2008 *
2009 * If the device is bad the cmds ahead of us on
2010 * other devs will complete, and this loop will
2011 * eventually fail starting the scsi eh.
2012 */
2013 ISCSI_DBG_EH(session, "Command has not made progress "
2014 "but commands ahead of it have. "
2015 "Asking scsi-ml for more time to "
2016 "complete. Our last xfer vs running task "
2017 "last xfer %lu/%lu. Last check %lu.\n",
2018 task->last_xfer, running_task->last_xfer,
2019 task->last_timeout);
2020 rc = BLK_EH_RESET_TIMER;
2021 goto done;
2022 }
2023 }
2024
1980 /* Assumes nop timeout is shorter than scsi cmd timeout */ 2025 /* Assumes nop timeout is shorter than scsi cmd timeout */
1981 if (task->have_checked_conn) 2026 if (task->have_checked_conn)
1982 goto done; 2027 goto done;
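[Note: a condensed sketch of the ordered progress checks the timeout handler now makes; for_each_session_cmd() and started_before() are invented here for brevity and stand in for the explicit loop above.]

	/* 1. Did this task itself move data since its timer was armed? */
	if (time_after(task->last_xfer, task->last_timeout))
		return BLK_EH_RESET_TIMER;

	/* 2. Did an older running task move data? Then the queue is merely
	 *    backed up, so grant this command more time as well. */
	for_each_session_cmd(session, running_task)
		if (started_before(running_task, task) &&
		    time_after(running_task->last_xfer, task->last_timeout))
			return BLK_EH_RESET_TIMER;

	/* 3. Otherwise fall through to the nop-out (ping) connection test. */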
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index ab19b3b4be52..22775165bf6a 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * SCSI RDAM Protocol lib functions 2 * SCSI RDMA Protocol lib functions
3 * 3 *
4 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org> 4 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
5 * 5 *
@@ -328,7 +328,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
328 int offset, err = 0; 328 int offset, err = 0;
329 u8 format; 329 u8 format;
330 330
331 offset = cmd->add_cdb_len * 4; 331 offset = cmd->add_cdb_len & ~3;
332 332
333 dir = srp_cmd_direction(cmd); 333 dir = srp_cmd_direction(cmd);
334 if (dir == DMA_FROM_DEVICE) 334 if (dir == DMA_FROM_DEVICE)
@@ -366,7 +366,7 @@ static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
366{ 366{
367 struct srp_direct_buf *md; 367 struct srp_direct_buf *md;
368 struct srp_indirect_buf *id; 368 struct srp_indirect_buf *id;
369 int len = 0, offset = cmd->add_cdb_len * 4; 369 int len = 0, offset = cmd->add_cdb_len & ~3;
370 u8 fmt; 370 u8 fmt;
371 371
372 if (dir == DMA_TO_DEVICE) 372 if (dir == DMA_TO_DEVICE)
@@ -440,6 +440,6 @@ int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
440} 440}
441EXPORT_SYMBOL_GPL(srp_cmd_queue); 441EXPORT_SYMBOL_GPL(srp_cmd_queue);
442 442
443MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions"); 443MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
444MODULE_AUTHOR("FUJITA Tomonori"); 444MODULE_AUTHOR("FUJITA Tomonori");
445MODULE_LICENSE("GPL"); 445MODULE_LICENSE("GPL");
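[Note: in the SRP_CMD format the additional-CDB-length field sits in the upper six bits of its byte, so the raw byte already reads as a byte count with the two low bits reserved; masking them off is correct where multiplying by four overshot. A worked example with an illustrative value:]

	u8 add_cdb_len = 0x08;			/* wire byte: 2 extra CDB words in bits 7..2 */
	int old_offset = add_cdb_len * 4;	/* 32 - quadruples an already byte-scaled value */
	int new_offset = add_cdb_len & ~3;	/* 8  - just strips the two reserved bits */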
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1cc23a69db5e..84b696463a58 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -315,6 +315,9 @@ struct lpfc_vport {
315#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ 315#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
316#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ 316#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
317#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */ 317#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
318#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
319#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
320#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
318 321
319 uint32_t ct_flags; 322 uint32_t ct_flags;
320#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ 323#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -448,6 +451,8 @@ struct unsol_rcv_ct_ctx {
448 uint32_t ctxt_id; 451 uint32_t ctxt_id;
449 uint32_t SID; 452 uint32_t SID;
450 uint32_t oxid; 453 uint32_t oxid;
454 uint32_t flags;
455#define UNSOL_VALID 0x00000001
451}; 456};
452 457
453struct lpfc_hba { 458struct lpfc_hba {
@@ -499,6 +504,10 @@ struct lpfc_hba {
499 (struct lpfc_hba *); 504 (struct lpfc_hba *);
500 void (*lpfc_stop_port) 505 void (*lpfc_stop_port)
501 (struct lpfc_hba *); 506 (struct lpfc_hba *);
507 int (*lpfc_hba_init_link)
508 (struct lpfc_hba *);
509 int (*lpfc_hba_down_link)
510 (struct lpfc_hba *);
502 511
503 512
504 /* SLI4 specific HBA data structure */ 513 /* SLI4 specific HBA data structure */
@@ -613,6 +622,7 @@ struct lpfc_hba {
613 uint32_t cfg_enable_bg; 622 uint32_t cfg_enable_bg;
614 uint32_t cfg_log_verbose; 623 uint32_t cfg_log_verbose;
615 uint32_t cfg_aer_support; 624 uint32_t cfg_aer_support;
625 uint32_t cfg_suppress_link_up;
616 626
617 lpfc_vpd_t vpd; /* vital product data */ 627 lpfc_vpd_t vpd; /* vital product data */
618 628
@@ -790,7 +800,7 @@ struct lpfc_hba {
790 uint16_t vlan_id; 800 uint16_t vlan_id;
791 struct list_head fcf_conn_rec_list; 801 struct list_head fcf_conn_rec_list;
792 802
793 struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */ 803 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
794 struct list_head ct_ev_waiters; 804 struct list_head ct_ev_waiters;
795 struct unsol_rcv_ct_ctx ct_ctx[64]; 805 struct unsol_rcv_ct_ctx ct_ctx[64];
796 uint32_t ctx_idx; 806 uint32_t ctx_idx;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 91542f786edf..c992e8328f9e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -482,6 +482,41 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
482} 482}
483 483
484/** 484/**
485 * lpfc_link_state_store - Transition the link_state on an HBA port
486 * @dev: class device that is converted into a Scsi_host.
487 * @attr: device attribute, not used.
488 * @buf: one or more lpfc_polling_flags values.
489 * @count: not used.
490 *
491 * Returns:
492 * -EINVAL if the buffer is not "up" or "down"
493 * return from link state change function if non-zero
494 * length of the buf on success
495 **/
496static ssize_t
497lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
498 const char *buf, size_t count)
499{
500 struct Scsi_Host *shost = class_to_shost(dev);
501 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
502 struct lpfc_hba *phba = vport->phba;
503
504 int status = -EINVAL;
505
506 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
507 (phba->link_state == LPFC_LINK_DOWN))
508 status = phba->lpfc_hba_init_link(phba);
509 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
510 (phba->link_state >= LPFC_LINK_UP))
511 status = phba->lpfc_hba_down_link(phba);
512
513 if (status == 0)
514 return strlen(buf);
515 else
516 return status;
517}
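[Note: with the attribute made writable further below, the link can be toggled from userspace by writing "up" or "down" to the host's link_state attribute (typically exposed as /sys/class/scsi_host/hostN/link_state); the store handler returns -EINVAL for any other string, or for a request that does not match the current link state.]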
518
519/**
485 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports 520 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
486 * @dev: class device that is converted into a Scsi_host. 521 * @dev: class device that is converted into a Scsi_host.
487 * @attr: device attribute, not used. 522 * @attr: device attribute, not used.
@@ -1219,7 +1254,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1219 struct Scsi_Host *shost = class_to_shost(dev);\ 1254 struct Scsi_Host *shost = class_to_shost(dev);\
1220 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1255 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1221 struct lpfc_hba *phba = vport->phba;\ 1256 struct lpfc_hba *phba = vport->phba;\
1222 int val = 0;\ 1257 uint val = 0;\
1223 val = phba->cfg_##attr;\ 1258 val = phba->cfg_##attr;\
1224 return snprintf(buf, PAGE_SIZE, "%d\n",\ 1259 return snprintf(buf, PAGE_SIZE, "%d\n",\
1225 phba->cfg_##attr);\ 1260 phba->cfg_##attr);\
@@ -1247,7 +1282,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1247 struct Scsi_Host *shost = class_to_shost(dev);\ 1282 struct Scsi_Host *shost = class_to_shost(dev);\
1248 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1283 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1249 struct lpfc_hba *phba = vport->phba;\ 1284 struct lpfc_hba *phba = vport->phba;\
1250 int val = 0;\ 1285 uint val = 0;\
1251 val = phba->cfg_##attr;\ 1286 val = phba->cfg_##attr;\
1252 return snprintf(buf, PAGE_SIZE, "%#x\n",\ 1287 return snprintf(buf, PAGE_SIZE, "%#x\n",\
1253 phba->cfg_##attr);\ 1288 phba->cfg_##attr);\
@@ -1274,7 +1309,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1274 **/ 1309 **/
1275#define lpfc_param_init(attr, default, minval, maxval) \ 1310#define lpfc_param_init(attr, default, minval, maxval) \
1276static int \ 1311static int \
1277lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ 1312lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
1278{ \ 1313{ \
1279 if (val >= minval && val <= maxval) {\ 1314 if (val >= minval && val <= maxval) {\
1280 phba->cfg_##attr = val;\ 1315 phba->cfg_##attr = val;\
@@ -1309,7 +1344,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
1309 **/ 1344 **/
1310#define lpfc_param_set(attr, default, minval, maxval) \ 1345#define lpfc_param_set(attr, default, minval, maxval) \
1311static int \ 1346static int \
1312lpfc_##attr##_set(struct lpfc_hba *phba, int val) \ 1347lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
1313{ \ 1348{ \
1314 if (val >= minval && val <= maxval) {\ 1349 if (val >= minval && val <= maxval) {\
1315 phba->cfg_##attr = val;\ 1350 phba->cfg_##attr = val;\
@@ -1350,7 +1385,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
1350 struct Scsi_Host *shost = class_to_shost(dev);\ 1385 struct Scsi_Host *shost = class_to_shost(dev);\
1351 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1386 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1352 struct lpfc_hba *phba = vport->phba;\ 1387 struct lpfc_hba *phba = vport->phba;\
1353 int val=0;\ 1388 uint val = 0;\
1354 if (!isdigit(buf[0]))\ 1389 if (!isdigit(buf[0]))\
1355 return -EINVAL;\ 1390 return -EINVAL;\
1356 if (sscanf(buf, "%i", &val) != 1)\ 1391 if (sscanf(buf, "%i", &val) != 1)\
@@ -1382,7 +1417,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1382{ \ 1417{ \
1383 struct Scsi_Host *shost = class_to_shost(dev);\ 1418 struct Scsi_Host *shost = class_to_shost(dev);\
1384 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1419 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1385 int val = 0;\ 1420 uint val = 0;\
1386 val = vport->cfg_##attr;\ 1421 val = vport->cfg_##attr;\
1387 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ 1422 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
1388} 1423}
@@ -1409,7 +1444,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1409{ \ 1444{ \
1410 struct Scsi_Host *shost = class_to_shost(dev);\ 1445 struct Scsi_Host *shost = class_to_shost(dev);\
1411 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1446 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1412 int val = 0;\ 1447 uint val = 0;\
1413 val = vport->cfg_##attr;\ 1448 val = vport->cfg_##attr;\
1414 return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ 1449 return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
1415} 1450}
@@ -1434,7 +1469,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
1434 **/ 1469 **/
1435#define lpfc_vport_param_init(attr, default, minval, maxval) \ 1470#define lpfc_vport_param_init(attr, default, minval, maxval) \
1436static int \ 1471static int \
1437lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ 1472lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
1438{ \ 1473{ \
1439 if (val >= minval && val <= maxval) {\ 1474 if (val >= minval && val <= maxval) {\
1440 vport->cfg_##attr = val;\ 1475 vport->cfg_##attr = val;\
@@ -1466,7 +1501,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
1466 **/ 1501 **/
1467#define lpfc_vport_param_set(attr, default, minval, maxval) \ 1502#define lpfc_vport_param_set(attr, default, minval, maxval) \
1468static int \ 1503static int \
1469lpfc_##attr##_set(struct lpfc_vport *vport, int val) \ 1504lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
1470{ \ 1505{ \
1471 if (val >= minval && val <= maxval) {\ 1506 if (val >= minval && val <= maxval) {\
1472 vport->cfg_##attr = val;\ 1507 vport->cfg_##attr = val;\
@@ -1502,7 +1537,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
1502{ \ 1537{ \
1503 struct Scsi_Host *shost = class_to_shost(dev);\ 1538 struct Scsi_Host *shost = class_to_shost(dev);\
1504 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 1539 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
1505 int val=0;\ 1540 uint val = 0;\
1506 if (!isdigit(buf[0]))\ 1541 if (!isdigit(buf[0]))\
1507 return -EINVAL;\ 1542 return -EINVAL;\
1508 if (sscanf(buf, "%i", &val) != 1)\ 1543 if (sscanf(buf, "%i", &val) != 1)\
@@ -1515,22 +1550,22 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
1515 1550
1516 1551
1517#define LPFC_ATTR(name, defval, minval, maxval, desc) \ 1552#define LPFC_ATTR(name, defval, minval, maxval, desc) \
1518static int lpfc_##name = defval;\ 1553static uint lpfc_##name = defval;\
1519module_param(lpfc_##name, int, 0);\ 1554module_param(lpfc_##name, uint, 0);\
1520MODULE_PARM_DESC(lpfc_##name, desc);\ 1555MODULE_PARM_DESC(lpfc_##name, desc);\
1521lpfc_param_init(name, defval, minval, maxval) 1556lpfc_param_init(name, defval, minval, maxval)
1522 1557
1523#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ 1558#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
1524static int lpfc_##name = defval;\ 1559static uint lpfc_##name = defval;\
1525module_param(lpfc_##name, int, 0);\ 1560module_param(lpfc_##name, uint, 0);\
1526MODULE_PARM_DESC(lpfc_##name, desc);\ 1561MODULE_PARM_DESC(lpfc_##name, desc);\
1527lpfc_param_show(name)\ 1562lpfc_param_show(name)\
1528lpfc_param_init(name, defval, minval, maxval)\ 1563lpfc_param_init(name, defval, minval, maxval)\
1529static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1564static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1530 1565
1531#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ 1566#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
1532static int lpfc_##name = defval;\ 1567static uint lpfc_##name = defval;\
1533module_param(lpfc_##name, int, 0);\ 1568module_param(lpfc_##name, uint, 0);\
1534MODULE_PARM_DESC(lpfc_##name, desc);\ 1569MODULE_PARM_DESC(lpfc_##name, desc);\
1535lpfc_param_show(name)\ 1570lpfc_param_show(name)\
1536lpfc_param_init(name, defval, minval, maxval)\ 1571lpfc_param_init(name, defval, minval, maxval)\
@@ -1540,16 +1575,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1540 lpfc_##name##_show, lpfc_##name##_store) 1575 lpfc_##name##_show, lpfc_##name##_store)
1541 1576
1542#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ 1577#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
1543static int lpfc_##name = defval;\ 1578static uint lpfc_##name = defval;\
1544module_param(lpfc_##name, int, 0);\ 1579module_param(lpfc_##name, uint, 0);\
1545MODULE_PARM_DESC(lpfc_##name, desc);\ 1580MODULE_PARM_DESC(lpfc_##name, desc);\
1546lpfc_param_hex_show(name)\ 1581lpfc_param_hex_show(name)\
1547lpfc_param_init(name, defval, minval, maxval)\ 1582lpfc_param_init(name, defval, minval, maxval)\
1548static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1583static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1549 1584
1550#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ 1585#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
1551static int lpfc_##name = defval;\ 1586static uint lpfc_##name = defval;\
1552module_param(lpfc_##name, int, 0);\ 1587module_param(lpfc_##name, uint, 0);\
1553MODULE_PARM_DESC(lpfc_##name, desc);\ 1588MODULE_PARM_DESC(lpfc_##name, desc);\
1554lpfc_param_hex_show(name)\ 1589lpfc_param_hex_show(name)\
1555lpfc_param_init(name, defval, minval, maxval)\ 1590lpfc_param_init(name, defval, minval, maxval)\
@@ -1559,22 +1594,22 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1559 lpfc_##name##_show, lpfc_##name##_store) 1594 lpfc_##name##_show, lpfc_##name##_store)
1560 1595
1561#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ 1596#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
1562static int lpfc_##name = defval;\ 1597static uint lpfc_##name = defval;\
1563module_param(lpfc_##name, int, 0);\ 1598module_param(lpfc_##name, uint, 0);\
1564MODULE_PARM_DESC(lpfc_##name, desc);\ 1599MODULE_PARM_DESC(lpfc_##name, desc);\
1565lpfc_vport_param_init(name, defval, minval, maxval) 1600lpfc_vport_param_init(name, defval, minval, maxval)
1566 1601
1567#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ 1602#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
1568static int lpfc_##name = defval;\ 1603static uint lpfc_##name = defval;\
1569module_param(lpfc_##name, int, 0);\ 1604module_param(lpfc_##name, uint, 0);\
1570MODULE_PARM_DESC(lpfc_##name, desc);\ 1605MODULE_PARM_DESC(lpfc_##name, desc);\
1571lpfc_vport_param_show(name)\ 1606lpfc_vport_param_show(name)\
1572lpfc_vport_param_init(name, defval, minval, maxval)\ 1607lpfc_vport_param_init(name, defval, minval, maxval)\
1573static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1608static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1574 1609
1575#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ 1610#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
1576static int lpfc_##name = defval;\ 1611static uint lpfc_##name = defval;\
1577module_param(lpfc_##name, int, 0);\ 1612module_param(lpfc_##name, uint, 0);\
1578MODULE_PARM_DESC(lpfc_##name, desc);\ 1613MODULE_PARM_DESC(lpfc_##name, desc);\
1579lpfc_vport_param_show(name)\ 1614lpfc_vport_param_show(name)\
1580lpfc_vport_param_init(name, defval, minval, maxval)\ 1615lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1584,16 +1619,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1584 lpfc_##name##_show, lpfc_##name##_store) 1619 lpfc_##name##_show, lpfc_##name##_store)
1585 1620
1586#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ 1621#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
1587static int lpfc_##name = defval;\ 1622static uint lpfc_##name = defval;\
1588module_param(lpfc_##name, int, 0);\ 1623module_param(lpfc_##name, uint, 0);\
1589MODULE_PARM_DESC(lpfc_##name, desc);\ 1624MODULE_PARM_DESC(lpfc_##name, desc);\
1590lpfc_vport_param_hex_show(name)\ 1625lpfc_vport_param_hex_show(name)\
1591lpfc_vport_param_init(name, defval, minval, maxval)\ 1626lpfc_vport_param_init(name, defval, minval, maxval)\
1592static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) 1627static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
1593 1628
1594#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ 1629#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
1595static int lpfc_##name = defval;\ 1630static uint lpfc_##name = defval;\
1596module_param(lpfc_##name, int, 0);\ 1631module_param(lpfc_##name, uint, 0);\
1597MODULE_PARM_DESC(lpfc_##name, desc);\ 1632MODULE_PARM_DESC(lpfc_##name, desc);\
1598lpfc_vport_param_hex_show(name)\ 1633lpfc_vport_param_hex_show(name)\
1599lpfc_vport_param_init(name, defval, minval, maxval)\ 1634lpfc_vport_param_init(name, defval, minval, maxval)\
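[Note: an illustrative, abridged expansion of one converted macro after this change, using lpfc_use_msi (whose default is set later in this patch) as the example:]

	static uint lpfc_use_msi = 2;
	module_param(lpfc_use_msi, uint, 0);
	MODULE_PARM_DESC(lpfc_use_msi,
			 "Use Message Signaled Interrupts (1) or MSI-X (2), if possible");
	/* ...plus the generated lpfc_use_msi_show() and lpfc_use_msi_init()
	 * helpers, whose val parameters are now uint as well. */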
@@ -1614,7 +1649,8 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
1614static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); 1649static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
1615static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 1650static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
1616static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 1651static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
1617static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL); 1652static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
1653 lpfc_link_state_store);
1618static DEVICE_ATTR(option_rom_version, S_IRUGO, 1654static DEVICE_ATTR(option_rom_version, S_IRUGO,
1619 lpfc_option_rom_version_show, NULL); 1655 lpfc_option_rom_version_show, NULL);
1620static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 1656static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1897,6 +1933,15 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
1897 lpfc_enable_npiv_show, NULL); 1933 lpfc_enable_npiv_show, NULL);
1898 1934
1899/* 1935/*
1936# lpfc_suppress_link_up: Bring link up at initialization
1937# 0x0 = bring link up (issue MBX_INIT_LINK)
 1938# 0x1 = do NOT bring link up at initialization (MBX_INIT_LINK)
1939# 0x2 = never bring up link
1940# Default value is 0.
1941*/
1942LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
1943
1944/*
1900# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 1945# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
1901# until the timer expires. Value range is [0,255]. Default value is 30. 1946# until the timer expires. Value range is [0,255]. Default value is 30.
1902*/ 1947*/
@@ -3114,12 +3159,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
3114/* 3159/*
3115# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 3160# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
3116# support this feature 3161# support this feature
3117# 0 = MSI disabled (default) 3162# 0 = MSI disabled
3118# 1 = MSI enabled 3163# 1 = MSI enabled
3119# 2 = MSI-X enabled 3164# 2 = MSI-X enabled (default)
3120# Value range is [0,2]. Default value is 0. 3165# Value range is [0,2]. Default value is 2.
3121*/ 3166*/
3122LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " 3167LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
3123 "MSI-X (2), if possible"); 3168 "MSI-X (2), if possible");
3124 3169
3125/* 3170/*
@@ -3278,6 +3323,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3278 &dev_attr_lpfc_prot_sg_seg_cnt, 3323 &dev_attr_lpfc_prot_sg_seg_cnt,
3279 &dev_attr_lpfc_aer_support, 3324 &dev_attr_lpfc_aer_support,
3280 &dev_attr_lpfc_aer_state_cleanup, 3325 &dev_attr_lpfc_aer_state_cleanup,
3326 &dev_attr_lpfc_suppress_link_up,
3281 NULL, 3327 NULL,
3282}; 3328};
3283 3329
@@ -4456,7 +4502,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4456 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4502 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4457 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); 4503 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4458 lpfc_aer_support_init(phba, lpfc_aer_support); 4504 lpfc_aer_support_init(phba, lpfc_aer_support);
4459 4505 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
4460 return; 4506 return;
4461} 4507}
4462 4508
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a5d9048235d9..f3f1bf1a0a71 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/mempool.h> 22#include <linux/mempool.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/delay.h>
24 25
25#include <scsi/scsi.h> 26#include <scsi/scsi.h>
26#include <scsi/scsi_host.h> 27#include <scsi/scsi_host.h>
@@ -33,6 +34,7 @@
33#include "lpfc_sli.h" 34#include "lpfc_sli.h"
34#include "lpfc_sli4.h" 35#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 36#include "lpfc_nl.h"
37#include "lpfc_bsg.h"
36#include "lpfc_disc.h" 38#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
38#include "lpfc.h" 40#include "lpfc.h"
@@ -41,14 +43,183 @@
41#include "lpfc_vport.h" 43#include "lpfc_vport.h"
42#include "lpfc_version.h" 44#include "lpfc_version.h"
43 45
46struct lpfc_bsg_event {
47 struct list_head node;
48 struct kref kref;
49 wait_queue_head_t wq;
50
51 /* Event type and waiter identifiers */
52 uint32_t type_mask;
53 uint32_t req_id;
54 uint32_t reg_id;
55
56 /* next two flags are here for the auto-delete logic */
57 unsigned long wait_time_stamp;
58 int waiting;
59
60 /* seen and not seen events */
61 struct list_head events_to_get;
62 struct list_head events_to_see;
63
64 /* job waiting for this event to finish */
65 struct fc_bsg_job *set_job;
66};
67
68struct lpfc_bsg_iocb {
69 struct lpfc_iocbq *cmdiocbq;
70 struct lpfc_iocbq *rspiocbq;
71 struct lpfc_dmabuf *bmp;
72 struct lpfc_nodelist *ndlp;
73
74 /* job waiting for this iocb to finish */
75 struct fc_bsg_job *set_job;
76};
77
78struct lpfc_bsg_mbox {
79 LPFC_MBOXQ_t *pmboxq;
80 MAILBOX_t *mb;
81
82 /* job waiting for this mbox command to finish */
83 struct fc_bsg_job *set_job;
84};
85
86#define TYPE_EVT 1
87#define TYPE_IOCB 2
88#define TYPE_MBOX 3
89struct bsg_job_data {
90 uint32_t type;
91 union {
92 struct lpfc_bsg_event *evt;
93 struct lpfc_bsg_iocb iocb;
94 struct lpfc_bsg_mbox mbox;
95 } context_un;
96};
97
98struct event_data {
99 struct list_head node;
100 uint32_t type;
101 uint32_t immed_dat;
102 void *data;
103 uint32_t len;
104};
105
106#define BUF_SZ_4K 4096
107#define SLI_CT_ELX_LOOPBACK 0x10
108
109enum ELX_LOOPBACK_CMD {
110 ELX_LOOPBACK_XRI_SETUP,
111 ELX_LOOPBACK_DATA,
112};
113
114#define ELX_LOOPBACK_HEADER_SZ \
115 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
116
117struct lpfc_dmabufext {
118 struct lpfc_dmabuf dma;
119 uint32_t size;
120 uint32_t flag;
121};
122
123/**
124 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
125 * @phba: Pointer to HBA context object.
126 * @cmdiocbq: Pointer to command iocb.
127 * @rspiocbq: Pointer to response iocb.
128 *
129 * This function is the completion handler for iocbs issued using
130 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
131 * ring event handler function without any lock held. This function
132 * can be called from both worker thread context and interrupt
133 * context. This function also can be called from another thread which
134 * cleans up the SLI layer objects.
135 * This function copies the contents of the response iocb to the
136 * response iocb memory object provided by the caller of
137 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
138 * sleeps for the iocb completion.
139 **/
140static void
141lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
142 struct lpfc_iocbq *cmdiocbq,
143 struct lpfc_iocbq *rspiocbq)
144{
145 unsigned long iflags;
146 struct bsg_job_data *dd_data;
147 struct fc_bsg_job *job;
148 IOCB_t *rsp;
149 struct lpfc_dmabuf *bmp;
150 struct lpfc_nodelist *ndlp;
151 struct lpfc_bsg_iocb *iocb;
152 unsigned long flags;
153 int rc = 0;
154
155 spin_lock_irqsave(&phba->ct_ev_lock, flags);
156 dd_data = cmdiocbq->context1;
157 if (!dd_data) {
158 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
159 return;
160 }
161
162 iocb = &dd_data->context_un.iocb;
163 job = iocb->set_job;
164 job->dd_data = NULL; /* so timeout handler does not reply */
165
166 spin_lock_irqsave(&phba->hbalock, iflags);
167 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
168 if (cmdiocbq->context2 && rspiocbq)
169 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
170 &rspiocbq->iocb, sizeof(IOCB_t));
171 spin_unlock_irqrestore(&phba->hbalock, iflags);
172
173 bmp = iocb->bmp;
174 rspiocbq = iocb->rspiocbq;
175 rsp = &rspiocbq->iocb;
176 ndlp = iocb->ndlp;
177
178 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
179 job->request_payload.sg_cnt, DMA_TO_DEVICE);
180 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
181 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
182
183 if (rsp->ulpStatus) {
184 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
185 switch (rsp->un.ulpWord[4] & 0xff) {
186 case IOERR_SEQUENCE_TIMEOUT:
187 rc = -ETIMEDOUT;
188 break;
189 case IOERR_INVALID_RPI:
190 rc = -EFAULT;
191 break;
192 default:
193 rc = -EACCES;
194 break;
195 }
196 } else
197 rc = -EACCES;
198 } else
199 job->reply->reply_payload_rcv_len =
200 rsp->un.genreq64.bdl.bdeSize;
201
202 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
203 lpfc_sli_release_iocbq(phba, rspiocbq);
204 lpfc_sli_release_iocbq(phba, cmdiocbq);
205 lpfc_nlp_put(ndlp);
206 kfree(bmp);
207 kfree(dd_data);
208 /* make error code available to userspace */
209 job->reply->result = rc;
210 /* complete the job back to userspace */
211 job->job_done(job);
212 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
213 return;
214}
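[Note: a condensed view, using only names visible in this patch, of how the submit path, this completion handler, and the job timeout handler coordinate through dd_data:]

	/* submit:     cmdiocbq->context1 = dd_data, with job->dd_data pointing
	 *             at it as well; issue the iocb and return 0 ("done for now").
	 * completion: under ct_ev_lock, read cmdiocbq->context1, then set
	 *             job->dd_data = NULL so a late timeout will not reply
	 *             twice; free resources, set job->reply->result and call
	 *             job->job_done(job).
	 * timeout:    if job->dd_data is already NULL, the completion won the
	 *             race and there is nothing left to do. */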
215
44/** 216/**
45 * lpfc_bsg_rport_ct - send a CT command from a bsg request 217 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
46 * @job: fc_bsg_job to handle 218 * @job: fc_bsg_job to handle
47 */ 219 **/
48static int 220static int
49lpfc_bsg_rport_ct(struct fc_bsg_job *job) 221lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
50{ 222{
51 struct Scsi_Host *shost = job->shost;
52 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 223 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
53 struct lpfc_hba *phba = vport->phba; 224 struct lpfc_hba *phba = vport->phba;
54 struct lpfc_rport_data *rdata = job->rport->dd_data; 225 struct lpfc_rport_data *rdata = job->rport->dd_data;
@@ -65,57 +236,60 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
65 struct scatterlist *sgel = NULL; 236 struct scatterlist *sgel = NULL;
66 int numbde; 237 int numbde;
67 dma_addr_t busaddr; 238 dma_addr_t busaddr;
239 struct bsg_job_data *dd_data;
240 uint32_t creg_val;
68 int rc = 0; 241 int rc = 0;
69 242
70 /* in case no data is transferred */ 243 /* in case no data is transferred */
71 job->reply->reply_payload_rcv_len = 0; 244 job->reply->reply_payload_rcv_len = 0;
72 245
246 /* allocate our bsg tracking structure */
247 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
248 if (!dd_data) {
249 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
250 "2733 Failed allocation of dd_data\n");
251 rc = -ENOMEM;
252 goto no_dd_data;
253 }
254
73 if (!lpfc_nlp_get(ndlp)) { 255 if (!lpfc_nlp_get(ndlp)) {
74 job->reply->result = -ENODEV; 256 rc = -ENODEV;
75 return 0; 257 goto no_ndlp;
258 }
259
260 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
261 if (!bmp) {
262 rc = -ENOMEM;
263 goto free_ndlp;
76 } 264 }
77 265
78 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { 266 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
79 rc = -ENODEV; 267 rc = -ENODEV;
80 goto free_ndlp_exit; 268 goto free_bmp;
81 } 269 }
82 270
83 spin_lock_irq(shost->host_lock);
84 cmdiocbq = lpfc_sli_get_iocbq(phba); 271 cmdiocbq = lpfc_sli_get_iocbq(phba);
85 if (!cmdiocbq) { 272 if (!cmdiocbq) {
86 rc = -ENOMEM; 273 rc = -ENOMEM;
87 spin_unlock_irq(shost->host_lock); 274 goto free_bmp;
88 goto free_ndlp_exit;
89 } 275 }
90 cmd = &cmdiocbq->iocb;
91 276
277 cmd = &cmdiocbq->iocb;
92 rspiocbq = lpfc_sli_get_iocbq(phba); 278 rspiocbq = lpfc_sli_get_iocbq(phba);
93 if (!rspiocbq) { 279 if (!rspiocbq) {
94 rc = -ENOMEM; 280 rc = -ENOMEM;
95 goto free_cmdiocbq; 281 goto free_cmdiocbq;
96 } 282 }
97 spin_unlock_irq(shost->host_lock);
98 283
99 rsp = &rspiocbq->iocb; 284 rsp = &rspiocbq->iocb;
100
101 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
102 if (!bmp) {
103 rc = -ENOMEM;
104 spin_lock_irq(shost->host_lock);
105 goto free_rspiocbq;
106 }
107
108 spin_lock_irq(shost->host_lock);
109 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 285 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
110 if (!bmp->virt) { 286 if (!bmp->virt) {
111 rc = -ENOMEM; 287 rc = -ENOMEM;
112 goto free_bmp; 288 goto free_rspiocbq;
113 } 289 }
114 spin_unlock_irq(shost->host_lock);
115 290
116 INIT_LIST_HEAD(&bmp->list); 291 INIT_LIST_HEAD(&bmp->list);
117 bpl = (struct ulp_bde64 *) bmp->virt; 292 bpl = (struct ulp_bde64 *) bmp->virt;
118
119 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 293 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
120 job->request_payload.sg_cnt, DMA_TO_DEVICE); 294 job->request_payload.sg_cnt, DMA_TO_DEVICE);
121 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 295 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
@@ -157,78 +331,152 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
157 cmd->ulpContext = ndlp->nlp_rpi; 331 cmd->ulpContext = ndlp->nlp_rpi;
158 cmd->ulpOwner = OWN_CHIP; 332 cmd->ulpOwner = OWN_CHIP;
159 cmdiocbq->vport = phba->pport; 333 cmdiocbq->vport = phba->pport;
160 cmdiocbq->context1 = NULL; 334 cmdiocbq->context3 = bmp;
161 cmdiocbq->context2 = NULL;
162 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 335 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
163
164 timeout = phba->fc_ratov * 2; 336 timeout = phba->fc_ratov * 2;
165 job->dd_data = cmdiocbq; 337 cmd->ulpTimeout = timeout;
166 338
167 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, 339 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
168 timeout + LPFC_DRVR_TIMEOUT); 340 cmdiocbq->context1 = dd_data;
169 341 cmdiocbq->context2 = rspiocbq;
170 if (rc != IOCB_TIMEDOUT) { 342 dd_data->type = TYPE_IOCB;
171 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 343 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
172 job->request_payload.sg_cnt, DMA_TO_DEVICE); 344 dd_data->context_un.iocb.rspiocbq = rspiocbq;
173 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 345 dd_data->context_un.iocb.set_job = job;
174 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 346 dd_data->context_un.iocb.bmp = bmp;
347 dd_data->context_un.iocb.ndlp = ndlp;
348
349 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
350 creg_val = readl(phba->HCregaddr);
351 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
352 writel(creg_val, phba->HCregaddr);
353 readl(phba->HCregaddr); /* flush */
175 } 354 }
176 355
177 if (rc == IOCB_TIMEDOUT) { 356 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
178 lpfc_sli_release_iocbq(phba, rspiocbq);
179 rc = -EACCES;
180 goto free_ndlp_exit;
181 }
182 357
183 if (rc != IOCB_SUCCESS) { 358 if (rc == IOCB_SUCCESS)
184 rc = -EACCES; 359 return 0; /* done for now */
185 goto free_outdmp;
186 }
187 360
188 if (rsp->ulpStatus) { 361 /* iocb failed so cleanup */
189 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 362 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
190 switch (rsp->un.ulpWord[4] & 0xff) { 363 job->request_payload.sg_cnt, DMA_TO_DEVICE);
191 case IOERR_SEQUENCE_TIMEOUT: 364 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
192 rc = -ETIMEDOUT; 365 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
193 break;
194 case IOERR_INVALID_RPI:
195 rc = -EFAULT;
196 break;
197 default:
198 rc = -EACCES;
199 break;
200 }
201 goto free_outdmp;
202 }
203 } else
204 job->reply->reply_payload_rcv_len =
205 rsp->un.genreq64.bdl.bdeSize;
206 366
207free_outdmp:
208 spin_lock_irq(shost->host_lock);
209 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 367 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
210free_bmp: 368
211 kfree(bmp);
212free_rspiocbq: 369free_rspiocbq:
213 lpfc_sli_release_iocbq(phba, rspiocbq); 370 lpfc_sli_release_iocbq(phba, rspiocbq);
214free_cmdiocbq: 371free_cmdiocbq:
215 lpfc_sli_release_iocbq(phba, cmdiocbq); 372 lpfc_sli_release_iocbq(phba, cmdiocbq);
216 spin_unlock_irq(shost->host_lock); 373free_bmp:
217free_ndlp_exit: 374 kfree(bmp);
375free_ndlp:
218 lpfc_nlp_put(ndlp); 376 lpfc_nlp_put(ndlp);
377no_ndlp:
378 kfree(dd_data);
379no_dd_data:
380 /* make error code available to userspace */
381 job->reply->result = rc;
382 job->dd_data = NULL;
383 return rc;
384}
385
386/**
387 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
388 * @phba: Pointer to HBA context object.
389 * @cmdiocbq: Pointer to command iocb.
390 * @rspiocbq: Pointer to response iocb.
391 *
392 * This function is the completion handler for iocbs issued using
393 * lpfc_bsg_rport_els_cmp function. This function is called by the
394 * ring event handler function without any lock held. This function
395 * can be called from both worker thread context and interrupt
 396 * context. This function also can be called from another thread which
397 * cleans up the SLI layer objects.
398 * This function copies the contents of the response iocb to the
399 * response iocb memory object provided by the caller of
400 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
401 * sleeps for the iocb completion.
402 **/
403static void
404lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
405 struct lpfc_iocbq *cmdiocbq,
406 struct lpfc_iocbq *rspiocbq)
407{
408 struct bsg_job_data *dd_data;
409 struct fc_bsg_job *job;
410 IOCB_t *rsp;
411 struct lpfc_nodelist *ndlp;
412 struct lpfc_dmabuf *pbuflist = NULL;
413 struct fc_bsg_ctels_reply *els_reply;
414 uint8_t *rjt_data;
415 unsigned long flags;
416 int rc = 0;
417
418 spin_lock_irqsave(&phba->ct_ev_lock, flags);
419 dd_data = cmdiocbq->context1;
420 /* normal completion and timeout crossed paths, already done */
421 if (!dd_data) {
 422 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
423 return;
424 }
425
426 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
427 if (cmdiocbq->context2 && rspiocbq)
428 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
429 &rspiocbq->iocb, sizeof(IOCB_t));
430
431 job = dd_data->context_un.iocb.set_job;
432 cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
433 rspiocbq = dd_data->context_un.iocb.rspiocbq;
434 rsp = &rspiocbq->iocb;
435 ndlp = dd_data->context_un.iocb.ndlp;
436
437 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
438 job->request_payload.sg_cnt, DMA_TO_DEVICE);
439 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
440 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
219 441
442 if (job->reply->result == -EAGAIN)
443 rc = -EAGAIN;
444 else if (rsp->ulpStatus == IOSTAT_SUCCESS)
445 job->reply->reply_payload_rcv_len =
446 rsp->un.elsreq64.bdl.bdeSize;
447 else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
448 job->reply->reply_payload_rcv_len =
449 sizeof(struct fc_bsg_ctels_reply);
450 /* LS_RJT data returned in word 4 */
451 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
452 els_reply = &job->reply->reply_data.ctels_reply;
453 els_reply->status = FC_CTELS_STATUS_REJECT;
454 els_reply->rjt_data.action = rjt_data[3];
455 els_reply->rjt_data.reason_code = rjt_data[2];
456 els_reply->rjt_data.reason_explanation = rjt_data[1];
457 els_reply->rjt_data.vendor_unique = rjt_data[0];
458 } else
459 rc = -EIO;
460
461 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
462 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
463 lpfc_sli_release_iocbq(phba, rspiocbq);
464 lpfc_sli_release_iocbq(phba, cmdiocbq);
465 lpfc_nlp_put(ndlp);
466 kfree(dd_data);
220 /* make error code available to userspace */ 467 /* make error code available to userspace */
221 job->reply->result = rc; 468 job->reply->result = rc;
469 job->dd_data = NULL;
222 /* complete the job back to userspace */ 470 /* complete the job back to userspace */
223 job->job_done(job); 471 job->job_done(job);
224 472 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
225 return 0; 473 return;
226} 474}
227 475
228/** 476/**
229 * lpfc_bsg_rport_els - send an ELS command from a bsg request 477 * lpfc_bsg_rport_els - send an ELS command from a bsg request
230 * @job: fc_bsg_job to handle 478 * @job: fc_bsg_job to handle
231 */ 479 **/
232static int 480static int
233lpfc_bsg_rport_els(struct fc_bsg_job *job) 481lpfc_bsg_rport_els(struct fc_bsg_job *job)
234{ 482{
@@ -236,7 +484,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
236 struct lpfc_hba *phba = vport->phba; 484 struct lpfc_hba *phba = vport->phba;
237 struct lpfc_rport_data *rdata = job->rport->dd_data; 485 struct lpfc_rport_data *rdata = job->rport->dd_data;
238 struct lpfc_nodelist *ndlp = rdata->pnode; 486 struct lpfc_nodelist *ndlp = rdata->pnode;
239
240 uint32_t elscmd; 487 uint32_t elscmd;
241 uint32_t cmdsize; 488 uint32_t cmdsize;
242 uint32_t rspsize; 489 uint32_t rspsize;
@@ -248,20 +495,30 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
248 struct lpfc_dmabuf *prsp; 495 struct lpfc_dmabuf *prsp;
249 struct lpfc_dmabuf *pbuflist = NULL; 496 struct lpfc_dmabuf *pbuflist = NULL;
250 struct ulp_bde64 *bpl; 497 struct ulp_bde64 *bpl;
251 int iocb_status;
252 int request_nseg; 498 int request_nseg;
253 int reply_nseg; 499 int reply_nseg;
254 struct scatterlist *sgel = NULL; 500 struct scatterlist *sgel = NULL;
255 int numbde; 501 int numbde;
256 dma_addr_t busaddr; 502 dma_addr_t busaddr;
503 struct bsg_job_data *dd_data;
504 uint32_t creg_val;
257 int rc = 0; 505 int rc = 0;
258 506
259 /* in case no data is transferred */ 507 /* in case no data is transferred */
260 job->reply->reply_payload_rcv_len = 0; 508 job->reply->reply_payload_rcv_len = 0;
261 509
510 /* allocate our bsg tracking structure */
511 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
512 if (!dd_data) {
513 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
514 "2735 Failed allocation of dd_data\n");
515 rc = -ENOMEM;
516 goto no_dd_data;
517 }
518
262 if (!lpfc_nlp_get(ndlp)) { 519 if (!lpfc_nlp_get(ndlp)) {
263 rc = -ENODEV; 520 rc = -ENODEV;
264 goto out; 521 goto free_dd_data;
265 } 522 }
266 523
267 elscmd = job->request->rqst_data.r_els.els_code; 524 elscmd = job->request->rqst_data.r_els.els_code;
@@ -271,24 +528,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
271 if (!rspiocbq) { 528 if (!rspiocbq) {
272 lpfc_nlp_put(ndlp); 529 lpfc_nlp_put(ndlp);
273 rc = -ENOMEM; 530 rc = -ENOMEM;
274 goto out; 531 goto free_dd_data;
275 } 532 }
276 533
277 rsp = &rspiocbq->iocb; 534 rsp = &rspiocbq->iocb;
278 rpi = ndlp->nlp_rpi; 535 rpi = ndlp->nlp_rpi;
279 536
280 cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp, 537 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
281 ndlp->nlp_DID, elscmd); 538 ndlp->nlp_DID, elscmd);
282
283 if (!cmdiocbq) { 539 if (!cmdiocbq) {
284 lpfc_sli_release_iocbq(phba, rspiocbq); 540 rc = -EIO;
285 return -EIO; 541 goto free_rspiocbq;
286 } 542 }
287 543
288 job->dd_data = cmdiocbq; 544 /* prep els iocb set context1 to the ndlp, context2 to the command
545 * dmabuf, context3 holds the data dmabuf
546 */
289 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; 547 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
290 prsp = (struct lpfc_dmabuf *) pcmd->list.next; 548 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
291
292 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 549 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
293 kfree(pcmd); 550 kfree(pcmd);
294 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 551 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
@@ -300,7 +557,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
300 557
301 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 558 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
302 job->request_payload.sg_cnt, DMA_TO_DEVICE); 559 job->request_payload.sg_cnt, DMA_TO_DEVICE);
303
304 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 560 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
305 busaddr = sg_dma_address(sgel); 561 busaddr = sg_dma_address(sgel);
306 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 562 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
@@ -322,7 +578,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
322 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 578 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
323 bpl++; 579 bpl++;
324 } 580 }
325
326 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = 581 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
327 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 582 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
328 cmdiocbq->iocb.ulpContext = rpi; 583 cmdiocbq->iocb.ulpContext = rpi;
@@ -330,102 +585,62 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
330 cmdiocbq->context1 = NULL; 585 cmdiocbq->context1 = NULL;
331 cmdiocbq->context2 = NULL; 586 cmdiocbq->context2 = NULL;
332 587
333 iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 588 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
334 rspiocbq, (phba->fc_ratov * 2) 589 cmdiocbq->context1 = dd_data;
335 + LPFC_DRVR_TIMEOUT); 590 cmdiocbq->context2 = rspiocbq;
336 591 dd_data->type = TYPE_IOCB;
337 /* release the new ndlp once the iocb completes */ 592 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
338 lpfc_nlp_put(ndlp); 593 dd_data->context_un.iocb.rspiocbq = rspiocbq;
339 if (iocb_status != IOCB_TIMEDOUT) { 594 dd_data->context_un.iocb.set_job = job;
340 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 595 dd_data->context_un.iocb.bmp = NULL;
341 job->request_payload.sg_cnt, DMA_TO_DEVICE); 596 dd_data->context_un.iocb.ndlp = ndlp;
342 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 597
343 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 598 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
599 creg_val = readl(phba->HCregaddr);
600 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
601 writel(creg_val, phba->HCregaddr);
602 readl(phba->HCregaddr); /* flush */
344 } 603 }
604 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
605 lpfc_nlp_put(ndlp);
606 if (rc == IOCB_SUCCESS)
607 return 0; /* done for now */
345 608
346 if (iocb_status == IOCB_SUCCESS) { 609 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
347 if (rsp->ulpStatus == IOSTAT_SUCCESS) { 610 job->request_payload.sg_cnt, DMA_TO_DEVICE);
348 job->reply->reply_payload_rcv_len = 611 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
349 rsp->un.elsreq64.bdl.bdeSize; 612 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
350 rc = 0; 613
351 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { 614 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
352 struct fc_bsg_ctels_reply *els_reply;
353 /* LS_RJT data returned in word 4 */
354 uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
355
356 els_reply = &job->reply->reply_data.ctels_reply;
357 job->reply->result = 0;
358 els_reply->status = FC_CTELS_STATUS_REJECT;
359 els_reply->rjt_data.action = rjt_data[0];
360 els_reply->rjt_data.reason_code = rjt_data[1];
361 els_reply->rjt_data.reason_explanation = rjt_data[2];
362 els_reply->rjt_data.vendor_unique = rjt_data[3];
363 } else
364 rc = -EIO;
365 } else
366 rc = -EIO;
367 615
368 if (iocb_status != IOCB_TIMEDOUT) 616 lpfc_sli_release_iocbq(phba, cmdiocbq);
369 lpfc_els_free_iocb(phba, cmdiocbq);
370 617
618free_rspiocbq:
371 lpfc_sli_release_iocbq(phba, rspiocbq); 619 lpfc_sli_release_iocbq(phba, rspiocbq);
372 620
373out: 621free_dd_data:
622 kfree(dd_data);
623
624no_dd_data:
374 /* make error code available to userspace */ 625 /* make error code available to userspace */
375 job->reply->result = rc; 626 job->reply->result = rc;
376 /* complete the job back to userspace */ 627 job->dd_data = NULL;
377 job->job_done(job); 628 return rc;
378
379 return 0;
380}
381
382struct lpfc_ct_event {
383 struct list_head node;
384 int ref;
385 wait_queue_head_t wq;
386
387 /* Event type and waiter identifiers */
388 uint32_t type_mask;
389 uint32_t req_id;
390 uint32_t reg_id;
391
392 /* next two flags are here for the auto-delete logic */
393 unsigned long wait_time_stamp;
394 int waiting;
395
396 /* seen and not seen events */
397 struct list_head events_to_get;
398 struct list_head events_to_see;
399};
400
401struct event_data {
402 struct list_head node;
403 uint32_t type;
404 uint32_t immed_dat;
405 void *data;
406 uint32_t len;
407};
408
409static struct lpfc_ct_event *
410lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
411{
412 struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
413 if (!evt)
414 return NULL;
415
416 INIT_LIST_HEAD(&evt->events_to_get);
417 INIT_LIST_HEAD(&evt->events_to_see);
418 evt->req_id = ev_req_id;
419 evt->reg_id = ev_reg_id;
420 evt->wait_time_stamp = jiffies;
421 init_waitqueue_head(&evt->wq);
422
423 return evt;
424} 629}
425 630
631/**
632 * lpfc_bsg_event_free - frees an allocated event structure
633 * @kref: Pointer to a kref.
634 *
635 * Called from kref_put. Back cast the kref into an event structure address.
636 * Free any events to get, delete associated nodes, free any events to see,
637 * free any data then free the event itself.
638 **/
426static void 639static void
427lpfc_ct_event_free(struct lpfc_ct_event *evt) 640lpfc_bsg_event_free(struct kref *kref)
428{ 641{
642 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
643 kref);
429 struct event_data *ed; 644 struct event_data *ed;
430 645
431 list_del(&evt->node); 646 list_del(&evt->node);
@@ -447,25 +662,82 @@ lpfc_ct_event_free(struct lpfc_ct_event *evt)
447 kfree(evt); 662 kfree(evt);
448} 663}
449 664
665/**
666 * lpfc_bsg_event_ref - increments the kref for an event
667 * @evt: Pointer to an event structure.
668 **/
450static inline void 669static inline void
451lpfc_ct_event_ref(struct lpfc_ct_event *evt) 670lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
452{ 671{
453 evt->ref++; 672 kref_get(&evt->kref);
454} 673}
455 674
675/**
676 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
677 * @evt: Pointer to an event structure.
678 **/
456static inline void 679static inline void
457lpfc_ct_event_unref(struct lpfc_ct_event *evt) 680lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
458{ 681{
459 if (--evt->ref < 0) 682 kref_put(&evt->kref, lpfc_bsg_event_free);
460 lpfc_ct_event_free(evt);
461} 683}
462 684
463#define SLI_CT_ELX_LOOPBACK 0x10 685/**
686 * lpfc_bsg_event_new - allocate and initialize a event structure
687 * @ev_mask: Mask of events.
688 * @ev_reg_id: Event reg id.
689 * @ev_req_id: Event request id.
690 **/
691static struct lpfc_bsg_event *
692lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
693{
694 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
464 695
465enum ELX_LOOPBACK_CMD { 696 if (!evt)
466 ELX_LOOPBACK_XRI_SETUP, 697 return NULL;
467 ELX_LOOPBACK_DATA, 698
468}; 699 INIT_LIST_HEAD(&evt->events_to_get);
700 INIT_LIST_HEAD(&evt->events_to_see);
701 evt->type_mask = ev_mask;
702 evt->req_id = ev_req_id;
703 evt->reg_id = ev_reg_id;
704 evt->wait_time_stamp = jiffies;
705 init_waitqueue_head(&evt->wq);
706 kref_init(&evt->kref);
707 return evt;
708}
709
710/**
711 * diag_cmd_data_free - Frees an lpfc dma buffer extension
712 * @phba: Pointer to HBA context object.
713 * @mlist: Pointer to an lpfc dma buffer extension.
714 **/
715static int
716diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
717{
718 struct lpfc_dmabufext *mlast;
719 struct pci_dev *pcidev;
720 struct list_head head, *curr, *next;
721
722 if ((!mlist) || (!lpfc_is_link_up(phba) &&
723 (phba->link_flag & LS_LOOPBACK_MODE))) {
724 return 0;
725 }
726
727 pcidev = phba->pcidev;
728 list_add_tail(&head, &mlist->dma.list);
729
730 list_for_each_safe(curr, next, &head) {
731 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
732 if (mlast->dma.virt)
733 dma_free_coherent(&pcidev->dev,
734 mlast->size,
735 mlast->dma.virt,
736 mlast->dma.phys);
737 kfree(mlast);
738 }
739 return 0;
740}
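
diag_cmd_data_free() walks the chain with an on-stack list_head spliced into the existing circular list, so the walk covers every element, including mlist itself. The idiom in isolation (a sketch, names hypothetical):

    struct list_head head, *curr, *next;

    /* splice the local sentinel into the ring that mlist belongs to */
    list_add_tail(&head, &mlist->dma.list);
    list_for_each_safe(curr, next, &head) {
            /* 'curr' may be freed here; 'next' was sampled beforehand */
    }
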
469 741
470/** 742/**
471 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command 743 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
@@ -474,9 +746,9 @@ enum ELX_LOOPBACK_CMD {
474 * @piocbq: 746 * @piocbq:
475 * 747 *
476 * This function is called when an unsolicited CT command is received. It 748 * This function is called when an unsolicited CT command is received. It
477 * forwards the event to any processes registerd to receive CT events. 749 * forwards the event to any processes registered to receive CT events.
478 */ 750 **/
479void 751int
480lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 752lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
481 struct lpfc_iocbq *piocbq) 753 struct lpfc_iocbq *piocbq)
482{ 754{
@@ -484,7 +756,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
484 uint32_t cmd; 756 uint32_t cmd;
485 uint32_t len; 757 uint32_t len;
486 struct lpfc_dmabuf *dmabuf = NULL; 758 struct lpfc_dmabuf *dmabuf = NULL;
487 struct lpfc_ct_event *evt; 759 struct lpfc_bsg_event *evt;
488 struct event_data *evt_dat = NULL; 760 struct event_data *evt_dat = NULL;
489 struct lpfc_iocbq *iocbq; 761 struct lpfc_iocbq *iocbq;
490 size_t offset = 0; 762 size_t offset = 0;
@@ -496,6 +768,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
496 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; 768 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
497 struct lpfc_hbq_entry *hbqe; 769 struct lpfc_hbq_entry *hbqe;
498 struct lpfc_sli_ct_request *ct_req; 770 struct lpfc_sli_ct_request *ct_req;
771 struct fc_bsg_job *job = NULL;
772 unsigned long flags;
773 int size = 0;
499 774
500 INIT_LIST_HEAD(&head); 775 INIT_LIST_HEAD(&head);
501 list_add_tail(&head, &piocbq->list); 776 list_add_tail(&head, &piocbq->list);
@@ -504,6 +779,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
504 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) 779 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
505 goto error_ct_unsol_exit; 780 goto error_ct_unsol_exit;
506 781
782 if (phba->link_state == LPFC_HBA_ERROR ||
783 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
784 goto error_ct_unsol_exit;
785
507 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 786 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
508 dmabuf = bdeBuf1; 787 dmabuf = bdeBuf1;
509 else { 788 else {
@@ -511,7 +790,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
511 piocbq->iocb.un.cont64[0].addrLow); 790 piocbq->iocb.un.cont64[0].addrLow);
512 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); 791 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
513 } 792 }
514 793 if (dmabuf == NULL)
794 goto error_ct_unsol_exit;
515 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; 795 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
516 evt_req_id = ct_req->FsType; 796 evt_req_id = ct_req->FsType;
517 cmd = ct_req->CommandResponse.bits.CmdRsp; 797 cmd = ct_req->CommandResponse.bits.CmdRsp;
@@ -519,24 +799,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
519 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 799 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
520 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); 800 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
521 801
522 mutex_lock(&phba->ct_event_mutex); 802 spin_lock_irqsave(&phba->ct_ev_lock, flags);
523 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 803 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
524 if (evt->req_id != evt_req_id) 804 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
805 evt->req_id != evt_req_id)
525 continue; 806 continue;
526 807
527 lpfc_ct_event_ref(evt); 808 lpfc_bsg_event_ref(evt);
528 809 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
529 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); 810 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
530 if (!evt_dat) { 811 if (evt_dat == NULL) {
531 lpfc_ct_event_unref(evt); 812 spin_lock_irqsave(&phba->ct_ev_lock, flags);
813 lpfc_bsg_event_unref(evt);
532 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 814 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
533 "2614 Memory allocation failed for " 815 "2614 Memory allocation failed for "
534 "CT event\n"); 816 "CT event\n");
535 break; 817 break;
536 } 818 }
537 819
538 mutex_unlock(&phba->ct_event_mutex);
539
540 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 820 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
541 /* take accumulated byte count from the last iocbq */ 821 /* take accumulated byte count from the last iocbq */
542 iocbq = list_entry(head.prev, typeof(*iocbq), list); 822 iocbq = list_entry(head.prev, typeof(*iocbq), list);
@@ -550,25 +830,25 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
550 } 830 }
551 831
552 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); 832 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
553 if (!evt_dat->data) { 833 if (evt_dat->data == NULL) {
554 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 834 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
555 "2615 Memory allocation failed for " 835 "2615 Memory allocation failed for "
556 "CT event data, size %d\n", 836 "CT event data, size %d\n",
557 evt_dat->len); 837 evt_dat->len);
558 kfree(evt_dat); 838 kfree(evt_dat);
559 mutex_lock(&phba->ct_event_mutex); 839 spin_lock_irqsave(&phba->ct_ev_lock, flags);
560 lpfc_ct_event_unref(evt); 840 lpfc_bsg_event_unref(evt);
561 mutex_unlock(&phba->ct_event_mutex); 841 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
562 goto error_ct_unsol_exit; 842 goto error_ct_unsol_exit;
563 } 843 }
564 844
565 list_for_each_entry(iocbq, &head, list) { 845 list_for_each_entry(iocbq, &head, list) {
846 size = 0;
566 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 847 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
567 bdeBuf1 = iocbq->context2; 848 bdeBuf1 = iocbq->context2;
568 bdeBuf2 = iocbq->context3; 849 bdeBuf2 = iocbq->context3;
569 } 850 }
570 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { 851 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
571 int size = 0;
572 if (phba->sli3_options & 852 if (phba->sli3_options &
573 LPFC_SLI3_HBQ_ENABLED) { 853 LPFC_SLI3_HBQ_ENABLED) {
574 if (i == 0) { 854 if (i == 0) {
@@ -601,9 +881,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
601 iocbq); 881 iocbq);
602 kfree(evt_dat->data); 882 kfree(evt_dat->data);
603 kfree(evt_dat); 883 kfree(evt_dat);
604 mutex_lock(&phba->ct_event_mutex); 884 spin_lock_irqsave(&phba->ct_ev_lock,
605 lpfc_ct_event_unref(evt); 885 flags);
606 mutex_unlock(&phba->ct_event_mutex); 886 lpfc_bsg_event_unref(evt);
887 spin_unlock_irqrestore(
888 &phba->ct_ev_lock, flags);
607 goto error_ct_unsol_exit; 889 goto error_ct_unsol_exit;
608 } 890 }
609 memcpy((char *)(evt_dat->data) + offset, 891 memcpy((char *)(evt_dat->data) + offset,
@@ -616,15 +898,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
616 dmabuf); 898 dmabuf);
617 } else { 899 } else {
618 switch (cmd) { 900 switch (cmd) {
901 case ELX_LOOPBACK_DATA:
902 diag_cmd_data_free(phba,
903 (struct lpfc_dmabufext *)
904 dmabuf);
905 break;
619 case ELX_LOOPBACK_XRI_SETUP: 906 case ELX_LOOPBACK_XRI_SETUP:
620 if (!(phba->sli3_options & 907 if ((phba->sli_rev ==
621 LPFC_SLI3_HBQ_ENABLED)) 908 LPFC_SLI_REV2) ||
909 (phba->sli3_options &
910 LPFC_SLI3_HBQ_ENABLED
911 )) {
912 lpfc_in_buf_free(phba,
913 dmabuf);
914 } else {
622 lpfc_post_buffer(phba, 915 lpfc_post_buffer(phba,
623 pring, 916 pring,
624 1); 917 1);
625 else 918 }
626 lpfc_in_buf_free(phba,
627 dmabuf);
628 break; 919 break;
629 default: 920 default:
630 if (!(phba->sli3_options & 921 if (!(phba->sli3_options &
@@ -638,7 +929,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
638 } 929 }
639 } 930 }
640 931
641 mutex_lock(&phba->ct_event_mutex); 932 spin_lock_irqsave(&phba->ct_ev_lock, flags);
642 if (phba->sli_rev == LPFC_SLI_REV4) { 933 if (phba->sli_rev == LPFC_SLI_REV4) {
643 evt_dat->immed_dat = phba->ctx_idx; 934 evt_dat->immed_dat = phba->ctx_idx;
644 phba->ctx_idx = (phba->ctx_idx + 1) % 64; 935 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
@@ -651,122 +942,144 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
651 942
652 evt_dat->type = FC_REG_CT_EVENT; 943 evt_dat->type = FC_REG_CT_EVENT;
653 list_add(&evt_dat->node, &evt->events_to_see); 944 list_add(&evt_dat->node, &evt->events_to_see);
654 wake_up_interruptible(&evt->wq); 945 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
655 lpfc_ct_event_unref(evt); 946 wake_up_interruptible(&evt->wq);
656 if (evt_req_id == SLI_CT_ELX_LOOPBACK) 947 lpfc_bsg_event_unref(evt);
657 break; 948 break;
949 }
950
951 list_move(evt->events_to_see.prev, &evt->events_to_get);
952 lpfc_bsg_event_unref(evt);
953
954 job = evt->set_job;
955 evt->set_job = NULL;
956 if (job) {
957 job->reply->reply_payload_rcv_len = size;
958 /* make error code available to userspace */
959 job->reply->result = 0;
960 job->dd_data = NULL;
961 /* complete the job back to userspace */
962 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
963 job->job_done(job);
964 spin_lock_irqsave(&phba->ct_ev_lock, flags);
965 }
658 } 966 }
659 mutex_unlock(&phba->ct_event_mutex); 967 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
660 968
661error_ct_unsol_exit: 969error_ct_unsol_exit:
662 if (!list_empty(&head)) 970 if (!list_empty(&head))
663 list_del(&head); 971 list_del(&head);
664 972 if (evt_req_id == SLI_CT_ELX_LOOPBACK)
665 return; 973 return 0;
974 return 1;
666} 975}
667 976
668/** 977/**
669 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command 978 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
670 * @job: SET_EVENT fc_bsg_job 979 * @job: SET_EVENT fc_bsg_job
671 */ 980 **/
672static int 981static int
673lpfc_bsg_set_event(struct fc_bsg_job *job) 982lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
674{ 983{
675 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 984 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
676 struct lpfc_hba *phba = vport->phba; 985 struct lpfc_hba *phba = vport->phba;
677 struct set_ct_event *event_req; 986 struct set_ct_event *event_req;
678 struct lpfc_ct_event *evt; 987 struct lpfc_bsg_event *evt;
679 int rc = 0; 988 int rc = 0;
989 struct bsg_job_data *dd_data = NULL;
990 uint32_t ev_mask;
991 unsigned long flags;
680 992
681 if (job->request_len < 993 if (job->request_len <
682 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { 994 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
683 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 995 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
684 "2612 Received SET_CT_EVENT below minimum " 996 "2612 Received SET_CT_EVENT below minimum "
685 "size\n"); 997 "size\n");
686 return -EINVAL; 998 rc = -EINVAL;
999 goto job_error;
1000 }
1001
1002 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1003 if (dd_data == NULL) {
1004 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1005 "2734 Failed allocation of dd_data\n");
1006 rc = -ENOMEM;
1007 goto job_error;
687 } 1008 }
688 1009
689 event_req = (struct set_ct_event *) 1010 event_req = (struct set_ct_event *)
690 job->request->rqst_data.h_vendor.vendor_cmd; 1011 job->request->rqst_data.h_vendor.vendor_cmd;
691 1012 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
692 mutex_lock(&phba->ct_event_mutex); 1013 FC_REG_EVENT_MASK);
1014 spin_lock_irqsave(&phba->ct_ev_lock, flags);
693 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1015 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
694 if (evt->reg_id == event_req->ev_reg_id) { 1016 if (evt->reg_id == event_req->ev_reg_id) {
695 lpfc_ct_event_ref(evt); 1017 lpfc_bsg_event_ref(evt);
696 evt->wait_time_stamp = jiffies; 1018 evt->wait_time_stamp = jiffies;
697 break; 1019 break;
698 } 1020 }
699 } 1021 }
700 mutex_unlock(&phba->ct_event_mutex); 1022 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
701 1023
702 if (&evt->node == &phba->ct_ev_waiters) { 1024 if (&evt->node == &phba->ct_ev_waiters) {
703 /* no event waiting struct yet - first call */ 1025 /* no event waiting struct yet - first call */
704 evt = lpfc_ct_event_new(event_req->ev_reg_id, 1026 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
705 event_req->ev_req_id); 1027 event_req->ev_req_id);
706 if (!evt) { 1028 if (!evt) {
707 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1029 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
708 "2617 Failed allocation of event " 1030 "2617 Failed allocation of event "
709 "waiter\n"); 1031 "waiter\n");
710 return -ENOMEM; 1032 rc = -ENOMEM;
1033 goto job_error;
711 } 1034 }
712 1035
713 mutex_lock(&phba->ct_event_mutex); 1036 spin_lock_irqsave(&phba->ct_ev_lock, flags);
714 list_add(&evt->node, &phba->ct_ev_waiters); 1037 list_add(&evt->node, &phba->ct_ev_waiters);
715 lpfc_ct_event_ref(evt); 1038 lpfc_bsg_event_ref(evt);
716 mutex_unlock(&phba->ct_event_mutex); 1039 evt->wait_time_stamp = jiffies;
1040 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
717 } 1041 }
718 1042
1043 spin_lock_irqsave(&phba->ct_ev_lock, flags);
719 evt->waiting = 1; 1044 evt->waiting = 1;
720 if (wait_event_interruptible(evt->wq, 1045 dd_data->type = TYPE_EVT;
721 !list_empty(&evt->events_to_see))) { 1046 dd_data->context_un.evt = evt;
722 mutex_lock(&phba->ct_event_mutex); 1047 evt->set_job = job; /* for unsolicited command */
723 lpfc_ct_event_unref(evt); /* release ref */ 1048 job->dd_data = dd_data; /* for fc transport timeout callback*/
724 lpfc_ct_event_unref(evt); /* delete */ 1049 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
725 mutex_unlock(&phba->ct_event_mutex); 1050 return 0; /* call job done later */
726 rc = -EINTR; 1051
727 goto set_event_out; 1052job_error:
728 } 1053 if (dd_data != NULL)
729 1054 kfree(dd_data);
730 evt->wait_time_stamp = jiffies; 1055
731 evt->waiting = 0; 1056 job->dd_data = NULL;
732 1057 return rc;
733 mutex_lock(&phba->ct_event_mutex);
734 list_move(evt->events_to_see.prev, &evt->events_to_get);
735 lpfc_ct_event_unref(evt); /* release ref */
736 mutex_unlock(&phba->ct_event_mutex);
737
738set_event_out:
739 /* set_event carries no reply payload */
740 job->reply->reply_payload_rcv_len = 0;
741 /* make error code available to userspace */
742 job->reply->result = rc;
743 /* complete the job back to userspace */
744 job->job_done(job);
745
746 return 0;
747} 1058}
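
Note that lpfc_bsg_hba_set_event() returns without calling job_done(); the job is parked on the event and completed later from lpfc_bsg_ct_unsol_event(). A condensed excerpt of that hand-off, stitched together from the two functions above:

    /* SET_EVENT side: park the job and return without completing it */
    evt->set_job = job;             /* for the unsolicited handler   */
    job->dd_data = dd_data;         /* for the transport timeout cb  */
    return 0;                       /* no job_done() on this path    */

    /* unsolicited CT side: finish the parked job when data arrives  */
    job = evt->set_job;
    evt->set_job = NULL;
    if (job) {
            job->reply->reply_payload_rcv_len = size;
            job->reply->result = 0;
            job->job_done(job);     /* completes back to userspace   */
    }
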
748 1059
749/** 1060/**
750 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command 1061 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
751 * @job: GET_EVENT fc_bsg_job 1062 * @job: GET_EVENT fc_bsg_job
752 */ 1063 **/
753static int 1064static int
754lpfc_bsg_get_event(struct fc_bsg_job *job) 1065lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
755{ 1066{
756 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 1067 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
757 struct lpfc_hba *phba = vport->phba; 1068 struct lpfc_hba *phba = vport->phba;
758 struct get_ct_event *event_req; 1069 struct get_ct_event *event_req;
759 struct get_ct_event_reply *event_reply; 1070 struct get_ct_event_reply *event_reply;
760 struct lpfc_ct_event *evt; 1071 struct lpfc_bsg_event *evt;
761 struct event_data *evt_dat = NULL; 1072 struct event_data *evt_dat = NULL;
762 int rc = 0; 1073 unsigned long flags;
1074 uint32_t rc = 0;
763 1075
764 if (job->request_len < 1076 if (job->request_len <
765 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { 1077 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
766 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1078 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
767 "2613 Received GET_CT_EVENT request below " 1079 "2613 Received GET_CT_EVENT request below "
768 "minimum size\n"); 1080 "minimum size\n");
769 return -EINVAL; 1081 rc = -EINVAL;
1082 goto job_error;
770 } 1083 }
771 1084
772 event_req = (struct get_ct_event *) 1085 event_req = (struct get_ct_event *)
@@ -774,13 +1087,12 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
774 1087
775 event_reply = (struct get_ct_event_reply *) 1088 event_reply = (struct get_ct_event_reply *)
776 job->reply->reply_data.vendor_reply.vendor_rsp; 1089 job->reply->reply_data.vendor_reply.vendor_rsp;
777 1090 spin_lock_irqsave(&phba->ct_ev_lock, flags);
778 mutex_lock(&phba->ct_event_mutex);
779 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1091 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
780 if (evt->reg_id == event_req->ev_reg_id) { 1092 if (evt->reg_id == event_req->ev_reg_id) {
781 if (list_empty(&evt->events_to_get)) 1093 if (list_empty(&evt->events_to_get))
782 break; 1094 break;
783 lpfc_ct_event_ref(evt); 1095 lpfc_bsg_event_ref(evt);
784 evt->wait_time_stamp = jiffies; 1096 evt->wait_time_stamp = jiffies;
785 evt_dat = list_entry(evt->events_to_get.prev, 1097 evt_dat = list_entry(evt->events_to_get.prev,
786 struct event_data, node); 1098 struct event_data, node);
@@ -788,45 +1100,1539 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
788 break; 1100 break;
789 } 1101 }
790 } 1102 }
791 mutex_unlock(&phba->ct_event_mutex); 1103 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
792 1104
793 if (!evt_dat) { 1105 /* The app may continue to ask for event data until it gets
1106 * an error indicating that there is no more
1107 */
1108 if (evt_dat == NULL) {
794 job->reply->reply_payload_rcv_len = 0; 1109 job->reply->reply_payload_rcv_len = 0;
795 rc = -ENOENT; 1110 rc = -ENOENT;
796 goto error_get_event_exit; 1111 goto job_error;
797 } 1112 }
798 1113
799 if (evt_dat->len > job->reply_payload.payload_len) { 1114 if (evt_dat->len > job->request_payload.payload_len) {
800 evt_dat->len = job->reply_payload.payload_len; 1115 evt_dat->len = job->request_payload.payload_len;
801 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1116 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
802 "2618 Truncated event data at %d " 1117 "2618 Truncated event data at %d "
803 "bytes\n", 1118 "bytes\n",
804 job->reply_payload.payload_len); 1119 job->request_payload.payload_len);
805 } 1120 }
806 1121
1122 event_reply->type = evt_dat->type;
807 event_reply->immed_data = evt_dat->immed_dat; 1123 event_reply->immed_data = evt_dat->immed_dat;
808
809 if (evt_dat->len > 0) 1124 if (evt_dat->len > 0)
810 job->reply->reply_payload_rcv_len = 1125 job->reply->reply_payload_rcv_len =
811 sg_copy_from_buffer(job->reply_payload.sg_list, 1126 sg_copy_from_buffer(job->request_payload.sg_list,
812 job->reply_payload.sg_cnt, 1127 job->request_payload.sg_cnt,
813 evt_dat->data, evt_dat->len); 1128 evt_dat->data, evt_dat->len);
814 else 1129 else
815 job->reply->reply_payload_rcv_len = 0; 1130 job->reply->reply_payload_rcv_len = 0;
816 rc = 0;
817 1131
818 if (evt_dat) 1132 if (evt_dat) {
819 kfree(evt_dat->data); 1133 kfree(evt_dat->data);
820 kfree(evt_dat); 1134 kfree(evt_dat);
821 mutex_lock(&phba->ct_event_mutex); 1135 }
822 lpfc_ct_event_unref(evt); 1136
823 mutex_unlock(&phba->ct_event_mutex); 1137 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1138 lpfc_bsg_event_unref(evt);
1139 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1140 job->dd_data = NULL;
1141 job->reply->result = 0;
1142 job->job_done(job);
1143 return 0;
1144
1145job_error:
1146 job->dd_data = NULL;
1147 job->reply->result = rc;
1148 return rc;
1149}
1150
1151/**
1152 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1153 * @phba: Pointer to HBA context object.
1154 * @cmdiocbq: Pointer to command iocb.
1155 * @rspiocbq: Pointer to response iocb.
1156 *
1157 * This function is the completion handler for iocbs issued using
1158 * lpfc_issue_ct_rsp function. This function is called by the
1159 * ring event handler function without any lock held. This function
1160 * can be called from both worker thread context and interrupt
1161 * context. This function also can be called from other thread which
1162 * cleans up the SLI layer objects.
1163 * This function copies the contents of the response iocb to the
1164 * response iocb memory object provided by the caller and then
1165 * completes the bsg job, making the result available to the
1166 * userspace requester.
1167 **/
1168static void
1169lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1170 struct lpfc_iocbq *cmdiocbq,
1171 struct lpfc_iocbq *rspiocbq)
1172{
1173 struct bsg_job_data *dd_data;
1174 struct fc_bsg_job *job;
1175 IOCB_t *rsp;
1176 struct lpfc_dmabuf *bmp;
1177 struct lpfc_nodelist *ndlp;
1178 unsigned long flags;
1179 int rc = 0;
1180
1181 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1182 dd_data = cmdiocbq->context1;
1183 /* normal completion and timeout crossed paths, already done */
1184 if (!dd_data) {
1185 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1186 return;
1187 }
824 1188
825error_get_event_exit: 1189 job = dd_data->context_un.iocb.set_job;
1190 bmp = dd_data->context_un.iocb.bmp;
1191 rsp = &rspiocbq->iocb;
1192 ndlp = dd_data->context_un.iocb.ndlp;
1193
1194 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1195 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1196
1197 if (rsp->ulpStatus) {
1198 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1199 switch (rsp->un.ulpWord[4] & 0xff) {
1200 case IOERR_SEQUENCE_TIMEOUT:
1201 rc = -ETIMEDOUT;
1202 break;
1203 case IOERR_INVALID_RPI:
1204 rc = -EFAULT;
1205 break;
1206 default:
1207 rc = -EACCES;
1208 break;
1209 }
1210 } else
1211 rc = -EACCES;
1212 } else
1213 job->reply->reply_payload_rcv_len =
1214 rsp->un.genreq64.bdl.bdeSize;
1215
1216 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1217 lpfc_sli_release_iocbq(phba, cmdiocbq);
1218 lpfc_nlp_put(ndlp);
1219 kfree(bmp);
1220 kfree(dd_data);
826 /* make error code available to userspace */ 1221 /* make error code available to userspace */
827 job->reply->result = rc; 1222 job->reply->result = rc;
1223 job->dd_data = NULL;
828 /* complete the job back to userspace */ 1224 /* complete the job back to userspace */
829 job->job_done(job); 1225 job->job_done(job);
1226 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1227 return;
1228}
1229
1230/**
1231 * lpfc_issue_ct_rsp - issue a ct response
1232 * @phba: Pointer to HBA context object.
1233 * @job: Pointer to the job object.
1234 * @tag: tag index value into the ports context exchange array.
1235 * @bmp: Pointer to a dma buffer descriptor.
1236 * @num_entry: Number of entries in the bde.
1237 **/
1238static int
1239lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1240 struct lpfc_dmabuf *bmp, int num_entry)
1241{
1242 IOCB_t *icmd;
1243 struct lpfc_iocbq *ctiocb = NULL;
1244 int rc = 0;
1245 struct lpfc_nodelist *ndlp = NULL;
1246 struct bsg_job_data *dd_data;
1247 uint32_t creg_val;
1248
1249 /* allocate our bsg tracking structure */
1250 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1251 if (!dd_data) {
1252 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1253 "2736 Failed allocation of dd_data\n");
1254 rc = -ENOMEM;
1255 goto no_dd_data;
1256 }
1257
1258 /* Allocate buffer for command iocb */
1259 ctiocb = lpfc_sli_get_iocbq(phba);
1260 if (!ctiocb) {
1261 rc = -ENOMEM;
1262 goto no_ctiocb;
1263 }
1264
1265 icmd = &ctiocb->iocb;
1266 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1267 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1268 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1269 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1270 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1271 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1272 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1273 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1274 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1275
1276 /* Fill in rest of iocb */
1277 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1278 icmd->ulpBdeCount = 1;
1279 icmd->ulpLe = 1;
1280 icmd->ulpClass = CLASS3;
1281 if (phba->sli_rev == LPFC_SLI_REV4) {
1282 /* Do not issue unsol response if oxid not marked as valid */
1283 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1284 rc = IOCB_ERROR;
1285 goto issue_ct_rsp_exit;
1286 }
1287 icmd->ulpContext = phba->ct_ctx[tag].oxid;
1288 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1289 if (!ndlp) {
1290 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1291 "2721 ndlp null for oxid %x SID %x\n",
1292 icmd->ulpContext,
1293 phba->ct_ctx[tag].SID);
1294 rc = IOCB_ERROR;
1295 goto issue_ct_rsp_exit;
1296 }
1297 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1298 /* The exchange is done, mark the entry as invalid */
1299 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1300 } else
1301 icmd->ulpContext = (ushort) tag;
1302
1303 icmd->ulpTimeout = phba->fc_ratov * 2;
1304
1305 /* Xmit CT response on exchange <xid> */
1306 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1307 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1308 icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
1309
1310 ctiocb->iocb_cmpl = NULL;
1311 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1312 ctiocb->vport = phba->pport;
1313 ctiocb->context3 = bmp;
1314
1315 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1316 ctiocb->context1 = dd_data;
1317 ctiocb->context2 = NULL;
1318 dd_data->type = TYPE_IOCB;
1319 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1320 dd_data->context_un.iocb.rspiocbq = NULL;
1321 dd_data->context_un.iocb.set_job = job;
1322 dd_data->context_un.iocb.bmp = bmp;
1323 dd_data->context_un.iocb.ndlp = ndlp;
1324
1325 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1326 creg_val = readl(phba->HCregaddr);
1327 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1328 writel(creg_val, phba->HCregaddr);
1329 readl(phba->HCregaddr); /* flush */
1330 }
1331
1332 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1333
1334 if (rc == IOCB_SUCCESS)
1335 return 0; /* done for now */
1336
1337issue_ct_rsp_exit:
1338 lpfc_sli_release_iocbq(phba, ctiocb);
1339no_ctiocb:
1340 kfree(dd_data);
1341no_dd_data:
1342 return rc;
1343}
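
On SLI4 the response must be matched to the saved unsolicited exchange through the ct_ctx array; condensed from the function above, the bookkeeping is:

    /* 'tag' indexes the exchange context saved by the unsol handler */
    if (!(phba->ct_ctx[tag].flags & UNSOL_VALID))
            return IOCB_ERROR;                 /* context already consumed */
    icmd->ulpContext = phba->ct_ctx[tag].oxid; /* respond on that oxid     */
    phba->ct_ctx[tag].flags &= ~UNSOL_VALID;   /* one response per context */
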
1344
1345/**
1346 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1347 * @job: SEND_MGMT_RESP fc_bsg_job
1348 **/
1349static int
1350lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1351{
1352 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1353 struct lpfc_hba *phba = vport->phba;
1354 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1355 job->request->rqst_data.h_vendor.vendor_cmd;
1356 struct ulp_bde64 *bpl;
1357 struct lpfc_dmabuf *bmp = NULL;
1358 struct scatterlist *sgel = NULL;
1359 int request_nseg;
1360 int numbde;
1361 dma_addr_t busaddr;
1362 uint32_t tag = mgmt_resp->tag;
1363 unsigned long reqbfrcnt =
1364 (unsigned long)job->request_payload.payload_len;
1365 int rc = 0;
1366
1367 /* in case no data is transferred */
1368 job->reply->reply_payload_rcv_len = 0;
1369
1370 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1371 rc = -ERANGE;
1372 goto send_mgmt_rsp_exit;
1373 }
1374
1375 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1376 if (!bmp) {
1377 rc = -ENOMEM;
1378 goto send_mgmt_rsp_exit;
1379 }
1380
1381 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1382 if (!bmp->virt) {
1383 rc = -ENOMEM;
1384 goto send_mgmt_rsp_free_bmp;
1385 }
1386
1387 INIT_LIST_HEAD(&bmp->list);
1388 bpl = (struct ulp_bde64 *) bmp->virt;
1389 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1390 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1391 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1392 busaddr = sg_dma_address(sgel);
1393 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1394 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1395 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1396 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1397 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1398 bpl++;
1399 }
1400
1401 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1402
1403 if (rc == IOCB_SUCCESS)
1404 return 0; /* done for now */
1405
1406 /* TBD need to handle a timeout */
1407 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1408 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1409 rc = -EACCES;
1410 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1411
1412send_mgmt_rsp_free_bmp:
1413 kfree(bmp);
1414send_mgmt_rsp_exit:
1415 /* make error code available to userspace */
1416 job->reply->result = rc;
1417 job->dd_data = NULL;
1418 return rc;
1419}
1420
1421/**
1422 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
1423 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1424 *
1425 * This function is responsible for placing a port into diagnostic loopback
1426 * mode in order to perform a diagnostic loopback test.
1427 * All new scsi requests are blocked, a small delay is used to allow the
1428 * scsi requests to complete then the link is brought down. If the link
1429 * is placed in loopback mode then scsi requests are again allowed
1430 * so the scsi mid-layer doesn't give up on the port.
1431 * All of this is done in-line.
1432 */
1433static int
1434lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1435{
1436 struct Scsi_Host *shost = job->shost;
1437 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1438 struct lpfc_hba *phba = vport->phba;
1439 struct diag_mode_set *loopback_mode;
1440 struct lpfc_sli *psli = &phba->sli;
1441 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1442 uint32_t link_flags;
1443 uint32_t timeout;
1444 struct lpfc_vport **vports;
1445 LPFC_MBOXQ_t *pmboxq;
1446 int mbxstatus;
1447 int i = 0;
1448 int rc = 0;
1449
1450 /* no data to return just the return code */
1451 job->reply->reply_payload_rcv_len = 0;
1452
1453 if (job->request_len <
1454 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
1455 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1456 "2738 Received DIAG MODE request below minimum "
1457 "size\n");
1458 rc = -EINVAL;
1459 goto job_error;
1460 }
1461
1462 loopback_mode = (struct diag_mode_set *)
1463 job->request->rqst_data.h_vendor.vendor_cmd;
1464 link_flags = loopback_mode->type;
1465 timeout = loopback_mode->timeout;
1466
1467 if ((phba->link_state == LPFC_HBA_ERROR) ||
1468 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1469 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1470 rc = -EACCES;
1471 goto job_error;
1472 }
1473
1474 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1475 if (!pmboxq) {
1476 rc = -ENOMEM;
1477 goto job_error;
1478 }
1479
1480 vports = lpfc_create_vport_work_array(phba);
1481 if (vports) {
1482 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1483 shost = lpfc_shost_from_vport(vports[i]);
1484 scsi_block_requests(shost);
1485 }
1486
1487 lpfc_destroy_vport_work_array(phba, vports);
1488 } else {
1489 shost = lpfc_shost_from_vport(phba->pport);
1490 scsi_block_requests(shost);
1491 }
1492
1493 while (pring->txcmplq_cnt) {
1494 if (i++ > 500) /* wait up to 5 seconds */
1495 break;
1496
1497 msleep(10);
1498 }
1499
1500 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1501 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1502 pmboxq->u.mb.mbxOwner = OWN_HOST;
1503
1504 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1505
1506 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1507 /* wait for link down before proceeding */
1508 i = 0;
1509 while (phba->link_state != LPFC_LINK_DOWN) {
1510 if (i++ > timeout) {
1511 rc = -ETIMEDOUT;
1512 goto loopback_mode_exit;
1513 }
1514
1515 msleep(10);
1516 }
1517
1518 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1519 if (link_flags == INTERNAL_LOOP_BACK)
1520 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1521 else
1522 pmboxq->u.mb.un.varInitLnk.link_flags =
1523 FLAGS_TOPOLOGY_MODE_LOOP;
1524
1525 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1526 pmboxq->u.mb.mbxOwner = OWN_HOST;
1527
1528 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1529 LPFC_MBOX_TMO);
1530
1531 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1532 rc = -ENODEV;
1533 else {
1534 phba->link_flag |= LS_LOOPBACK_MODE;
1535 /* wait for the link attention interrupt */
1536 msleep(100);
1537
1538 i = 0;
1539 while (phba->link_state != LPFC_HBA_READY) {
1540 if (i++ > timeout) {
1541 rc = -ETIMEDOUT;
1542 break;
1543 }
1544
1545 msleep(10);
1546 }
1547 }
1548
1549 } else
1550 rc = -ENODEV;
1551
1552loopback_mode_exit:
1553 vports = lpfc_create_vport_work_array(phba);
1554 if (vports) {
1555 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1556 shost = lpfc_shost_from_vport(vports[i]);
1557 scsi_unblock_requests(shost);
1558 }
1559 lpfc_destroy_vport_work_array(phba, vports);
1560 } else {
1561 shost = lpfc_shost_from_vport(phba->pport);
1562 scsi_unblock_requests(shost);
1563 }
1564
1565 /*
1566 * Let SLI layer release mboxq if mbox command completed after timeout.
1567 */
1568 if (mbxstatus != MBX_TIMEOUT)
1569 mempool_free(pmboxq, phba->mbox_mem_pool);
1570
1571job_error:
1572 /* make error code available to userspace */
1573 job->reply->result = rc;
1574 /* complete the job back to userspace if no error */
1575 if (rc == 0)
1576 job->job_done(job);
1577 return rc;
1578}
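
All the link-state waits in lpfc_bsg_diag_mode() follow one polling idiom, isolated below as a sketch. Since each pass sleeps 10 ms, the user-supplied timeout is effectively counted in 10 ms ticks (an observation about the code, not something the patch documents):

    i = 0;
    while (phba->link_state != LPFC_LINK_DOWN) {
            if (i++ > timeout) {            /* give up after ~timeout * 10 ms */
                    rc = -ETIMEDOUT;
                    goto loopback_mode_exit;
            }
            msleep(10);                     /* yield between polls */
    }
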
1579
1580/**
1581 * lpfcdiag_loop_self_reg - obtains a remote port login id
1582 * @phba: Pointer to HBA context object
1583 * @rpi: Pointer to a remote port login id
1584 *
1585 * This function obtains a remote port login id so the diag loopback test
1586 * can send and receive its own unsolicited CT command.
1587 **/
1588static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
1589{
1590 LPFC_MBOXQ_t *mbox;
1591 struct lpfc_dmabuf *dmabuff;
1592 int status;
1593
1594 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1595 if (!mbox)
1596 return ENOMEM;
1597
1598 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1599 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1600 if (status) {
1601 mempool_free(mbox, phba->mbox_mem_pool);
1602 return ENOMEM;
1603 }
1604
1605 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1606 mbox->context1 = NULL;
1607 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1608
1609 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1610 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1611 kfree(dmabuff);
1612 if (status != MBX_TIMEOUT)
1613 mempool_free(mbox, phba->mbox_mem_pool);
1614 return ENODEV;
1615 }
1616
1617 *rpi = mbox->u.mb.un.varWords[0];
1618
1619 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1620 kfree(dmabuff);
1621 mempool_free(mbox, phba->mbox_mem_pool);
1622 return 0;
1623}
1624
1625/**
1626 * lpfcdiag_loop_self_unreg - unregs from the rpi
1627 * @phba: Pointer to HBA context object
1628 * @rpi: Remote port login id
1629 *
1630 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1631 **/
1632static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1633{
1634 LPFC_MBOXQ_t *mbox;
1635 int status;
1636
1637 /* Allocate mboxq structure */
1638 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1639 if (mbox == NULL)
1640 return ENOMEM;
1641
1642 lpfc_unreg_login(phba, 0, rpi, mbox);
1643 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1644
1645 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1646 if (status != MBX_TIMEOUT)
1647 mempool_free(mbox, phba->mbox_mem_pool);
1648 return EIO;
1649 }
1650
1651 mempool_free(mbox, phba->mbox_mem_pool);
1652 return 0;
1653}
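
lpfcdiag_loop_self_reg(), lpfcdiag_loop_self_unreg() and lpfc_bsg_diag_mode() all share the same mailbox discipline; a minimal sketch of it, resting on the rule visible in the code that a timed-out mboxq still belongs to the SLI layer:

    LPFC_MBOXQ_t *mbox;
    int status;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
            return -ENOMEM;
    /* ... fill in the mailbox command ... */
    status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
    /* on MBX_TIMEOUT the SLI layer completes and frees the mboxq
     * later, so the caller must not free it here */
    if (status != MBX_TIMEOUT)
            mempool_free(mbox, phba->mbox_mem_pool);
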
1654
1655/**
1656 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
1657 * @phba: Pointer to HBA context object
1658 * @rpi: Remote port login id
1659 * @txxri: Pointer to transmit exchange id
1660 * @rxxri: Pointer to response exchange id
1661 *
1662 * This function obtains the transmit and receive ids required to send
1663 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
1664 * flags are used so the unsolicited response handler is able to process
1665 * the ct command sent on the same port.
1666 **/
1667static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1668 uint16_t *txxri, uint16_t *rxxri)
1669{
1670 struct lpfc_bsg_event *evt;
1671 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
1672 IOCB_t *cmd, *rsp;
1673 struct lpfc_dmabuf *dmabuf;
1674 struct ulp_bde64 *bpl = NULL;
1675 struct lpfc_sli_ct_request *ctreq = NULL;
1676 int ret_val = 0;
1677 unsigned long flags;
1678
1679 *txxri = 0;
1680 *rxxri = 0;
1681 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
1682 SLI_CT_ELX_LOOPBACK);
1683 if (!evt)
1684 return ENOMEM;
1685
1686 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1687 list_add(&evt->node, &phba->ct_ev_waiters);
1688 lpfc_bsg_event_ref(evt);
1689 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1690
1691 cmdiocbq = lpfc_sli_get_iocbq(phba);
1692 rspiocbq = lpfc_sli_get_iocbq(phba);
1693
1694 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1695 if (dmabuf) {
1696 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
1697 INIT_LIST_HEAD(&dmabuf->list);
1698 bpl = (struct ulp_bde64 *) dmabuf->virt;
1699 memset(bpl, 0, sizeof(*bpl));
1700 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
1701 bpl->addrHigh =
1702 le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
1703 bpl->addrLow =
1704 le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
1705 bpl->tus.f.bdeFlags = 0;
1706 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
1707 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1708 }
1709
1710 if (cmdiocbq == NULL || rspiocbq == NULL ||
1711 dmabuf == NULL || bpl == NULL || ctreq == NULL) {
1712 ret_val = ENOMEM;
1713 goto err_get_xri_exit;
1714 }
1715
1716 cmd = &cmdiocbq->iocb;
1717 rsp = &rspiocbq->iocb;
1718
1719 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
1720
1721 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
1722 ctreq->RevisionId.bits.InId = 0;
1723 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
1724 ctreq->FsSubType = 0;
1725 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
1726 ctreq->CommandResponse.bits.Size = 0;
1727
1728
1729 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
1730 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
1731 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1732 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
1733
1734 cmd->un.xseq64.w5.hcsw.Fctl = LA;
1735 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
1736 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
1737 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1738
1739 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
1740 cmd->ulpBdeCount = 1;
1741 cmd->ulpLe = 1;
1742 cmd->ulpClass = CLASS3;
1743 cmd->ulpContext = rpi;
1744
1745 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1746 cmdiocbq->vport = phba->pport;
1747
1748 ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
1749 rspiocbq,
1750 (phba->fc_ratov * 2)
1751 + LPFC_DRVR_TIMEOUT);
1752 if (ret_val)
1753 goto err_get_xri_exit;
1754
1755 *txxri = rsp->ulpContext;
1756
1757 evt->waiting = 1;
1758 evt->wait_time_stamp = jiffies;
1759 ret_val = wait_event_interruptible_timeout(
1760 evt->wq, !list_empty(&evt->events_to_see),
1761 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
1762 if (list_empty(&evt->events_to_see))
1763 ret_val = (ret_val) ? EINTR : ETIMEDOUT;
1764 else {
1765 ret_val = IOCB_SUCCESS;
1766 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1767 list_move(evt->events_to_see.prev, &evt->events_to_get);
1768 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1769 *rxxri = (list_entry(evt->events_to_get.prev,
1770 typeof(struct event_data),
1771 node))->immed_dat;
1772 }
1773 evt->waiting = 0;
1774
1775err_get_xri_exit:
1776 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1777 lpfc_bsg_event_unref(evt); /* release ref */
1778 lpfc_bsg_event_unref(evt); /* delete */
1779 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1780
1781 if (dmabuf) {
1782 if (dmabuf->virt)
1783 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1784 kfree(dmabuf);
1785 }
1786
1787 if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
1788 lpfc_sli_release_iocbq(phba, cmdiocbq);
1789 if (rspiocbq)
1790 lpfc_sli_release_iocbq(phba, rspiocbq);
1791 return ret_val;
1792}
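
The return-value handling after wait_event_interruptible_timeout() above is terse; restated per the kernel's documented semantics:

    /* wait_event_interruptible_timeout() returns:
     *   < 0   interrupted by a signal          -> report EINTR
     *   == 0  timed out, condition still false -> report ETIMEDOUT
     *   > 0   condition met, jiffies remaining -> success
     *
     * so when events_to_see is still empty, a non-zero return means a
     * signal arrived (EINTR) and zero means the wait timed out.
     */
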
1793
1794/**
1795 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1796 * @phba: Pointer to HBA context object
1797 * @bpl: Pointer to 64 bit bde structure
1798 * @size: Number of bytes to process
1799 * @nocopydata: Flag to copy user data into the allocated buffer
1800 *
1801 * This function allocates page size buffers and populates an lpfc_dmabufext.
1802 * If allowed the user data pointed to with indataptr is copied into the kernel
1803 * memory. The chained list of page size buffers is returned.
1804 **/
1805static struct lpfc_dmabufext *
1806diag_cmd_data_alloc(struct lpfc_hba *phba,
1807 struct ulp_bde64 *bpl, uint32_t size,
1808 int nocopydata)
1809{
1810 struct lpfc_dmabufext *mlist = NULL;
1811 struct lpfc_dmabufext *dmp;
1812 int cnt, offset = 0, i = 0;
1813 struct pci_dev *pcidev;
1814
1815 pcidev = phba->pcidev;
1816
1817 while (size) {
1818 /* We get chunks of 4K */
1819 if (size > BUF_SZ_4K)
1820 cnt = BUF_SZ_4K;
1821 else
1822 cnt = size;
1823
1824 /* allocate struct lpfc_dmabufext buffer header */
1825 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
1826 if (!dmp)
1827 goto out;
1828
1829 INIT_LIST_HEAD(&dmp->dma.list);
1830
1831 /* Queue it to a linked list */
1832 if (mlist)
1833 list_add_tail(&dmp->dma.list, &mlist->dma.list);
1834 else
1835 mlist = dmp;
1836
1837 /* allocate buffer */
1838 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1839 cnt,
1840 &(dmp->dma.phys),
1841 GFP_KERNEL);
1842
1843 if (!dmp->dma.virt)
1844 goto out;
1845
1846 dmp->size = cnt;
1847
1848 if (nocopydata) {
1849 bpl->tus.f.bdeFlags = 0;
1850 pci_dma_sync_single_for_device(phba->pcidev,
1851 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1852
1853 } else {
1854 memset((uint8_t *)dmp->dma.virt, 0, cnt);
1855 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1856 }
1857
1858 /* build buffer ptr list for IOCB */
1859 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
1860 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
1861 bpl->tus.f.bdeSize = (ushort) cnt;
1862 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1863 bpl++;
1864
1865 i++;
1866 offset += cnt;
1867 size -= cnt;
1868 }
1869
1870 mlist->flag = i;
1871 return mlist;
1872out:
1873 diag_cmd_data_free(phba, mlist);
1874 return NULL;
1875}
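
A usage sketch of the allocator, modeled on lpfcdiag_loop_post_rxbufs() below; the number of BDEs actually filled in comes back in the flag field:

    struct lpfc_dmabufext *rxbuffer;
    uint32_t num_bde;

    rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
    if (!rxbuffer)
            return ENOMEM;  /* the loopback helpers return positive errno */

    num_bde = (uint32_t)rxbuffer->flag;     /* BDEs populated by the alloc */
    /* ... post the chained buffers to the ring ... */
    diag_cmd_data_free(phba, rxbuffer);
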
1876
1877/**
1878 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1879 * @phba: Pointer to HBA context object
1880 * @rxxri: Receive exchange id
1881 * @len: Number of data bytes
1882 *
1883 * This function allocates and posts a data buffer of sufficient size to receive
1884 * an unsolicited CT command.
1885 **/
1886static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1887 size_t len)
1888{
1889 struct lpfc_sli *psli = &phba->sli;
1890 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1891 struct lpfc_iocbq *cmdiocbq;
1892 IOCB_t *cmd = NULL;
1893 struct list_head head, *curr, *next;
1894 struct lpfc_dmabuf *rxbmp;
1895 struct lpfc_dmabuf *dmp;
1896 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
1897 struct ulp_bde64 *rxbpl = NULL;
1898 uint32_t num_bde;
1899 struct lpfc_dmabufext *rxbuffer = NULL;
1900 int ret_val = 0;
1901 int i = 0;
1902
1903 cmdiocbq = lpfc_sli_get_iocbq(phba);
1904 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1905 if (rxbmp != NULL) {
1906 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
1907 INIT_LIST_HEAD(&rxbmp->list);
1908 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
1909 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
1910 }
1911
1912 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1913 ret_val = ENOMEM;
1914 goto err_post_rxbufs_exit;
1915 }
1916
1917 /* Queue buffers for the receive exchange */
1918 num_bde = (uint32_t)rxbuffer->flag;
1919 dmp = &rxbuffer->dma;
1920
1921 cmd = &cmdiocbq->iocb;
1922 i = 0;
1923
1924 INIT_LIST_HEAD(&head);
1925 list_add_tail(&head, &dmp->list);
1926 list_for_each_safe(curr, next, &head) {
1927 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
1928 list_del(curr);
1929
1930 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1931 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
1932 cmd->un.quexri64cx.buff.bde.addrHigh =
1933 putPaddrHigh(mp[i]->phys);
1934 cmd->un.quexri64cx.buff.bde.addrLow =
1935 putPaddrLow(mp[i]->phys);
1936 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
1937 ((struct lpfc_dmabufext *)mp[i])->size;
1938 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
1939 cmd->ulpCommand = CMD_QUE_XRI64_CX;
1940 cmd->ulpPU = 0;
1941 cmd->ulpLe = 1;
1942 cmd->ulpBdeCount = 1;
1943 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
1944
1945 } else {
1946 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
1947 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
1948 cmd->un.cont64[i].tus.f.bdeSize =
1949 ((struct lpfc_dmabufext *)mp[i])->size;
1950 cmd->ulpBdeCount = ++i;
1951
1952 if ((--num_bde > 0) && (i < 2))
1953 continue;
1954
1955 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
1956 cmd->ulpLe = 1;
1957 }
1958
1959 cmd->ulpClass = CLASS3;
1960 cmd->ulpContext = rxxri;
1961
1962 ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
1963
1964 if (ret_val == IOCB_ERROR) {
1965 diag_cmd_data_free(phba,
1966 (struct lpfc_dmabufext *)mp[0]);
1967 if (mp[1])
1968 diag_cmd_data_free(phba,
1969 (struct lpfc_dmabufext *)mp[1]);
1970 dmp = list_entry(next, struct lpfc_dmabuf, list);
1971 ret_val = EIO;
1972 goto err_post_rxbufs_exit;
1973 }
1974
1975 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
1976 if (mp[1]) {
1977 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
1978 mp[1] = NULL;
1979 }
1980
1981 /* The iocb was freed by lpfc_sli_issue_iocb */
1982 cmdiocbq = lpfc_sli_get_iocbq(phba);
1983 if (!cmdiocbq) {
1984 dmp = list_entry(next, struct lpfc_dmabuf, list);
1985 ret_val = EIO;
1986 goto err_post_rxbufs_exit;
1987 }
1988
1989 cmd = &cmdiocbq->iocb;
1990 i = 0;
1991 }
1992 list_del(&head);
1993
1994err_post_rxbufs_exit:
1995
1996 if (rxbmp) {
1997 if (rxbmp->virt)
1998 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
1999 kfree(rxbmp);
2000 }
2001
2002 if (cmdiocbq)
2003 lpfc_sli_release_iocbq(phba, cmdiocbq);
2004 return ret_val;
2005}
2006
2007/**
2008 * lpfc_bsg_diag_test - with a port in loopback, issues a CT cmd to itself
2009 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2010 *
2011 * This function receives a user data buffer to be transmitted and received on
2012 * the same port. The link must be up and in loopback mode prior
2013 * to this call.
2014 * 1. A kernel buffer is allocated to copy the user data into.
2015 * 2. The port registers with "itself".
2016 * 3. The transmit and receive exchange ids are obtained.
2017 * 4. The receive exchange id is posted.
2018 * 5. A new els loopback event is created.
2019 * 6. The command and response iocbs are allocated.
2020 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2021 *
2022 * This function is meant to be called n times while the port is in loopback,
2023 * so it is the app's responsibility to issue a reset to take the port out
2024 * of loopback mode.
2025 **/
2026static int
2027lpfc_bsg_diag_test(struct fc_bsg_job *job)
2028{
2029 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2030 struct lpfc_hba *phba = vport->phba;
2031 struct diag_mode_test *diag_mode;
2032 struct lpfc_bsg_event *evt;
2033 struct event_data *evdat;
2034 struct lpfc_sli *psli = &phba->sli;
2035 uint32_t size;
2036 uint32_t full_size;
2037 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2038 uint16_t rpi;
2039 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2040 IOCB_t *cmd, *rsp;
2041 struct lpfc_sli_ct_request *ctreq;
2042 struct lpfc_dmabuf *txbmp;
2043 struct ulp_bde64 *txbpl = NULL;
2044 struct lpfc_dmabufext *txbuffer = NULL;
2045 struct list_head head;
2046 struct lpfc_dmabuf *curr;
2047 uint16_t txxri, rxxri;
2048 uint32_t num_bde;
2049 uint8_t *ptr = NULL, *rx_databuf = NULL;
2050 int rc = 0;
2051 unsigned long flags;
2052 void *dataout = NULL;
2053 uint32_t total_mem;
2054
2055 /* in case no data is returned, return just the return code */
2056 job->reply->reply_payload_rcv_len = 0;
2057
2058 if (job->request_len <
2059 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2060 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2061 "2739 Received DIAG TEST request below minimum "
2062 "size\n");
2063 rc = -EINVAL;
2064 goto loopback_test_exit;
2065 }
2066
2067 if (job->request_payload.payload_len !=
2068 job->reply_payload.payload_len) {
2069 rc = -EINVAL;
2070 goto loopback_test_exit;
2071 }
2072
2073 diag_mode = (struct diag_mode_test *)
2074 job->request->rqst_data.h_vendor.vendor_cmd;
2075
2076 if ((phba->link_state == LPFC_HBA_ERROR) ||
2077 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2078 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2079 rc = -EACCES;
2080 goto loopback_test_exit;
2081 }
2082
2083 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2084 rc = -EACCES;
2085 goto loopback_test_exit;
2086 }
2087
2088 size = job->request_payload.payload_len;
2089 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2090
2091 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2092 rc = -ERANGE;
2093 goto loopback_test_exit;
2094 }
2095
2096 if (size >= BUF_SZ_4K) {
2097 /*
2098 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2099 * then we allocate 64k and re-use that buffer over and over to
2100 * xfer the whole block. This is because the Linux kernel has a
2101 * problem allocating more than 120k of kernel space memory. Saw
2102 * problem with GET_FCPTARGETMAPPING...
2103 */
2104 if (size <= (64 * 1024))
2105 total_mem = size;
2106 else
2107 total_mem = 64 * 1024;
2108 } else
2109 /* Allocate memory for ioctl data */
2110 total_mem = BUF_SZ_4K;
2111
2112 dataout = kmalloc(total_mem, GFP_KERNEL);
2113 if (dataout == NULL) {
2114 rc = -ENOMEM;
2115 goto loopback_test_exit;
2116 }
2117
2118 ptr = dataout;
2119 ptr += ELX_LOOPBACK_HEADER_SZ;
2120 sg_copy_to_buffer(job->request_payload.sg_list,
2121 job->request_payload.sg_cnt,
2122 ptr, size);
2123
2124 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2125 if (rc) {
2126 rc = -ENOMEM;
2127 goto loopback_test_exit;
2128 }
2129
2130 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2131 if (rc) {
2132 lpfcdiag_loop_self_unreg(phba, rpi);
2133 rc = -ENOMEM;
2134 goto loopback_test_exit;
2135 }
2136
2137 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2138 if (rc) {
2139 lpfcdiag_loop_self_unreg(phba, rpi);
2140 rc = -ENOMEM;
2141 goto loopback_test_exit;
2142 }
2143
2144 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2145 SLI_CT_ELX_LOOPBACK);
2146 if (!evt) {
2147 lpfcdiag_loop_self_unreg(phba, rpi);
2148 rc = -ENOMEM;
2149 goto loopback_test_exit;
2150 }
2151
2152 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2153 list_add(&evt->node, &phba->ct_ev_waiters);
2154 lpfc_bsg_event_ref(evt);
2155 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2156
2157 cmdiocbq = lpfc_sli_get_iocbq(phba);
2158 rspiocbq = lpfc_sli_get_iocbq(phba);
2159 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2160
2161 if (txbmp) {
2162 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2163 INIT_LIST_HEAD(&txbmp->list);
2164 txbpl = (struct ulp_bde64 *) txbmp->virt;
2165 if (txbpl)
2166 txbuffer = diag_cmd_data_alloc(phba,
2167 txbpl, full_size, 0);
2168 }
2169
2170 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
2171 rc = -ENOMEM;
2172 goto err_loopback_test_exit;
2173 }
2174
2175 cmd = &cmdiocbq->iocb;
2176 rsp = &rspiocbq->iocb;
2177
2178 INIT_LIST_HEAD(&head);
2179 list_add_tail(&head, &txbuffer->dma.list);
2180 list_for_each_entry(curr, &head, list) {
2181 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2182 if (current_offset == 0) {
2183 ctreq = curr->virt;
2184 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2185 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2186 ctreq->RevisionId.bits.InId = 0;
2187 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2188 ctreq->FsSubType = 0;
2189 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2190 ctreq->CommandResponse.bits.Size = size;
2191 segment_offset = ELX_LOOPBACK_HEADER_SZ;
2192 } else
2193 segment_offset = 0;
2194
2195 BUG_ON(segment_offset >= segment_len);
2196 memcpy(curr->virt + segment_offset,
2197 ptr + current_offset,
2198 segment_len - segment_offset);
2199
2200 current_offset += segment_len - segment_offset;
2201 BUG_ON(current_offset > size);
2202 }
2203 list_del(&head);
2204
2205 /* Build the XMIT_SEQUENCE iocb */
2206
2207 num_bde = (uint32_t)txbuffer->flag;
2208
2209 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2210 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2211 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2212 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2213
2214 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2215 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2216 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2217 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2218
2219 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2220 cmd->ulpBdeCount = 1;
2221 cmd->ulpLe = 1;
2222 cmd->ulpClass = CLASS3;
2223 cmd->ulpContext = txxri;
2224
2225 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2226 cmdiocbq->vport = phba->pport;
2227
2228 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
2229 (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
2230
2231 if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2232 rc = -EIO;
2233 goto err_loopback_test_exit;
2234 }
2235
2236 evt->waiting = 1;
2237 rc = wait_event_interruptible_timeout(
2238 evt->wq, !list_empty(&evt->events_to_see),
2239 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2240 evt->waiting = 0;
2241 if (list_empty(&evt->events_to_see))
2242 rc = (rc) ? -EINTR : -ETIMEDOUT;
2243 else {
2244 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2245 list_move(evt->events_to_see.prev, &evt->events_to_get);
2246 evdat = list_entry(evt->events_to_get.prev,
2247 typeof(*evdat), node);
2248 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2249 rx_databuf = evdat->data;
2250 if (evdat->len != full_size) {
2251 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2252 "1603 Loopback test did not receive expected "
2253 "data length. actual length 0x%x expected "
2254 "length 0x%x\n",
2255 evdat->len, full_size);
2256 rc = -EIO;
2257 } else if (rx_databuf == NULL)
2258 rc = -EIO;
2259 else {
2260 rc = IOCB_SUCCESS;
2261 /* skip over elx loopback header */
2262 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2263 job->reply->reply_payload_rcv_len =
2264 sg_copy_from_buffer(job->reply_payload.sg_list,
2265 job->reply_payload.sg_cnt,
2266 rx_databuf, size);
2267 job->reply->reply_payload_rcv_len = size;
2268 }
2269 }
2270
2271err_loopback_test_exit:
2272 lpfcdiag_loop_self_unreg(phba, rpi);
2273
2274 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2275 lpfc_bsg_event_unref(evt); /* release ref */
2276 lpfc_bsg_event_unref(evt); /* delete */
2277 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2278
2279 if (cmdiocbq != NULL)
2280 lpfc_sli_release_iocbq(phba, cmdiocbq);
2281
2282 if (rspiocbq != NULL)
2283 lpfc_sli_release_iocbq(phba, rspiocbq);
2284
2285 if (txbmp != NULL) {
2286 if (txbpl != NULL) {
2287 if (txbuffer != NULL)
2288 diag_cmd_data_free(phba, txbuffer);
2289 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
2290 }
2291 kfree(txbmp);
2292 }
2293
2294loopback_test_exit:
2295 kfree(dataout);
2296 /* make error code available to userspace */
2297 job->reply->result = rc;
2298 job->dd_data = NULL;
2299 /* complete the job back to userspace if no error */
2300 if (rc == 0)
2301 job->job_done(job);
2302 return rc;
2303}
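
A minimal userspace sketch (illustrative only, not part of this patch) of driving the DIAG_TEST path above through the FC transport bsg node. It assumes a /dev/bsg/fc_host0 node, userspace visibility of the scsi_bsg_fc.h definitions, and the vendor command value from the new lpfc_bsg.h below; error handling and buffer sizing are simplified, and the port must already have been placed in loopback via LPFC_BSG_VENDOR_DIAG_MODE.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>		/* struct sg_io_v4 */
#include <scsi/sg.h>		/* SG_IO */
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request/_reply */

#define LPFC_BSG_VENDOR_DIAG_TEST	5	/* from lpfc_bsg.h below */

int main(void)
{
	unsigned char rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)rqst;
	struct fc_bsg_reply bsg_rep;
	unsigned char data[2048];	/* pattern to transmit and receive */
	struct sg_io_v4 sgio;
	int fd, rc;

	memset(rqst, 0, sizeof(rqst));
	bsg_req->msgcode = FC_BSG_HST_VENDOR;
	/* vendor_cmd[0] is read by lpfc as struct diag_mode_test.command */
	bsg_req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_DIAG_TEST;

	memset(data, 0xA5, sizeof(data));
	memset(&sgio, 0, sizeof(sgio));
	sgio.guard = 'Q';
	sgio.protocol = BSG_PROTOCOL_SCSI;
	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	sgio.request_len = sizeof(rqst);
	sgio.request = (uintptr_t)rqst;
	/* lpfc_bsg_diag_test requires dout and din payload lengths to match */
	sgio.dout_xfer_len = sizeof(data);
	sgio.dout_xferp = (uintptr_t)data;
	sgio.din_xfer_len = sizeof(data);
	sgio.din_xferp = (uintptr_t)data;
	sgio.response = (uintptr_t)&bsg_rep;
	sgio.max_response_len = sizeof(bsg_rep);

	fd = open("/dev/bsg/fc_host0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	rc = ioctl(fd, SG_IO, &sgio);
	if (rc < 0)
		perror("SG_IO");
	else
		printf("looped back %u bytes, result %d\n",
		       bsg_rep.reply_payload_rcv_len, (int)bsg_rep.result);
	close(fd);
	return 0;
}
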
2304
2305/**
2306 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2307 * @job: GET_DFC_REV fc_bsg_job
2308 **/
2309static int
2310lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2311{
2312 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2313 struct lpfc_hba *phba = vport->phba;
2314 struct get_mgmt_rev *event_req;
2315 struct get_mgmt_rev_reply *event_reply;
2316 int rc = 0;
2317
2318 if (job->request_len <
2319 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2320 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2321 "2740 Received GET_DFC_REV request below "
2322 "minimum size\n");
2323 rc = -EINVAL;
2324 goto job_error;
2325 }
2326
2327 event_req = (struct get_mgmt_rev *)
2328 job->request->rqst_data.h_vendor.vendor_cmd;
2329
2330 event_reply = (struct get_mgmt_rev_reply *)
2331 job->reply->reply_data.vendor_reply.vendor_rsp;
2332
2333 if (job->reply_len <
2334 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2335 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2336 "2741 Received GET_DFC_REV reply below "
2337 "minimum size\n");
2338 rc = -EINVAL;
2339 goto job_error;
2340 }
2341
2342 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2343 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2344job_error:
2345 job->reply->result = rc;
2346 if (rc == 0)
2347 job->job_done(job);
2348 return rc;
2349}
2350
2351/**
2352 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
2353 * @phba: Pointer to HBA context object.
2354 * @pmboxq: Pointer to mailbox command.
2355 *
2356 * This is the completion handler function for mailbox commands issued from
2357 * the lpfc_bsg_issue_mbox function. It is called by the mailbox event
2358 * handler function with no lock held. It copies the mailbox response into
2359 * the job's reply payload, completes the job, and frees the tracking state
2360 * hung off context1 of the mailbox.
2361 **/
2362void
2363lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2364{
2365 struct bsg_job_data *dd_data;
2366 MAILBOX_t *pmb;
2367 MAILBOX_t *mb;
2368 struct fc_bsg_job *job;
2369 uint32_t size;
2370 unsigned long flags;
2371
2372 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2373 dd_data = pmboxq->context1;
2374 if (!dd_data) {
2375 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2376 return;
2377 }
2378
2379 pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
2380 mb = dd_data->context_un.mbox.mb;
2381 job = dd_data->context_un.mbox.set_job;
2382 memcpy(mb, pmb, sizeof(*pmb));
2383 size = job->request_payload.payload_len;
2384 job->reply->reply_payload_rcv_len =
2385 sg_copy_from_buffer(job->reply_payload.sg_list,
2386 job->reply_payload.sg_cnt,
2387 mb, size);
2388 job->reply->result = 0;
2389 dd_data->context_un.mbox.set_job = NULL;
2390 job->dd_data = NULL;
2391 job->job_done(job);
2392 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2393 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2394 kfree(mb);
2395 kfree(dd_data);
2396 return;
2397}
2398
2399/**
2400 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
2401 * @phba: Pointer to HBA context object.
2402 * @mb: Pointer to a mailbox object.
2403 * @vport: Pointer to a vport object.
2404 *
2405 * Some commands require the port to be offline; others may not be called from
2406 * the application.
2407 **/
2408static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2409 MAILBOX_t *mb, struct lpfc_vport *vport)
2410{
2411 /* return negative error values for bsg job */
2412 switch (mb->mbxCommand) {
2413 /* Offline only */
2414 case MBX_INIT_LINK:
2415 case MBX_DOWN_LINK:
2416 case MBX_CONFIG_LINK:
2417 case MBX_CONFIG_RING:
2418 case MBX_RESET_RING:
2419 case MBX_UNREG_LOGIN:
2420 case MBX_CLEAR_LA:
2421 case MBX_DUMP_CONTEXT:
2422 case MBX_RUN_DIAGS:
2423 case MBX_RESTART:
2424 case MBX_SET_MASK:
2425 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
2426 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2427 "2743 Command 0x%x is illegal in on-line "
2428 "state\n",
2429 mb->mbxCommand);
2430 return -EPERM;
2431 }
2432 case MBX_WRITE_NV:
2433 case MBX_WRITE_VPARMS:
2434 case MBX_LOAD_SM:
2435 case MBX_READ_NV:
2436 case MBX_READ_CONFIG:
2437 case MBX_READ_RCONFIG:
2438 case MBX_READ_STATUS:
2439 case MBX_READ_XRI:
2440 case MBX_READ_REV:
2441 case MBX_READ_LNK_STAT:
2442 case MBX_DUMP_MEMORY:
2443 case MBX_DOWN_LOAD:
2444 case MBX_UPDATE_CFG:
2445 case MBX_KILL_BOARD:
2446 case MBX_LOAD_AREA:
2447 case MBX_LOAD_EXP_ROM:
2448 case MBX_BEACON:
2449 case MBX_DEL_LD_ENTRY:
2450 case MBX_SET_DEBUG:
2451 case MBX_WRITE_WWN:
2452 case MBX_SLI4_CONFIG:
2453 case MBX_READ_EVENT_LOG_STATUS:
2454 case MBX_WRITE_EVENT_LOG:
2455 case MBX_PORT_CAPABILITIES:
2456 case MBX_PORT_IOV_CONTROL:
2457 break;
2458 case MBX_SET_VARIABLE:
2459 case MBX_RUN_BIU_DIAG64:
2460 case MBX_READ_EVENT_LOG:
2461 case MBX_READ_SPARM64:
2462 case MBX_READ_LA:
2463 case MBX_READ_LA64:
2464 case MBX_REG_LOGIN:
2465 case MBX_REG_LOGIN64:
2466 case MBX_CONFIG_PORT:
2467 case MBX_RUN_BIU_DIAG:
2468 default:
2469 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2470 "2742 Unknown Command 0x%x\n",
2471 mb->mbxCommand);
2472 return -EPERM;
2473 }
2474
2475 return 0; /* ok */
2476}
2477
2478/**
2479 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2480 * @phba: Pointer to HBA context object.
2481 * @job: Pointer to the fc_bsg_job carrying the mailbox command.
2482 * @vport: Pointer to a vport object.
2483 *
2484 * Allocate a tracking object, mailbox command memory, get a mailbox
2485 * from the mailbox pool, and copy the caller's mailbox command.
2486 *
2487 * If the port is offline or the SLI is not active, we need to poll for the
2488 * command (the port is being reset) and complete the job; otherwise issue
2489 * the mailbox command and let our completion handler finish the command.
2490 **/
2491static uint32_t
2492lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2493 struct lpfc_vport *vport)
2494{
2495 LPFC_MBOXQ_t *pmboxq;
2496 MAILBOX_t *pmb;
2497 MAILBOX_t *mb;
2498 struct bsg_job_data *dd_data;
2499 uint32_t size;
2500 int rc = 0;
2501
2502 /* allocate our bsg tracking structure */
2503 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2504 if (!dd_data) {
2505 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2506 "2727 Failed allocation of dd_data\n");
2507 return -ENOMEM;
2508 }
2509
2510 mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2511 if (!mb) {
2512 kfree(dd_data);
2513 return -ENOMEM;
2514 }
2515
2516 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2517 if (!pmboxq) {
2518 kfree(dd_data);
2519 kfree(mb);
2520 return -ENOMEM;
2521 }
2522
2523 size = job->request_payload.payload_len;
2524 job->reply->reply_payload_rcv_len =
2525 sg_copy_to_buffer(job->request_payload.sg_list,
2526 job->request_payload.sg_cnt,
2527 mb, size);
2528
2529 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2530 if (rc != 0) {
2531 kfree(dd_data);
2532 kfree(mb);
2533 mempool_free(pmboxq, phba->mbox_mem_pool);
2534 return rc; /* must be negative */
2535 }
2536
2537 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2538 pmb = &pmboxq->u.mb;
2539 memcpy(pmb, mb, sizeof(*pmb));
2540 pmb->mbxOwner = OWN_HOST;
2541 pmboxq->context1 = NULL;
2542 pmboxq->vport = vport;
2543
2544 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
2545 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
2546 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2547 if (rc != MBX_SUCCESS) {
2548 if (rc != MBX_TIMEOUT) {
2549 kfree(dd_data);
2550 kfree(mb);
2551 mempool_free(pmboxq, phba->mbox_mem_pool);
2552 }
2553 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
2554 }
2555
2556 memcpy(mb, pmb, sizeof(*pmb));
2557 job->reply->reply_payload_rcv_len =
2558 sg_copy_from_buffer(job->reply_payload.sg_list,
2559 job->reply_payload.sg_cnt,
2560 mb, size);
2561 kfree(dd_data);
2562 kfree(mb);
2563 mempool_free(pmboxq, phba->mbox_mem_pool);
2564 /* not waiting, mbox already done */
2565 return 0;
2566 }
2567
2568 /* setup wake call as IOCB callback */
2569 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2570 /* setup context field to pass wait_queue pointer to wake function */
2571 pmboxq->context1 = dd_data;
2572 dd_data->type = TYPE_MBOX;
2573 dd_data->context_un.mbox.pmboxq = pmboxq;
2574 dd_data->context_un.mbox.mb = mb;
2575 dd_data->context_un.mbox.set_job = job;
2576 job->dd_data = dd_data;
2577 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2578 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
2579 kfree(dd_data);
2580 kfree(mb);
2581 mempool_free(pmboxq, phba->mbox_mem_pool);
2582 return -EIO;
2583 }
2584
2585 return 1;
2586}
2587
2588/**
2589 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
2590 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
2591 **/
2592static int
2593lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
2594{
2595 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2596 struct lpfc_hba *phba = vport->phba;
2597 int rc = 0;
2598
2599 /* in case no data is transferred */
2600 job->reply->reply_payload_rcv_len = 0;
2601 if (job->request_len <
2602 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
2603 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2604 "2737 Received MBOX_REQ request below "
2605 "minimum size\n");
2606 rc = -EINVAL;
2607 goto job_error;
2608 }
2609
2610 if (job->request_payload.payload_len != PAGE_SIZE) {
2611 rc = -EINVAL;
2612 goto job_error;
2613 }
2614
2615 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2616 rc = -EAGAIN;
2617 goto job_error;
2618 }
2619
2620 rc = lpfc_bsg_issue_mbox(phba, job, vport);
2621
2622job_error:
2623 if (rc == 0) {
2624 /* job done */
2625 job->reply->result = 0;
2626 job->dd_data = NULL;
2627 job->job_done(job);
2628 } else if (rc == 1)
2629 /* job submitted, will complete later */
2630 rc = 0; /* return zero, no error */
2631 else {
2632 /* some error occurred */
2633 job->reply->result = rc;
2634 job->dd_data = NULL;
2635 }
830 2636
831 return rc; 2637 return rc;
832} 2638}
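
For reference (illustrative, not part of the patch): the MBOX path above rejects any request whose data payload is not exactly PAGE_SIZE, and lpfc_bsg_issue_mbox interprets the start of that page as a raw MAILBOX_t. Assuming userspace carries a copy of the driver's MAILBOX_t layout and MBX_* command values, staging a benign command could look like:

	unsigned char mbox_page[4096];		/* must equal PAGE_SIZE */
	MAILBOX_t *mb = (MAILBOX_t *)mbox_page;	/* driver-layout mailbox */

	memset(mbox_page, 0, sizeof(mbox_page));
	mb->mbxCommand = MBX_READ_STATUS;	/* allowed by lpfc_bsg_check_cmd_access */
	/*
	 * Hand mbox_page to bsg as both the dout and din payload, PAGE_SIZE
	 * each, with vendor_cmd[] carrying struct dfc_mbox_req (lpfc_bsg.h).
	 */
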
@@ -834,38 +2640,57 @@ error_get_event_exit:
834/** 2640/**
835 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 2641 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
836 * @job: fc_bsg_job to handle 2642 * @job: fc_bsg_job to handle
837 */ 2643 **/
838static int 2644static int
839lpfc_bsg_hst_vendor(struct fc_bsg_job *job) 2645lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
840{ 2646{
841 int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; 2647 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
2648 int rc;
842 2649
843 switch (command) { 2650 switch (command) {
844 case LPFC_BSG_VENDOR_SET_CT_EVENT: 2651 case LPFC_BSG_VENDOR_SET_CT_EVENT:
845 return lpfc_bsg_set_event(job); 2652 rc = lpfc_bsg_hba_set_event(job);
846 break; 2653 break;
847
848 case LPFC_BSG_VENDOR_GET_CT_EVENT: 2654 case LPFC_BSG_VENDOR_GET_CT_EVENT:
849 return lpfc_bsg_get_event(job); 2655 rc = lpfc_bsg_hba_get_event(job);
2656 break;
2657 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
2658 rc = lpfc_bsg_send_mgmt_rsp(job);
2659 break;
2660 case LPFC_BSG_VENDOR_DIAG_MODE:
2661 rc = lpfc_bsg_diag_mode(job);
2662 break;
2663 case LPFC_BSG_VENDOR_DIAG_TEST:
2664 rc = lpfc_bsg_diag_test(job);
2665 break;
2666 case LPFC_BSG_VENDOR_GET_MGMT_REV:
2667 rc = lpfc_bsg_get_dfc_rev(job);
2668 break;
2669 case LPFC_BSG_VENDOR_MBOX:
2670 rc = lpfc_bsg_mbox_cmd(job);
850 break; 2671 break;
851
852 default: 2672 default:
853 return -EINVAL; 2673 rc = -EINVAL;
2674 job->reply->reply_payload_rcv_len = 0;
2675 /* make error code available to userspace */
2676 job->reply->result = rc;
2677 break;
854 } 2678 }
2679
2680 return rc;
855} 2681}
856 2682
857/** 2683/**
858 * lpfc_bsg_request - handle a bsg request from the FC transport 2684 * lpfc_bsg_request - handle a bsg request from the FC transport
859 * @job: fc_bsg_job to handle 2685 * @job: fc_bsg_job to handle
860 */ 2686 **/
861int 2687int
862lpfc_bsg_request(struct fc_bsg_job *job) 2688lpfc_bsg_request(struct fc_bsg_job *job)
863{ 2689{
864 uint32_t msgcode; 2690 uint32_t msgcode;
865 int rc = -EINVAL; 2691 int rc;
866 2692
867 msgcode = job->request->msgcode; 2693 msgcode = job->request->msgcode;
868
869 switch (msgcode) { 2694 switch (msgcode) {
870 case FC_BSG_HST_VENDOR: 2695 case FC_BSG_HST_VENDOR:
871 rc = lpfc_bsg_hst_vendor(job); 2696 rc = lpfc_bsg_hst_vendor(job);
@@ -874,9 +2699,13 @@ lpfc_bsg_request(struct fc_bsg_job *job)
874 rc = lpfc_bsg_rport_els(job); 2699 rc = lpfc_bsg_rport_els(job);
875 break; 2700 break;
876 case FC_BSG_RPT_CT: 2701 case FC_BSG_RPT_CT:
877 rc = lpfc_bsg_rport_ct(job); 2702 rc = lpfc_bsg_send_mgmt_cmd(job);
878 break; 2703 break;
879 default: 2704 default:
2705 rc = -EINVAL;
2706 job->reply->reply_payload_rcv_len = 0;
2707 /* make error code available to userspace */
2708 job->reply->result = rc;
880 break; 2709 break;
881 } 2710 }
882 2711
@@ -889,17 +2718,71 @@ lpfc_bsg_request(struct fc_bsg_job *job)
889 * 2718 *
890 * This function just aborts the job's IOCB. The aborted IOCB will return to 2719 * This function just aborts the job's IOCB. The aborted IOCB will return to
891 * the waiting function which will handle passing the error back to userspace 2720 * the waiting function which will handle passing the error back to userspace
892 */ 2721 **/
893int 2722int
894lpfc_bsg_timeout(struct fc_bsg_job *job) 2723lpfc_bsg_timeout(struct fc_bsg_job *job)
895{ 2724{
896 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2725 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
897 struct lpfc_hba *phba = vport->phba; 2726 struct lpfc_hba *phba = vport->phba;
898 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data; 2727 struct lpfc_iocbq *cmdiocb;
2728 struct lpfc_bsg_event *evt;
2729 struct lpfc_bsg_iocb *iocb;
2730 struct lpfc_bsg_mbox *mbox;
899 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 2731 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
2732 struct bsg_job_data *dd_data;
2733 unsigned long flags;
2734
2735 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2736 dd_data = (struct bsg_job_data *)job->dd_data;
2737 /* timeout and completion crossed paths if no dd_data */
2738 if (!dd_data) {
2739 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2740 return 0;
2741 }
900 2742
901 if (cmdiocb) 2743 switch (dd_data->type) {
2744 case TYPE_IOCB:
2745 iocb = &dd_data->context_un.iocb;
2746 cmdiocb = iocb->cmdiocbq;
2747 /* hint to completion handler that the job timed out */
2748 job->reply->result = -EAGAIN;
2749 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2750 /* this will call our completion handler */
2751 spin_lock_irq(&phba->hbalock);
902 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 2752 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
2753 spin_unlock_irq(&phba->hbalock);
2754 break;
2755 case TYPE_EVT:
2756 evt = dd_data->context_un.evt;
2757 /* this event has no job anymore */
2758 evt->set_job = NULL;
2759 job->dd_data = NULL;
2760 job->reply->reply_payload_rcv_len = 0;
2761 * Return -EAGAIN which is our way of signalling the
2762 * app to retry.
2763 */
2764 job->reply->result = -EAGAIN;
2765 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2766 job->job_done(job);
2767 break;
2768 case TYPE_MBOX:
2769 mbox = &dd_data->context_un.mbox;
2770 /* this mbox has no job anymore */
2771 mbox->set_job = NULL;
2772 job->dd_data = NULL;
2773 job->reply->reply_payload_rcv_len = 0;
2774 job->reply->result = -EAGAIN;
2775 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2776 job->job_done(job);
2777 break;
2778 default:
2779 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2780 break;
2781 }
903 2782
2783 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
2784 * otherwise an error message will be displayed on the console
2785 * so always return success (zero)
2786 */
904 return 0; 2787 return 0;
905} 2788}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 000000000000..6c8f87e39b98
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,98 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20/* bsg definitions
21 * No pointers to user data are allowed; all application buffers and sizes will
22 * be derived through the bsg interface.
23 *
24 * These are the vendor unique structures passed in using the bsg
25 * FC_BSG_HST_VENDOR message code type.
26 */
27#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
28#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
29#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
30#define LPFC_BSG_VENDOR_DIAG_MODE 4
31#define LPFC_BSG_VENDOR_DIAG_TEST 5
32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
33#define LPFC_BSG_VENDOR_MBOX 7
34
35struct set_ct_event {
36 uint32_t command;
37 uint32_t type_mask;
38 uint32_t ev_req_id;
39 uint32_t ev_reg_id;
40};
41
42struct get_ct_event {
43 uint32_t command;
44 uint32_t ev_reg_id;
45 uint32_t ev_req_id;
46};
47
48struct get_ct_event_reply {
49 uint32_t immed_data;
50 uint32_t type;
51};
52
53struct send_mgmt_resp {
54 uint32_t command;
55 uint32_t tag;
56};
57
58
59#define INTERNAL_LOOP_BACK 0x1 /* adapter shortcuts the loop internally */
60#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
61
62struct diag_mode_set {
63 uint32_t command;
64 uint32_t type;
65 uint32_t timeout;
66};
67
68struct diag_mode_test {
69 uint32_t command;
70};
71
72#define LPFC_WWNN_TYPE 0
73#define LPFC_WWPN_TYPE 1
74
75struct get_mgmt_rev {
76 uint32_t command;
77};
78
79#define MANAGEMENT_MAJOR_REV 1
80#define MANAGEMENT_MINOR_REV 0
81
82/* the MgmtRevInfo structure */
83struct MgmtRevInfo {
84 uint32_t a_Major;
85 uint32_t a_Minor;
86};
87
88struct get_mgmt_rev_reply {
89 struct MgmtRevInfo info;
90};
91
92struct dfc_mbox_req {
93 uint32_t command;
94 uint32_t inExtWLen;
95 uint32_t outExtWLen;
96 uint8_t mbOffset;
97};
98
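
These vendor structures travel in the vendor_cmd[] words of struct fc_bsg_host_vendor, as in the DIAG_TEST sketch earlier. A hedged fragment placing a port into internal loopback, assuming a userspace copy of these definitions and an fc_bsg_request already prepared as bsg_req:

	struct diag_mode_set dm = {
		.command = LPFC_BSG_VENDOR_DIAG_MODE,
		.type	 = INTERNAL_LOOP_BACK,
		.timeout = 60,		/* units as interpreted by the driver's diag-mode handler */
	};

	memcpy(bsg_req->rqst_data.h_vendor.vendor_cmd, &dm, sizeof(dm));
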
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 650494d622c1..6f0fb51eb461 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -44,18 +44,26 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
47void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
48 struct lpfc_nodelist *);
47void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); 49void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
48void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 50void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
49void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); 51void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
52void lpfc_supported_pages(struct lpfcMboxq *);
53void lpfc_sli4_params(struct lpfcMboxq *);
54int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
50 55
51struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 56struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
52void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); 57void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
53void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); 58void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
54void lpfc_cleanup_rpis(struct lpfc_vport *, int); 59void lpfc_cleanup_rpis(struct lpfc_vport *, int);
60void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
55int lpfc_linkdown(struct lpfc_hba *); 61int lpfc_linkdown(struct lpfc_hba *);
56void lpfc_linkdown_port(struct lpfc_vport *); 62void lpfc_linkdown_port(struct lpfc_vport *);
57void lpfc_port_link_failure(struct lpfc_vport *); 63void lpfc_port_link_failure(struct lpfc_vport *);
58void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 64void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
65void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
66void lpfc_retry_pport_discovery(struct lpfc_hba *);
59 67
60void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 68void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
61void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); 69void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -73,6 +81,7 @@ void lpfc_set_disctmo(struct lpfc_vport *);
73int lpfc_can_disctmo(struct lpfc_vport *); 81int lpfc_can_disctmo(struct lpfc_vport *);
74int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); 82int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
75void lpfc_unreg_all_rpis(struct lpfc_vport *); 83void lpfc_unreg_all_rpis(struct lpfc_vport *);
84void lpfc_unreg_hba_rpis(struct lpfc_hba *);
76void lpfc_unreg_default_rpis(struct lpfc_vport *); 85void lpfc_unreg_default_rpis(struct lpfc_vport *);
77void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); 86void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
78 87
@@ -99,7 +108,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
99 108
100void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); 109void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
101int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 110int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
102 struct serv_parm *, uint32_t); 111 struct serv_parm *, uint32_t, int);
103int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 112int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
104void lpfc_more_plogi(struct lpfc_vport *); 113void lpfc_more_plogi(struct lpfc_vport *);
105void lpfc_more_adisc(struct lpfc_vport *); 114void lpfc_more_adisc(struct lpfc_vport *);
@@ -197,6 +206,7 @@ void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
197void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); 206void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
198void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); 207void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
199int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); 208int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
209void lpfc_issue_init_vpi(struct lpfc_vport *);
200 210
201void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 211void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
202 uint32_t , LPFC_MBOXQ_t *); 212 uint32_t , LPFC_MBOXQ_t *);
@@ -206,7 +216,11 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
206void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); 216void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
207void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, 217void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
208 uint16_t); 218 uint16_t);
219void lpfc_unregister_fcf(struct lpfc_hba *);
220void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
209void lpfc_unregister_unused_fcf(struct lpfc_hba *); 221void lpfc_unregister_unused_fcf(struct lpfc_hba *);
222int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
223void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
210 224
211int lpfc_mem_alloc(struct lpfc_hba *, int align); 225int lpfc_mem_alloc(struct lpfc_hba *, int align);
212void lpfc_mem_free(struct lpfc_hba *); 226void lpfc_mem_free(struct lpfc_hba *);
@@ -365,6 +379,8 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
365void lpfc_create_static_vport(struct lpfc_hba *); 379void lpfc_create_static_vport(struct lpfc_hba *);
366void lpfc_stop_hba_timers(struct lpfc_hba *); 380void lpfc_stop_hba_timers(struct lpfc_hba *);
367void lpfc_stop_port(struct lpfc_hba *); 381void lpfc_stop_port(struct lpfc_hba *);
382void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
383void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
368void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); 384void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
369int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 385int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
370void lpfc_start_fdiscs(struct lpfc_hba *phba); 386void lpfc_start_fdiscs(struct lpfc_hba *phba);
@@ -378,5 +394,5 @@ struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
378/* functions to support SGIOv4/bsg interface */ 394/* functions to support SGIOv4/bsg interface */
379int lpfc_bsg_request(struct fc_bsg_job *); 395int lpfc_bsg_request(struct fc_bsg_job *);
380int lpfc_bsg_timeout(struct fc_bsg_job *); 396int lpfc_bsg_timeout(struct fc_bsg_job *);
381void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 397int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
382 struct lpfc_iocbq *); 398 struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0ebcd9baca79..c7e921973f66 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -97,7 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
97 struct list_head head; 97 struct list_head head;
98 struct lpfc_dmabuf *bdeBuf; 98 struct lpfc_dmabuf *bdeBuf;
99 99
100 lpfc_bsg_ct_unsol_event(phba, pring, piocbq); 100 if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
101 return;
101 102
102 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { 103 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
103 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 104 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -181,7 +182,8 @@ lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
181 uint32_t size; 182 uint32_t size;
182 183
183 /* Forward abort event to any process registered to receive ct event */ 184 /* Forward abort event to any process registered to receive ct event */
184 lpfc_bsg_ct_unsol_event(phba, pring, piocbq); 185 if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
186 return;
185 187
186 /* If there is no BDE associated with IOCB, there is nothing to do */ 188 /* If there is no BDE associated with IOCB, there is nothing to do */
187 if (icmd->ulpBdeCount == 0) 189 if (icmd->ulpBdeCount == 0)
@@ -1843,12 +1845,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1843 c = (rev & 0x0000ff00) >> 8; 1845 c = (rev & 0x0000ff00) >> 8;
1844 b4 = (rev & 0x000000ff); 1846 b4 = (rev & 0x000000ff);
1845 1847
1846 if (flag) 1848 sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
1847 sprintf(fwrevision, "%d.%d%d%c%d ", b1,
1848 b2, b3, c, b4);
1849 else
1850 sprintf(fwrevision, "%d.%d%d%c%d ", b1,
1851 b2, b3, c, b4);
1852 } 1849 }
1853 return; 1850 return;
1854} 1851}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2cc39684ce97..08b6634cb994 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -50,9 +50,6 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
50 struct lpfc_nodelist *ndlp, uint8_t retry); 50 struct lpfc_nodelist *ndlp, uint8_t retry);
51static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 51static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
52 struct lpfc_iocbq *iocb); 52 struct lpfc_iocbq *iocb);
53static void lpfc_register_new_vport(struct lpfc_hba *phba,
54 struct lpfc_vport *vport,
55 struct lpfc_nodelist *ndlp);
56 53
57static int lpfc_max_els_tries = 3; 54static int lpfc_max_els_tries = 3;
58 55
@@ -592,6 +589,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
592 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 589 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
593 spin_unlock_irq(shost->host_lock); 590 spin_unlock_irq(shost->host_lock);
594 } 591 }
592 /*
 593 * If the VPI is unregistered, the driver needs to do INIT_VPI
 594 * before re-registering
595 */
596 if (phba->sli_rev == LPFC_SLI_REV4) {
597 spin_lock_irq(shost->host_lock);
598 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
599 spin_unlock_irq(shost->host_lock);
600 }
595 } 601 }
596 602
597 if (phba->sli_rev < LPFC_SLI_REV4) { 603 if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -604,10 +610,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
604 } else { 610 } else {
605 ndlp->nlp_type |= NLP_FABRIC; 611 ndlp->nlp_type |= NLP_FABRIC;
606 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 612 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
607 if (vport->vpi_state & LPFC_VPI_REGISTERED) { 613 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
614 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
608 lpfc_start_fdiscs(phba); 615 lpfc_start_fdiscs(phba);
609 lpfc_do_scr_ns_plogi(phba, vport); 616 lpfc_do_scr_ns_plogi(phba, vport);
610 } else 617 } else if (vport->fc_flag & FC_VFI_REGISTERED)
618 lpfc_issue_init_vpi(vport);
619 else
611 lpfc_issue_reg_vfi(vport); 620 lpfc_issue_reg_vfi(vport);
612 } 621 }
613 return 0; 622 return 0;
@@ -804,6 +813,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
804 irsp->ulpTimeout); 813 irsp->ulpTimeout);
805 goto flogifail; 814 goto flogifail;
806 } 815 }
816 spin_lock_irq(shost->host_lock);
817 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
818 spin_unlock_irq(shost->host_lock);
807 819
808 /* 820 /*
809 * The FLogI succeeded. Sync the data for the CPU before 821 * The FLogI succeeded. Sync the data for the CPU before
@@ -2720,7 +2732,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2720 if (did == FDMI_DID) 2732 if (did == FDMI_DID)
2721 retry = 1; 2733 retry = 1;
2722 2734
2723 if ((cmd == ELS_CMD_FLOGI) && 2735 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
2724 (phba->fc_topology != TOPOLOGY_LOOP) && 2736 (phba->fc_topology != TOPOLOGY_LOOP) &&
2725 !lpfc_error_lost_link(irsp)) { 2737 !lpfc_error_lost_link(irsp)) {
2726 /* FLOGI retry policy */ 2738 /* FLOGI retry policy */
@@ -4385,7 +4397,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4385 4397
4386 did = Fabric_DID; 4398 did = Fabric_DID;
4387 4399
4388 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) { 4400 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4389 /* For a FLOGI we accept, then if our portname is greater 4401 /* For a FLOGI we accept, then if our portname is greater
4390 * then the remote portname we initiate Nport login. 4402 * then the remote portname we initiate Nport login.
4391 */ 4403 */
@@ -5915,6 +5927,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5915 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5927 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5916 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5928 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5917 MAILBOX_t *mb = &pmb->u.mb; 5929 MAILBOX_t *mb = &pmb->u.mb;
5930 int rc;
5918 5931
5919 spin_lock_irq(shost->host_lock); 5932 spin_lock_irq(shost->host_lock);
5920 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 5933 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5936,6 +5949,26 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5936 spin_unlock_irq(shost->host_lock); 5949 spin_unlock_irq(shost->host_lock);
5937 lpfc_can_disctmo(vport); 5950 lpfc_can_disctmo(vport);
5938 break; 5951 break;
 5952 /* If reg_vpi fails with invalid VPI status, re-init VPI */
5953 case 0x20:
5954 spin_lock_irq(shost->host_lock);
5955 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
5956 spin_unlock_irq(shost->host_lock);
5957 lpfc_init_vpi(phba, pmb, vport->vpi);
5958 pmb->vport = vport;
5959 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
5960 rc = lpfc_sli_issue_mbox(phba, pmb,
5961 MBX_NOWAIT);
5962 if (rc == MBX_NOT_FINISHED) {
5963 lpfc_printf_vlog(vport,
5964 KERN_ERR, LOG_MBOX,
5965 "2732 Failed to issue INIT_VPI"
5966 " mailbox command\n");
5967 } else {
5968 lpfc_nlp_put(ndlp);
5969 return;
5970 }
5971
5939 default: 5972 default:
5940 /* Try to recover from this error */ 5973 /* Try to recover from this error */
5941 lpfc_mbx_unreg_vpi(vport); 5974 lpfc_mbx_unreg_vpi(vport);
@@ -5949,13 +5982,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5949 break; 5982 break;
5950 } 5983 }
5951 } else { 5984 } else {
5985 spin_lock_irq(shost->host_lock);
5952 vport->vpi_state |= LPFC_VPI_REGISTERED; 5986 vport->vpi_state |= LPFC_VPI_REGISTERED;
5953 if (vport == phba->pport) 5987 spin_unlock_irq(shost->host_lock);
5988 if (vport == phba->pport) {
5954 if (phba->sli_rev < LPFC_SLI_REV4) 5989 if (phba->sli_rev < LPFC_SLI_REV4)
5955 lpfc_issue_fabric_reglogin(vport); 5990 lpfc_issue_fabric_reglogin(vport);
5956 else 5991 else {
5957 lpfc_issue_reg_vfi(vport); 5992 lpfc_start_fdiscs(phba);
5958 else 5993 lpfc_do_scr_ns_plogi(phba, vport);
5994 }
5995 } else
5959 lpfc_do_scr_ns_plogi(phba, vport); 5996 lpfc_do_scr_ns_plogi(phba, vport);
5960 } 5997 }
5961 5998
@@ -5977,7 +6014,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5977 * This routine registers the @vport as a new virtual port with a HBA. 6014 * This routine registers the @vport as a new virtual port with a HBA.
5978 * It is done through a registering vpi mailbox command. 6015 * It is done through a registering vpi mailbox command.
5979 **/ 6016 **/
5980static void 6017void
5981lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 6018lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5982 struct lpfc_nodelist *ndlp) 6019 struct lpfc_nodelist *ndlp)
5983{ 6020{
@@ -6018,6 +6055,78 @@ mbox_err_exit:
6018} 6055}
6019 6056
6020/** 6057/**
6058 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6059 * @phba: pointer to lpfc hba data structure.
6060 *
 6061 * This routine aborts all pending discovery commands and
 6062 * starts a timer to retry FLOGI for the physical port
6063 * discovery.
6064 **/
6065void
6066lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6067{
6068 struct lpfc_vport **vports;
6069 struct lpfc_nodelist *ndlp;
6070 struct Scsi_Host *shost;
6071 int i;
6072 uint32_t link_state;
6073
6074 /* Treat this failure as linkdown for all vports */
6075 link_state = phba->link_state;
6076 lpfc_linkdown(phba);
6077 phba->link_state = link_state;
6078
6079 vports = lpfc_create_vport_work_array(phba);
6080
6081 if (vports) {
6082 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6083 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6084 if (ndlp)
6085 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6086 lpfc_els_flush_cmd(vports[i]);
6087 }
6088 lpfc_destroy_vport_work_array(phba, vports);
6089 }
6090
 6091 /* If the fabric requires FLOGI, then re-instantiate physical login */
6092 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6093 if (!ndlp)
6094 return;
6095
6096
6097 shost = lpfc_shost_from_vport(phba->pport);
6098 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6099 spin_lock_irq(shost->host_lock);
6100 ndlp->nlp_flag |= NLP_DELAY_TMO;
6101 spin_unlock_irq(shost->host_lock);
6102 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
6103 phba->pport->port_state = LPFC_FLOGI;
6104 return;
6105}
6106
6107/**
6108 * lpfc_fabric_login_reqd - Check if FLOGI required.
6109 * @phba: pointer to lpfc hba data structure.
6110 * @cmdiocb: pointer to FDISC command iocb.
6111 * @rspiocb: pointer to FDISC response iocb.
6112 *
 6113 * This routine checks if a FLOGI is required for FDISC
6114 * to succeed.
6115 **/
6116static int
6117lpfc_fabric_login_reqd(struct lpfc_hba *phba,
6118 struct lpfc_iocbq *cmdiocb,
6119 struct lpfc_iocbq *rspiocb)
6120{
6121
6122 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
6123 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
6124 return 0;
6125 else
6126 return 1;
6127}
6128
6129/**
6021 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 6130 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
6022 * @phba: pointer to lpfc hba data structure. 6131 * @phba: pointer to lpfc hba data structure.
6023 * @cmdiocb: pointer to lpfc command iocb data structure. 6132 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -6066,6 +6175,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6066 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 6175 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
6067 6176
6068 if (irsp->ulpStatus) { 6177 if (irsp->ulpStatus) {
6178
6179 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
6180 lpfc_retry_pport_discovery(phba);
6181 goto out;
6182 }
6183
6069 /* Check for retry */ 6184 /* Check for retry */
6070 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 6185 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
6071 goto out; 6186 goto out;
@@ -6076,6 +6191,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6076 goto fdisc_failed; 6191 goto fdisc_failed;
6077 } 6192 }
6078 spin_lock_irq(shost->host_lock); 6193 spin_lock_irq(shost->host_lock);
6194 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6079 vport->fc_flag |= FC_FABRIC; 6195 vport->fc_flag |= FC_FABRIC;
6080 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 6196 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6081 vport->fc_flag |= FC_PUBLIC_LOOP; 6197 vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6103,10 +6219,13 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6103 lpfc_mbx_unreg_vpi(vport); 6219 lpfc_mbx_unreg_vpi(vport);
6104 spin_lock_irq(shost->host_lock); 6220 spin_lock_irq(shost->host_lock);
6105 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6221 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6222 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6106 spin_unlock_irq(shost->host_lock); 6223 spin_unlock_irq(shost->host_lock);
6107 } 6224 }
6108 6225
6109 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 6226 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
6227 lpfc_issue_init_vpi(vport);
6228 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
6110 lpfc_register_new_vport(phba, vport, ndlp); 6229 lpfc_register_new_vport(phba, vport, ndlp);
6111 else 6230 else
6112 lpfc_do_scr_ns_plogi(phba, vport); 6231 lpfc_do_scr_ns_plogi(phba, vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2445e399fd60..2359d0bfb734 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,6 +525,8 @@ lpfc_work_done(struct lpfc_hba *phba)
525 spin_unlock_irq(&phba->hbalock); 525 spin_unlock_irq(&phba->hbalock);
526 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 526 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
527 } 527 }
528 if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
529 lpfc_sli4_fcf_redisc_event_proc(phba);
528 } 530 }
529 531
530 vports = lpfc_create_vport_work_array(phba); 532 vports = lpfc_create_vport_work_array(phba);
@@ -706,6 +708,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
706void 708void
707lpfc_port_link_failure(struct lpfc_vport *vport) 709lpfc_port_link_failure(struct lpfc_vport *vport)
708{ 710{
711 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
712
709 /* Cleanup any outstanding received buffers */ 713 /* Cleanup any outstanding received buffers */
710 lpfc_cleanup_rcv_buffers(vport); 714 lpfc_cleanup_rcv_buffers(vport);
711 715
@@ -752,12 +756,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
752 lpfc_scsi_dev_block(phba); 756 lpfc_scsi_dev_block(phba);
753 757
754 spin_lock_irq(&phba->hbalock); 758 spin_lock_irq(&phba->hbalock);
755 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); 759 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
760 spin_unlock_irq(&phba->hbalock);
756 if (phba->link_state > LPFC_LINK_DOWN) { 761 if (phba->link_state > LPFC_LINK_DOWN) {
757 phba->link_state = LPFC_LINK_DOWN; 762 phba->link_state = LPFC_LINK_DOWN;
763 spin_lock_irq(shost->host_lock);
758 phba->pport->fc_flag &= ~FC_LBIT; 764 phba->pport->fc_flag &= ~FC_LBIT;
765 spin_unlock_irq(shost->host_lock);
759 } 766 }
760 spin_unlock_irq(&phba->hbalock);
761 vports = lpfc_create_vport_work_array(phba); 767 vports = lpfc_create_vport_work_array(phba);
762 if (vports != NULL) 768 if (vports != NULL)
763 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 769 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -1023,7 +1029,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1023 return; 1029 return;
1024 } 1030 }
1025 spin_lock_irqsave(&phba->hbalock, flags); 1031 spin_lock_irqsave(&phba->hbalock, flags);
1026 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1032 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1027 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1033 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1028 spin_unlock_irqrestore(&phba->hbalock, flags); 1034 spin_unlock_irqrestore(&phba->hbalock, flags);
1029 if (vport->port_state != LPFC_FLOGI) 1035 if (vport->port_state != LPFC_FLOGI)
@@ -1045,25 +1051,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1045static uint32_t 1051static uint32_t
1046lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) 1052lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1047{ 1053{
1048 if ((fab_name[0] == 1054 if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1049 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && 1055 return 0;
1050 (fab_name[1] == 1056 if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1051 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && 1057 return 0;
1052 (fab_name[2] == 1058 if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1053 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1054 (fab_name[3] ==
1055 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1056 (fab_name[4] ==
1057 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1058 (fab_name[5] ==
1059 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1060 (fab_name[6] ==
1061 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1062 (fab_name[7] ==
1063 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1064 return 1;
1065 else
1066 return 0; 1059 return 0;
1060 if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1061 return 0;
1062 if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1063 return 0;
1064 if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
1065 return 0;
1066 if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1067 return 0;
1068 if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1069 return 0;
1070 return 1;
1067} 1071}
1068 1072
1069/** 1073/**
@@ -1078,30 +1082,28 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1078static uint32_t 1082static uint32_t
1079lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) 1083lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1080{ 1084{
1081 if ((sw_name[0] == 1085 if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1082 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
1083 (sw_name[1] ==
1084 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
1085 (sw_name[2] ==
1086 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
1087 (sw_name[3] ==
1088 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
1089 (sw_name[4] ==
1090 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
1091 (sw_name[5] ==
1092 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
1093 (sw_name[6] ==
1094 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
1095 (sw_name[7] ==
1096 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
1097 return 1;
1098 else
1099 return 0; 1086 return 0;
1087 if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1088 return 0;
1089 if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
1090 return 0;
1091 if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1092 return 0;
1093 if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1094 return 0;
1095 if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1096 return 0;
1097 if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1098 return 0;
1099 if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1100 return 0;
1101 return 1;
1100} 1102}
1101 1103
1102/** 1104/**
1103 * lpfc_mac_addr_match - Check if the fcf mac address match. 1105 * lpfc_mac_addr_match - Check if the fcf mac address match.
1104 * @phba: pointer to lpfc hba data structure. 1106 * @mac_addr: pointer to mac address.
1105 * @new_fcf_record: pointer to fcf record. 1107 * @new_fcf_record: pointer to fcf record.
1106 * 1108 *
1107 * This routine compare the fcf record's mac address with HBA's 1109 * This routine compare the fcf record's mac address with HBA's
@@ -1109,85 +1111,115 @@ lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1109 * returns 1 else return 0. 1111 * returns 1 else return 0.
1110 **/ 1112 **/
1111static uint32_t 1113static uint32_t
1112lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1114lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1113{ 1115{
1114 if ((phba->fcf.mac_addr[0] == 1116 if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1115 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && 1117 return 0;
1116 (phba->fcf.mac_addr[1] == 1118 if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1117 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) && 1119 return 0;
1118 (phba->fcf.mac_addr[2] == 1120 if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1119 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1120 (phba->fcf.mac_addr[3] ==
1121 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1122 (phba->fcf.mac_addr[4] ==
1123 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1124 (phba->fcf.mac_addr[5] ==
1125 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1126 return 1;
1127 else
1128 return 0; 1121 return 0;
1122 if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1123 return 0;
1124 if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1125 return 0;
1126 if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1127 return 0;
1128 return 1;
1129}
1130
1131static bool
1132lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1133{
1134 return (curr_vlan_id == new_vlan_id);
1129} 1135}
1130 1136
1131/** 1137/**
1132 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1138 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1133 * @phba: pointer to lpfc hba data structure. 1139 * @fcf: pointer to driver fcf record.
1134 * @new_fcf_record: pointer to fcf record. 1140 * @new_fcf_record: pointer to fcf record.
1135 * 1141 *
1136 * This routine copies the FCF information from the FCF 1142 * This routine copies the FCF information from the FCF
1137 * record to lpfc_hba data structure. 1143 * record to lpfc_hba data structure.
1138 **/ 1144 **/
1139static void 1145static void
1140lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1146lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1147 struct fcf_record *new_fcf_record)
1141{ 1148{
1142 phba->fcf.fabric_name[0] = 1149 /* Fabric name */
1150 fcf_rec->fabric_name[0] =
1143 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); 1151 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1144 phba->fcf.fabric_name[1] = 1152 fcf_rec->fabric_name[1] =
1145 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); 1153 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1146 phba->fcf.fabric_name[2] = 1154 fcf_rec->fabric_name[2] =
1147 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); 1155 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1148 phba->fcf.fabric_name[3] = 1156 fcf_rec->fabric_name[3] =
1149 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); 1157 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1150 phba->fcf.fabric_name[4] = 1158 fcf_rec->fabric_name[4] =
1151 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); 1159 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1152 phba->fcf.fabric_name[5] = 1160 fcf_rec->fabric_name[5] =
1153 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); 1161 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1154 phba->fcf.fabric_name[6] = 1162 fcf_rec->fabric_name[6] =
1155 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); 1163 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1156 phba->fcf.fabric_name[7] = 1164 fcf_rec->fabric_name[7] =
1157 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); 1165 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1158 phba->fcf.mac_addr[0] = 1166 /* Mac address */
1159 bf_get(lpfc_fcf_record_mac_0, new_fcf_record); 1167 fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1160 phba->fcf.mac_addr[1] = 1168 fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1161 bf_get(lpfc_fcf_record_mac_1, new_fcf_record); 1169 fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1162 phba->fcf.mac_addr[2] = 1170 fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1163 bf_get(lpfc_fcf_record_mac_2, new_fcf_record); 1171 fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1164 phba->fcf.mac_addr[3] = 1172 fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1165 bf_get(lpfc_fcf_record_mac_3, new_fcf_record); 1173 /* FCF record index */
1166 phba->fcf.mac_addr[4] = 1174 fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1167 bf_get(lpfc_fcf_record_mac_4, new_fcf_record); 1175 /* FCF record priority */
1168 phba->fcf.mac_addr[5] = 1176 fcf_rec->priority = new_fcf_record->fip_priority;
1169 bf_get(lpfc_fcf_record_mac_5, new_fcf_record); 1177 /* Switch name */
1170 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1178 fcf_rec->switch_name[0] =
1171 phba->fcf.priority = new_fcf_record->fip_priority;
1172 phba->fcf.switch_name[0] =
1173 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); 1179 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1174 phba->fcf.switch_name[1] = 1180 fcf_rec->switch_name[1] =
1175 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); 1181 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1176 phba->fcf.switch_name[2] = 1182 fcf_rec->switch_name[2] =
1177 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); 1183 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1178 phba->fcf.switch_name[3] = 1184 fcf_rec->switch_name[3] =
1179 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); 1185 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1180 phba->fcf.switch_name[4] = 1186 fcf_rec->switch_name[4] =
1181 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); 1187 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1182 phba->fcf.switch_name[5] = 1188 fcf_rec->switch_name[5] =
1183 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); 1189 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1184 phba->fcf.switch_name[6] = 1190 fcf_rec->switch_name[6] =
1185 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); 1191 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1186 phba->fcf.switch_name[7] = 1192 fcf_rec->switch_name[7] =
1187 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); 1193 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1188} 1194}
1189 1195
1190/** 1196/**
1197 * lpfc_update_fcf_record - Update driver fcf record
1198 * @phba: pointer to lpfc hba data structure.
1199 * @fcf_rec: pointer to driver fcf record.
1200 * @new_fcf_record: pointer to hba fcf record.
1201 * @addr_mode: address mode to be set to the driver fcf record.
1202 * @vlan_id: vlan tag to be set to the driver fcf record.
1203 * @flag: flag bits to be set to the driver fcf record.
1204 *
1205 * This routine updates the driver FCF record from the new HBA FCF record
1206 * together with the address mode, vlan_id, and other informations. This
1207 * routine is called with the host lock held.
1208 **/
1209static void
1210__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1211 struct fcf_record *new_fcf_record, uint32_t addr_mode,
1212 uint16_t vlan_id, uint32_t flag)
1213{
1214 /* Copy the fields from the HBA's FCF record */
1215 lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1216 /* Update other fields of driver FCF record */
1217 fcf_rec->addr_mode = addr_mode;
1218 fcf_rec->vlan_id = vlan_id;
1219 fcf_rec->flag |= (flag | RECORD_VALID);
1220}
1221
1222/**
1191 * lpfc_register_fcf - Register the FCF with hba. 1223 * lpfc_register_fcf - Register the FCF with hba.
1192 * @phba: pointer to lpfc hba data structure. 1224 * @phba: pointer to lpfc hba data structure.
1193 * 1225 *
@@ -1212,7 +1244,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1212 1244
1213 /* The FCF is already registered, start discovery */ 1245 /* The FCF is already registered, start discovery */
1214 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1246 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1215 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1247 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1216 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1248 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1217 spin_unlock_irqrestore(&phba->hbalock, flags); 1249 spin_unlock_irqrestore(&phba->hbalock, flags);
1218 if (phba->pport->port_state != LPFC_FLOGI) 1250 if (phba->pport->port_state != LPFC_FLOGI)
@@ -1250,6 +1282,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1250 * @new_fcf_record: pointer to fcf record. 1282 * @new_fcf_record: pointer to fcf record.
1251 * @boot_flag: Indicates if this record used by boot bios. 1283 * @boot_flag: Indicates if this record used by boot bios.
1252 * @addr_mode: The address mode to be used by this FCF 1284 * @addr_mode: The address mode to be used by this FCF
1285 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
1253 * 1286 *
1254 * This routine compare the fcf record with connect list obtained from the 1287 * This routine compare the fcf record with connect list obtained from the
1255 * config region to decide if this FCF can be used for SAN discovery. It returns 1288 * config region to decide if this FCF can be used for SAN discovery. It returns
@@ -1323,7 +1356,8 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1323 return 1; 1356 return 1;
1324 } 1357 }
1325 1358
1326 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { 1359 list_for_each_entry(conn_entry,
1360 &phba->fcf_conn_rec_list, list) {
1327 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) 1361 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1328 continue; 1362 continue;
1329 1363
@@ -1470,6 +1504,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1470 */ 1504 */
1471 spin_lock_irq(&phba->hbalock); 1505 spin_lock_irq(&phba->hbalock);
1472 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1506 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1507 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1473 spin_unlock_irq(&phba->hbalock); 1508 spin_unlock_irq(&phba->hbalock);
1474 } 1509 }
1475 1510
@@ -1524,11 +1559,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1524 uint32_t shdr_status, shdr_add_status; 1559 uint32_t shdr_status, shdr_add_status;
1525 union lpfc_sli4_cfg_shdr *shdr; 1560 union lpfc_sli4_cfg_shdr *shdr;
1526 struct fcf_record *new_fcf_record; 1561 struct fcf_record *new_fcf_record;
1527 int rc;
1528 uint32_t boot_flag, addr_mode; 1562 uint32_t boot_flag, addr_mode;
1529 uint32_t next_fcf_index; 1563 uint32_t next_fcf_index;
1530 unsigned long flags; 1564 struct lpfc_fcf_rec *fcf_rec = NULL;
1565 unsigned long iflags;
1531 uint16_t vlan_id; 1566 uint16_t vlan_id;
1567 int rc;
1532 1568
1533 /* If there is pending FCoE event restart FCF table scan */ 1569 /* If there is pending FCoE event restart FCF table scan */
1534 if (lpfc_check_pending_fcoe_event(phba, 0)) { 1570 if (lpfc_check_pending_fcoe_event(phba, 0)) {
@@ -1583,9 +1619,8 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1583 sizeof(struct fcf_record)); 1619 sizeof(struct fcf_record));
1584 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 1620 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1585 1621
1586 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, 1622 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1587 &boot_flag, &addr_mode, 1623 &addr_mode, &vlan_id);
1588 &vlan_id);
1589 /* 1624 /*
1590 * If the fcf record does not match with connect list entries 1625 * If the fcf record does not match with connect list entries
1591 * read the next entry. 1626 * read the next entry.
@@ -1594,90 +1629,159 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1594 goto read_next_fcf; 1629 goto read_next_fcf;
1595 /* 1630 /*
1596 * If this is not the first FCF discovery of the HBA, use last 1631 * If this is not the first FCF discovery of the HBA, use last
1597 * FCF record for the discovery. 1632 * FCF record for the discovery. The condition that a rescan
1633 * matches the in-use FCF record: fabric name, switch name, mac
1634 * address, and vlan_id.
1598 */ 1635 */
1599 spin_lock_irqsave(&phba->hbalock, flags); 1636 spin_lock_irqsave(&phba->hbalock, iflags);
1600 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1637 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1601 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1638 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
1602 new_fcf_record) && 1639 new_fcf_record) &&
1603 lpfc_sw_name_match(phba->fcf.switch_name, 1640 lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
1604 new_fcf_record) && 1641 new_fcf_record) &&
1605 lpfc_mac_addr_match(phba, new_fcf_record)) { 1642 lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
1643 new_fcf_record) &&
1644 lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
1645 vlan_id)) {
1606 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1646 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1607 spin_unlock_irqrestore(&phba->hbalock, flags); 1647 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
1648 /* Stop FCF redisc wait timer if pending */
1649 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
1650 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1651 /* If in fast failover, mark it's completed */
1652 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1653 spin_unlock_irqrestore(&phba->hbalock, iflags);
1608 goto out; 1654 goto out;
1609 } 1655 }
1610 spin_unlock_irqrestore(&phba->hbalock, flags); 1656 /*
1611 goto read_next_fcf; 1657 * Read next FCF record from HBA searching for the matching
1658 * with in-use record only if not during the fast failover
1659 * period. In case of fast failover period, it shall try to
1660 * determine whether the FCF record just read should be the
1661 * next candidate.
1662 */
1663 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1664 spin_unlock_irqrestore(&phba->hbalock, iflags);
1665 goto read_next_fcf;
1666 }
1612 } 1667 }
1668 /*
1669 * Update on failover FCF record only if it's in FCF fast-failover
1670 * period; otherwise, update on current FCF record.
1671 */
1672 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
1673 /* Fast FCF failover only to the same fabric name */
1674 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
1675 new_fcf_record))
1676 fcf_rec = &phba->fcf.failover_rec;
1677 else
1678 goto read_next_fcf;
1679 } else
1680 fcf_rec = &phba->fcf.current_rec;
1681
1613 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 1682 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1614 /* 1683 /*
1615 * If the current FCF record does not have boot flag 1684 * If the driver FCF record does not have boot flag
1616 * set and new fcf record has boot flag set, use the 1685 * set and new hba fcf record has boot flag set, use
1617 * new fcf record. 1686 * the new hba fcf record.
1618 */ 1687 */
1619 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1688 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
1620 /* Use this FCF record */ 1689 /* Choose this FCF record */
1621 lpfc_copy_fcf_record(phba, new_fcf_record); 1690 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1622 phba->fcf.addr_mode = addr_mode; 1691 addr_mode, vlan_id, BOOT_ENABLE);
1623 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1692 spin_unlock_irqrestore(&phba->hbalock, iflags);
1624 if (vlan_id != 0xFFFF) {
1625 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1626 phba->fcf.vlan_id = vlan_id;
1627 }
1628 spin_unlock_irqrestore(&phba->hbalock, flags);
1629 goto read_next_fcf; 1693 goto read_next_fcf;
1630 } 1694 }
1631 /* 1695 /*
1632 * If the current FCF record has boot flag set and the 1696 * If the driver FCF record has boot flag set and the
1633 * new FCF record does not have boot flag, read the next 1697 * new hba FCF record does not have boot flag, read
1634 * FCF record. 1698 * the next FCF record.
1635 */ 1699 */
1636 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1700 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
1637 spin_unlock_irqrestore(&phba->hbalock, flags); 1701 spin_unlock_irqrestore(&phba->hbalock, iflags);
1638 goto read_next_fcf; 1702 goto read_next_fcf;
1639 } 1703 }
1640 /* 1704 /*
1641 * If there is a record with lower priority value for 1705 * If the new hba FCF record has lower priority value
1642 * the current FCF, use that record. 1706 * than the driver FCF record, use the new record.
1643 */ 1707 */
1644 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1708 if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
1645 new_fcf_record) && 1709 (new_fcf_record->fip_priority < fcf_rec->priority)) {
1646 (new_fcf_record->fip_priority < phba->fcf.priority)) { 1710 /* Choose this FCF record */
1647 /* Use this FCF record */ 1711 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1648 lpfc_copy_fcf_record(phba, new_fcf_record); 1712 addr_mode, vlan_id, 0);
1649 phba->fcf.addr_mode = addr_mode;
1650 if (vlan_id != 0xFFFF) {
1651 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1652 phba->fcf.vlan_id = vlan_id;
1653 }
1654 spin_unlock_irqrestore(&phba->hbalock, flags);
1655 goto read_next_fcf;
1656 } 1713 }
1657 spin_unlock_irqrestore(&phba->hbalock, flags); 1714 spin_unlock_irqrestore(&phba->hbalock, iflags);
1658 goto read_next_fcf; 1715 goto read_next_fcf;
1659 } 1716 }
1660 /* 1717 /*
1661 * This is the first available FCF record, use this 1718 * This is the first suitable FCF record, choose this record for
1662 * record. 1719 * initial best-fit FCF.
1663 */ 1720 */
1664 lpfc_copy_fcf_record(phba, new_fcf_record); 1721 if (fcf_rec) {
1665 phba->fcf.addr_mode = addr_mode; 1722 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1666 if (boot_flag) 1723 addr_mode, vlan_id, (boot_flag ?
1667 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1724 BOOT_ENABLE : 0));
1668 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1725 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1669 if (vlan_id != 0xFFFF) {
1670 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1671 phba->fcf.vlan_id = vlan_id;
1672 } 1726 }
1673 spin_unlock_irqrestore(&phba->hbalock, flags); 1727 spin_unlock_irqrestore(&phba->hbalock, iflags);
1674 goto read_next_fcf; 1728 goto read_next_fcf;
1675 1729
1676read_next_fcf: 1730read_next_fcf:
1677 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1731 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1678 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) 1732 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
1679 lpfc_register_fcf(phba); 1733 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
1680 else 1734 /*
1735 * Case of FCF fast failover scan
1736 */
1737
1738 /*
1739 * It has not found any suitable FCF record, cancel
1740 * FCF scan inprogress, and do nothing
1741 */
1742 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
1743 spin_lock_irqsave(&phba->hbalock, iflags);
1744 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1745 spin_unlock_irqrestore(&phba->hbalock, iflags);
1746 return;
1747 }
1748 /*
1749 * It has found a suitable FCF record that is not
1750 * the same as in-use FCF record, unregister the
1751 * in-use FCF record, replace the in-use FCF record
1752 * with the new FCF record, mark FCF fast failover
1753 * completed, and then start register the new FCF
1754 * record.
1755 */
1756
1757 /* unregister the current in-use FCF record */
1758 lpfc_unregister_fcf(phba);
1759 /* replace in-use record with the new record */
1760 memcpy(&phba->fcf.current_rec,
1761 &phba->fcf.failover_rec,
1762 sizeof(struct lpfc_fcf_rec));
1763 /* mark the FCF fast failover completed */
1764 spin_lock_irqsave(&phba->hbalock, iflags);
1765 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1766 spin_unlock_irqrestore(&phba->hbalock, iflags);
1767 /* Register to the new FCF record */
1768 lpfc_register_fcf(phba);
1769 } else {
1770 /*
1771 * In case of transaction period to fast FCF failover,
1772 * do nothing when search to the end of the FCF table.
1773 */
1774 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
1775 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
1776 return;
1777 /*
1778 * Otherwise, initial scan or post linkdown rescan,
1779 * register with the best fit FCF record found so
1780 * far through the scanning process.
1781 */
1782 lpfc_register_fcf(phba);
1783 }
1784 } else
1681 lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1785 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1682 return; 1786 return;
1683 1787
@@ -1695,10 +1799,13 @@ out:
1695 * 1799 *
1696 * This function handles completion of init vpi mailbox command. 1800 * This function handles completion of init vpi mailbox command.
1697 */ 1801 */
1698static void 1802void
1699lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1803lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1700{ 1804{
1701 struct lpfc_vport *vport = mboxq->vport; 1805 struct lpfc_vport *vport = mboxq->vport;
1806 struct lpfc_nodelist *ndlp;
1807 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1808
1702 if (mboxq->u.mb.mbxStatus) { 1809 if (mboxq->u.mb.mbxStatus) {
1703 lpfc_printf_vlog(vport, KERN_ERR, 1810 lpfc_printf_vlog(vport, KERN_ERR,
1704 LOG_MBOX, 1811 LOG_MBOX,
@@ -1708,9 +1815,23 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1708 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1815 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1709 return; 1816 return;
1710 } 1817 }
1711 spin_lock_irq(&phba->hbalock); 1818 spin_lock_irq(shost->host_lock);
1712 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 1819 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
1713 spin_unlock_irq(&phba->hbalock); 1820 spin_unlock_irq(shost->host_lock);
1821
1822 /* If this port is physical port or FDISC is done, do reg_vpi */
1823 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
1824 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1825 if (!ndlp)
1826 lpfc_printf_vlog(vport, KERN_ERR,
1827 LOG_DISCOVERY,
1828 "2731 Cannot find fabric "
1829 "controller node\n");
1830 else
1831 lpfc_register_new_vport(phba, vport, ndlp);
1832 mempool_free(mboxq, phba->mbox_mem_pool);
1833 return;
1834 }
1714 1835
1715 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 1836 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1716 lpfc_initial_fdisc(vport); 1837 lpfc_initial_fdisc(vport);
@@ -1719,10 +1840,42 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1719 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1840 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1720 "2606 No NPIV Fabric support\n"); 1841 "2606 No NPIV Fabric support\n");
1721 } 1842 }
1843 mempool_free(mboxq, phba->mbox_mem_pool);
1722 return; 1844 return;
1723} 1845}
1724 1846
1725/** 1847/**
1848 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
1849 * @vport: pointer to lpfc_vport data structure.
1850 *
1851 * This function issue a init_vpi mailbox command to initialize
1852 * VPI for the vport.
1853 */
1854void
1855lpfc_issue_init_vpi(struct lpfc_vport *vport)
1856{
1857 LPFC_MBOXQ_t *mboxq;
1858 int rc;
1859
1860 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
1861 if (!mboxq) {
1862 lpfc_printf_vlog(vport, KERN_ERR,
1863 LOG_MBOX, "2607 Failed to allocate "
1864 "init_vpi mailbox\n");
1865 return;
1866 }
1867 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
1868 mboxq->vport = vport;
1869 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
1870 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
1871 if (rc == MBX_NOT_FINISHED) {
1872 lpfc_printf_vlog(vport, KERN_ERR,
1873 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
1874 mempool_free(mboxq, vport->phba->mbox_mem_pool);
1875 }
1876}
1877
1878/**
1726 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 1879 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
1727 * @phba: pointer to lpfc hba data structure. 1880 * @phba: pointer to lpfc hba data structure.
1728 * 1881 *
@@ -1734,8 +1887,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1734{ 1887{
1735 struct lpfc_vport **vports; 1888 struct lpfc_vport **vports;
1736 int i; 1889 int i;
1737 LPFC_MBOXQ_t *mboxq;
1738 int rc;
1739 1890
1740 vports = lpfc_create_vport_work_array(phba); 1891 vports = lpfc_create_vport_work_array(phba);
1741 if (vports != NULL) { 1892 if (vports != NULL) {
@@ -1754,26 +1905,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1754 continue; 1905 continue;
1755 } 1906 }
1756 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { 1907 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
1757 mboxq = mempool_alloc(phba->mbox_mem_pool, 1908 lpfc_issue_init_vpi(vports[i]);
1758 GFP_KERNEL);
1759 if (!mboxq) {
1760 lpfc_printf_vlog(vports[i], KERN_ERR,
1761 LOG_MBOX, "2607 Failed to allocate "
1762 "init_vpi mailbox\n");
1763 continue;
1764 }
1765 lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
1766 mboxq->vport = vports[i];
1767 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
1768 rc = lpfc_sli_issue_mbox(phba, mboxq,
1769 MBX_NOWAIT);
1770 if (rc == MBX_NOT_FINISHED) {
1771 lpfc_printf_vlog(vports[i], KERN_ERR,
1772 LOG_MBOX, "2608 Failed to issue "
1773 "init_vpi mailbox\n");
1774 mempool_free(mboxq,
1775 phba->mbox_mem_pool);
1776 }
1777 continue; 1909 continue;
1778 } 1910 }
1779 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 1911 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
@@ -1796,6 +1928,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1796{ 1928{
1797 struct lpfc_dmabuf *dmabuf = mboxq->context1; 1929 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1798 struct lpfc_vport *vport = mboxq->vport; 1930 struct lpfc_vport *vport = mboxq->vport;
1931 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1799 1932
1800 if (mboxq->u.mb.mbxStatus) { 1933 if (mboxq->u.mb.mbxStatus) {
1801 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1934 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1813,7 +1946,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1813 goto fail_free_mem; 1946 goto fail_free_mem;
1814 } 1947 }
1815 /* The VPI is implicitly registered when the VFI is registered */ 1948 /* The VPI is implicitly registered when the VFI is registered */
1949 spin_lock_irq(shost->host_lock);
1816 vport->vpi_state |= LPFC_VPI_REGISTERED; 1950 vport->vpi_state |= LPFC_VPI_REGISTERED;
1951 vport->fc_flag |= FC_VFI_REGISTERED;
1952 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
1953 spin_unlock_irq(shost->host_lock);
1817 1954
1818 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1955 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1819 lpfc_start_fdiscs(phba); 1956 lpfc_start_fdiscs(phba);
@@ -2050,8 +2187,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2050 return; 2187 return;
2051 } 2188 }
2052 spin_unlock_irq(&phba->hbalock); 2189 spin_unlock_irq(&phba->hbalock);
2053 rc = lpfc_sli4_read_fcf_record(phba, 2190 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
2054 LPFC_FCOE_FCF_GET_FIRST);
2055 if (rc) 2191 if (rc)
2056 goto out; 2192 goto out;
2057 } 2193 }
@@ -2139,10 +2275,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2139 } 2275 }
2140 2276
2141 phba->fc_eventTag = la->eventTag; 2277 phba->fc_eventTag = la->eventTag;
2278 spin_lock_irq(&phba->hbalock);
2142 if (la->mm) 2279 if (la->mm)
2143 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 2280 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
2144 else 2281 else
2145 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 2282 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
2283 spin_unlock_irq(&phba->hbalock);
2146 2284
2147 phba->link_events++; 2285 phba->link_events++;
2148 if (la->attType == AT_LINK_UP && (!la->mm)) { 2286 if (la->attType == AT_LINK_UP && (!la->mm)) {
@@ -2271,10 +2409,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2271 mb->mbxStatus); 2409 mb->mbxStatus);
2272 break; 2410 break;
2273 } 2411 }
2274 spin_lock_irq(&phba->hbalock); 2412 spin_lock_irq(shost->host_lock);
2275 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 2413 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
2276 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2414 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2277 spin_unlock_irq(&phba->hbalock); 2415 spin_unlock_irq(shost->host_lock);
2278 vport->unreg_vpi_cmpl = VPORT_OK; 2416 vport->unreg_vpi_cmpl = VPORT_OK;
2279 mempool_free(pmb, phba->mbox_mem_pool); 2417 mempool_free(pmb, phba->mbox_mem_pool);
2280 /* 2418 /*
@@ -2332,7 +2470,10 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2332 goto out; 2470 goto out;
2333 } 2471 }
2334 2472
2473 spin_lock_irq(shost->host_lock);
2335 vport->vpi_state |= LPFC_VPI_REGISTERED; 2474 vport->vpi_state |= LPFC_VPI_REGISTERED;
2475 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2476 spin_unlock_irq(shost->host_lock);
2336 vport->num_disc_nodes = 0; 2477 vport->num_disc_nodes = 0;
2337 /* go thru NPR list and issue ELS PLOGIs */ 2478 /* go thru NPR list and issue ELS PLOGIs */
2338 if (vport->fc_npr_cnt) 2479 if (vport->fc_npr_cnt)
@@ -3218,6 +3359,34 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3218 return 0; 3359 return 0;
3219} 3360}
3220 3361
3362/**
3363 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
3364 * @phba: pointer to lpfc hba data structure.
3365 *
3366 * This routine is invoked to unregister all the currently registered RPIs
3367 * to the HBA.
3368 **/
3369void
3370lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
3371{
3372 struct lpfc_vport **vports;
3373 struct lpfc_nodelist *ndlp;
3374 struct Scsi_Host *shost;
3375 int i;
3376
3377 vports = lpfc_create_vport_work_array(phba);
3378 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3379 shost = lpfc_shost_from_vport(vports[i]);
3380 spin_lock_irq(shost->host_lock);
3381 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
3382 if (ndlp->nlp_flag & NLP_RPI_VALID)
3383 lpfc_unreg_rpi(vports[i], ndlp);
3384 }
3385 spin_unlock_irq(shost->host_lock);
3386 }
3387 lpfc_destroy_vport_work_array(phba, vports);
3388}
3389
3221void 3390void
3222lpfc_unreg_all_rpis(struct lpfc_vport *vport) 3391lpfc_unreg_all_rpis(struct lpfc_vport *vport)
3223{ 3392{
@@ -4448,63 +4617,56 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4448} 4617}
4449 4618
4450/** 4619/**
4451 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. 4620 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
4452 * @phba: Pointer to hba context object. 4621 * @phba: Pointer to hba context object.
4453 * 4622 *
4454 * This function check if there are any connected remote port for the FCF and 4623 * This function prepare the HBA for unregistering the currently registered
4455 * if all the devices are disconnected, this function unregister FCFI. 4624 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
4456 * This function also tries to use another FCF for discovery. 4625 * VFIs.
4457 */ 4626 */
4458void 4627int
4459lpfc_unregister_unused_fcf(struct lpfc_hba *phba) 4628lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
4460{ 4629{
4461 LPFC_MBOXQ_t *mbox; 4630 LPFC_MBOXQ_t *mbox;
4462 int rc;
4463 struct lpfc_vport **vports; 4631 struct lpfc_vport **vports;
4464 int i; 4632 struct lpfc_nodelist *ndlp;
4465 4633 struct Scsi_Host *shost;
4466 spin_lock_irq(&phba->hbalock); 4634 int i, rc;
4467 /*
4468 * If HBA is not running in FIP mode or
4469 * If HBA does not support FCoE or
4470 * If FCF is not registered.
4471 * do nothing.
4472 */
4473 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4474 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4475 (!(phba->hba_flag & HBA_FIP_SUPPORT))) {
4476 spin_unlock_irq(&phba->hbalock);
4477 return;
4478 }
4479 spin_unlock_irq(&phba->hbalock);
4480 4635
4636 /* Unregister RPIs */
4481 if (lpfc_fcf_inuse(phba)) 4637 if (lpfc_fcf_inuse(phba))
4482 return; 4638 lpfc_unreg_hba_rpis(phba);
4483 4639
4484 /* At this point, all discovery is aborted */ 4640 /* At this point, all discovery is aborted */
4485 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4641 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4486 4642
4487 /* Unregister VPIs */ 4643 /* Unregister VPIs */
4488 vports = lpfc_create_vport_work_array(phba); 4644 vports = lpfc_create_vport_work_array(phba);
4489 if (vports && 4645 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4490 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4491 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4646 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4647 /* Stop FLOGI/FDISC retries */
4648 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
4649 if (ndlp)
4650 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
4492 lpfc_mbx_unreg_vpi(vports[i]); 4651 lpfc_mbx_unreg_vpi(vports[i]);
4493 spin_lock_irq(&phba->hbalock); 4652 shost = lpfc_shost_from_vport(vports[i]);
4653 spin_lock_irq(shost->host_lock);
4494 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 4654 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4495 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 4655 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
4496 spin_unlock_irq(&phba->hbalock); 4656 spin_unlock_irq(shost->host_lock);
4497 } 4657 }
4498 lpfc_destroy_vport_work_array(phba, vports); 4658 lpfc_destroy_vport_work_array(phba, vports);
4499 4659
4660 /* Cleanup any outstanding ELS commands */
4661 lpfc_els_flush_all_cmd(phba);
4662
4500 /* Unregister VFI */ 4663 /* Unregister VFI */
4501 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4664 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4502 if (!mbox) { 4665 if (!mbox) {
4503 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4666 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4504 "2556 UNREG_VFI mbox allocation failed" 4667 "2556 UNREG_VFI mbox allocation failed"
4505 "HBA state x%x\n", 4668 "HBA state x%x\n", phba->pport->port_state);
4506 phba->pport->port_state); 4669 return -ENOMEM;
4507 return;
4508 } 4670 }
4509 4671
4510 lpfc_unreg_vfi(mbox, phba->pport); 4672 lpfc_unreg_vfi(mbox, phba->pport);
@@ -4514,58 +4676,163 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4514 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4676 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4515 if (rc == MBX_NOT_FINISHED) { 4677 if (rc == MBX_NOT_FINISHED) {
4516 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4678 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4517 "2557 UNREG_VFI issue mbox failed rc x%x " 4679 "2557 UNREG_VFI issue mbox failed rc x%x "
4518 "HBA state x%x\n", 4680 "HBA state x%x\n",
4519 rc, phba->pport->port_state); 4681 rc, phba->pport->port_state);
4520 mempool_free(mbox, phba->mbox_mem_pool); 4682 mempool_free(mbox, phba->mbox_mem_pool);
4521 return; 4683 return -EIO;
4522 } 4684 }
4523 4685
4524 /* Unregister FCF */ 4686 shost = lpfc_shost_from_vport(phba->pport);
4687 spin_lock_irq(shost->host_lock);
4688 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
4689 spin_unlock_irq(shost->host_lock);
4690
4691 return 0;
4692}
4693
4694/**
4695 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
4696 * @phba: Pointer to hba context object.
4697 *
4698 * This function issues synchronous unregister FCF mailbox command to HBA to
4699 * unregister the currently registered FCF record. The driver does not reset
4700 * the driver FCF usage state flags.
4701 *
4702 * Return 0 if successfully issued, none-zero otherwise.
4703 */
4704int
4705lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
4706{
4707 LPFC_MBOXQ_t *mbox;
4708 int rc;
4709
4525 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4710 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4526 if (!mbox) { 4711 if (!mbox) {
4527 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4712 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4528 "2551 UNREG_FCFI mbox allocation failed" 4713 "2551 UNREG_FCFI mbox allocation failed"
4529 "HBA state x%x\n", 4714 "HBA state x%x\n", phba->pport->port_state);
4530 phba->pport->port_state); 4715 return -ENOMEM;
4531 return;
4532 } 4716 }
4533
4534 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); 4717 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4535 mbox->vport = phba->pport; 4718 mbox->vport = phba->pport;
4536 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; 4719 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4537 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4720 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4538 4721
4539 if (rc == MBX_NOT_FINISHED) { 4722 if (rc == MBX_NOT_FINISHED) {
4540 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4723 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4541 "2552 UNREG_FCFI issue mbox failed rc x%x " 4724 "2552 Unregister FCFI command failed rc x%x "
4542 "HBA state x%x\n", 4725 "HBA state x%x\n",
4543 rc, phba->pport->port_state); 4726 rc, phba->pport->port_state);
4544 mempool_free(mbox, phba->mbox_mem_pool); 4727 return -EINVAL;
4728 }
4729 return 0;
4730}
4731
4732/**
4733 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
4734 * @phba: Pointer to hba context object.
4735 *
4736 * This function unregisters the currently reigstered FCF. This function
4737 * also tries to find another FCF for discovery by rescan the HBA FCF table.
4738 */
4739void
4740lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
4741{
4742 int rc;
4743
4744 /* Preparation for unregistering fcf */
4745 rc = lpfc_unregister_fcf_prep(phba);
4746 if (rc) {
4747 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
4748 "2748 Failed to prepare for unregistering "
4749 "HBA's FCF record: rc=%d\n", rc);
4545 return; 4750 return;
4546 } 4751 }
4547 4752
4548 spin_lock_irq(&phba->hbalock); 4753 /* Now, unregister FCF record and reset HBA FCF state */
4549 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | 4754 rc = lpfc_sli4_unregister_fcf(phba);
4550 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | 4755 if (rc)
4551 FCF_VALID_VLAN); 4756 return;
4552 spin_unlock_irq(&phba->hbalock); 4757 /* Reset HBA FCF states after successful unregister FCF */
4758 phba->fcf.fcf_flag = 0;
4553 4759
4554 /* 4760 /*
4555 * If driver is not unloading, check if there is any other 4761 * If driver is not unloading, check if there is any other
4556 * FCF record that can be used for discovery. 4762 * FCF record that can be used for discovery.
4557 */ 4763 */
4558 if ((phba->pport->load_flag & FC_UNLOADING) || 4764 if ((phba->pport->load_flag & FC_UNLOADING) ||
4559 (phba->link_state < LPFC_LINK_UP)) 4765 (phba->link_state < LPFC_LINK_UP))
4560 return; 4766 return;
4561 4767
4562 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 4768 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4563 4769
4564 if (rc) 4770 if (rc)
4565 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 4771 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4566 "2553 lpfc_unregister_unused_fcf failed to read FCF" 4772 "2553 lpfc_unregister_unused_fcf failed "
4567 " record HBA state x%x\n", 4773 "to read FCF record HBA state x%x\n",
4568 phba->pport->port_state); 4774 phba->pport->port_state);
4775}
4776
4777/**
4778 * lpfc_unregister_fcf - Unregister the currently registered fcf record
4779 * @phba: Pointer to hba context object.
4780 *
4781 * This function just unregisters the currently reigstered FCF. It does not
4782 * try to find another FCF for discovery.
4783 */
4784void
4785lpfc_unregister_fcf(struct lpfc_hba *phba)
4786{
4787 int rc;
4788
4789 /* Preparation for unregistering fcf */
4790 rc = lpfc_unregister_fcf_prep(phba);
4791 if (rc) {
4792 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
4793 "2749 Failed to prepare for unregistering "
4794 "HBA's FCF record: rc=%d\n", rc);
4795 return;
4796 }
4797
4798 /* Now, unregister FCF record and reset HBA FCF state */
4799 rc = lpfc_sli4_unregister_fcf(phba);
4800 if (rc)
4801 return;
4802 /* Set proper HBA FCF states after successful unregister FCF */
4803 spin_lock_irq(&phba->hbalock);
4804 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
4805 spin_unlock_irq(&phba->hbalock);
4806}
4807
4808/**
4809 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4810 * @phba: Pointer to hba context object.
4811 *
4812 * This function check if there are any connected remote port for the FCF and
4813 * if all the devices are disconnected, this function unregister FCFI.
4814 * This function also tries to use another FCF for discovery.
4815 */
4816void
4817lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4818{
4819 /*
4820 * If HBA is not running in FIP mode or if HBA does not support
4821 * FCoE or if FCF is not registered, do nothing.
4822 */
4823 spin_lock_irq(&phba->hbalock);
4824 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4825 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4826 !(phba->hba_flag & HBA_FIP_SUPPORT)) {
4827 spin_unlock_irq(&phba->hbalock);
4828 return;
4829 }
4830 spin_unlock_irq(&phba->hbalock);
4831
4832 if (lpfc_fcf_inuse(phba))
4833 return;
4834
4835 lpfc_unregister_fcf_rescan(phba);
4569} 4836}
4570 4837
4571/** 4838/**
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index c9faa1d8c3c8..89ff7c09e298 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -1346,6 +1346,9 @@ typedef struct { /* FireFly BIU registers */
1346#define MBX_HEARTBEAT 0x31 1346#define MBX_HEARTBEAT 0x31
1347#define MBX_WRITE_VPARMS 0x32 1347#define MBX_WRITE_VPARMS 0x32
1348#define MBX_ASYNCEVT_ENABLE 0x33 1348#define MBX_ASYNCEVT_ENABLE 0x33
1349#define MBX_READ_EVENT_LOG_STATUS 0x37
1350#define MBX_READ_EVENT_LOG 0x38
1351#define MBX_WRITE_EVENT_LOG 0x39
1349 1352
1350#define MBX_PORT_CAPABILITIES 0x3B 1353#define MBX_PORT_CAPABILITIES 0x3B
1351#define MBX_PORT_IOV_CONTROL 0x3C 1354#define MBX_PORT_IOV_CONTROL 0x3C
@@ -1465,17 +1468,13 @@ typedef struct { /* FireFly BIU registers */
1465#define CMD_IOCB_LOGENTRY_CN 0x94 1468#define CMD_IOCB_LOGENTRY_CN 0x94
1466#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1469#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1467 1470
1468/* Unhandled Data Security SLI Commands */ 1471/* Data Security SLI Commands */
1469#define DSSCMD_IWRITE64_CR 0xD8 1472#define DSSCMD_IWRITE64_CR 0xF8
1470#define DSSCMD_IWRITE64_CX 0xD9 1473#define DSSCMD_IWRITE64_CX 0xF9
1471#define DSSCMD_IREAD64_CR 0xDA 1474#define DSSCMD_IREAD64_CR 0xFA
1472#define DSSCMD_IREAD64_CX 0xDB 1475#define DSSCMD_IREAD64_CX 0xFB
1473#define DSSCMD_INVALIDATE_DEK 0xDC 1476
1474#define DSSCMD_SET_KEK 0xDD 1477#define CMD_MAX_IOCB_CMD 0xFB
1475#define DSSCMD_GET_KEK_ID 0xDE
1476#define DSSCMD_GEN_XFER 0xDF
1477
1478#define CMD_MAX_IOCB_CMD 0xE6
1479#define CMD_IOCB_MASK 0xff 1478#define CMD_IOCB_MASK 0xff
1480 1479
1481#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG 1480#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 8a2a1c5935c6..820015fbc4d6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,35 +52,37 @@ struct dma_address {
52 uint32_t addr_hi; 52 uint32_t addr_hi;
53}; 53};
54 54
55#define LPFC_SLIREV_CONF_WORD 0x58
56struct lpfc_sli_intf { 55struct lpfc_sli_intf {
57 uint32_t word0; 56 uint32_t word0;
58#define lpfc_sli_intf_iftype_MASK 0x00000007 57#define lpfc_sli_intf_valid_SHIFT 29
59#define lpfc_sli_intf_iftype_SHIFT 0 58#define lpfc_sli_intf_valid_MASK 0x00000007
60#define lpfc_sli_intf_iftype_WORD word0 59#define lpfc_sli_intf_valid_WORD word0
61#define lpfc_sli_intf_rev_MASK 0x0000000f
62#define lpfc_sli_intf_rev_SHIFT 4
63#define lpfc_sli_intf_rev_WORD word0
64#define LPFC_SLIREV_CONF_SLI4 4
65#define lpfc_sli_intf_family_MASK 0x000000ff
66#define lpfc_sli_intf_family_SHIFT 8
67#define lpfc_sli_intf_family_WORD word0
68#define lpfc_sli_intf_feat1_MASK 0x000000ff
69#define lpfc_sli_intf_feat1_SHIFT 16
70#define lpfc_sli_intf_feat1_WORD word0
71#define lpfc_sli_intf_feat2_MASK 0x0000001f
72#define lpfc_sli_intf_feat2_SHIFT 24
73#define lpfc_sli_intf_feat2_WORD word0
74#define lpfc_sli_intf_valid_MASK 0x00000007
75#define lpfc_sli_intf_valid_SHIFT 29
76#define lpfc_sli_intf_valid_WORD word0
77#define LPFC_SLI_INTF_VALID 6 60#define LPFC_SLI_INTF_VALID 6
61#define lpfc_sli_intf_featurelevel2_SHIFT 24
62#define lpfc_sli_intf_featurelevel2_MASK 0x0000001F
63#define lpfc_sli_intf_featurelevel2_WORD word0
64#define lpfc_sli_intf_featurelevel1_SHIFT 16
65#define lpfc_sli_intf_featurelevel1_MASK 0x000000FF
66#define lpfc_sli_intf_featurelevel1_WORD word0
67#define LPFC_SLI_INTF_FEATURELEVEL1_1 1
68#define LPFC_SLI_INTF_FEATURELEVEL1_2 2
69#define lpfc_sli_intf_sli_family_SHIFT 8
70#define lpfc_sli_intf_sli_family_MASK 0x000000FF
71#define lpfc_sli_intf_sli_family_WORD word0
72#define LPFC_SLI_INTF_FAMILY_BE2 0
73#define LPFC_SLI_INTF_FAMILY_BE3 1
74#define lpfc_sli_intf_slirev_SHIFT 4
75#define lpfc_sli_intf_slirev_MASK 0x0000000F
76#define lpfc_sli_intf_slirev_WORD word0
77#define LPFC_SLI_INTF_REV_SLI3 3
78#define LPFC_SLI_INTF_REV_SLI4 4
79#define lpfc_sli_intf_if_type_SHIFT 0
80#define lpfc_sli_intf_if_type_MASK 0x00000007
81#define lpfc_sli_intf_if_type_WORD word0
82#define LPFC_SLI_INTF_IF_TYPE_0 0
83#define LPFC_SLI_INTF_IF_TYPE_1 1
78}; 84};
79 85
80#define LPFC_SLI4_BAR0 1
81#define LPFC_SLI4_BAR1 2
82#define LPFC_SLI4_BAR2 4
83
84#define LPFC_SLI4_MBX_EMBED true 86#define LPFC_SLI4_MBX_EMBED true
85#define LPFC_SLI4_MBX_NEMBED false 87#define LPFC_SLI4_MBX_NEMBED false
86 88
@@ -161,6 +163,9 @@ struct lpfc_sli_intf {
161#define LPFC_FP_DEF_IMAX 10000 163#define LPFC_FP_DEF_IMAX 10000
162#define LPFC_SP_DEF_IMAX 10000 164#define LPFC_SP_DEF_IMAX 10000
163 165
166/* PORT_CAPABILITIES constants. */
167#define LPFC_MAX_SUPPORTED_PAGES 8
168
164struct ulp_bde64 { 169struct ulp_bde64 {
165 union ULP_BDE_TUS { 170 union ULP_BDE_TUS {
166 uint32_t w; 171 uint32_t w;
@@ -516,7 +521,7 @@ struct lpfc_register {
516#define LPFC_UERR_STATUS_LO 0x00A0 521#define LPFC_UERR_STATUS_LO 0x00A0
517#define LPFC_UE_MASK_HI 0x00AC 522#define LPFC_UE_MASK_HI 0x00AC
518#define LPFC_UE_MASK_LO 0x00A8 523#define LPFC_UE_MASK_LO 0x00A8
519#define LPFC_SCRATCHPAD 0x0058 524#define LPFC_SLI_INTF 0x0058
520 525
521/* BAR0 Registers */ 526/* BAR0 Registers */
522#define LPFC_HST_STATE 0x00AC 527#define LPFC_HST_STATE 0x00AC
@@ -576,19 +581,6 @@ struct lpfc_register {
576#define LPFC_POST_STAGE_ARMFW_READY 0xC000 581#define LPFC_POST_STAGE_ARMFW_READY 0xC000
577#define LPFC_POST_STAGE_ARMFW_UE 0xF000 582#define LPFC_POST_STAGE_ARMFW_UE 0xF000
578 583
579#define lpfc_scratchpad_slirev_SHIFT 4
580#define lpfc_scratchpad_slirev_MASK 0xF
581#define lpfc_scratchpad_slirev_WORD word0
582#define lpfc_scratchpad_chiptype_SHIFT 8
583#define lpfc_scratchpad_chiptype_MASK 0xFF
584#define lpfc_scratchpad_chiptype_WORD word0
585#define lpfc_scratchpad_featurelevel1_SHIFT 16
586#define lpfc_scratchpad_featurelevel1_MASK 0xFF
587#define lpfc_scratchpad_featurelevel1_WORD word0
588#define lpfc_scratchpad_featurelevel2_SHIFT 24
589#define lpfc_scratchpad_featurelevel2_MASK 0xFF
590#define lpfc_scratchpad_featurelevel2_WORD word0
591
592/* BAR1 Registers */ 584/* BAR1 Registers */
593#define LPFC_IMR_MASK_ALL 0xFFFFFFFF 585#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
594#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF 586#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
@@ -801,6 +793,7 @@ struct mbox_header {
801#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 793#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
802#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A 794#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
803#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B 795#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
796#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
804 797
805/* Mailbox command structures */ 798/* Mailbox command structures */
806struct eq_context { 799struct eq_context {
@@ -1149,10 +1142,7 @@ struct sli4_sge { /* SLI-4 */
1149 this flag !! */ 1142 this flag !! */
1150#define lpfc_sli4_sge_last_MASK 0x00000001 1143#define lpfc_sli4_sge_last_MASK 0x00000001
1151#define lpfc_sli4_sge_last_WORD word2 1144#define lpfc_sli4_sge_last_WORD word2
1152 uint32_t word3; 1145 uint32_t sge_len;
1153#define lpfc_sli4_sge_len_SHIFT 0
1154#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1155#define lpfc_sli4_sge_len_WORD word3
1156}; 1146};
1157 1147
1158struct fcf_record { 1148struct fcf_record {
@@ -1301,6 +1291,19 @@ struct lpfc_mbx_del_fcf_tbl_entry {
1301#define lpfc_mbx_del_fcf_tbl_index_WORD word10 1291#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1302}; 1292};
1303 1293
1294struct lpfc_mbx_redisc_fcf_tbl {
1295 struct mbox_header header;
1296 uint32_t word10;
1297#define lpfc_mbx_redisc_fcf_count_SHIFT 0
1298#define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF
1299#define lpfc_mbx_redisc_fcf_count_WORD word10
1300 uint32_t resvd;
1301 uint32_t word12;
1302#define lpfc_mbx_redisc_fcf_index_SHIFT 0
1303#define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF
1304#define lpfc_mbx_redisc_fcf_index_WORD word12
1305};
1306
1304struct lpfc_mbx_query_fw_cfg { 1307struct lpfc_mbx_query_fw_cfg {
1305 struct mbox_header header; 1308 struct mbox_header header;
1306 uint32_t config_number; 1309 uint32_t config_number;
@@ -1834,6 +1837,177 @@ struct lpfc_mbx_request_features {
1834#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 1837#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1835}; 1838};
1836 1839
1840struct lpfc_mbx_supp_pages {
1841 uint32_t word1;
1842#define qs_SHIFT 0
1843#define qs_MASK 0x00000001
1844#define qs_WORD word1
1845#define wr_SHIFT 1
1846#define wr_MASK 0x00000001
1847#define wr_WORD word1
1848#define pf_SHIFT 8
1849#define pf_MASK 0x000000ff
1850#define pf_WORD word1
1851#define cpn_SHIFT 16
1852#define cpn_MASK 0x000000ff
1853#define cpn_WORD word1
1854 uint32_t word2;
1855#define list_offset_SHIFT 0
1856#define list_offset_MASK 0x000000ff
1857#define list_offset_WORD word2
1858#define next_offset_SHIFT 8
1859#define next_offset_MASK 0x000000ff
1860#define next_offset_WORD word2
1861#define elem_cnt_SHIFT 16
1862#define elem_cnt_MASK 0x000000ff
1863#define elem_cnt_WORD word2
1864 uint32_t word3;
1865#define pn_0_SHIFT 24
1866#define pn_0_MASK 0x000000ff
1867#define pn_0_WORD word3
1868#define pn_1_SHIFT 16
1869#define pn_1_MASK 0x000000ff
1870#define pn_1_WORD word3
1871#define pn_2_SHIFT 8
1872#define pn_2_MASK 0x000000ff
1873#define pn_2_WORD word3
1874#define pn_3_SHIFT 0
1875#define pn_3_MASK 0x000000ff
1876#define pn_3_WORD word3
1877 uint32_t word4;
1878#define pn_4_SHIFT 24
1879#define pn_4_MASK 0x000000ff
1880#define pn_4_WORD word4
1881#define pn_5_SHIFT 16
1882#define pn_5_MASK 0x000000ff
1883#define pn_5_WORD word4
1884#define pn_6_SHIFT 8
1885#define pn_6_MASK 0x000000ff
1886#define pn_6_WORD word4
1887#define pn_7_SHIFT 0
1888#define pn_7_MASK 0x000000ff
1889#define pn_7_WORD word4
1890 uint32_t rsvd[27];
1891#define LPFC_SUPP_PAGES 0
1892#define LPFC_BLOCK_GUARD_PROFILES 1
1893#define LPFC_SLI4_PARAMETERS 2
1894};
1895
1896struct lpfc_mbx_sli4_params {
1897 uint32_t word1;
1898#define qs_SHIFT 0
1899#define qs_MASK 0x00000001
1900#define qs_WORD word1
1901#define wr_SHIFT 1
1902#define wr_MASK 0x00000001
1903#define wr_WORD word1
1904#define pf_SHIFT 8
1905#define pf_MASK 0x000000ff
1906#define pf_WORD word1
1907#define cpn_SHIFT 16
1908#define cpn_MASK 0x000000ff
1909#define cpn_WORD word1
1910 uint32_t word2;
1911#define if_type_SHIFT 0
1912#define if_type_MASK 0x00000007
1913#define if_type_WORD word2
1914#define sli_rev_SHIFT 4
1915#define sli_rev_MASK 0x0000000f
1916#define sli_rev_WORD word2
1917#define sli_family_SHIFT 8
1918#define sli_family_MASK 0x000000ff
1919#define sli_family_WORD word2
1920#define featurelevel_1_SHIFT 16
1921#define featurelevel_1_MASK 0x000000ff
1922#define featurelevel_1_WORD word2
1923#define featurelevel_2_SHIFT 24
1924#define featurelevel_2_MASK 0x0000001f
1925#define featurelevel_2_WORD word2
1926 uint32_t word3;
1927#define fcoe_SHIFT 0
1928#define fcoe_MASK 0x00000001
1929#define fcoe_WORD word3
1930#define fc_SHIFT 1
1931#define fc_MASK 0x00000001
1932#define fc_WORD word3
1933#define nic_SHIFT 2
1934#define nic_MASK 0x00000001
1935#define nic_WORD word3
1936#define iscsi_SHIFT 3
1937#define iscsi_MASK 0x00000001
1938#define iscsi_WORD word3
1939#define rdma_SHIFT 4
1940#define rdma_MASK 0x00000001
1941#define rdma_WORD word3
1942 uint32_t sge_supp_len;
1943 uint32_t word5;
1944#define if_page_sz_SHIFT 0
1945#define if_page_sz_MASK 0x0000ffff
1946#define if_page_sz_WORD word5
1947#define loopbk_scope_SHIFT 24
1948#define loopbk_scope_MASK 0x0000000f
1949#define loopbk_scope_WORD word5
1950#define rq_db_window_SHIFT 28
1951#define rq_db_window_MASK 0x0000000f
1952#define rq_db_window_WORD word5
1953 uint32_t word6;
1954#define eq_pages_SHIFT 0
1955#define eq_pages_MASK 0x0000000f
1956#define eq_pages_WORD word6
1957#define eqe_size_SHIFT 8
1958#define eqe_size_MASK 0x000000ff
1959#define eqe_size_WORD word6
1960 uint32_t word7;
1961#define cq_pages_SHIFT 0
1962#define cq_pages_MASK 0x0000000f
1963#define cq_pages_WORD word7
1964#define cqe_size_SHIFT 8
1965#define cqe_size_MASK 0x000000ff
1966#define cqe_size_WORD word7
1967 uint32_t word8;
1968#define mq_pages_SHIFT 0
1969#define mq_pages_MASK 0x0000000f
1970#define mq_pages_WORD word8
1971#define mqe_size_SHIFT 8
1972#define mqe_size_MASK 0x000000ff
1973#define mqe_size_WORD word8
1974#define mq_elem_cnt_SHIFT 16
1975#define mq_elem_cnt_MASK 0x000000ff
1976#define mq_elem_cnt_WORD word8
1977 uint32_t word9;
1978#define wq_pages_SHIFT 0
1979#define wq_pages_MASK 0x0000ffff
1980#define wq_pages_WORD word9
1981#define wqe_size_SHIFT 8
1982#define wqe_size_MASK 0x000000ff
1983#define wqe_size_WORD word9
1984 uint32_t word10;
1985#define rq_pages_SHIFT 0
1986#define rq_pages_MASK 0x0000ffff
1987#define rq_pages_WORD word10
1988#define rqe_size_SHIFT 8
1989#define rqe_size_MASK 0x000000ff
1990#define rqe_size_WORD word10
1991 uint32_t word11;
1992#define hdr_pages_SHIFT 0
1993#define hdr_pages_MASK 0x0000000f
1994#define hdr_pages_WORD word11
1995#define hdr_size_SHIFT 8
1996#define hdr_size_MASK 0x0000000f
1997#define hdr_size_WORD word11
1998#define hdr_pp_align_SHIFT 16
1999#define hdr_pp_align_MASK 0x0000ffff
2000#define hdr_pp_align_WORD word11
2001 uint32_t word12;
2002#define sgl_pages_SHIFT 0
2003#define sgl_pages_MASK 0x0000000f
2004#define sgl_pages_WORD word12
2005#define sgl_pp_align_SHIFT 16
2006#define sgl_pp_align_MASK 0x0000ffff
2007#define sgl_pp_align_WORD word12
2008 uint32_t rsvd_13_63[51];
2009};
2010
1837/* Mailbox Completion Queue Error Messages */ 2011/* Mailbox Completion Queue Error Messages */
1838#define MB_CQE_STATUS_SUCCESS 0x0 2012#define MB_CQE_STATUS_SUCCESS 0x0
1839#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 2013#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -1863,6 +2037,7 @@ struct lpfc_mqe {
1863 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; 2037 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1864 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; 2038 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1865 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; 2039 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
2040 struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
1866 struct lpfc_mbx_reg_fcfi reg_fcfi; 2041 struct lpfc_mbx_reg_fcfi reg_fcfi;
1867 struct lpfc_mbx_unreg_fcfi unreg_fcfi; 2042 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1868 struct lpfc_mbx_mq_create mq_create; 2043 struct lpfc_mbx_mq_create mq_create;
@@ -1883,6 +2058,8 @@ struct lpfc_mqe {
1883 struct lpfc_mbx_request_features req_ftrs; 2058 struct lpfc_mbx_request_features req_ftrs;
1884 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; 2059 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1885 struct lpfc_mbx_query_fw_cfg query_fw_cfg; 2060 struct lpfc_mbx_query_fw_cfg query_fw_cfg;
2061 struct lpfc_mbx_supp_pages supp_pages;
2062 struct lpfc_mbx_sli4_params sli4_params;
1886 struct lpfc_mbx_nop nop; 2063 struct lpfc_mbx_nop nop;
1887 } un; 2064 } un;
1888}; 2065};
@@ -1959,6 +2136,9 @@ struct lpfc_acqe_link {
1959#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 2136#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1960#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 2137#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1961#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 2138#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
2139#define lpfc_acqe_qos_link_speed_SHIFT 16
2140#define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF
2141#define lpfc_acqe_qos_link_speed_WORD word1
1962 uint32_t event_tag; 2142 uint32_t event_tag;
1963 uint32_t trailer; 2143 uint32_t trailer;
1964}; 2144};
@@ -1976,6 +2156,7 @@ struct lpfc_acqe_fcoe {
1976#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 2156#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1977#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 2157#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1978#define LPFC_FCOE_EVENT_TYPE_CVL 0x4 2158#define LPFC_FCOE_EVENT_TYPE_CVL 0x4
2159#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5
1979 uint32_t event_tag; 2160 uint32_t event_tag;
1980 uint32_t trailer; 2161 uint32_t trailer;
1981}; 2162};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b8eb1b6e5e77..d29ac7c317d9 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -544,7 +544,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
544 mempool_free(pmb, phba->mbox_mem_pool); 544 mempool_free(pmb, phba->mbox_mem_pool);
545 return -EIO; 545 return -EIO;
546 } 546 }
547 } else { 547 } else if (phba->cfg_suppress_link_up == 0) {
548 lpfc_init_link(phba, pmb, phba->cfg_topology, 548 lpfc_init_link(phba, pmb, phba->cfg_topology,
549 phba->cfg_link_speed); 549 phba->cfg_link_speed);
550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -603,6 +603,102 @@ lpfc_config_port_post(struct lpfc_hba *phba)
603} 603}
604 604
605/** 605/**
606 * lpfc_hba_init_link - Initialize the FC link
607 * @phba: pointer to lpfc hba data structure.
608 *
609 * This routine issues the INIT_LINK mailbox command.
610 * It is made available through the lpfc_hba data structure
611 * for use as a delayed link-up mechanism with the
612 * module parameter lpfc_suppress_link_up.
613 *
614 * Return code
615 * 0 - success
616 * Any other value - error
617 **/
618int
619lpfc_hba_init_link(struct lpfc_hba *phba)
620{
621 struct lpfc_vport *vport = phba->pport;
622 LPFC_MBOXQ_t *pmb;
623 MAILBOX_t *mb;
624 int rc;
625
626 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
627 if (!pmb) {
628 phba->link_state = LPFC_HBA_ERROR;
629 return -ENOMEM;
630 }
631 mb = &pmb->u.mb;
632 pmb->vport = vport;
633
634 lpfc_init_link(phba, pmb, phba->cfg_topology,
635 phba->cfg_link_speed);
636 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
637 lpfc_set_loopback_flag(phba);
638 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
639 if (rc != MBX_SUCCESS) {
640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
641 "0498 Adapter failed to init, mbxCmd x%x "
642 "INIT_LINK, mbxStatus x%x\n",
643 mb->mbxCommand, mb->mbxStatus);
644 /* Clear all interrupt enable conditions */
645 writel(0, phba->HCregaddr);
646 readl(phba->HCregaddr); /* flush */
647 /* Clear all pending interrupts */
648 writel(0xffffffff, phba->HAregaddr);
649 readl(phba->HAregaddr); /* flush */
650 phba->link_state = LPFC_HBA_ERROR;
651 if (rc != MBX_BUSY)
652 mempool_free(pmb, phba->mbox_mem_pool);
653 return -EIO;
654 }
655 phba->cfg_suppress_link_up = 0;
656
657 return 0;
658}
659
660/**
661 * lpfc_hba_down_link - this routine downs the FC link
662 *
663 * This routine will issue the DOWN_LINK mailbox command call.
664 * It is available to other drivers through the lpfc_hba data
665 * structure for use to stop the link.
666 *
667 * Return code
668 * 0 - success
669 * Any other value - error
670 **/
671int
672lpfc_hba_down_link(struct lpfc_hba *phba)
673{
674 LPFC_MBOXQ_t *pmb;
675 int rc;
676
677 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
678 if (!pmb) {
679 phba->link_state = LPFC_HBA_ERROR;
680 return -ENOMEM;
681 }
682
683 lpfc_printf_log(phba,
684 KERN_ERR, LOG_INIT,
685 "0491 Adapter Link is disabled.\n");
686 lpfc_down_link(phba, pmb);
687 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
688 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
689 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
690 lpfc_printf_log(phba,
691 KERN_ERR, LOG_INIT,
692 "2522 Adapter failed to issue DOWN_LINK"
693 " mbox command rc 0x%x\n", rc);
694
695 mempool_free(pmb, phba->mbox_mem_pool);
696 return -EIO;
697 }
698 return 0;
699}
700
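Both routines above are published as function pointers on the hba structure (see the lpfc_init_api_table_setup() change later in this patch), so a caller can toggle the link without caring about the SLI revision. A minimal sketch of such a caller, using only the two hooks this patch installs (the wrapper function itself is hypothetical):

	/* hypothetical wrapper -- illustrative sketch only */
	static int lpfc_set_link_state(struct lpfc_hba *phba, bool linkup)
	{
		if (linkup)
			return phba->lpfc_hba_init_link(phba);	/* INIT_LINK mbox */
		return phba->lpfc_hba_down_link(phba);		/* DOWN_LINK mbox */
	}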
701/**
606 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 702 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
607 * @phba: pointer to lpfc HBA data structure. 703 * @phba: pointer to lpfc HBA data structure.
608 * 704 *
@@ -2073,6 +2169,44 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
2073} 2169}
2074 2170
2075/** 2171/**
2172 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2173 * @phba: pointer to lpfc hba data structure.
2174 *
2175 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2176 * caller of this routine should already hold the host lock.
2177 **/
2178void
2179__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2180{
2181 /* Clear pending FCF rediscovery wait timer */
2182 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2183 /* Now, try to stop the timer */
2184 del_timer(&phba->fcf.redisc_wait);
2185}
2186
2187/**
2188 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2189 * @phba: pointer to lpfc hba data structure.
2190 *
2191 * This routine stops the SLI4 FCF rediscover wait timer if it is running.
2192 * It checks, with the host lock held, whether the FCF rediscovery wait
2193 * timer is pending before proceeding to disable the timer and clear the
2194 * wait timer pending flag.
2195 **/
2196void
2197lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2198{
2199 spin_lock_irq(&phba->hbalock);
2200 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2201 /* FCF rediscovery timer already fired or stopped */
2202 spin_unlock_irq(&phba->hbalock);
2203 return;
2204 }
2205 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2206 spin_unlock_irq(&phba->hbalock);
2207}
2208
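The double-underscore prefix follows the usual kernel convention: __lpfc_sli4_stop_fcf_redisc_wait_timer() assumes the caller already holds hbalock, while the plain-named wrapper takes and releases the lock itself. A sketch of when the lock-free variant is the right choice (the surrounding bookkeeping is hypothetical):

	spin_lock_irq(&phba->hbalock);
	/* ... other fcf_flag updates already under hbalock ... */
	if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
		__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);	/* no re-lock */
	spin_unlock_irq(&phba->hbalock);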
2209/**
2076 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2210 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2077 * @phba: pointer to lpfc hba data structure. 2211 * @phba: pointer to lpfc hba data structure.
2078 * 2212 *
@@ -2096,6 +2230,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
2096 break; 2230 break;
2097 case LPFC_PCI_DEV_OC: 2231 case LPFC_PCI_DEV_OC:
2098 /* Stop any OneConnect device specific driver timers */ 2232 /* Stop any OneConnect device specific driver timers */
2233 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2099 break; 2234 break;
2100 default: 2235 default:
2101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2236 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2228,6 +2363,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2228 struct lpfc_vport *vport = phba->pport; 2363 struct lpfc_vport *vport = phba->pport;
2229 struct lpfc_nodelist *ndlp, *next_ndlp; 2364 struct lpfc_nodelist *ndlp, *next_ndlp;
2230 struct lpfc_vport **vports; 2365 struct lpfc_vport **vports;
2366 struct Scsi_Host *shost;
2231 int i; 2367 int i;
2232 2368
2233 if (vport->fc_flag & FC_OFFLINE_MODE) 2369 if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -2241,11 +2377,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2241 vports = lpfc_create_vport_work_array(phba); 2377 vports = lpfc_create_vport_work_array(phba);
2242 if (vports != NULL) { 2378 if (vports != NULL) {
2243 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2379 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2244 struct Scsi_Host *shost;
2245
2246 if (vports[i]->load_flag & FC_UNLOADING) 2380 if (vports[i]->load_flag & FC_UNLOADING)
2247 continue; 2381 continue;
2382 shost = lpfc_shost_from_vport(vports[i]);
2383 spin_lock_irq(shost->host_lock);
2248 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2384 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2385 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2386 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2387 spin_unlock_irq(shost->host_lock);
2388
2249 shost = lpfc_shost_from_vport(vports[i]); 2389 shost = lpfc_shost_from_vport(vports[i]);
2250 list_for_each_entry_safe(ndlp, next_ndlp, 2390 list_for_each_entry_safe(ndlp, next_ndlp,
2251 &vports[i]->fc_nodes, 2391 &vports[i]->fc_nodes,
@@ -2401,7 +2541,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2401 shost->this_id = -1; 2541 shost->this_id = -1;
2402 shost->max_cmd_len = 16; 2542 shost->max_cmd_len = 16;
2403 if (phba->sli_rev == LPFC_SLI_REV4) { 2543 if (phba->sli_rev == LPFC_SLI_REV4) {
2404 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; 2544 shost->dma_boundary =
2545 phba->sli4_hba.pc_sli4_params.sge_supp_len;
2405 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2546 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2406 } 2547 }
2407 2548
@@ -2650,8 +2791,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
2650 lpfc_stop_hba_timers(phba); 2791 lpfc_stop_hba_timers(phba);
2651 phba->pport->work_port_events = 0; 2792 phba->pport->work_port_events = 0;
2652 phba->sli4_hba.intr_enable = 0; 2793 phba->sli4_hba.intr_enable = 0;
2653 /* Hard clear it for now, shall have more graceful way to wait later */
2654 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2655} 2794}
2656 2795
2657/** 2796/**
@@ -2703,7 +2842,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2703 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; 2842 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2704 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); 2843 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2705 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, 2844 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2706 phba->fcf.fcf_indx); 2845 phba->fcf.current_rec.fcf_indx);
2707 2846
2708 if (!phba->sli4_hba.intr_enable) 2847 if (!phba->sli4_hba.intr_enable)
2709 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2848 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -2727,6 +2866,57 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2727} 2866}
2728 2867
2729/** 2868/**
2869 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2870 * @phba: Pointer to hba for which this call is being executed.
2871 *
2872 * This routine starts the timer waiting for the FCF rediscovery to complete.
2873 **/
2874void
2875lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2876{
2877 unsigned long fcf_redisc_wait_tmo =
2878 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2879 /* Start fcf rediscovery wait period timer */
2880 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2881 spin_lock_irq(&phba->hbalock);
2882 /* Allow action to new fcf asynchronous event */
2883 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2884 /* Mark the FCF rediscovery pending state */
2885 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2886 spin_unlock_irq(&phba->hbalock);
2887}
2888
2889/**
2890 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2891 * @ptr: Map to lpfc_hba data structure pointer.
2892 *
2893 * This routine is invoked when waiting for FCF table rediscover has been
2894 * timed out. If new FCF record(s) has (have) been discovered during the
2895 * wait period, a new FCF event shall be added to the FCOE async event
2896 * list, and then worker thread shall be waked up for processing from the
2897 * worker thread context.
2898 **/
2899void
2900lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2901{
2902 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2903
2904 /* Don't send FCF rediscovery event if timer cancelled */
2905 spin_lock_irq(&phba->hbalock);
2906 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2907 spin_unlock_irq(&phba->hbalock);
2908 return;
2909 }
2910 /* Clear FCF rediscovery timer pending flag */
2911 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2912 /* FCF rediscovery event to worker thread */
2913 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2914 spin_unlock_irq(&phba->hbalock);
2915 /* wake up worker thread */
2916 lpfc_worker_wake_up(phba);
2917}
2918
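Paired with the timer setup added to lpfc_sli4_driver_resource_setup() below, the rediscovery wait follows the standard kernel-timer handshake; a condensed sketch of the lifecycle as wired up by this patch:

	init_timer(&phba->fcf.redisc_wait);		/* once, at resource setup */
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	lpfc_fcf_redisc_wait_start_timer(phba);		/* arm; sets FCF_REDISC_PEND */
	/* either the timer fires: FCF_REDISC_EVT is set and the worker runs
	 * lpfc_sli4_fcf_redisc_event_proc(), or the wait is cancelled via
	 * lpfc_sli4_stop_fcf_redisc_wait_timer() from lpfc_stop_hba_timers().
	 */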
2919/**
2730 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support 2920 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2731 * @phba: pointer to lpfc hba data structure. 2921 * @phba: pointer to lpfc hba data structure.
2732 * 2922 *
@@ -2978,6 +3168,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2978 bf_get(lpfc_acqe_link_physical, acqe_link); 3168 bf_get(lpfc_acqe_link_physical, acqe_link);
2979 phba->sli4_hba.link_state.fault = 3169 phba->sli4_hba.link_state.fault =
2980 bf_get(lpfc_acqe_link_fault, acqe_link); 3170 bf_get(lpfc_acqe_link_fault, acqe_link);
3171 phba->sli4_hba.link_state.logical_speed =
3172 bf_get(lpfc_acqe_qos_link_speed, acqe_link);
2981 3173
2982 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3174 /* Invoke the lpfc_handle_latt mailbox command callback function */
2983 lpfc_mbx_cmpl_read_la(phba, pmb); 3175 lpfc_mbx_cmpl_read_la(phba, pmb);
@@ -3007,22 +3199,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3007 struct lpfc_nodelist *ndlp; 3199 struct lpfc_nodelist *ndlp;
3008 struct Scsi_Host *shost; 3200 struct Scsi_Host *shost;
3009 uint32_t link_state; 3201 uint32_t link_state;
3202 int active_vlink_present;
3203 struct lpfc_vport **vports;
3204 int i;
3010 3205
3011 phba->fc_eventTag = acqe_fcoe->event_tag; 3206 phba->fc_eventTag = acqe_fcoe->event_tag;
3012 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3207 phba->fcoe_eventtag = acqe_fcoe->event_tag;
3013 switch (event_type) { 3208 switch (event_type) {
3014 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3209 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3210 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3015 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3211 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3016 "2546 New FCF found index 0x%x tag 0x%x\n", 3212 "2546 New FCF found index 0x%x tag 0x%x\n",
3017 acqe_fcoe->index, 3213 acqe_fcoe->index,
3018 acqe_fcoe->event_tag); 3214 acqe_fcoe->event_tag);
3019 /*
3020 * If the current FCF is in discovered state, or
3021 * FCF discovery is in progress do nothing.
3022 */
3023 spin_lock_irq(&phba->hbalock); 3215 spin_lock_irq(&phba->hbalock);
3024 if ((phba->fcf.fcf_flag & FCF_DISCOVERED) || 3216 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3025 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3217 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3218 /*
3219 * If the current FCF is in discovered state or
3220 * FCF discovery is in progress, do nothing.
3221 */
3222 spin_unlock_irq(&phba->hbalock);
3223 break;
3224 }
3225 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3226 /*
3227 * If fast FCF failover rescan event is pending,
3228 * do nothing.
3229 */
3026 spin_unlock_irq(&phba->hbalock); 3230 spin_unlock_irq(&phba->hbalock);
3027 break; 3231 break;
3028 } 3232 }
@@ -3049,7 +3253,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3049 " tag 0x%x\n", acqe_fcoe->index, 3253 " tag 0x%x\n", acqe_fcoe->index,
3050 acqe_fcoe->event_tag); 3254 acqe_fcoe->event_tag);
3051 /* If the event is not for currently used fcf do nothing */ 3255 /* If the event is not for currently used fcf do nothing */
3052 if (phba->fcf.fcf_indx != acqe_fcoe->index) 3256 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3053 break; 3257 break;
3054 /* 3258 /*
3055 * Currently, the driver supports only one FCF - so treat this as 3259 * Currently, the driver supports only one FCF - so treat this as
@@ -3074,14 +3278,58 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3074 if (!ndlp) 3278 if (!ndlp)
3075 break; 3279 break;
3076 shost = lpfc_shost_from_vport(vport); 3280 shost = lpfc_shost_from_vport(vport);
3281 if (phba->pport->port_state <= LPFC_FLOGI)
3282 break;
3283 /* If virtual link is not yet instantiated ignore CVL */
3284 if (vport->port_state <= LPFC_FDISC)
3285 break;
3286
3077 lpfc_linkdown_port(vport); 3287 lpfc_linkdown_port(vport);
3078 if (vport->port_type != LPFC_NPIV_PORT) { 3288 lpfc_cleanup_pending_mbox(vport);
3289 spin_lock_irq(shost->host_lock);
3290 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3291 spin_unlock_irq(shost->host_lock);
3292 active_vlink_present = 0;
3293
3294 vports = lpfc_create_vport_work_array(phba);
3295 if (vports) {
3296 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3297 i++) {
3298 if ((!(vports[i]->fc_flag &
3299 FC_VPORT_CVL_RCVD)) &&
3300 (vports[i]->port_state > LPFC_FDISC)) {
3301 active_vlink_present = 1;
3302 break;
3303 }
3304 }
3305 lpfc_destroy_vport_work_array(phba, vports);
3306 }
3307
3308 if (active_vlink_present) {
3309 /*
3310 * If there are other active VLinks present,
3311 * re-instantiate the Vlink using FDISC.
3312 */
3079 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3313 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3080 spin_lock_irq(shost->host_lock); 3314 spin_lock_irq(shost->host_lock);
3081 ndlp->nlp_flag |= NLP_DELAY_TMO; 3315 ndlp->nlp_flag |= NLP_DELAY_TMO;
3082 spin_unlock_irq(shost->host_lock); 3316 spin_unlock_irq(shost->host_lock);
3083 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 3317 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3084 vport->port_state = LPFC_FLOGI; 3318 vport->port_state = LPFC_FDISC;
3319 } else {
3320 /*
3321 * Otherwise, we request the port to rediscover
3322 * the entire FCF table for a fast recovery
3323 * from the possible case that the current FCF
3324 * is no longer valid.
3325 */
3326 rc = lpfc_sli4_redisc_fcf_table(phba);
3327 if (rc)
3328 /*
3329 * Last resort will be re-try on the
3330 * the current registered FCF entry.
3331 */
3332 lpfc_retry_pport_discovery(phba);
3085 } 3333 }
3086 break; 3334 break;
3087 default: 3335 default:
@@ -3158,6 +3406,34 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3158} 3406}
3159 3407
3160/** 3408/**
3409 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3410 * @phba: pointer to lpfc hba data structure.
3411 *
3412 * This routine is invoked by the worker thread to process the FCF table
3413 * rediscovery pending-completion event.
3414 **/
3415void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3416{
3417 int rc;
3418
3419 spin_lock_irq(&phba->hbalock);
3420 /* Clear FCF rediscovery timeout event */
3421 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3422 /* Clear driver fast failover FCF record flag */
3423 phba->fcf.failover_rec.flag = 0;
3424 /* Set state for FCF fast failover */
3425 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3426 spin_unlock_irq(&phba->hbalock);
3427
3428 /* Scan FCF table from the first entry to re-discover SAN */
3429 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
3430 if (rc)
3431 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3432 "2747 Post FCF rediscovery read FCF record "
3433 "failed 0x%x\n", rc);
3434}
3435
3436/**
3161 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3437 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3162 * @phba: pointer to lpfc hba data structure. 3438 * @phba: pointer to lpfc hba data structure.
3163 * @dev_grp: The HBA PCI-Device group number. 3439 * @dev_grp: The HBA PCI-Device group number.
@@ -3442,8 +3718,10 @@ static int
3442lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 3718lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3443{ 3719{
3444 struct lpfc_sli *psli; 3720 struct lpfc_sli *psli;
3445 int rc; 3721 LPFC_MBOXQ_t *mboxq;
3446 int i, hbq_count; 3722 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3723 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3724 struct lpfc_mqe *mqe;
3447 3725
3448 /* Before proceed, wait for POST done and device ready */ 3726 /* Before proceed, wait for POST done and device ready */
3449 rc = lpfc_sli4_post_status_check(phba); 3727 rc = lpfc_sli4_post_status_check(phba);
@@ -3472,6 +3750,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3472 init_timer(&phba->eratt_poll); 3750 init_timer(&phba->eratt_poll);
3473 phba->eratt_poll.function = lpfc_poll_eratt; 3751 phba->eratt_poll.function = lpfc_poll_eratt;
3474 phba->eratt_poll.data = (unsigned long) phba; 3752 phba->eratt_poll.data = (unsigned long) phba;
3753 /* FCF rediscover timer */
3754 init_timer(&phba->fcf.redisc_wait);
3755 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3756 phba->fcf.redisc_wait.data = (unsigned long)phba;
3757
3475 /* 3758 /*
3476 * We need to do a READ_CONFIG mailbox command here before 3759 * We need to do a READ_CONFIG mailbox command here before
3477 * calling lpfc_get_cfgparam. For VFs this will report the 3760 * calling lpfc_get_cfgparam. For VFs this will report the
@@ -3496,31 +3779,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3496 * used to create the sg_dma_buf_pool must be dynamically calculated. 3779 * used to create the sg_dma_buf_pool must be dynamically calculated.
3497 * 2 segments are added since the IOCB needs a command and response bde. 3780 * 2 segments are added since the IOCB needs a command and response bde.
3498 * To ensure that the scsi sgl does not cross a 4k page boundary only 3781 * To ensure that the scsi sgl does not cross a 4k page boundary only
3499 * sgl sizes of 1k, 2k, 4k, and 8k are supported. 3782 * sgl sizes must be a power of 2.
3500 * Table of sgl sizes and seg_cnt:
3501 * sgl size, sg_seg_cnt total seg
3502 * 1k 50 52
3503 * 2k 114 116
3504 * 4k 242 244
3505 * 8k 498 500
3506 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3507 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3508 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3509 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3510 */ 3783 */
3511 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) 3784 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3512 phba->cfg_sg_seg_cnt = 50; 3785 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3513 else if (phba->cfg_sg_seg_cnt <= 114) 3786 /* Feature Level 1 hardware is limited to 2 pages */
3514 phba->cfg_sg_seg_cnt = 114; 3787 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3515 else if (phba->cfg_sg_seg_cnt <= 242) 3788 LPFC_SLI_INTF_FEATURELEVEL1_1))
3516 phba->cfg_sg_seg_cnt = 242; 3789 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3517 else 3790 else
3518 phba->cfg_sg_seg_cnt = 498; 3791 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3519 3792 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3520 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) 3793 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3521 + sizeof(struct fcp_rsp); 3794 dma_buf_size = dma_buf_size << 1)
3522 phba->cfg_sg_dma_buf_size += 3795 ;
3523 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); 3796 if (dma_buf_size == max_buf_size)
3797 phba->cfg_sg_seg_cnt = (dma_buf_size -
3798 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3799 (2 * sizeof(struct sli4_sge))) /
3800 sizeof(struct sli4_sge);
3801 phba->cfg_sg_dma_buf_size = dma_buf_size;
3524 3802
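As a worked example of the sizing loop, assume the conventional sizes (32-byte fcp_cmnd, 160-byte fcp_rsp, 16-byte sli4_sge, and a 1 KB LPFC_SLI4_MIN_BUF_SIZE -- all assumed here for illustration) and cfg_sg_seg_cnt = 64:

	buf_size     = 32 + 160 + (64 + 2) * 16 = 1248 bytes
	dma_buf_size = 1024 -> 2048	(first power of two >= 1248)

cfg_sg_dma_buf_size then becomes 2048; cfg_sg_seg_cnt is recomputed downward only in the capped case where dma_buf_size reaches max_buf_size.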
3525 /* Initialize buffer queue management fields */ 3803 /* Initialize buffer queue management fields */
3526 hbq_count = lpfc_sli_hbq_count(); 3804 hbq_count = lpfc_sli_hbq_count();
@@ -3638,6 +3916,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3638 goto out_free_fcp_eq_hdl; 3916 goto out_free_fcp_eq_hdl;
3639 } 3917 }
3640 3918
3919 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3920 GFP_KERNEL);
3921 if (!mboxq) {
3922 rc = -ENOMEM;
3923 goto out_free_fcp_eq_hdl;
3924 }
3925
3926 /* Get the Supported Pages. It is always available. */
3927 lpfc_supported_pages(mboxq);
3928 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3929 if (unlikely(rc)) {
3930 rc = -EIO;
3931 mempool_free(mboxq, phba->mbox_mem_pool);
3932 goto out_free_fcp_eq_hdl;
3933 }
3934
3935 mqe = &mboxq->u.mqe;
3936 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
3937 LPFC_MAX_SUPPORTED_PAGES);
3938 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
3939 switch (pn_page[i]) {
3940 case LPFC_SLI4_PARAMETERS:
3941 phba->sli4_hba.pc_sli4_params.supported = 1;
3942 break;
3943 default:
3944 break;
3945 }
3946 }
3947
3948 /* Read the port's SLI4 Parameters capabilities if supported. */
3949 if (phba->sli4_hba.pc_sli4_params.supported)
3950 rc = lpfc_pc_sli4_params_get(phba, mboxq);
3951 mempool_free(mboxq, phba->mbox_mem_pool);
3952 if (rc) {
3953 rc = -EIO;
3954 goto out_free_fcp_eq_hdl;
3955 }
3641 return rc; 3956 return rc;
3642 3957
3643out_free_fcp_eq_hdl: 3958out_free_fcp_eq_hdl:
@@ -3733,6 +4048,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3733int 4048int
3734lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4049lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3735{ 4050{
4051 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4052 phba->lpfc_hba_down_link = lpfc_hba_down_link;
3736 switch (dev_grp) { 4053 switch (dev_grp) {
3737 case LPFC_PCI_DEV_LP: 4054 case LPFC_PCI_DEV_LP:
3738 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4055 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -4291,7 +4608,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4291 return NULL; 4608 return NULL;
4292 } 4609 }
4293 4610
4294 mutex_init(&phba->ct_event_mutex); 4611 spin_lock_init(&phba->ct_ev_lock);
4295 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4612 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4296 4613
4297 return phba; 4614 return phba;
@@ -4641,7 +4958,7 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4641int 4958int
4642lpfc_sli4_post_status_check(struct lpfc_hba *phba) 4959lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4643{ 4960{
4644 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; 4961 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
4645 int i, port_error = -ENODEV; 4962 int i, port_error = -ENODEV;
4646 4963
4647 if (!phba->sli4_hba.STAregaddr) 4964 if (!phba->sli4_hba.STAregaddr)
@@ -4677,14 +4994,21 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4677 bf_get(lpfc_hst_state_port_status, &sta_reg)); 4994 bf_get(lpfc_hst_state_port_status, &sta_reg));
4678 4995
4679 /* Log device information */ 4996 /* Log device information */
4680 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); 4997 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
4681 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4998 if (bf_get(lpfc_sli_intf_valid,
4682 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 4999 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
4683 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4684 bf_get(lpfc_scratchpad_chiptype, &scratchpad), 5001 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4685 bf_get(lpfc_scratchpad_slirev, &scratchpad), 5002 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4686 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 5003 bf_get(lpfc_sli_intf_sli_family,
4687 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 5004 &phba->sli4_hba.sli_intf),
5005 bf_get(lpfc_sli_intf_slirev,
5006 &phba->sli4_hba.sli_intf),
5007 bf_get(lpfc_sli_intf_featurelevel1,
5008 &phba->sli4_hba.sli_intf),
5009 bf_get(lpfc_sli_intf_featurelevel2,
5010 &phba->sli4_hba.sli_intf));
5011 }
4688 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); 5012 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
4689 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); 5013 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
4690 /* With unrecoverable error, log the error message and return error */ 5014 /* With unrecoverable error, log the error message and return error */
@@ -4723,8 +5047,8 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4723 LPFC_UE_MASK_LO; 5047 LPFC_UE_MASK_LO;
4724 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5048 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4725 LPFC_UE_MASK_HI; 5049 LPFC_UE_MASK_HI;
4726 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + 5050 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
4727 LPFC_SCRATCHPAD; 5051 LPFC_SLI_INTF;
4728} 5052}
4729 5053
4730/** 5054/**
@@ -5999,7 +6323,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5999 spin_lock_irqsave(&phba->hbalock, flags); 6323 spin_lock_irqsave(&phba->hbalock, flags);
6000 /* Mark the FCFI as no longer registered */ 6324 /* Mark the FCFI as no longer registered */
6001 phba->fcf.fcf_flag &= 6325 phba->fcf.fcf_flag &=
6002 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); 6326 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6003 spin_unlock_irqrestore(&phba->hbalock, flags); 6327 spin_unlock_irqrestore(&phba->hbalock, flags);
6004 } 6328 }
6005} 6329}
@@ -6039,16 +6363,20 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6039 6363
6040 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6364 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6041 * number of bytes required by each mapping. They actually map 6365 * number of bytes required by each mapping. They actually map
6042 * to PCI BAR regions 1, 2, and 4 on the SLI4 device. 6366 * to PCI BAR regions 0 or 1, 2, and 4 on the SLI4 device.
6043 */ 6367 */
6044 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); 6368 if (pci_resource_start(pdev, 0)) {
6045 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); 6369 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6046 6370 bar0map_len = pci_resource_len(pdev, 0);
6047 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); 6371 } else {
6048 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); 6372 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6373 bar0map_len = pci_resource_len(pdev, 1);
6374 }
6375 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6376 bar1map_len = pci_resource_len(pdev, 2);
6049 6377
6050 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); 6378 phba->pci_bar2_map = pci_resource_start(pdev, 4);
6051 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); 6379 bar2map_len = pci_resource_len(pdev, 4);
6052 6380
6053 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6381 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6054 phba->sli4_hba.conf_regs_memmap_p = 6382 phba->sli4_hba.conf_regs_memmap_p =
@@ -6793,6 +7121,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6793 phba->pport->work_port_events = 0; 7121 phba->pport->work_port_events = 0;
6794} 7122}
6795 7123
7124/**
7125 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7126 * @phba: Pointer to HBA context object.
7127 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7128 *
7129 * This function is called in the SLI4 code path to read the port's
7130 * sli4 capabilities.
7131 *
7132 * This function may be called from any context that can block-wait
7133 * for the completion. The expectation is that this routine is called
7134 * typically from probe_one or from the online routine.
7135 **/
7136int
7137lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7138{
7139 int rc;
7140 struct lpfc_mqe *mqe;
7141 struct lpfc_pc_sli4_params *sli4_params;
7142 uint32_t mbox_tmo;
7143
7144 rc = 0;
7145 mqe = &mboxq->u.mqe;
7146
7147 /* Read the port's SLI4 Parameters port capabilities */
7148 lpfc_sli4_params(mboxq);
7149 if (!phba->sli4_hba.intr_enable)
7150 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7151 else {
7152 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7153 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7154 }
7155
7156 if (unlikely(rc))
7157 return 1;
7158
7159 sli4_params = &phba->sli4_hba.pc_sli4_params;
7160 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7161 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7162 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7163 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7164 &mqe->un.sli4_params);
7165 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7166 &mqe->un.sli4_params);
7167 sli4_params->proto_types = mqe->un.sli4_params.word3;
7168 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7169 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7170 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7171 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7172 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7173 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7174 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7175 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7176 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7177 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7178 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7179 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7180 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7181 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7182 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7183 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7184 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7185 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7186 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7187 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7188 return rc;
7189}
7190
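At probe time the two PORT_CAPABILITIES queries are chained: the driver first asks which pages the port supports and reads SLI4_PARAMETERS only when that page is advertised. A condensed sketch of the sequence, mirroring the lpfc_sli4_driver_resource_setup() changes earlier in this patch:

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_supported_pages(mboxq);			/* page-list query */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* ... scan pn_page[] for LPFC_SLI4_PARAMETERS ... */
	if (phba->sli4_hba.pc_sli4_params.supported)
		rc = lpfc_pc_sli4_params_get(phba, mboxq);	/* fills pc_sli4_params */
	mempool_free(mboxq, phba->mbox_mem_pool);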
6796/** 7191/**
6797 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7192 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6798 * @pdev: pointer to PCI device 7193 * @pdev: pointer to PCI device
@@ -7134,6 +7529,12 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7134 pci_set_power_state(pdev, PCI_D0); 7529 pci_set_power_state(pdev, PCI_D0);
7135 pci_restore_state(pdev); 7530 pci_restore_state(pdev);
7136 7531
7532 /*
7533 * The new kernel behavior of the pci_restore_state() API clears the
7534 * device saved_state flag, so the restored state must be saved again.
7535 */
7536 pci_save_state(pdev);
7537
7137 if (pdev->is_busmaster) 7538 if (pdev->is_busmaster)
7138 pci_set_master(pdev); 7539 pci_set_master(pdev);
7139 7540
@@ -7317,6 +7718,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7317 } 7718 }
7318 7719
7319 pci_restore_state(pdev); 7720 pci_restore_state(pdev);
7721
7722 /*
7723 * The new kernel behavior of the pci_restore_state() API clears the
7724 * device saved_state flag, so the restored state must be saved again.
7725 */
7726 pci_save_state(pdev);
7727
7320 if (pdev->is_busmaster) 7728 if (pdev->is_busmaster)
7321 pci_set_master(pdev); 7729 pci_set_master(pdev);
7322 7730
@@ -7726,6 +8134,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7726 /* Restore device state from PCI config space */ 8134 /* Restore device state from PCI config space */
7727 pci_set_power_state(pdev, PCI_D0); 8135 pci_set_power_state(pdev, PCI_D0);
7728 pci_restore_state(pdev); 8136 pci_restore_state(pdev);
8137
8138 /*
8139 * The new kernel behavior of the pci_restore_state() API clears the
8140 * device saved_state flag, so the restored state must be saved again.
8141 */
8142 pci_save_state(pdev);
8143
7729 if (pdev->is_busmaster) 8144 if (pdev->is_busmaster)
7730 pci_set_master(pdev); 8145 pci_set_master(pdev);
7731 8146
@@ -7845,11 +8260,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7845 int rc; 8260 int rc;
7846 struct lpfc_sli_intf intf; 8261 struct lpfc_sli_intf intf;
7847 8262
7848 if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0)) 8263 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
7849 return -ENODEV; 8264 return -ENODEV;
7850 8265
7851 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8266 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7852 (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4)) 8267 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
7853 rc = lpfc_pci_probe_one_s4(pdev, pid); 8268 rc = lpfc_pci_probe_one_s4(pdev, pid);
7854 else 8269 else
7855 rc = lpfc_pci_probe_one_s3(pdev, pid); 8270 rc = lpfc_pci_probe_one_s3(pdev, pid);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a9afd8b94b6a..6c4dce1a30ca 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1707,7 +1707,8 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1707 alloc_len - sizeof(union lpfc_sli4_cfg_shdr); 1707 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1708 } 1708 }
1709 /* The sub-header is in DMA memory, which needs endian conversion */ 1709 /* The sub-header is in DMA memory, which needs endian conversion */
1710 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, 1710 if (cfg_shdr)
1711 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1711 sizeof(union lpfc_sli4_cfg_shdr)); 1712 sizeof(union lpfc_sli4_cfg_shdr));
1712 1713
1713 return alloc_len; 1714 return alloc_len;
@@ -1747,6 +1748,65 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1747} 1748}
1748 1749
1749/** 1750/**
1751 * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd
1752 * @phba: pointer to lpfc hba data structure.
1753 * @mboxq: pointer to lpfc mbox command.
1754 * @fcf_index: index to the fcf table.
1755 *
1756 * This routine allocates and constructs a non-embedded mailbox command
1757 * for reading the FCF table entry referred to by @fcf_index.
1758 *
1759 * Return: 0 if successful, -ENOMEM otherwise.
1760 **/
1761int
1762lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba,
1763 struct lpfcMboxq *mboxq,
1764 uint16_t fcf_index)
1765{
1766 void *virt_addr;
1767 dma_addr_t phys_addr;
1768 uint8_t *bytep;
1769 struct lpfc_mbx_sge sge;
1770 uint32_t alloc_len, req_len;
1771 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1772
1773 if (!mboxq)
1774 return -ENOMEM;
1775
1776 req_len = sizeof(struct fcf_record) +
1777 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
1778
1779 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
1780 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1781 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
1782 LPFC_SLI4_MBX_NEMBED);
1783
1784 if (alloc_len < req_len) {
1785 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1786 "0291 Allocated DMA memory size (x%x) is "
1787 "less than the requested DMA memory "
1788 "size (x%x)\n", alloc_len, req_len);
1789 return -ENOMEM;
1790 }
1791
1792 /* Get the first SGE entry from the non-embedded DMA memory. This
1793 * routine only uses a single SGE.
1794 */
1795 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1796 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1797 virt_addr = mboxq->sge_array->addr[0];
1798 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1799
1800 /* Set up command fields */
1801 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
1802 /* Perform necessary endian conversion */
1803 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1804 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
1805
1806 return 0;
1807}
1808
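A sketch of the intended call pattern for the helper above, based on the FCF scan path referenced elsewhere in this patch (error handling trimmed; the completion handler name is hypothetical):

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
	if (!rc) {
		mboxq->vport = phba->pport;
		mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;	/* hypothetical */
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	}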
1809/**
1750 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox 1810 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
1751 * @mboxq: pointer to lpfc mbox command. 1811 * @mboxq: pointer to lpfc mbox command.
1752 * 1812 *
@@ -1946,13 +2006,14 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1946 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); 2006 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1947 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); 2007 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1948 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); 2008 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1949 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx); 2009 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2010 phba->fcf.current_rec.fcf_indx);
1950 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ 2011 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1951 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, 2012 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
1952 (~phba->fcf.addr_mode) & 0x3); 2013 if (phba->fcf.current_rec.vlan_id != 0xFFFF) {
1953 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1954 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); 2014 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1955 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id); 2015 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2016 phba->fcf.current_rec.vlan_id);
1956 } 2017 }
1957} 2018}
1958 2019
@@ -1992,3 +2053,41 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1992 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); 2053 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
1993 resume_rpi->event_tag = ndlp->phba->fc_eventTag; 2054 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
1994} 2055}
2056
2057/**
2058 * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
2059 * mailbox command.
2060 * @mbox: pointer to lpfc mbox command to initialize.
2061 *
2062 * The PORT_CAPABILITIES supported pages mailbox command is issued to
2063 * retrieve the particular feature pages supported by the port.
2064 **/
2065void
2066lpfc_supported_pages(struct lpfcMboxq *mbox)
2067{
2068 struct lpfc_mbx_supp_pages *supp_pages;
2069
2070 memset(mbox, 0, sizeof(*mbox));
2071 supp_pages = &mbox->u.mqe.un.supp_pages;
2072 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2073 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2074}
2075
2076/**
2077 * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
2078 * mailbox command.
2079 * @mbox: pointer to lpfc mbox command to initialize.
2080 *
2081 * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
2082 * retrieve the particular SLI4 features supported by the port.
2083 **/
2084void
2085lpfc_sli4_params(struct lpfcMboxq *mbox)
2086{
2087 struct lpfc_mbx_sli4_params *sli4_params;
2088
2089 memset(mbox, 0, sizeof(*mbox));
2090 sli4_params = &mbox->u.mqe.un.sli4_params;
2091 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2092 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2093}
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index d655ed3eebef..f3cfbe2ce986 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2008 Emulex. All rights reserved. * 4 * Copyright (C) 2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -177,23 +177,3 @@ struct temp_event {
177 uint32_t data; 177 uint32_t data;
178}; 178};
179 179
180/* bsg definitions */
181#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
182#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
183
184struct set_ct_event {
185 uint32_t command;
186 uint32_t ev_req_id;
187 uint32_t ev_reg_id;
188};
189
190struct get_ct_event {
191 uint32_t command;
192 uint32_t ev_reg_id;
193 uint32_t ev_req_id;
194};
195
196struct get_ct_event_reply {
197 uint32_t immed_data;
198 uint32_t type;
199};
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2ed6af194932..d20ae6b3b3cf 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -62,7 +62,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
62 62
63int 63int
64lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 64lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
65 struct serv_parm * sp, uint32_t class) 65 struct serv_parm *sp, uint32_t class, int flogi)
66{ 66{
67 volatile struct serv_parm *hsp = &vport->fc_sparam; 67 volatile struct serv_parm *hsp = &vport->fc_sparam;
68 uint16_t hsp_value, ssp_value = 0; 68 uint16_t hsp_value, ssp_value = 0;
@@ -75,49 +75,56 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
75 * correcting the byte values. 75 * correcting the byte values.
76 */ 76 */
77 if (sp->cls1.classValid) { 77 if (sp->cls1.classValid) {
78 hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) | 78 if (!flogi) {
79 hsp->cls1.rcvDataSizeLsb; 79 hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
80 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | 80 hsp->cls1.rcvDataSizeLsb);
81 sp->cls1.rcvDataSizeLsb; 81 ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
82 if (!ssp_value) 82 sp->cls1.rcvDataSizeLsb);
83 goto bad_service_param; 83 if (!ssp_value)
84 if (ssp_value > hsp_value) { 84 goto bad_service_param;
85 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; 85 if (ssp_value > hsp_value) {
86 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; 86 sp->cls1.rcvDataSizeLsb =
87 hsp->cls1.rcvDataSizeLsb;
88 sp->cls1.rcvDataSizeMsb =
89 hsp->cls1.rcvDataSizeMsb;
90 }
87 } 91 }
88 } else if (class == CLASS1) { 92 } else if (class == CLASS1)
89 goto bad_service_param; 93 goto bad_service_param;
90 }
91
92 if (sp->cls2.classValid) { 94 if (sp->cls2.classValid) {
93 hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) | 95 if (!flogi) {
94 hsp->cls2.rcvDataSizeLsb; 96 hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
95 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | 97 hsp->cls2.rcvDataSizeLsb);
96 sp->cls2.rcvDataSizeLsb; 98 ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
97 if (!ssp_value) 99 sp->cls2.rcvDataSizeLsb);
98 goto bad_service_param; 100 if (!ssp_value)
99 if (ssp_value > hsp_value) { 101 goto bad_service_param;
100 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; 102 if (ssp_value > hsp_value) {
101 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; 103 sp->cls2.rcvDataSizeLsb =
104 hsp->cls2.rcvDataSizeLsb;
105 sp->cls2.rcvDataSizeMsb =
106 hsp->cls2.rcvDataSizeMsb;
107 }
102 } 108 }
103 } else if (class == CLASS2) { 109 } else if (class == CLASS2)
104 goto bad_service_param; 110 goto bad_service_param;
105 }
106
107 if (sp->cls3.classValid) { 111 if (sp->cls3.classValid) {
108 hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) | 112 if (!flogi) {
109 hsp->cls3.rcvDataSizeLsb; 113 hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
110 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | 114 hsp->cls3.rcvDataSizeLsb);
111 sp->cls3.rcvDataSizeLsb; 115 ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
112 if (!ssp_value) 116 sp->cls3.rcvDataSizeLsb);
113 goto bad_service_param; 117 if (!ssp_value)
114 if (ssp_value > hsp_value) { 118 goto bad_service_param;
115 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; 119 if (ssp_value > hsp_value) {
116 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; 120 sp->cls3.rcvDataSizeLsb =
121 hsp->cls3.rcvDataSizeLsb;
122 sp->cls3.rcvDataSizeMsb =
123 hsp->cls3.rcvDataSizeMsb;
124 }
117 } 125 }
118 } else if (class == CLASS3) { 126 } else if (class == CLASS3)
119 goto bad_service_param; 127 goto bad_service_param;
120 }
121 128
122 /* 129 /*
123 * Preserve the upper four bits of the MSB from the PLOGI response. 130 * Preserve the upper four bits of the MSB from the PLOGI response.
@@ -247,7 +254,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
247 int rc; 254 int rc;
248 255
249 memset(&stat, 0, sizeof (struct ls_rjt)); 256 memset(&stat, 0, sizeof (struct ls_rjt));
250 if (vport->port_state <= LPFC_FLOGI) { 257 if (vport->port_state <= LPFC_FDISC) {
251 /* Before responding to PLOGI, check for pt2pt mode. 258 /* Before responding to PLOGI, check for pt2pt mode.
252 * If we are pt2pt, with an outstanding FLOGI, abort 259 * If we are pt2pt, with an outstanding FLOGI, abort
253 * the FLOGI and resend it first. 260 * the FLOGI and resend it first.
@@ -295,7 +302,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
295 NULL); 302 NULL);
296 return 0; 303 return 0;
297 } 304 }
298 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { 305 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
299 /* Reject this request because invalid parameters */ 306 /* Reject this request because invalid parameters */
300 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 307 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
301 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 308 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
@@ -831,7 +838,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
831 "0142 PLOGI RSP: Invalid WWN.\n"); 838 "0142 PLOGI RSP: Invalid WWN.\n");
832 goto out; 839 goto out;
833 } 840 }
834 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) 841 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
835 goto out; 842 goto out;
836 /* PLOGI chkparm OK */ 843 /* PLOGI chkparm OK */
837 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 844 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a246410ce9df..7f21b47db791 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -626,6 +626,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
626 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 626 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
627 if (psb->cur_iocbq.sli4_xritag == xri) { 627 if (psb->cur_iocbq.sli4_xritag == xri) {
628 list_del(&psb->list); 628 list_del(&psb->list);
629 psb->exch_busy = 0;
629 psb->status = IOSTAT_SUCCESS; 630 psb->status = IOSTAT_SUCCESS;
630 spin_unlock_irqrestore( 631 spin_unlock_irqrestore(
631 &phba->sli4_hba.abts_scsi_buf_list_lock, 632 &phba->sli4_hba.abts_scsi_buf_list_lock,
@@ -688,11 +689,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
688 list); 689 list);
689 if (status) { 690 if (status) {
690 /* Put this back on the abort scsi list */ 691 /* Put this back on the abort scsi list */
691 psb->status = IOSTAT_LOCAL_REJECT; 692 psb->exch_busy = 1;
692 psb->result = IOERR_ABORT_REQUESTED;
693 rc++; 693 rc++;
694 } else 694 } else {
695 psb->exch_busy = 0;
695 psb->status = IOSTAT_SUCCESS; 696 psb->status = IOSTAT_SUCCESS;
697 }
696 /* Put it back into the SCSI buffer list */ 698 /* Put it back into the SCSI buffer list */
697 lpfc_release_scsi_buf_s4(phba, psb); 699 lpfc_release_scsi_buf_s4(phba, psb);
698 } 700 }
@@ -796,19 +798,17 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
796 */ 798 */
797 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 799 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
798 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 800 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
799 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
800 bf_set(lpfc_sli4_sge_last, sgl, 0); 801 bf_set(lpfc_sli4_sge_last, sgl, 0);
801 sgl->word2 = cpu_to_le32(sgl->word2); 802 sgl->word2 = cpu_to_le32(sgl->word2);
802 sgl->word3 = cpu_to_le32(sgl->word3); 803 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
803 sgl++; 804 sgl++;
804 805
805 /* Setup the physical region for the FCP RSP */ 806 /* Setup the physical region for the FCP RSP */
806 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 807 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
807 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 808 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
808 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
809 bf_set(lpfc_sli4_sge_last, sgl, 1); 809 bf_set(lpfc_sli4_sge_last, sgl, 1);
810 sgl->word2 = cpu_to_le32(sgl->word2); 810 sgl->word2 = cpu_to_le32(sgl->word2);
811 sgl->word3 = cpu_to_le32(sgl->word3); 811 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
812 812
813 /* 813 /*
814 * Since the IOCB for the FCP I/O is built into this 814 * Since the IOCB for the FCP I/O is built into this
@@ -839,11 +839,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
839 psb->cur_iocbq.sli4_xritag); 839 psb->cur_iocbq.sli4_xritag);
840 if (status) { 840 if (status) {
841 /* Put this back on the abort scsi list */ 841 /* Put this back on the abort scsi list */
842 psb->status = IOSTAT_LOCAL_REJECT; 842 psb->exch_busy = 1;
843 psb->result = IOERR_ABORT_REQUESTED;
844 rc++; 843 rc++;
845 } else 844 } else {
845 psb->exch_busy = 0;
846 psb->status = IOSTAT_SUCCESS; 846 psb->status = IOSTAT_SUCCESS;
847 }
847 /* Put it back into the SCSI buffer list */ 848 /* Put it back into the SCSI buffer list */
848 lpfc_release_scsi_buf_s4(phba, psb); 849 lpfc_release_scsi_buf_s4(phba, psb);
849 break; 850 break;
@@ -857,11 +858,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
857 list); 858 list);
858 if (status) { 859 if (status) {
859 /* Put this back on the abort scsi list */ 860 /* Put this back on the abort scsi list */
860 psb->status = IOSTAT_LOCAL_REJECT; 861 psb->exch_busy = 1;
861 psb->result = IOERR_ABORT_REQUESTED;
862 rc++; 862 rc++;
863 } else 863 } else {
864 psb->exch_busy = 0;
864 psb->status = IOSTAT_SUCCESS; 865 psb->status = IOSTAT_SUCCESS;
866 }
865 /* Put it back into the SCSI buffer list */ 867 /* Put it back into the SCSI buffer list */
866 lpfc_release_scsi_buf_s4(phba, psb); 868 lpfc_release_scsi_buf_s4(phba, psb);
867 } 869 }
@@ -951,8 +953,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
951{ 953{
952 unsigned long iflag = 0; 954 unsigned long iflag = 0;
953 955
954 if (psb->status == IOSTAT_LOCAL_REJECT 956 if (psb->exch_busy) {
955 && psb->result == IOERR_ABORT_REQUESTED) {
956 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 957 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
957 iflag); 958 iflag);
958 psb->pCmd = NULL; 959 psb->pCmd = NULL;
@@ -1869,7 +1870,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1869 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 1870 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1870 physaddr = sg_dma_address(sgel); 1871 physaddr = sg_dma_address(sgel);
1871 dma_len = sg_dma_len(sgel); 1872 dma_len = sg_dma_len(sgel);
1872 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1873 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 1873 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1874 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 1874 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1875 if ((num_bde + 1) == nseg) 1875 if ((num_bde + 1) == nseg)
@@ -1878,7 +1878,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1878 bf_set(lpfc_sli4_sge_last, sgl, 0); 1878 bf_set(lpfc_sli4_sge_last, sgl, 0);
1879 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 1879 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1880 sgl->word2 = cpu_to_le32(sgl->word2); 1880 sgl->word2 = cpu_to_le32(sgl->word2);
1881 sgl->word3 = cpu_to_le32(sgl->word3); 1881 sgl->sge_len = cpu_to_le32(dma_len);
1882 dma_offset += dma_len; 1882 dma_offset += dma_len;
1883 sgl++; 1883 sgl++;
1884 } 1884 }
@@ -2221,6 +2221,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2221 2221
2222 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 2222 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2223 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 2223 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
2224 /* pick up SLI4 exchange busy status from HBA */
2225 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
2226
2224 if (pnode && NLP_CHK_NODE_ACT(pnode)) 2227 if (pnode && NLP_CHK_NODE_ACT(pnode))
2225 atomic_dec(&pnode->cmd_pending); 2228 atomic_dec(&pnode->cmd_pending);
2226 2229
@@ -2637,6 +2640,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2637 } 2640 }
2638 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; 2641 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2639 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 2642 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2643 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2640 return 0; 2644 return 0;
2641} 2645}
2642 2646
@@ -2695,6 +2699,13 @@ lpfc_info(struct Scsi_Host *host)
2695 " port %s", 2699 " port %s",
2696 phba->Port); 2700 phba->Port);
2697 } 2701 }
2702 len = strlen(lpfcinfobuf);
2703 if (phba->sli4_hba.link_state.logical_speed) {
2704 snprintf(lpfcinfobuf + len,
2705 384-len,
2706 " Logical Link Speed: %d Mbps",
2707 phba->sli4_hba.link_state.logical_speed * 10);
2708 }
2698 } 2709 }
2699 return lpfcinfobuf; 2710 return lpfcinfobuf;
2700} 2711}
@@ -2990,6 +3001,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2990 3001
2991 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 3002 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
2992 abtsiocb->fcp_wqidx = iocb->fcp_wqidx; 3003 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
3004 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
2993 3005
2994 if (lpfc_is_link_up(phba)) 3006 if (lpfc_is_link_up(phba))
2995 icmd->ulpCommand = CMD_ABORT_XRI_CN; 3007 icmd->ulpCommand = CMD_ABORT_XRI_CN;
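
The lpfc_scsi.c hunks above replace the old IOSTAT_LOCAL_REJECT / IOERR_ABORT_REQUESTED test with a single exch_busy flag mirrored from the SLI4 WCQE XB bit: while the HBA still owns the exchange, the buffer's SGL (XRI) is parked on the aborted list instead of being freed. A minimal userspace sketch of that release-path decision (stand-in types for illustration, not driver code):

#include <stdio.h>

/* Hypothetical stand-in for the driver's buffer state. */
struct scsi_buf {
	int exch_busy;		/* mirrored from the WCQE XB bit on completion */
	const char *tag;
};

/* While the HBA reports the exchange busy (XB set), the XRI/SGL must
 * sit on the aborted list and only return to the free list after the
 * abort XRI event arrives from the HBA. */
static const char *release_target(const struct scsi_buf *psb)
{
	return psb->exch_busy ? "abts_scsi_buf_list" : "scsi_buf_list";
}

int main(void)
{
	struct scsi_buf a = { .exch_busy = 1, .tag = "aborted I/O" };
	struct scsi_buf b = { .exch_busy = 0, .tag = "normal I/O" };

	printf("%s -> %s\n", a.tag, release_target(&a));
	printf("%s -> %s\n", b.tag, release_target(&b));
	return 0;
}
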
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 65dfc8bd5b49..5932273870a5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -118,6 +118,7 @@ struct lpfc_scsi_buf {
118 118
119 uint32_t timeout; 119 uint32_t timeout;
120 120
121 uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
121 uint16_t status; /* From IOCB Word 7- ulpStatus */ 122 uint16_t status; /* From IOCB Word 7- ulpStatus */
122 uint32_t result; /* From IOCB Word 4. */ 123 uint32_t result; /* From IOCB Word 4. */
123 124
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 589549b2bf0e..35e3b96d4e07 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -580,10 +580,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
580 else 580 else
581 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 581 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
582 if (sglq) { 582 if (sglq) {
583 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED 583 if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
584 && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
585 && (iocbq->iocb.un.ulpWord[4]
586 == IOERR_ABORT_REQUESTED))) {
587 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 584 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
588 iflag); 585 iflag);
589 list_add(&sglq->list, 586 list_add(&sglq->list,
@@ -764,10 +761,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
764 case DSSCMD_IWRITE64_CX: 761 case DSSCMD_IWRITE64_CX:
765 case DSSCMD_IREAD64_CR: 762 case DSSCMD_IREAD64_CR:
766 case DSSCMD_IREAD64_CX: 763 case DSSCMD_IREAD64_CX:
767 case DSSCMD_INVALIDATE_DEK:
768 case DSSCMD_SET_KEK:
769 case DSSCMD_GET_KEK_ID:
770 case DSSCMD_GEN_XFER:
771 type = LPFC_SOL_IOCB; 764 type = LPFC_SOL_IOCB;
772 break; 765 break;
773 case CMD_ABORT_XRI_CN: 766 case CMD_ABORT_XRI_CN:
@@ -1717,6 +1710,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1717 struct lpfc_dmabuf *mp; 1710 struct lpfc_dmabuf *mp;
1718 uint16_t rpi, vpi; 1711 uint16_t rpi, vpi;
1719 int rc; 1712 int rc;
1713 struct lpfc_vport *vport = pmb->vport;
1720 1714
1721 mp = (struct lpfc_dmabuf *) (pmb->context1); 1715 mp = (struct lpfc_dmabuf *) (pmb->context1);
1722 1716
@@ -1745,6 +1739,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1745 return; 1739 return;
1746 } 1740 }
1747 1741
1742 /* Unreg VPI, if the REG_VPI succeed after VLink failure */
1743 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
1744 !(phba->pport->load_flag & FC_UNLOADING) &&
1745 !pmb->u.mb.mbxStatus) {
1746 lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
1747 pmb->vport = vport;
1748 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1749 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1750 if (rc != MBX_NOT_FINISHED)
1751 return;
1752 }
1753
1748 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 1754 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1749 lpfc_sli4_mbox_cmd_free(phba, pmb); 1755 lpfc_sli4_mbox_cmd_free(phba, pmb);
1750 else 1756 else
@@ -2228,9 +2234,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2228 * All other are passed to the completion callback. 2234 * All other are passed to the completion callback.
2229 */ 2235 */
2230 if (pring->ringno == LPFC_ELS_RING) { 2236 if (pring->ringno == LPFC_ELS_RING) {
2231 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) { 2237 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2238 (cmdiocbp->iocb_flag &
2239 LPFC_DRIVER_ABORTED)) {
2240 spin_lock_irqsave(&phba->hbalock,
2241 iflag);
2232 cmdiocbp->iocb_flag &= 2242 cmdiocbp->iocb_flag &=
2233 ~LPFC_DRIVER_ABORTED; 2243 ~LPFC_DRIVER_ABORTED;
2244 spin_unlock_irqrestore(&phba->hbalock,
2245 iflag);
2234 saveq->iocb.ulpStatus = 2246 saveq->iocb.ulpStatus =
2235 IOSTAT_LOCAL_REJECT; 2247 IOSTAT_LOCAL_REJECT;
2236 saveq->iocb.un.ulpWord[4] = 2248 saveq->iocb.un.ulpWord[4] =
@@ -2240,7 +2252,47 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2240 * of DMAing payload, so don't free data 2252 * of DMAing payload, so don't free data
2241 * buffer till after a hbeat. 2253 * buffer till after a hbeat.
2242 */ 2254 */
2255 spin_lock_irqsave(&phba->hbalock,
2256 iflag);
2243 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2257 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2258 spin_unlock_irqrestore(&phba->hbalock,
2259 iflag);
2260 }
2261 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2262 (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
 2263 /* Set the cmdiocb exchange-busy
 2264 * flag so the sgl (xri) will not be
 2265 * released until the abort xri is
 2266 * received from the hba; clear the
 2267 * LPFC_DRIVER_ABORTED bit in case
 2268 * it was a driver initiated abort.
 2269 */
2270 spin_lock_irqsave(&phba->hbalock,
2271 iflag);
2272 cmdiocbp->iocb_flag &=
2273 ~LPFC_DRIVER_ABORTED;
2274 cmdiocbp->iocb_flag |=
2275 LPFC_EXCHANGE_BUSY;
2276 spin_unlock_irqrestore(&phba->hbalock,
2277 iflag);
2278 cmdiocbp->iocb.ulpStatus =
2279 IOSTAT_LOCAL_REJECT;
2280 cmdiocbp->iocb.un.ulpWord[4] =
2281 IOERR_ABORT_REQUESTED;
2282 /*
2283 * For SLI4, irsiocb contains NO_XRI
2284 * in sli_xritag, it shall not affect
2285 * releasing sgl (xri) process.
2286 */
2287 saveq->iocb.ulpStatus =
2288 IOSTAT_LOCAL_REJECT;
2289 saveq->iocb.un.ulpWord[4] =
2290 IOERR_SLI_ABORTED;
2291 spin_lock_irqsave(&phba->hbalock,
2292 iflag);
2293 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2294 spin_unlock_irqrestore(&phba->hbalock,
2295 iflag);
2244 } 2296 }
2245 } 2297 }
2246 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2298 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -5687,19 +5739,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5687 5739
5688 for (i = 0; i < numBdes; i++) { 5740 for (i = 0; i < numBdes; i++) {
5689 /* Should already be byte swapped. */ 5741 /* Should already be byte swapped. */
5690 sgl->addr_hi = bpl->addrHigh; 5742 sgl->addr_hi = bpl->addrHigh;
5691 sgl->addr_lo = bpl->addrLow; 5743 sgl->addr_lo = bpl->addrLow;
5692 /* swap the size field back to the cpu so we 5744
5693 * can assign it to the sgl.
5694 */
5695 bde.tus.w = le32_to_cpu(bpl->tus.w);
5696 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5697 if ((i+1) == numBdes) 5745 if ((i+1) == numBdes)
5698 bf_set(lpfc_sli4_sge_last, sgl, 1); 5746 bf_set(lpfc_sli4_sge_last, sgl, 1);
5699 else 5747 else
5700 bf_set(lpfc_sli4_sge_last, sgl, 0); 5748 bf_set(lpfc_sli4_sge_last, sgl, 0);
5701 sgl->word2 = cpu_to_le32(sgl->word2); 5749 sgl->word2 = cpu_to_le32(sgl->word2);
5702 sgl->word3 = cpu_to_le32(sgl->word3); 5750 /* swap the size field back to the cpu so we
5751 * can assign it to the sgl.
5752 */
5753 bde.tus.w = le32_to_cpu(bpl->tus.w);
5754 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
5703 bpl++; 5755 bpl++;
5704 sgl++; 5756 sgl++;
5705 } 5757 }
@@ -5712,11 +5764,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5712 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 5764 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5713 sgl->addr_lo = 5765 sgl->addr_lo =
5714 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 5766 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5715 bf_set(lpfc_sli4_sge_len, sgl,
5716 icmd->un.genreq64.bdl.bdeSize);
5717 bf_set(lpfc_sli4_sge_last, sgl, 1); 5767 bf_set(lpfc_sli4_sge_last, sgl, 1);
5718 sgl->word2 = cpu_to_le32(sgl->word2); 5768 sgl->word2 = cpu_to_le32(sgl->word2);
5719 sgl->word3 = cpu_to_le32(sgl->word3); 5769 sgl->sge_len =
5770 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
5720 } 5771 }
5721 return sglq->sli4_xritag; 5772 return sglq->sli4_xritag;
5722} 5773}
@@ -5987,12 +6038,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5987 else 6038 else
5988 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6039 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5989 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6040 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5990 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5991 wqe->words[5] = 0; 6041 wqe->words[5] = 0;
5992 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6042 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5993 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6043 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5994 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6044 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5995 wqe->generic.abort_tag = abort_tag;
5996 /* 6045 /*
5997 * The abort handler will send us CMD_ABORT_XRI_CN or 6046 * The abort handler will send us CMD_ABORT_XRI_CN or
5998 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6047 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@@ -6121,15 +6170,15 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6121 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 6170 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6122 return IOCB_ERROR; 6171 return IOCB_ERROR;
6123 6172
6124 if (piocb->iocb_flag & LPFC_IO_FCP) { 6173 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
6174 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
6125 /* 6175 /*
6126 * For FCP command IOCB, get a new WQ index to distribute 6176 * For FCP command IOCB, get a new WQ index to distribute
 6127 * WQE across the WQs. On the other hand, for abort IOCB, 6177 * WQE across the WQs. On the other hand, for abort IOCB,
6128 * it carries the same WQ index to the original command 6178 * it carries the same WQ index to the original command
6129 * IOCB. 6179 * IOCB.
6130 */ 6180 */
6131 if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 6181 if (piocb->iocb_flag & LPFC_IO_FCP)
6132 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
6133 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 6182 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6134 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 6183 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
6135 &wqe)) 6184 &wqe))
@@ -7004,7 +7053,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7004 abort_iocb->iocb.ulpContext != abort_context || 7053 abort_iocb->iocb.ulpContext != abort_context ||
7005 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 7054 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
7006 spin_unlock_irq(&phba->hbalock); 7055 spin_unlock_irq(&phba->hbalock);
7007 else { 7056 else if (phba->sli_rev < LPFC_SLI_REV4) {
 7057 /*
 7058 * leave the SLI4 aborted command on the txcmplq
 7059 * list; the command complete WCQE's XB bit
 7060 * will tell whether the SGL (XRI) can be released
 7061 * immediately or must wait on the aborted SGL
 7062 * list for the following abort XRI from the HBA.
 7063 */
7008 list_del_init(&abort_iocb->list); 7064 list_del_init(&abort_iocb->list);
7009 pring->txcmplq_cnt--; 7065 pring->txcmplq_cnt--;
7010 spin_unlock_irq(&phba->hbalock); 7066 spin_unlock_irq(&phba->hbalock);
@@ -7013,11 +7069,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7013 * payload, so don't free data buffer till after 7069 * payload, so don't free data buffer till after
7014 * a hbeat. 7070 * a hbeat.
7015 */ 7071 */
7072 spin_lock_irq(&phba->hbalock);
7016 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7073 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
7017
7018 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7074 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
7075 spin_unlock_irq(&phba->hbalock);
7076
7019 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7077 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
7020 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 7078 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
7021 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7079 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
7022 } 7080 }
7023 } 7081 }
@@ -7106,7 +7164,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7106 return 0; 7164 return 0;
7107 7165
7108 /* This signals the response to set the correct status 7166 /* This signals the response to set the correct status
7109 * before calling the completion handler. 7167 * before calling the completion handler
7110 */ 7168 */
7111 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 7169 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
7112 7170
@@ -7124,6 +7182,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7124 7182
7125 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 7183 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7126 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 7184 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
7185 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
7186 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
7127 7187
7128 if (phba->link_state >= LPFC_LINK_UP) 7188 if (phba->link_state >= LPFC_LINK_UP)
7129 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7189 iabt->ulpCommand = CMD_ABORT_XRI_CN;
@@ -7330,6 +7390,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
7330 7390
7331 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 7391 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7332 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 7392 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
7393 if (iocbq->iocb_flag & LPFC_IO_FCP)
7394 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
7333 7395
7334 if (lpfc_is_link_up(phba)) 7396 if (lpfc_is_link_up(phba))
7335 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 7397 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@@ -8359,11 +8421,24 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8359 } 8421 }
8360} 8422}
8361 8423
8424/**
8425 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
8426 * @phba: pointer to lpfc hba data structure
8427 * @pIocbIn: pointer to the rspiocbq
8428 * @pIocbOut: pointer to the cmdiocbq
8429 * @wcqe: pointer to the complete wcqe
8430 *
8431 * This routine transfers the fields of a command iocbq to a response iocbq
8432 * by copying all the IOCB fields from command iocbq and transferring the
8433 * completion status information from the complete wcqe.
8434 **/
8362static void 8435static void
8363lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, 8436lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
8437 struct lpfc_iocbq *pIocbIn,
8364 struct lpfc_iocbq *pIocbOut, 8438 struct lpfc_iocbq *pIocbOut,
8365 struct lpfc_wcqe_complete *wcqe) 8439 struct lpfc_wcqe_complete *wcqe)
8366{ 8440{
8441 unsigned long iflags;
8367 size_t offset = offsetof(struct lpfc_iocbq, iocb); 8442 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8368 8443
8369 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 8444 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -8377,8 +8452,17 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8377 wcqe->total_data_placed; 8452 wcqe->total_data_placed;
8378 else 8453 else
8379 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8454 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8380 else 8455 else {
8381 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8456 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8457 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
8458 }
8459
8460 /* Pick up HBA exchange busy condition */
8461 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
8462 spin_lock_irqsave(&phba->hbalock, iflags);
8463 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
8464 spin_unlock_irqrestore(&phba->hbalock, iflags);
8465 }
8382} 8466}
8383 8467
8384/** 8468/**
@@ -8419,7 +8503,7 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
8419 } 8503 }
8420 8504
8421 /* Fake the irspiocbq and copy necessary response information */ 8505 /* Fake the irspiocbq and copy necessary response information */
8422 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); 8506 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
8423 8507
8424 return irspiocbq; 8508 return irspiocbq;
8425} 8509}
@@ -8849,8 +8933,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8849 int ecount = 0; 8933 int ecount = 0;
8850 uint16_t cqid; 8934 uint16_t cqid;
8851 8935
8852 if (bf_get(lpfc_eqe_major_code, eqe) != 0 || 8936 if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
8853 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8937 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8855 "0359 Not a valid slow-path completion " 8938 "0359 Not a valid slow-path completion "
8856 "event: majorcode=x%x, minorcode=x%x\n", 8939 "event: majorcode=x%x, minorcode=x%x\n",
@@ -8976,7 +9059,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8976 } 9059 }
8977 9060
8978 /* Fake the irspiocb and copy necessary response information */ 9061 /* Fake the irspiocb and copy necessary response information */
8979 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); 9062 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
8980 9063
8981 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9064 /* Pass the cmd_iocb and the rsp state to the upper layer */
8982 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9065 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@@ -9082,8 +9165,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9082 uint16_t cqid; 9165 uint16_t cqid;
9083 int ecount = 0; 9166 int ecount = 0;
9084 9167
9085 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || 9168 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
9086 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9087 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9169 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9088 "0366 Not a valid fast-path completion " 9170 "0366 Not a valid fast-path completion "
9089 "event: majorcode=x%x, minorcode=x%x\n", 9171 "event: majorcode=x%x, minorcode=x%x\n",
@@ -11871,12 +11953,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11871{ 11953{
11872 int rc = 0, error; 11954 int rc = 0, error;
11873 LPFC_MBOXQ_t *mboxq; 11955 LPFC_MBOXQ_t *mboxq;
11874 void *virt_addr;
11875 dma_addr_t phys_addr;
11876 uint8_t *bytep;
11877 struct lpfc_mbx_sge sge;
11878 uint32_t alloc_len, req_len;
11879 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11880 11956
11881 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 11957 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11882 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11958 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11887,43 +11963,19 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11887 error = -ENOMEM; 11963 error = -ENOMEM;
11888 goto fail_fcfscan; 11964 goto fail_fcfscan;
11889 } 11965 }
11890 11966 /* Construct the read FCF record mailbox command */
11891 req_len = sizeof(struct fcf_record) + 11967 rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
11892 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); 11968 if (rc) {
11893 11969 error = -EINVAL;
11894 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11895 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11896 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11897 LPFC_SLI4_MBX_NEMBED);
11898
11899 if (alloc_len < req_len) {
11900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11901 "0291 Allocated DMA memory size (x%x) is "
11902 "less than the requested DMA memory "
11903 "size (x%x)\n", alloc_len, req_len);
11904 error = -ENOMEM;
11905 goto fail_fcfscan; 11970 goto fail_fcfscan;
11906 } 11971 }
11907 11972 /* Issue the mailbox command asynchronously */
11908 /* Get the first SGE entry from the non-embedded DMA memory. This
11909 * routine only uses a single SGE.
11910 */
11911 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11912 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11913 virt_addr = mboxq->sge_array->addr[0];
11914 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11915
11916 /* Set up command fields */
11917 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11918 /* Perform necessary endian conversion */
11919 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11920 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11921 mboxq->vport = phba->pport; 11973 mboxq->vport = phba->pport;
11922 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 11974 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11923 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 11975 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11924 if (rc == MBX_NOT_FINISHED) { 11976 if (rc == MBX_NOT_FINISHED)
11925 error = -EIO; 11977 error = -EIO;
11926 } else { 11978 else {
11927 spin_lock_irq(&phba->hbalock); 11979 spin_lock_irq(&phba->hbalock);
11928 phba->hba_flag |= FCF_DISC_INPROGRESS; 11980 phba->hba_flag |= FCF_DISC_INPROGRESS;
11929 spin_unlock_irq(&phba->hbalock); 11981 spin_unlock_irq(&phba->hbalock);
@@ -11942,6 +11994,90 @@ fail_fcfscan:
11942} 11994}
11943 11995
11944/** 11996/**
11997 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
11998 * @phba: pointer to lpfc hba data structure.
11999 *
12000 * This routine is the completion routine for the rediscover FCF table mailbox
 12001 * command. On failure it retries discovery with the currently registered
 12002 * FCF entry; on success it starts the FCF rediscovery wait timer.
12003 **/
12004void
12005lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12006{
12007 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12008 uint32_t shdr_status, shdr_add_status;
12009
12010 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
12011
12012 shdr_status = bf_get(lpfc_mbox_hdr_status,
12013 &redisc_fcf->header.cfg_shdr.response);
12014 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12015 &redisc_fcf->header.cfg_shdr.response);
12016 if (shdr_status || shdr_add_status) {
12017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12018 "2746 Requesting for FCF rediscovery failed "
12019 "status x%x add_status x%x\n",
12020 shdr_status, shdr_add_status);
12021 /*
 12022 * Request failed; fall back to retrying the
 12023 * currently registered FCF entry
12024 */
12025 lpfc_retry_pport_discovery(phba);
12026 } else
12027 /*
 12028 * Start the FCF rediscovery wait timer before
 12029 * rescanning the FCF record table.
12030 */
12031 lpfc_fcf_redisc_wait_start_timer(phba);
12032
12033 mempool_free(mbox, phba->mbox_mem_pool);
12034}
12035
12036/**
 12037 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
12038 * @phba: pointer to lpfc hba data structure.
12039 *
 12040 * This routine is invoked to request rediscovery of the entire FCF table
12041 * by the port.
12042 **/
12043int
12044lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12045{
12046 LPFC_MBOXQ_t *mbox;
12047 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12048 int rc, length;
12049
12050 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12051 if (!mbox) {
12052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12053 "2745 Failed to allocate mbox for "
12054 "requesting FCF rediscover.\n");
12055 return -ENOMEM;
12056 }
12057
12058 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
12059 sizeof(struct lpfc_sli4_cfg_mhdr));
12060 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12061 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
12062 length, LPFC_SLI4_MBX_EMBED);
12063
12064 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
12065 /* Set count to 0 for invalidating the entire FCF database */
12066 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
12067
12068 /* Issue the mailbox command asynchronously */
12069 mbox->vport = phba->pport;
12070 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
12071 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
12072
12073 if (rc == MBX_NOT_FINISHED) {
12074 mempool_free(mbox, phba->mbox_mem_pool);
12075 return -EIO;
12076 }
12077 return 0;
12078}
12079
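
lpfc_sli4_redisc_fcf_table() sizes the embedded mailbox payload as the command structure minus its common config header, since lpfc_sli4_config() accounts for the header itself. The arithmetic in isolation (hypothetical struct shapes standing in for the real SLI4 definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cfg_mhdr {		/* stand-in for the common config header */
	uint32_t words[4];
};

struct redisc_fcf_tbl {		/* header followed by the command payload */
	struct cfg_mhdr header;
	uint32_t fcf_count;	/* 0 invalidates the entire FCF table */
	uint32_t reserved;
};

int main(void)
{
	/* The length handed to the config setup excludes the common
	 * header, mirroring the driver's computation. */
	size_t length = sizeof(struct redisc_fcf_tbl) -
			sizeof(struct cfg_mhdr);

	printf("embedded payload length: %zu bytes\n", length);
	return 0;
}
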
12080/**
11945 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 12081 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
11946 * @phba: pointer to lpfc hba data structure. 12082 * @phba: pointer to lpfc hba data structure.
11947 * 12083 *
@@ -12069,3 +12205,48 @@ out:
12069 kfree(rgn23_data); 12205 kfree(rgn23_data);
12070 return; 12206 return;
12071} 12207}
12208
12209/**
12210 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
12211 * @vport: pointer to vport data structure.
12212 *
 12213 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 12214 * and REG_VPI mailbox commands associated with the vport. This function
 12215 * is called when the driver wants to restart discovery of the vport due to
12216 * a Clear Virtual Link event.
12217 **/
12218void
12219lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12220{
12221 struct lpfc_hba *phba = vport->phba;
12222 LPFC_MBOXQ_t *mb, *nextmb;
12223 struct lpfc_dmabuf *mp;
12224
12225 spin_lock_irq(&phba->hbalock);
12226 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
12227 if (mb->vport != vport)
12228 continue;
12229
12230 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
12231 (mb->u.mb.mbxCommand != MBX_REG_VPI))
12232 continue;
12233
12234 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12235 mp = (struct lpfc_dmabuf *) (mb->context1);
12236 if (mp) {
12237 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
12238 kfree(mp);
12239 }
12240 }
12241 list_del(&mb->list);
12242 mempool_free(mb, phba->mbox_mem_pool);
12243 }
12244 mb = phba->sli.mbox_active;
12245 if (mb && (mb->vport == vport)) {
12246 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
12247 (mb->u.mb.mbxCommand == MBX_REG_VPI))
12248 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12249 }
12250 spin_unlock_irq(&phba->hbalock);
12251}
12252
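
lpfc_cleanup_pending_mbox() above relies on the list_for_each_entry_safe() idiom so entries can be unlinked while the queue is walked. The same pattern in plain C on a toy singly linked list (a sketch only; the driver additionally holds phba->hbalock and frees the REG_LOGIN DMA buffer):

#include <stdio.h>
#include <stdlib.h>

enum { REG_LOGIN64 = 1, REG_VPI = 2, OTHER = 3 };

struct mbox {
	int vport_id;		/* which vport queued this command */
	int cmd;		/* REG_LOGIN64 / REG_VPI stand-ins */
	struct mbox *next;
};

/* Remove every queued discovery mailbox command that belongs to
 * vport_id; the pointer-to-pointer walk lets the current node be
 * unlinked safely while iterating. */
static void cleanup_pending_mbox(struct mbox **head, int vport_id)
{
	struct mbox **pp = head;

	while (*pp) {
		struct mbox *mb = *pp;

		if (mb->vport_id == vport_id &&
		    (mb->cmd == REG_LOGIN64 || mb->cmd == REG_VPI)) {
			*pp = mb->next;		/* unlink */
			free(mb);
			continue;
		}
		pp = &mb->next;
	}
}

static struct mbox *push(struct mbox *head, int vport_id, int cmd)
{
	struct mbox *mb = malloc(sizeof(*mb));

	mb->vport_id = vport_id;
	mb->cmd = cmd;
	mb->next = head;
	return mb;
}

int main(void)
{
	struct mbox *q = NULL;

	q = push(q, 1, REG_LOGIN64);
	q = push(q, 2, REG_VPI);
	q = push(q, 1, OTHER);

	cleanup_pending_mbox(&q, 1);

	for (struct mbox *mb = q; mb; mb = mb->next)
		printf("kept: vport %d cmd %d\n", mb->vport_id, mb->cmd);
	return 0;
}
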
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index ba38de3c28f1..dfcf5437d1f5 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -53,17 +53,19 @@ struct lpfc_iocbq {
53 53
54 IOCB_t iocb; /* IOCB cmd */ 54 IOCB_t iocb; /* IOCB cmd */
55 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 55 uint8_t retry; /* retry counter for IOCB cmd - if needed */
56 uint8_t iocb_flag; 56 uint16_t iocb_flag;
57#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ 57#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
58#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 58#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
59#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 59#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
60#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 60#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
61#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 61#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
63#define LPFC_FIP_ELS_ID_MASK 0xc0 /* ELS_ID range 0-3 */ 63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
64#define LPFC_FIP_ELS_ID_SHIFT 6 64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
65
66#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
67#define LPFC_FIP_ELS_ID_SHIFT 14
65 68
66 uint8_t abort_count;
67 uint8_t rsvd2; 69 uint8_t rsvd2;
68 uint32_t drvrTimeout; /* driver timeout in seconds */ 70 uint32_t drvrTimeout; /* driver timeout in seconds */
69 uint32_t fcp_wqidx; /* index to FCP work queue */ 71 uint32_t fcp_wqidx; /* index to FCP work queue */
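
With iocb_flag widened to u16, the two new bits take 0x40/0x80 and the FIP ELS_ID field moves to the top two bits (mask 0xc000, shift 14). Encoding and decoding that field looks like this (a standalone sketch reusing the constants from the hunk above):

#include <stdint.h>
#include <stdio.h>

#define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted */
#define LPFC_FIP_ELS_ID_SHIFT	14

static uint16_t set_els_id(uint16_t iocb_flag, unsigned int els_id)
{
	iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
	return iocb_flag | ((els_id << LPFC_FIP_ELS_ID_SHIFT) &
			    LPFC_FIP_ELS_ID_MASK);
}

static unsigned int get_els_id(uint16_t iocb_flag)
{
	return (iocb_flag & LPFC_FIP_ELS_ID_MASK) >> LPFC_FIP_ELS_ID_SHIFT;
}

int main(void)
{
	uint16_t flag = 0x40 | 0x80;	/* EXCHANGE_BUSY | USE_FCPWQIDX */

	flag = set_els_id(flag, 3);
	printf("flag=0x%04x els_id=%u\n", (unsigned)flag, get_els_id(flag));
	return 0;
}
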
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 44e5f574236b..86308836600f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -22,6 +22,10 @@
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32 23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10 24#define LPFC_RPI_LOW_WATER_MARK 10
25
26/* Amount of time in seconds for waiting FCF rediscovery to complete */
27#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
28
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ 29/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254 30#define LPFC_NEMBED_MBOX_SGL_CNT 254
27 31
@@ -126,24 +130,36 @@ struct lpfc_sli4_link {
126 uint8_t status; 130 uint8_t status;
127 uint8_t physical; 131 uint8_t physical;
128 uint8_t fault; 132 uint8_t fault;
133 uint16_t logical_speed;
129}; 134};
130 135
131struct lpfc_fcf { 136struct lpfc_fcf_rec {
132 uint8_t fabric_name[8]; 137 uint8_t fabric_name[8];
133 uint8_t switch_name[8]; 138 uint8_t switch_name[8];
134 uint8_t mac_addr[6]; 139 uint8_t mac_addr[6];
135 uint16_t fcf_indx; 140 uint16_t fcf_indx;
141 uint32_t priority;
142 uint16_t vlan_id;
143 uint32_t addr_mode;
144 uint32_t flag;
145#define BOOT_ENABLE 0x01
146#define RECORD_VALID 0x02
147};
148
149struct lpfc_fcf {
136 uint16_t fcfi; 150 uint16_t fcfi;
137 uint32_t fcf_flag; 151 uint32_t fcf_flag;
138#define FCF_AVAILABLE 0x01 /* FCF available for discovery */ 152#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
139#define FCF_REGISTERED 0x02 /* FCF registered with FW */ 153#define FCF_REGISTERED 0x02 /* FCF registered with FW */
140#define FCF_DISCOVERED 0x04 /* FCF discovery started */ 154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
141#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */ 155#define FCF_IN_USE 0x08 /* Atleast one discovery completed */
142#define FCF_IN_USE 0x10 /* Atleast one discovery completed */ 156#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */
143#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ 157#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */
144 uint32_t priority; 158#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */
145 uint32_t addr_mode; 159 uint32_t addr_mode;
146 uint16_t vlan_id; 160 struct lpfc_fcf_rec current_rec;
161 struct lpfc_fcf_rec failover_rec;
162 struct timer_list redisc_wait;
147}; 163};
148 164
149#define LPFC_REGION23_SIGNATURE "RG23" 165#define LPFC_REGION23_SIGNATURE "RG23"
@@ -248,7 +264,10 @@ struct lpfc_bmbx {
248#define SLI4_CT_VFI 2 264#define SLI4_CT_VFI 2
249#define SLI4_CT_FCFI 3 265#define SLI4_CT_FCFI 3
250 266
251#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 267#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
 268#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0x2000
269#define LPFC_SLI4_MIN_BUF_SIZE 0x400
270#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
252 271
253/* 272/*
254 * SLI4 specific data structures 273 * SLI4 specific data structures
@@ -282,6 +301,42 @@ struct lpfc_fcp_eq_hdl {
282 struct lpfc_hba *phba; 301 struct lpfc_hba *phba;
283}; 302};
284 303
304/* Port Capabilities for SLI4 Parameters */
305struct lpfc_pc_sli4_params {
306 uint32_t supported;
307 uint32_t if_type;
308 uint32_t sli_rev;
309 uint32_t sli_family;
310 uint32_t featurelevel_1;
311 uint32_t featurelevel_2;
312 uint32_t proto_types;
313#define LPFC_SLI4_PROTO_FCOE 0x0000001
314#define LPFC_SLI4_PROTO_FC 0x0000002
315#define LPFC_SLI4_PROTO_NIC 0x0000004
316#define LPFC_SLI4_PROTO_ISCSI 0x0000008
317#define LPFC_SLI4_PROTO_RDMA 0x0000010
318 uint32_t sge_supp_len;
319 uint32_t if_page_sz;
320 uint32_t rq_db_window;
321 uint32_t loopbk_scope;
322 uint32_t eq_pages_max;
323 uint32_t eqe_size;
324 uint32_t cq_pages_max;
325 uint32_t cqe_size;
326 uint32_t mq_pages_max;
327 uint32_t mqe_size;
328 uint32_t mq_elem_cnt;
329 uint32_t wq_pages_max;
330 uint32_t wqe_size;
331 uint32_t rq_pages_max;
332 uint32_t rqe_size;
333 uint32_t hdr_pages_max;
334 uint32_t hdr_size;
335 uint32_t hdr_pp_align;
336 uint32_t sgl_pages_max;
337 uint32_t sgl_pp_align;
338};
339
285/* SLI4 HBA data structure entries */ 340/* SLI4 HBA data structure entries */
286struct lpfc_sli4_hba { 341struct lpfc_sli4_hba {
287 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 342 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -295,7 +350,7 @@ struct lpfc_sli4_hba {
295 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ 350 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
296 void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */ 351 void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
297 void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */ 352 void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
298 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ 353 void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
299 /* BAR1 FCoE function CSR register memory map */ 354 /* BAR1 FCoE function CSR register memory map */
300 void __iomem *STAregaddr; /* Address to HST_STATE register */ 355 void __iomem *STAregaddr; /* Address to HST_STATE register */
301 void __iomem *ISRregaddr; /* Address to HST_ISR register */ 356 void __iomem *ISRregaddr; /* Address to HST_ISR register */
@@ -310,6 +365,8 @@ struct lpfc_sli4_hba {
310 365
311 uint32_t ue_mask_lo; 366 uint32_t ue_mask_lo;
312 uint32_t ue_mask_hi; 367 uint32_t ue_mask_hi;
368 struct lpfc_register sli_intf;
369 struct lpfc_pc_sli4_params pc_sli4_params;
313 struct msix_entry *msix_entries; 370 struct msix_entry *msix_entries;
314 uint32_t cfg_eqn; 371 uint32_t cfg_eqn;
315 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 372 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@@ -406,6 +463,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
406void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); 463void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
407void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, 464void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
408 struct lpfc_mbx_sge *); 465 struct lpfc_mbx_sge *);
466int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
467 uint16_t);
409 468
410void lpfc_sli4_hba_reset(struct lpfc_hba *); 469void lpfc_sli4_hba_reset(struct lpfc_hba *);
411struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 470struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -448,6 +507,7 @@ int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
448void lpfc_sli4_free_rpi(struct lpfc_hba *, int); 507void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
449void lpfc_sli4_remove_rpis(struct lpfc_hba *); 508void lpfc_sli4_remove_rpis(struct lpfc_hba *);
450void lpfc_sli4_async_event_proc(struct lpfc_hba *); 509void lpfc_sli4_async_event_proc(struct lpfc_hba *);
510void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
451int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); 511int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
452void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); 512void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
453void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); 513void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 792f72263f1a..ac276aa46fba 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.7" 21#define LPFC_DRIVER_VERSION "8.3.9"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e3c7fa642306..dc86e873102a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
389 * by the port. 389 * by the port.
390 */ 390 */
391 if ((phba->sli_rev == LPFC_SLI_REV4) && 391 if ((phba->sli_rev == LPFC_SLI_REV4) &&
392 (pport->vpi_state & LPFC_VPI_REGISTERED)) { 392 (pport->fc_flag & FC_VFI_REGISTERED)) {
393 rc = lpfc_sli4_init_vpi(phba, vpi); 393 rc = lpfc_sli4_init_vpi(phba, vpi);
394 if (rc) { 394 if (rc) {
395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -505,6 +505,7 @@ enable_vport(struct fc_vport *fc_vport)
505 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 505 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
506 struct lpfc_hba *phba = vport->phba; 506 struct lpfc_hba *phba = vport->phba;
507 struct lpfc_nodelist *ndlp = NULL; 507 struct lpfc_nodelist *ndlp = NULL;
508 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
508 509
509 if ((phba->link_state < LPFC_LINK_UP) || 510 if ((phba->link_state < LPFC_LINK_UP) ||
510 (phba->fc_topology == TOPOLOGY_LOOP)) { 511 (phba->fc_topology == TOPOLOGY_LOOP)) {
@@ -512,10 +513,10 @@ enable_vport(struct fc_vport *fc_vport)
512 return VPORT_OK; 513 return VPORT_OK;
513 } 514 }
514 515
515 spin_lock_irq(&phba->hbalock); 516 spin_lock_irq(shost->host_lock);
516 vport->load_flag |= FC_LOADING; 517 vport->load_flag |= FC_LOADING;
517 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 518 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
518 spin_unlock_irq(&phba->hbalock); 519 spin_unlock_irq(shost->host_lock);
519 520
520 /* Use the Physical nodes Fabric NDLP to determine if the link is 521 /* Use the Physical nodes Fabric NDLP to determine if the link is
521 * up and ready to FDISC. 522 * up and ready to FDISC.
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index c24e86f07804..dd808ae942a1 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -22,7 +22,6 @@
22 22
23#include <asm/irq.h> 23#include <asm/irq.h>
24#include <asm/dma.h> 24#include <asm/dma.h>
25
26#include <asm/macints.h> 25#include <asm/macints.h>
27#include <asm/macintosh.h> 26#include <asm/macintosh.h>
28 27
@@ -279,24 +278,27 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
279 * Programmed IO routines follow. 278 * Programmed IO routines follow.
280 */ 279 */
281 280
282static inline int mac_esp_wait_for_fifo(struct esp *esp) 281static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
283{ 282{
284 int i = 500000; 283 int i = 500000;
285 284
286 do { 285 do {
287 if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) 286 unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
288 return 0; 287
288 if (fbytes)
289 return fbytes;
289 290
290 udelay(2); 291 udelay(2);
291 } while (--i); 292 } while (--i);
292 293
293 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n", 294 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
294 esp_read8(ESP_STATUS)); 295 esp_read8(ESP_STATUS));
295 return 1; 296 return 0;
296} 297}
297 298
298static inline int mac_esp_wait_for_intr(struct esp *esp) 299static inline int mac_esp_wait_for_intr(struct esp *esp)
299{ 300{
301 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
300 int i = 500000; 302 int i = 500000;
301 303
302 do { 304 do {
@@ -308,6 +310,7 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
308 } while (--i); 310 } while (--i);
309 311
310 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg); 312 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
313 mep->error = 1;
311 return 1; 314 return 1;
312} 315}
313 316
@@ -347,11 +350,10 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
347static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, 350static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
348 u32 dma_count, int write, u8 cmd) 351 u32 dma_count, int write, u8 cmd)
349{ 352{
350 unsigned long flags;
351 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 353 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
352 u8 *fifo = esp->regs + ESP_FDATA * 16; 354 u8 *fifo = esp->regs + ESP_FDATA * 16;
353 355
354 local_irq_save(flags); 356 disable_irq(esp->host->irq);
355 357
356 cmd &= ~ESP_CMD_DMA; 358 cmd &= ~ESP_CMD_DMA;
357 mep->error = 0; 359 mep->error = 0;
@@ -359,11 +361,35 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
359 if (write) { 361 if (write) {
360 scsi_esp_cmd(esp, cmd); 362 scsi_esp_cmd(esp, cmd);
361 363
362 if (!mac_esp_wait_for_intr(esp)) { 364 while (1) {
363 if (mac_esp_wait_for_fifo(esp)) 365 unsigned int n;
364 esp_count = 0; 366
365 } else { 367 n = mac_esp_wait_for_fifo(esp);
366 esp_count = 0; 368 if (!n)
369 break;
370
371 if (n > esp_count)
372 n = esp_count;
373 esp_count -= n;
374
375 MAC_ESP_PIO_LOOP("%2@,%0@+", n);
376
377 if (!esp_count)
378 break;
379
380 if (mac_esp_wait_for_intr(esp))
381 break;
382
383 if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
384 ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
385 break;
386
387 esp->ireg = esp_read8(ESP_INTRPT);
388 if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
389 ESP_INTR_BSERV)
390 break;
391
392 scsi_esp_cmd(esp, ESP_CMD_TI);
367 } 393 }
368 } else { 394 } else {
369 scsi_esp_cmd(esp, ESP_CMD_FLUSH); 395 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -374,47 +400,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
374 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count); 400 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
375 401
376 scsi_esp_cmd(esp, cmd); 402 scsi_esp_cmd(esp, cmd);
377 }
378
379 while (esp_count) {
380 unsigned int n;
381
382 if (mac_esp_wait_for_intr(esp)) {
383 mep->error = 1;
384 break;
385 }
386
387 if (esp->sreg & ESP_STAT_SPAM) {
388 printk(KERN_ERR PFX "gross error\n");
389 mep->error = 1;
390 break;
391 }
392 403
393 n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; 404 while (esp_count) {
394 405 unsigned int n;
395 if (write) {
396 if (n > esp_count)
397 n = esp_count;
398 esp_count -= n;
399
400 MAC_ESP_PIO_LOOP("%2@,%0@+", n);
401 406
402 if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP) 407 if (mac_esp_wait_for_intr(esp))
403 break; 408 break;
404 409
405 if (esp_count) { 410 if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
406 esp->ireg = esp_read8(ESP_INTRPT); 411 ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
407 if (esp->ireg & ESP_INTR_DC) 412 break;
408 break;
409 413
410 scsi_esp_cmd(esp, ESP_CMD_TI);
411 }
412 } else {
413 esp->ireg = esp_read8(ESP_INTRPT); 414 esp->ireg = esp_read8(ESP_INTRPT);
414 if (esp->ireg & ESP_INTR_DC) 415 if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
416 ESP_INTR_BSERV)
415 break; 417 break;
416 418
417 n = MAC_ESP_FIFO_SIZE - n; 419 n = MAC_ESP_FIFO_SIZE -
420 (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
418 if (n > esp_count) 421 if (n > esp_count)
419 n = esp_count; 422 n = esp_count;
420 423
@@ -429,7 +432,7 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
429 } 432 }
430 } 433 }
431 434
432 local_irq_restore(flags); 435 enable_irq(esp->host->irq);
433} 436}
434 437
435static int mac_esp_irq_pending(struct esp *esp) 438static int mac_esp_irq_pending(struct esp *esp)
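
The reworked mac_esp PIO read path drains whatever the FIFO holds, clamps it to the bytes still expected, and only issues another transfer-information command when more data is due. A userspace model of that control flow against a fake FIFO (names invented; the real loop also checks SCSI phase and the interrupt register between bursts):

#include <stdio.h>
#include <string.h>

#define FIFO_SIZE 16

/* Fake device: a byte stream delivered through a 16-byte FIFO. */
static unsigned char stream[40];
static unsigned int stream_pos;

/* Returns how many bytes the FIFO currently holds (0 means timeout). */
static unsigned int wait_for_fifo(void)
{
	unsigned int left = sizeof(stream) - stream_pos;

	return left > FIFO_SIZE ? FIFO_SIZE : left;
}

static void fifo_read(unsigned char *dst, unsigned int n)
{
	memcpy(dst, stream + stream_pos, n);
	stream_pos += n;
}

int main(void)
{
	unsigned char buf[sizeof(stream)];
	unsigned char *p = buf;
	unsigned int esp_count = sizeof(buf);
	unsigned int i;

	for (i = 0; i < sizeof(stream); i++)
		stream[i] = (unsigned char)i;

	while (1) {
		unsigned int n = wait_for_fifo();

		if (!n)			/* timeout: bail out, like the driver */
			break;
		if (n > esp_count)	/* never read past the expected count */
			n = esp_count;
		fifo_read(p, n);
		p += n;
		esp_count -= n;
		if (!esp_count)		/* transfer complete */
			break;
		/* the driver re-checks phase/interrupt, then ESP_CMD_TI */
	}

	printf("remaining=%u last=%u\n", esp_count,
	       (unsigned)buf[sizeof(buf) - 1]);
	return 0;
}
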
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index d9b8ca5116bc..409648f5845f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.04.12-rc1 13 * Version : v00.00.04.17.1-rc1
14 * 14 *
15 * Authors: 15 * Authors:
16 * (email-id : megaraidlinux@lsi.com) 16 * (email-id : megaraidlinux@lsi.com)
@@ -843,6 +843,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
843 pthru->lun = scp->device->lun; 843 pthru->lun = scp->device->lun;
844 pthru->cdb_len = scp->cmd_len; 844 pthru->cdb_len = scp->cmd_len;
845 pthru->timeout = 0; 845 pthru->timeout = 0;
846 pthru->pad_0 = 0;
846 pthru->flags = flags; 847 pthru->flags = flags;
847 pthru->data_xfer_len = scsi_bufflen(scp); 848 pthru->data_xfer_len = scsi_bufflen(scp);
848 849
@@ -874,6 +875,12 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
874 pthru->sge_count = megasas_make_sgl32(instance, scp, 875 pthru->sge_count = megasas_make_sgl32(instance, scp,
875 &pthru->sgl); 876 &pthru->sgl);
876 877
878 if (pthru->sge_count > instance->max_num_sge) {
 879 printk(KERN_ERR "megasas: DCDB too many SGE NUM=%x\n",
880 pthru->sge_count);
881 return 0;
882 }
883
877 /* 884 /*
878 * Sense info specific 885 * Sense info specific
879 */ 886 */
@@ -1000,6 +1007,12 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1000 } else 1007 } else
1001 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1008 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1002 1009
1010 if (ldio->sge_count > instance->max_num_sge) {
1011 printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
1012 ldio->sge_count);
1013 return 0;
1014 }
1015
1003 /* 1016 /*
1004 * Sense info specific 1017 * Sense info specific
1005 */ 1018 */
@@ -2250,6 +2263,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
2250 dcmd->sge_count = 1; 2263 dcmd->sge_count = 1;
2251 dcmd->flags = MFI_FRAME_DIR_READ; 2264 dcmd->flags = MFI_FRAME_DIR_READ;
2252 dcmd->timeout = 0; 2265 dcmd->timeout = 0;
2266 dcmd->pad_0 = 0;
2253 dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); 2267 dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
2254 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 2268 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
2255 dcmd->sgl.sge32[0].phys_addr = ci_h; 2269 dcmd->sgl.sge32[0].phys_addr = ci_h;
@@ -2294,6 +2308,86 @@ megasas_get_pd_list(struct megasas_instance *instance)
2294 return ret; 2308 return ret;
2295} 2309}
2296 2310
2311/*
 2312 * megasas_get_ld_list - Returns FW's ld_list structure
 2313 * @instance: Adapter soft state
 2314 *
 2315 * Issues an internal command (DCMD) to get the FW's logical drive (LD)
 2316 * list structure. This information is mainly used to find out the LDs
 2317 * currently exported by the FW, so their target ids can be recorded
 2318 * in instance->ld_ids.
2319 */
2320static int
2321megasas_get_ld_list(struct megasas_instance *instance)
2322{
2323 int ret = 0, ld_index = 0, ids = 0;
2324 struct megasas_cmd *cmd;
2325 struct megasas_dcmd_frame *dcmd;
2326 struct MR_LD_LIST *ci;
2327 dma_addr_t ci_h = 0;
2328
2329 cmd = megasas_get_cmd(instance);
2330
2331 if (!cmd) {
2332 printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
2333 return -ENOMEM;
2334 }
2335
2336 dcmd = &cmd->frame->dcmd;
2337
2338 ci = pci_alloc_consistent(instance->pdev,
2339 sizeof(struct MR_LD_LIST),
2340 &ci_h);
2341
2342 if (!ci) {
2343 printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
2344 megasas_return_cmd(instance, cmd);
2345 return -ENOMEM;
2346 }
2347
2348 memset(ci, 0, sizeof(*ci));
2349 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2350
2351 dcmd->cmd = MFI_CMD_DCMD;
2352 dcmd->cmd_status = 0xFF;
2353 dcmd->sge_count = 1;
2354 dcmd->flags = MFI_FRAME_DIR_READ;
2355 dcmd->timeout = 0;
2356 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
2357 dcmd->opcode = MR_DCMD_LD_GET_LIST;
2358 dcmd->sgl.sge32[0].phys_addr = ci_h;
2359 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
2360 dcmd->pad_0 = 0;
2361
2362 if (!megasas_issue_polled(instance, cmd)) {
2363 ret = 0;
2364 } else {
2365 ret = -1;
2366 }
2367
 2368 /* the following loop records the LD target ids reported by the FW */
2369
2370 if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) {
2371 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2372
2373 for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
2374 if (ci->ldList[ld_index].state != 0) {
2375 ids = ci->ldList[ld_index].ref.targetId;
2376 instance->ld_ids[ids] =
2377 ci->ldList[ld_index].ref.targetId;
2378 }
2379 }
2380 }
2381
2382 pci_free_consistent(instance->pdev,
2383 sizeof(struct MR_LD_LIST),
2384 ci,
2385 ci_h);
2386
2387 megasas_return_cmd(instance, cmd);
2388 return ret;
2389}
2390
2297/** 2391/**
2298 * megasas_get_controller_info - Returns FW's controller structure 2392 * megasas_get_controller_info - Returns FW's controller structure
2299 * @instance: Adapter soft state 2393 * @instance: Adapter soft state
@@ -2339,6 +2433,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
2339 dcmd->sge_count = 1; 2433 dcmd->sge_count = 1;
2340 dcmd->flags = MFI_FRAME_DIR_READ; 2434 dcmd->flags = MFI_FRAME_DIR_READ;
2341 dcmd->timeout = 0; 2435 dcmd->timeout = 0;
2436 dcmd->pad_0 = 0;
2342 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); 2437 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
2343 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 2438 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2344 dcmd->sgl.sge32[0].phys_addr = ci_h; 2439 dcmd->sgl.sge32[0].phys_addr = ci_h;
@@ -2590,6 +2685,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2590 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 2685 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
2591 megasas_get_pd_list(instance); 2686 megasas_get_pd_list(instance);
2592 2687
2688 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2689 megasas_get_ld_list(instance);
2690
2593 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); 2691 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
2594 2692
2595 /* 2693 /*
@@ -2714,6 +2812,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
2714 dcmd->sge_count = 1; 2812 dcmd->sge_count = 1;
2715 dcmd->flags = MFI_FRAME_DIR_READ; 2813 dcmd->flags = MFI_FRAME_DIR_READ;
2716 dcmd->timeout = 0; 2814 dcmd->timeout = 0;
2815 dcmd->pad_0 = 0;
2717 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); 2816 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
2718 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 2817 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2719 dcmd->sgl.sge32[0].phys_addr = el_info_h; 2818 dcmd->sgl.sge32[0].phys_addr = el_info_h;
@@ -2828,6 +2927,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
2828 dcmd->sge_count = 1; 2927 dcmd->sge_count = 1;
2829 dcmd->flags = MFI_FRAME_DIR_READ; 2928 dcmd->flags = MFI_FRAME_DIR_READ;
2830 dcmd->timeout = 0; 2929 dcmd->timeout = 0;
2930 dcmd->pad_0 = 0;
2831 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); 2931 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
2832 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 2932 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
2833 dcmd->mbox.w[0] = seq_num; 2933 dcmd->mbox.w[0] = seq_num;
@@ -3166,6 +3266,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
3166 dcmd->sge_count = 0; 3266 dcmd->sge_count = 0;
3167 dcmd->flags = MFI_FRAME_DIR_NONE; 3267 dcmd->flags = MFI_FRAME_DIR_NONE;
3168 dcmd->timeout = 0; 3268 dcmd->timeout = 0;
3269 dcmd->pad_0 = 0;
3169 dcmd->data_xfer_len = 0; 3270 dcmd->data_xfer_len = 0;
3170 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 3271 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3171 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 3272 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
@@ -3205,6 +3306,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
3205 dcmd->sge_count = 0; 3306 dcmd->sge_count = 0;
3206 dcmd->flags = MFI_FRAME_DIR_NONE; 3307 dcmd->flags = MFI_FRAME_DIR_NONE;
3207 dcmd->timeout = 0; 3308 dcmd->timeout = 0;
3309 dcmd->pad_0 = 0;
3208 dcmd->data_xfer_len = 0; 3310 dcmd->data_xfer_len = 0;
3209 dcmd->opcode = opcode; 3311 dcmd->opcode = opcode;
3210 3312
@@ -3984,6 +4086,7 @@ megasas_aen_polling(struct work_struct *work)
3984 struct Scsi_Host *host; 4086 struct Scsi_Host *host;
3985 struct scsi_device *sdev1; 4087 struct scsi_device *sdev1;
3986 u16 pd_index = 0; 4088 u16 pd_index = 0;
4089 u16 ld_index = 0;
3987 int i, j, doscan = 0; 4090 int i, j, doscan = 0;
3988 u32 seq_num; 4091 u32 seq_num;
3989 int error; 4092 int error;
@@ -3999,8 +4102,124 @@ megasas_aen_polling(struct work_struct *work)
3999 4102
4000 switch (instance->evt_detail->code) { 4103 switch (instance->evt_detail->code) {
4001 case MR_EVT_PD_INSERTED: 4104 case MR_EVT_PD_INSERTED:
4105 if (megasas_get_pd_list(instance) == 0) {
4106 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
4107 for (j = 0;
4108 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4109 j++) {
4110
4111 pd_index =
4112 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4113
4114 sdev1 =
4115 scsi_device_lookup(host, i, j, 0);
4116
4117 if (instance->pd_list[pd_index].driveState
4118 == MR_PD_STATE_SYSTEM) {
4119 if (!sdev1) {
4120 scsi_add_device(host, i, j, 0);
4121 }
4122
4123 if (sdev1)
4124 scsi_device_put(sdev1);
4125 }
4126 }
4127 }
4128 }
4129 doscan = 0;
4130 break;
4131
4002 case MR_EVT_PD_REMOVED: 4132 case MR_EVT_PD_REMOVED:
4133 if (megasas_get_pd_list(instance) == 0) {
4134 megasas_get_pd_list(instance);
4135 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
4136 for (j = 0;
4137 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4138 j++) {
4139
4140 pd_index =
4141 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4142
4143 sdev1 =
4144 scsi_device_lookup(host, i, j, 0);
4145
4146 if (instance->pd_list[pd_index].driveState
4147 == MR_PD_STATE_SYSTEM) {
4148 if (sdev1) {
4149 scsi_device_put(sdev1);
4150 }
4151 } else {
4152 if (sdev1) {
4153 scsi_remove_device(sdev1);
4154 scsi_device_put(sdev1);
4155 }
4156 }
4157 }
4158 }
4159 }
4160 doscan = 0;
4161 break;
4162
4163 case MR_EVT_LD_OFFLINE:
4164 case MR_EVT_LD_DELETED:
4165 megasas_get_ld_list(instance);
4166 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
4167 for (j = 0;
4168 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4169 j++) {
4170
4171 ld_index =
4172 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4173
4174 sdev1 = scsi_device_lookup(host,
4175 i + MEGASAS_MAX_LD_CHANNELS,
4176 j,
4177 0);
4178
4179 if (instance->ld_ids[ld_index] != 0xff) {
4180 if (sdev1) {
4181 scsi_device_put(sdev1);
4182 }
4183 } else {
4184 if (sdev1) {
4185 scsi_remove_device(sdev1);
4186 scsi_device_put(sdev1);
4187 }
4188 }
4189 }
4190 }
4191 doscan = 0;
4192 break;
4193 case MR_EVT_LD_CREATED:
4194 megasas_get_ld_list(instance);
4195 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
4196 for (j = 0;
4197 j < MEGASAS_MAX_DEV_PER_CHANNEL;
4198 j++) {
4199 ld_index =
4200 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
4201
4202 sdev1 = scsi_device_lookup(host,
4203 i+MEGASAS_MAX_LD_CHANNELS,
4204 j, 0);
4205
4206 if (instance->ld_ids[ld_index] !=
4207 0xff) {
4208 if (!sdev1) {
4209 scsi_add_device(host,
4210 i + 2,
4211 j, 0);
4212 }
4213 }
4214 if (sdev1) {
4215 scsi_device_put(sdev1);
4216 }
4217 }
4218 }
4219 doscan = 0;
4220 break;
4003 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 4221 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4222 case MR_EVT_FOREIGN_CFG_IMPORTED:
4004 doscan = 1; 4223 doscan = 1;
4005 break; 4224 break;
4006 default: 4225 default:
@@ -4035,6 +4254,31 @@ megasas_aen_polling(struct work_struct *work)
 				}
 			}
 		}
+
+		megasas_get_ld_list(instance);
+		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+				ld_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 = scsi_device_lookup(host,
+					i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+				if (instance->ld_ids[ld_index] != 0xff) {
+					if (!sdev1) {
+						scsi_add_device(host,
+							i+2,
+							j, 0);
+					} else {
+						scsi_device_put(sdev1);
+					}
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+		}
 	}
 
 	if ( instance->aen_cmd != NULL ) {
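A note on the index arithmetic used throughout these hunks: the driver flattens
each (channel, device) pair into one array index and exposes logical drives on
the SCSI channels that follow the physical-device channels. A standalone sketch
of that math (the constant values are assumptions taken from megaraid_sas.h,
not verified here):

    #include <stdio.h>

    #define MEGASAS_MAX_PD_CHANNELS     2    /* assumption */
    #define MEGASAS_MAX_LD_CHANNELS     2    /* assumption */
    #define MEGASAS_MAX_DEV_PER_CHANNEL 128  /* assumption */

    int main(void)
    {
        int i = 1, j = 5;   /* sample channel/device pair */

        /* flat index into pd_list[] / ld_ids[] */
        int index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;

        /* LDs sit on the channels after the PD channels, so an LD's SCSI
         * channel is i + MEGASAS_MAX_PD_CHANNELS; the hunks above write
         * both i + MEGASAS_MAX_LD_CHANNELS and the literal i + 2, which
         * agree only because both constants happen to be 2 */
        printf("index=%d ld_scsi_channel=%d\n",
               index, i + MEGASAS_MAX_PD_CHANNELS);
        return 0;
    }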
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 72b28e436e32..9d8b6bf605aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION			"00.00.04.12-rc1"
-#define MEGASAS_RELDATE			"Sep. 17, 2009"
-#define MEGASAS_EXT_VERSION		"Thu Sep. 17 11:41:51 PST 2009"
+#define MEGASAS_VERSION			"00.00.04.17.1-rc1"
+#define MEGASAS_RELDATE			"Oct. 29, 2009"
+#define MEGASAS_EXT_VERSION		"Thu. Oct. 29, 11:41:51 PST 2009"
 
 /*
  * Device IDs
@@ -117,6 +117,7 @@
 #define MFI_CMD_STP				0x08
 
 #define MR_DCMD_CTRL_GET_INFO			0x01010000
+#define MR_DCMD_LD_GET_LIST			0x03010000
 
 #define MR_DCMD_CTRL_CACHE_FLUSH		0x01101000
 #define MR_FLUSH_CTRL_CACHE			0x01
@@ -349,6 +350,32 @@ struct megasas_pd_list {
 	u8	driveState;
 } __packed;
 
+/*
+ * defines the logical drive reference structure
+ */
+union MR_LD_REF {
+	struct {
+		u8	targetId;
+		u8	reserved;
+		u16	seqNum;
+	};
+	u32	ref;
+} __packed;
+
+/*
+ * defines the logical drive list structure
+ */
+struct MR_LD_LIST {
+	u32	ldCount;
+	u32	reserved;
+	struct {
+		union MR_LD_REF	ref;
+		u8	state;
+		u8	reserved[3];
+		u64	size;
+	} ldList[MAX_LOGICAL_DRIVES];
+} __packed;
+
 /*
  * SAS controller properties
  */
@@ -637,6 +664,8 @@ struct megasas_ctrl_info {
 #define MEGASAS_MAX_LD				64
 #define MEGASAS_MAX_PD				(MEGASAS_MAX_PD_CHANNELS * \
 						MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
+						MEGASAS_MAX_DEV_PER_CHANNEL)
 
 #define MEGASAS_DBG_LVL				1
 
@@ -1187,6 +1216,7 @@ struct megasas_instance {
 	struct megasas_register_set __iomem *reg_set;
 
 	struct megasas_pd_list	pd_list[MEGASAS_MAX_PD];
+	u8			ld_ids[MEGASAS_MAX_LD_IDS];
 	s8 init_id;
 
 	u16 max_num_sge;
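The ld_ids[] array added above is most naturally filled from a MR_LD_LIST
response: every slot defaults to 0xff ("no logical drive here") and each
entry's targetId marks its own slot, which is the sentinel the AEN handlers
test with != 0xff. A minimal userspace sketch; the struct layout mirrors the
header, while the helper name, stdint types, and MEGASAS_MAX_LD_IDS value are
illustrative assumptions:

    #include <stdint.h>
    #include <string.h>

    #define MAX_LOGICAL_DRIVES 64
    #define MEGASAS_MAX_LD_IDS 256   /* assumption: 2 channels * 128 devices */

    struct mr_ld_ref {
        uint8_t  target_id;
        uint8_t  reserved;
        uint16_t seq_num;
    };

    struct mr_ld_list_entry {
        struct mr_ld_ref ref;
        uint8_t  state;
        uint8_t  rsvd[3];
        uint64_t size;
    };

    static void fill_ld_ids(uint8_t *ld_ids,
                            const struct mr_ld_list_entry *e, uint32_t count)
    {
        memset(ld_ids, 0xff, MEGASAS_MAX_LD_IDS);   /* 0xff = slot empty */
        for (uint32_t i = 0; i < count && i < MAX_LOGICAL_DRIVES; i++)
            ld_ids[e[i].ref.target_id] = e[i].ref.target_id;
    }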
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 70c4c2467dd8..ba8e128de238 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -44,6 +44,7 @@ config SCSI_MPT2SAS
 	tristate "LSI MPT Fusion SAS 2.0 Device Driver"
 	depends on PCI && SCSI
 	select SCSI_SAS_ATTRS
+	select RAID_ATTRS
 	---help---
 	This driver supports PCI-Express SAS 6Gb/s Host Adapters.
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 914168105297..9958d847a88d 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
  *          scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.13
+ *  mpi2.h Version:  02.00.14
  *
  *  Version History
  *  ---------------
@@ -53,6 +53,10 @@
  *                      bytes reserved.
  *                      Added RAID Accelerator functionality.
  *  07-30-09  02.00.13  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  10-28-09  02.00.14  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added MSI-x index mask and shift for Reply Post Host
+ *                      Index register.
+ *                      Added function code for Host Based Discovery Action.
  *  --------------------------------------------------------------------------
  */
 
@@ -78,7 +82,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x0D)
+#define MPI2_HEADER_VERSION_UNIT            (0x0E)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -232,9 +236,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
 #define MPI2_REPLY_FREE_HOST_INDEX_OFFSET       (0x00000048)
 
 /*
- * Offset for the Reply Descriptor Post Queue
+ * Defines for the Reply Descriptor Post Queue
  */
 #define MPI2_REPLY_POST_HOST_INDEX_OFFSET       (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK         (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK               (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT              (24)
 
 /*
  * Defines for the HCBSize and address
@@ -497,12 +504,13 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
 #define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST      (0x24) /* Target Command Buffer Post Base */
 #define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST      (0x25) /* Target Command Buffer Post List */
 #define MPI2_FUNCTION_RAID_ACCELERATOR              (0x2C) /* RAID Accelerator*/
+/* Host Based Discovery Action */
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION   (0x2F)
 
 
 
 /* Doorbell functions */
 #define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET        (0x40)
-/* #define MPI2_FUNCTION_IO_UNIT_RESET              (0x41) */
 #define MPI2_FUNCTION_HANDSHAKE                     (0x42)
 
 
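The new mask and shift let one 32-bit write to the Reply Post Host Index
register carry two fields: the reply-post free index in the low 24 bits and
the MSI-x index in the top byte. A standalone sketch of composing that value
(only the defines come from the header; the helper itself is illustrative):

    #include <stdint.h>

    #define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
    #define MPI2_RPHI_MSIX_INDEX_MASK       (0xFF000000)
    #define MPI2_RPHI_MSIX_INDEX_SHIFT      (24)

    static uint32_t reply_post_host_index(uint32_t host_index,
                                          uint8_t msix_index)
    {
        /* low 24 bits: reply post index; high byte: MSI-x vector index */
        return (host_index & MPI2_REPLY_POST_HOST_INDEX_MASK) |
               (((uint32_t)msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT) &
                MPI2_RPHI_MSIX_INDEX_MASK);
    }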
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 1611c57a6fdf..cf0ac9f40c97 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.12
+ *    mpi2_cnfg.h Version:  02.00.13
  *
  *  Version History
  *  ---------------
@@ -107,6 +107,8 @@
  *                      to SAS Device Page 0 Flags field.
  *                      Added PhyInfo defines for power condition.
  *                      Added Ethernet configuration pages.
+ *  10-28-09  02.00.13  Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ *                      Added SAS PHY Page 4 structure and defines.
  *  --------------------------------------------------------------------------
  */
 
@@ -712,6 +714,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
 #define MPI2_IOUNITPAGE1_PAGEVERSION                    (0x04)
 
 /* IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY    (0x00000800)
 #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE          (0x00000600)
 #define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE        (0x00000000)
 #define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE       (0x00000200)
@@ -2291,6 +2294,26 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
 #define MPI2_SASPHY3_PAGEVERSION            (0x00)
 
 
+/* SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+	MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;             /* 0x00 */
+	U16                                 Reserved1;          /* 0x08 */
+	U8                                  Reserved2;          /* 0x0A */
+	U8                                  Flags;              /* 0x0B */
+	U8                                  InitialFrame[28];   /* 0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+	Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION            (0x00)
+
+/* values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID      (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME       (0x01)
+
+
+
+
 /****************************************************************************
 *   SAS Port Config Pages
 ****************************************************************************/
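The two Flags bits in SAS PHY Page 4 classify the captured InitialFrame: the
frame is only meaningful when FRAME_VALID is set, and SATA_FRAME then selects
between a SATA FIS and a SAS IDENTIFY frame. A small illustrative decoder (the
helper and its return convention are assumptions, not part of the header):

    #include <stdint.h>

    #define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
    #define MPI2_SASPHY4_FLAGS_SATA_FRAME  (0x01)

    /* 0 = no valid frame captured, 1 = SAS frame, 2 = SATA frame */
    static int classify_initial_frame(uint8_t flags)
    {
        if (!(flags & MPI2_SASPHY4_FLAGS_FRAME_VALID))
            return 0;
        return (flags & MPI2_SASPHY4_FLAGS_SATA_FRAME) ? 2 : 1;
    }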
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index 65fcaa31cb30..c4adf76b49d9 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -5,23 +5,24 @@
  Copyright (c) 2000-2009 LSI Corporation.
 
  ---------------------------------------
- Header Set Release Version:    02.00.12
- Header Set Release Date:       05-06-09
+ Header Set Release Version:    02.00.14
+ Header Set Release Date:       10-28-09
  ---------------------------------------
 
  Filename               Current version     Prior version
  ----------             ---------------     -------------
- mpi2.h                 02.00.12            02.00.11
- mpi2_cnfg.h            02.00.11            02.00.10
- mpi2_init.h            02.00.07            02.00.06
- mpi2_ioc.h             02.00.11            02.00.10
- mpi2_raid.h            02.00.03            02.00.03
- mpi2_sas.h             02.00.02            02.00.02
+ mpi2.h                 02.00.14            02.00.13
+ mpi2_cnfg.h            02.00.13            02.00.12
+ mpi2_init.h            02.00.08            02.00.07
+ mpi2_ioc.h             02.00.13            02.00.12
+ mpi2_raid.h            02.00.04            02.00.04
+ mpi2_sas.h             02.00.03            02.00.02
  mpi2_targ.h            02.00.03            02.00.03
- mpi2_tool.h            02.00.03            02.00.02
+ mpi2_tool.h            02.00.04            02.00.04
  mpi2_type.h            02.00.00            02.00.00
- mpi2_ra.h              02.00.00
- mpi2_history.txt       02.00.11            02.00.12
+ mpi2_ra.h              02.00.00            02.00.00
+ mpi2_hbd.h             02.00.00
+ mpi2_history.txt       02.00.14            02.00.13
 
 
  * Date      Version   Description
@@ -65,6 +66,11 @@ mpi2.h
  *                      MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
  *                      bytes reserved.
  *                      Added RAID Accelerator functionality.
+ *  07-30-09  02.00.13  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  10-28-09  02.00.14  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added MSI-x index mask and shift for Reply Post Host
+ *                      Index register.
+ *                      Added function code for Host Based Discovery Action.
  *  --------------------------------------------------------------------------
 
 mpi2_cnfg.h
@@ -155,6 +161,15 @@ mpi2_cnfg.h
  *                      Added expander reduced functionality data to SAS
  *                      Expander Page 0.
  *                      Added SAS PHY Page 2 and SAS PHY Page 3.
+ *  07-30-09  02.00.12  Added IO Unit Page 7.
+ *                      Added new device ids.
+ *                      Added SAS IO Unit Page 5.
+ *                      Added partial and slumber power management capable flags
+ *                      to SAS Device Page 0 Flags field.
+ *                      Added PhyInfo defines for power condition.
+ *                      Added Ethernet configuration pages.
+ *  10-28-09  02.00.13  Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ *                      Added SAS PHY Page 4 structure and defines.
  *  --------------------------------------------------------------------------
 
 mpi2_init.h
@@ -172,6 +187,10 @@ mpi2_init.h
  *                      Query Asynchronous Event.
  *                      Defined two new bits in the SlotStatus field of the SCSI
  *                      Enclosure Processor Request and Reply.
+ *  10-28-09  02.00.08  Added defines for decoding the ResponseInfo bytes for
+ *                      both SCSI IO Error Reply and SCSI Task Management Reply.
+ *                      Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ *                      Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
  *  --------------------------------------------------------------------------
 
 mpi2_ioc.h
@@ -246,6 +265,20 @@ mpi2_ioc.h
  *                      Added two new reason codes for SAS Device Status Change
  *                      Event.
  *                      Added new event:  SAS PHY Counter.
+ *  07-30-09  02.00.12  Added GPIO Interrupt event define and structure.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ *                      Added new product id family for 2208.
+ *  10-28-09  02.00.13  Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ *                      Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ *                      Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ *                      Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ *                      Added Host Based Discovery Phy Event data.
+ *                      Added defines for ProductID Product field
+ *                      (MPI2_FW_HEADER_PID_).
+ *                      Modified values for SAS ProductID Family
+ *                      (MPI2_FW_HEADER_PID_FAMILY_).
  *  --------------------------------------------------------------------------
 
 mpi2_raid.h
@@ -256,6 +289,8 @@ mpi2_raid.h
  *  05-21-08  02.00.03  Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
  *                      the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
  *                      can be sized by the build environment.
+ *  07-30-09  02.00.04  Added proper define for the Use Default Settings bit of
+ *                      VolumeCreationFlags and marked the old one as obsolete.
  *  --------------------------------------------------------------------------
 
 mpi2_sas.h
@@ -264,6 +299,8 @@ mpi2_sas.h
  *                      Control Request.
  *  10-02-08  02.00.02  Added Set IOC Parameter Operation to SAS IO Unit Control
  *                      Request.
+ *  10-28-09  02.00.03  Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ *                      to MPI2_SGE_IO_UNION since it supports chained SGLs.
  *  --------------------------------------------------------------------------
 
 mpi2_targ.h
@@ -283,6 +320,10 @@ mpi2_tool.h
  *                      structures and defines.
  *  02-29-08  02.00.02  Modified various names to make them 32-character unique.
  *  05-06-09  02.00.03  Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ *  07-30-09  02.00.04  Added ExtendedType field to DiagnosticBufferPost request
+ *                      and reply messages.
+ *                      Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ *                      Incremented MPI2_DIAG_BUF_TYPE_COUNT.
  *  --------------------------------------------------------------------------
 
 mpi2_type.h
@@ -293,20 +334,26 @@ mpi2_ra.h
  *  05-06-09  02.00.00  Initial version.
  *  --------------------------------------------------------------------------
 
+mpi2_hbd.h
+ *  10-28-09  02.00.00  Initial version.
+ *  --------------------------------------------------------------------------
+
+
 mpi2_history.txt         Parts list history
 
-Filename     02.00.12
-----------   --------
-mpi2.h       02.00.12
-mpi2_cnfg.h  02.00.11
-mpi2_init.h  02.00.07
-mpi2_ioc.h   02.00.11
-mpi2_raid.h  02.00.03
-mpi2_sas.h   02.00.02
-mpi2_targ.h  02.00.03
-mpi2_tool.h  02.00.03
-mpi2_type.h  02.00.00
-mpi2_ra.h    02.00.00
+Filename     02.00.14   02.00.13   02.00.12
+----------   --------   --------   --------
+mpi2.h       02.00.14   02.00.13   02.00.12
+mpi2_cnfg.h  02.00.13   02.00.12   02.00.11
+mpi2_init.h  02.00.08   02.00.07   02.00.07
+mpi2_ioc.h   02.00.13   02.00.12   02.00.11
+mpi2_raid.h  02.00.04   02.00.04   02.00.03
+mpi2_sas.h   02.00.03   02.00.02   02.00.02
+mpi2_targ.h  02.00.03   02.00.03   02.00.03
+mpi2_tool.h  02.00.04   02.00.04   02.00.03
+mpi2_type.h  02.00.00   02.00.00   02.00.00
+mpi2_ra.h    02.00.00   02.00.00   02.00.00
+mpi2_hbd.h   02.00.00
 
 Filename     02.00.11   02.00.10   02.00.09   02.00.08   02.00.07   02.00.06
 ----------   --------   --------   --------   --------   --------   --------
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 563e56d2e945..6541945e97c3 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
  *          Title:  MPI SCSI initiator mode messages and structures
  *  Creation Date:  June 23, 2006
  *
- *    mpi2_init.h Version:  02.00.07
+ *    mpi2_init.h Version:  02.00.08
  *
  *  Version History
  *  ---------------
@@ -27,6 +27,10 @@
  *                      Query Asynchronous Event.
  *                      Defined two new bits in the SlotStatus field of the SCSI
  *                      Enclosure Processor Request and Reply.
+ *  10-28-09  02.00.08  Added defines for decoding the ResponseInfo bytes for
+ *                      both SCSI IO Error Reply and SCSI Task Management Reply.
+ *                      Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ *                      Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
  *  --------------------------------------------------------------------------
  */
 
@@ -254,6 +258,11 @@ typedef struct _MPI2_SCSI_IO_REPLY
 #define MPI2_SCSI_STATE_AUTOSENSE_FAILED        (0x02)
 #define MPI2_SCSI_STATE_AUTOSENSE_VALID         (0x01)
 
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE            (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE           (0)
+
 #define MPI2_SCSI_TASKTAG_UNKNOWN               (0xFFFF)
 
 
@@ -327,6 +336,7 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
 	U16                     IOCStatus;          /* 0x0E */
 	U32                     IOCLogInfo;         /* 0x10 */
 	U32                     TerminationCount;   /* 0x14 */
+	U32                     ResponseInfo;       /* 0x18 */
 } MPI2_SCSI_TASK_MANAGE_REPLY,
   MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
   Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
@@ -339,8 +349,20 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
 #define MPI2_SCSITASKMGMT_RSP_TM_FAILED                 (0x05)
 #define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED              (0x08)
 #define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN            (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG         (0x0A)
 #define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC          (0x80)
 
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE            (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE           (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2                  (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2                 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1                  (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1                 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0                  (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0                 (24)
+
 
 /****************************************************************************
 *  SCSI Enclosure Processor messages
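The ResponseInfo masks split the 32-bit field into a reason code in the low
byte and three additional response bytes ARI2..ARI0 above it. A sketch of
extracting the reason code from a task-management reply; the helper is an
assumption, and it presumes the field has already been converted from the
IOC's little-endian representation:

    #include <stdint.h>

    #define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE  (0x000000FF)
    #define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)

    static uint8_t tm_reason_code(uint32_t response_info)
    {
        /* response_info must be CPU-endian (le32_to_cpu'd in the driver) */
        return (uint8_t)((response_info & MPI2_SCSITASKMGMT_RI_MASK_REASONCODE)
                         >> MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE);
    }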
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index ea51ce868690..754938422f6a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.12
+ *  mpi2_ioc.h Version:  02.00.13
  *
  *  Version History
  *  ---------------
@@ -87,6 +87,17 @@
  *  07-30-09  02.00.12  Added GPIO Interrupt event define and structure.
  *                      Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
  *                      Added new product id family for 2208.
+ *  10-28-09  02.00.13  Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ *                      Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ *                      Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ *                      Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ *                      Added Host Based Discovery Phy Event data.
+ *                      Added defines for ProductID Product field
+ *                      (MPI2_FW_HEADER_PID_).
+ *                      Modified values for SAS ProductID Family
+ *                      (MPI2_FW_HEADER_PID_FAMILY_).
  *  --------------------------------------------------------------------------
  */
 
@@ -119,8 +130,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST
 	U16                     MsgVersion;                     /* 0x0C */
 	U16                     HeaderVersion;                  /* 0x0E */
 	U32                     Reserved5;                      /* 0x10 */
-	U32                     Reserved6;                      /* 0x14 */
-	U16                     Reserved7;                      /* 0x18 */
+	U16                     Reserved6;                      /* 0x14 */
+	U8                      Reserved7;                      /* 0x16 */
+	U8                      HostMSIxVectors;                /* 0x17 */
+	U16                     Reserved8;                      /* 0x18 */
 	U16                     SystemRequestFrameSize;         /* 0x1A */
 	U16                     ReplyDescriptorPostQueueDepth;  /* 0x1C */
 	U16                     ReplyFreeQueueDepth;            /* 0x1E */
@@ -215,7 +228,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
 	U8                      MaxChainDepth;                  /* 0x14 */
 	U8                      WhoInit;                        /* 0x15 */
 	U8                      NumberOfPorts;                  /* 0x16 */
-	U8                      Reserved2;                      /* 0x17 */
+	U8                      MaxMSIxVectors;                 /* 0x17 */
 	U16                     RequestCredit;                  /* 0x18 */
 	U16                     ProductID;                      /* 0x1A */
 	U32                     IOCCapabilities;                /* 0x1C */
@@ -233,7 +246,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY
 	U8                      MaxVolumes;                     /* 0x37 */
 	U16                     MaxDevHandle;                   /* 0x38 */
 	U16                     MaxPersistentEntries;           /* 0x3A */
-	U32                     Reserved4;                      /* 0x3C */
+	U16                     MinDevHandle;                   /* 0x3C */
+	U16                     Reserved4;                      /* 0x3E */
 } MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
   Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
 
@@ -269,6 +283,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
 /* ProductID field uses MPI2_FW_HEADER_PID_ */
 
 /* IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY   (0x00010000)
 #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX            (0x00008000)
 #define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR       (0x00004000)
 #define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY           (0x00002000)
@@ -453,6 +468,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
 #define MPI2_EVENT_LOG_ENTRY_ADDED                  (0x0021)
 #define MPI2_EVENT_SAS_PHY_COUNTER                  (0x0022)
 #define MPI2_EVENT_GPIO_INTERRUPT                   (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY         (0x0024)
 
 
 /* Log Entry Added Event data */
@@ -793,6 +809,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
   MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;
 
 /* values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER                  (0x00)
 #define MPI2_EVENT_SAS_TOPO_ES_ADDED                        (0x01)
 #define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING               (0x02)
 #define MPI2_EVENT_SAS_TOPO_ES_RESPONDING                   (0x03)
@@ -878,6 +895,44 @@ typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
  * */
 
 
+/* Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+	U8          Flags;                      /* 0x00 */
+	U8          NegotiatedLinkRate;         /* 0x01 */
+	U8          PhyNum;                     /* 0x02 */
+	U8          PhysicalPort;               /* 0x03 */
+	U32         Reserved1;                  /* 0x04 */
+	U8          InitialFrame[28];           /* 0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS,
+	Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t;
+
+/* values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID        (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME         (0x01)
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for
+ * the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+	MPI2_EVENT_HBD_PHY_SAS      Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+	Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+	U8                          DescriptorType;     /* 0x00 */
+	U8                          Reserved1;          /* 0x01 */
+	U16                         Reserved2;          /* 0x02 */
+	U32                         Reserved3;          /* 0x04 */
+	MPI2_EVENT_HBD_DESCRIPTOR   Descriptor;         /* 0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY,
+	Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataHbdPhy_t;
+
+/* values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS               (0x01)
+
+
+
 /****************************************************************************
 *   EventAck message
 ****************************************************************************/
@@ -1126,13 +1181,17 @@ typedef struct _MPI2_FW_IMAGE_HEADER
 #define MPI2_FW_HEADER_PID_TYPE_MASK            (0xF000)
 #define MPI2_FW_HEADER_PID_TYPE_SAS             (0x2000)
 
-#define MPI2_FW_HEADER_PID_PROD_MASK            (0x0F00)
-#define MPI2_FW_HEADER_PID_PROD_A               (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_A                       (0x0000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK                    (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI   (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI                 (0x0700)
+
 
 #define MPI2_FW_HEADER_PID_FAMILY_MASK          (0x00FF)
 /* SAS */
-#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0010)
-#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS      (0x0011)
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS      (0x0014)
 
 /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
 
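The ProductID word in the firmware image header now decodes into three fields:
type (bits 15:12), product (bits 11:8), and family (bits 7:0). An illustrative
decoder using the masks above (the helper itself is an assumption):

    #include <stdint.h>

    #define MPI2_FW_HEADER_PID_TYPE_MASK   (0xF000)
    #define MPI2_FW_HEADER_PID_PROD_MASK   (0x0F00)
    #define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)

    static void decode_product_id(uint16_t pid, uint16_t *type,
                                  uint16_t *prod, uint16_t *family)
    {
        *type   = pid & MPI2_FW_HEADER_PID_TYPE_MASK;    /* 0x2000 = SAS */
        *prod   = pid & MPI2_FW_HEADER_PID_PROD_MASK;
        *family = pid & MPI2_FW_HEADER_PID_FAMILY_MASK;  /* 0x0013 = 2108 */
    }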
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 8a42b136cf53..2d8aeed51392 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Serial Attached SCSI structures and definitions
  *  Creation Date:  February 9, 2007
  *
- *  mpi2.h Version:  02.00.02
+ *  mpi2.h Version:  02.00.03
  *
  *  Version History
  *  ---------------
@@ -18,6 +18,8 @@
  *                      Control Request.
  *  10-02-08  02.00.02  Added Set IOC Parameter Operation to SAS IO Unit Control
  *                      Request.
+ *  10-28-09  02.00.03  Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ *                      to MPI2_SGE_IO_UNION since it supports chained SGLs.
  *  --------------------------------------------------------------------------
  */
 
@@ -160,7 +162,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
 	U32                     Reserved4;          /* 0x14 */
 	U32                     DataLength;         /* 0x18 */
 	U8                      CommandFIS[20];     /* 0x1C */
-	MPI2_SIMPLE_SGE_UNION   SGL;                /* 0x20 */
+	MPI2_SGE_IO_UNION       SGL;                /* 0x20 */
 } MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
   Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 89d02401b9ec..88e6eebc3159 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -107,8 +107,7 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
 	if (ret)
 		return ret;
 
-	printk(KERN_INFO "setting logging_level(0x%08x)\n",
-	    mpt2sas_fwfault_debug);
+	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
 	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
 		ioc->fwfault_debug = mpt2sas_fwfault_debug;
 	return 0;
@@ -1222,6 +1221,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	u32 memap_sz;
 	u32 pio_sz;
 	int i, r = 0;
+	u64 pio_chip = 0;
+	u64 chip_phys = 0;
 
 	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
 	    ioc->name, __func__));
@@ -1255,12 +1256,13 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
 			if (pio_sz)
 				continue;
-			ioc->pio_chip = pci_resource_start(pdev, i);
+			pio_chip = (u64)pci_resource_start(pdev, i);
 			pio_sz = pci_resource_len(pdev, i);
 		} else {
 			if (memap_sz)
 				continue;
 			ioc->chip_phys = pci_resource_start(pdev, i);
+			chip_phys = (u64)ioc->chip_phys;
 			memap_sz = pci_resource_len(pdev, i);
 			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
 			if (ioc->chip == NULL) {
@@ -1280,10 +1282,10 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
 	    ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
 	    "IO-APIC enabled"), ioc->pci_irq);
-	printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n",
-	    ioc->name, ioc->chip_phys, ioc->chip, memap_sz);
-	printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n",
-	    ioc->name, ioc->pio_chip, pio_sz);
+	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
+	    ioc->name, (unsigned long long)pio_chip, pio_sz);
 
 	return 0;
 
@@ -3573,6 +3575,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 
 	init_waitqueue_head(&ioc->reset_wq);
 
+	ioc->fwfault_debug = mpt2sas_fwfault_debug;
+
 	/* base internal command bits */
 	mutex_init(&ioc->base_cmds.mutex);
 	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
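The printk changes above illustrate a portability rule: resource_size_t is
32-bit or 64-bit depending on the kernel config, so the value is widened to
unsigned long long before being printed with %llx. A userspace model of the
same pattern (the 32-bit typedef is an assumption standing in for a build
without 64-bit physical addresses):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t resource_size_t;   /* assumption: 32-bit build */

    int main(void)
    {
        resource_size_t chip_phys = 0xfe9fc000u;   /* sample BAR address */

        /* always widen: the format string then matches on every config */
        printf("iomem(0x%016llx)\n", (unsigned long long)chip_phys);
        return 0;
    }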
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index bb4f14656afa..e18b0544c38f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"03.100.03.00"
-#define MPT2SAS_MAJOR_VERSION		03
+#define MPT2SAS_DRIVER_VERSION		"04.100.01.00"
+#define MPT2SAS_MAJOR_VERSION		04
 #define MPT2SAS_MINOR_VERSION		100
-#define MPT2SAS_BUILD_VERSION		03
+#define MPT2SAS_BUILD_VERSION		01
 #define MPT2SAS_RELEASE_VERSION		00
 
 /*
@@ -323,6 +323,7 @@ struct _sas_device {
  * @device_info: bitfield provides detailed info about the hidden components
  * @num_pds: number of hidden raid components
  * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
  */
 struct _raid_device {
 	struct list_head list;
@@ -336,6 +337,7 @@ struct _raid_device {
 	u32	device_info;
 	u8	num_pds;
 	u8	responding;
+	u8	percent_complete;
 };
 
 /**
@@ -464,7 +466,6 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
  * @pdev: pci pdev object
  * @chip: memory mapped register space
  * @chip_phys: physical address prior to mapping
- * @pio_chip: I/O mapped register space
  * @logging_level: see mpt2sas_debug.h
  * @fwfault_debug: debugging FW timeouts
  * @ir_firmware: IR firmware present
@@ -587,8 +588,7 @@ struct MPT2SAS_ADAPTER {
 	char		tmp_string[MPT_STRING_LENGTH];
 	struct pci_dev	*pdev;
 	Mpi2SystemInterfaceRegs_t __iomem *chip;
-	unsigned long	chip_phys;
-	unsigned long	pio_chip;
+	resource_size_t	chip_phys;
 	int		logging_level;
 	int		fwfault_debug;
 	u8		ir_firmware;
@@ -853,6 +853,8 @@ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 	*mpi_reply, Mpi2IOUnitPage1_t *config_page);
 int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 	*mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
+int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
 int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 	*mpi_reply, Mpi2IOCPage8_t *config_page);
 int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 594a389c6526..411c27d7f787 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -324,7 +324,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	if (r != 0)
 		goto out;
 	if (mpi_request->Action ==
-	    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) {
+	    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+	    mpi_request->Action ==
+	    MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
 		ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
 		    MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
 		    mem.page_dma);
@@ -882,7 +884,7 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 }
 
 /**
- * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 0
+ * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
@@ -907,7 +909,7 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
 	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
 	mpi_request.Header.PageNumber = 1;
-	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
 	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
@@ -922,6 +924,49 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 }
 
 /**
+ * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+    *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	_config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+	return r;
+}
+
+/**
  * mpt2sas_config_get_expander_pg0 - obtain expander page 0
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
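A typical caller pairs the new setter with the existing getter in a
read-modify-write sequence; the setter then persists the page to both the
CURRENT and NVRAM copies. A hedged kernel-style sketch only — the fixed
sizeof() is a simplification, since real callers must size the buffer from
the number of HBA phys as the comment above requires:

    Mpi2ConfigReply_t mpi_reply;
    Mpi2SasIOUnitPage1_t *pg1;
    u16 sz = sizeof(Mpi2SasIOUnitPage1_t);  /* assumption: single-phy size */

    pg1 = kzalloc(sz, GFP_KERNEL);
    if (!pg1)
        return -ENOMEM;
    if (!mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, pg1, sz)) {
        /* ... modify pg1 fields here ... */
        mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, pg1, sz);
    }
    kfree(pg1);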
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 84a124f8e21f..fa9bf83819d5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -891,6 +891,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 
  issue_host_reset:
 	if (issue_reset) {
+		ret = -ENODATA;
 		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 		    mpi_request->Function ==
 		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
@@ -2202,14 +2203,10 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
 	karg.data_out_size = karg32.data_out_size;
 	karg.max_sense_bytes = karg32.max_sense_bytes;
 	karg.data_sge_offset = karg32.data_sge_offset;
-	memcpy(&karg.reply_frame_buf_ptr, &karg32.reply_frame_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.data_in_buf_ptr, &karg32.data_in_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.data_out_buf_ptr, &karg32.data_out_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.sense_data_ptr, &karg32.sense_data_ptr,
-	    sizeof(uint32_t));
+	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
 	return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
 }
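The compat_ptr() conversion above matters because the old memcpy filled only
the low 4 bytes of each 8-byte kernel pointer, leaving the upper half as
whatever the stack happened to hold. A userspace model of what the conversion
does with a 32-bit user handle (the function name is a stand-in for the
kernel helper):

    #include <stdint.h>

    typedef uint32_t compat_uptr_t;

    /* zero-extend the 32-bit handle into a full native pointer instead of
     * byte-copying it into a partially initialized 64-bit field */
    static void *compat_ptr_model(compat_uptr_t uptr)
    {
        return (void *)(uintptr_t)uptr;
    }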
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index efabea1a3ce4..c7ec3f174782 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -52,6 +52,7 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/raid_class.h>
 
 #include "mpt2sas_base.h"
 
@@ -133,6 +134,9 @@ struct fw_event_work {
 	void			*event_data;
 };
 
+/* raid transport support */
+static struct raid_template *mpt2sas_raid_template;
+
 /**
  * struct _scsi_io_transfer - scsi io transfer
  * @handle: sas device handle (assigned by firmware)
@@ -1305,7 +1309,6 @@ _scsih_slave_alloc(struct scsi_device *sdev)
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
 	struct scsi_target *starget;
 	struct _raid_device *raid_device;
-	struct _sas_device *sas_device;
 	unsigned long flags;
 
 	sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1332,21 +1335,8 @@ _scsih_slave_alloc(struct scsi_device *sdev)
 		if (raid_device)
 			raid_device->sdev = sdev; /* raid is single lun */
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-	} else {
-		/* set TLR bit for SSP devices */
-		if (!(ioc->facts.IOCCapabilities &
-		    MPI2_IOCFACTS_CAPABILITY_TLR))
-			goto out;
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-		   sas_device_priv_data->sas_target->sas_address);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		if (sas_device && sas_device->device_info &
-		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)
-			sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON;
 	}
 
- out:
 	return 0;
 }
 
@@ -1419,6 +1409,140 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volume_status_flags;
+	u8 percent_complete = 0;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!raid_device)
+		goto out;
+
+	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
+		percent_complete = raid_device->percent_complete;
+ out:
+	raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume state
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volstate;
+	enum raid_state state = RAID_STATE_UNKNOWN;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!raid_device)
+		goto out;
+
+	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+		state = RAID_STATE_RESYNCING;
+		goto out;
+	}
+
+	switch (vol_pg0.VolumeState) {
+	case MPI2_RAID_VOL_STATE_OPTIMAL:
+	case MPI2_RAID_VOL_STATE_ONLINE:
+		state = RAID_STATE_ACTIVE;
+		break;
+	case MPI2_RAID_VOL_STATE_DEGRADED:
+		state = RAID_STATE_DEGRADED;
+		break;
+	case MPI2_RAID_VOL_STATE_FAILED:
+	case MPI2_RAID_VOL_STATE_MISSING:
+		state = RAID_STATE_OFFLINE;
+		break;
+	}
+ out:
+	raid_set_state(mpt2sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @raid_device: raid_device object
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
+{
+	enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+	switch (raid_device->volume_type) {
+	case MPI2_RAID_VOL_TYPE_RAID0:
+		level = RAID_LEVEL_0;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID10:
+		level = RAID_LEVEL_10;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1E:
+		level = RAID_LEVEL_1E;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1:
+		level = RAID_LEVEL_1;
+		break;
+	}
+
+	raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+/**
  * _scsih_get_volume_capabilities - volume capabilities
  * @ioc: per adapter object
  * @sas_device: the raid_device object
@@ -1479,6 +1603,32 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+	/* only for TAPE */
+	if (sdev->type != TYPE_TAPE)
+		return;
+
+	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+		return;
+
+	sas_enable_tlr(sdev);
+	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+	return;
+
+}
+
+/**
  * _scsih_slave_configure - device configure routine.
  * @sdev: scsi device struct
  *
@@ -1574,6 +1724,8 @@ _scsih_slave_configure(struct scsi_device *sdev)
 		    (unsigned long long)raid_device->wwid,
 		    raid_device->num_pds, ds);
 		_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+		/* raid transport support */
+		_scsih_set_level(sdev, raid_device);
 		return 0;
 	}
 
@@ -1621,8 +1773,10 @@ _scsih_slave_configure(struct scsi_device *sdev)
 
 	_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
 
-	if (ssp_target)
+	if (ssp_target) {
 		sas_read_port_mode_page(sdev);
+		_scsih_enable_tlr(ioc, sdev);
+	}
 	return 0;
 }
 
@@ -2908,8 +3062,9 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 
 	} else
 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
-	if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON))
+	/* Make sure Device is not raid volume */
+	if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
+	    sas_is_tlr_enabled(scmd->device))
 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
 
 	smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -3298,10 +3453,12 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
 	if (!sas_device_priv_data->tlr_snoop_check) {
 		sas_device_priv_data->tlr_snoop_check++;
-		if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
-		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
-			sas_device_priv_data->flags &=
-			    ~MPT_DEVICE_TLR_ON;
+		if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
+		    sas_is_tlr_enabled(scmd->device) &&
+		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
+			sas_disable_tlr(scmd->device);
+			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
+		}
 	}
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
@@ -5170,11 +5327,33 @@ static void
5170_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, 5327_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
5171 struct fw_event_work *fw_event) 5328 struct fw_event_work *fw_event)
5172{ 5329{
5330 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
 5331 struct _raid_device *raid_device;
5332 unsigned long flags;
5333 u16 handle;
5334
5173#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5335#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5174 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 5336 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
5175 _scsih_sas_ir_operation_status_event_debug(ioc, 5337 _scsih_sas_ir_operation_status_event_debug(ioc,
5176 fw_event->event_data); 5338 event_data);
5177#endif 5339#endif
5340
5341 /* code added for raid transport support */
5342 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
5343
5344 handle = le16_to_cpu(event_data->VolDevHandle);
5345
5346 spin_lock_irqsave(&ioc->raid_device_lock, flags);
5347 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
5348 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
5349
5350 if (!raid_device)
5351 return;
5352
 5353 raid_device->percent_complete =
 5354 event_data->PercentComplete;
 5355 }
5178} 5357}
5179 5358
5180/** 5359/**
@@ -5998,6 +6177,8 @@ _scsih_remove(struct pci_dev *pdev)
5998 struct _sas_port *mpt2sas_port; 6177 struct _sas_port *mpt2sas_port;
5999 struct _sas_device *sas_device; 6178 struct _sas_device *sas_device;
6000 struct _sas_node *expander_sibling; 6179 struct _sas_node *expander_sibling;
6180 struct _raid_device *raid_device, *next;
6181 struct MPT2SAS_TARGET *sas_target_priv_data;
6001 struct workqueue_struct *wq; 6182 struct workqueue_struct *wq;
6002 unsigned long flags; 6183 unsigned long flags;
6003 6184
@@ -6011,6 +6192,21 @@ _scsih_remove(struct pci_dev *pdev)
6011 if (wq) 6192 if (wq)
6012 destroy_workqueue(wq); 6193 destroy_workqueue(wq);
6013 6194
6195 /* release all the volumes */
6196 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
6197 list) {
6198 if (raid_device->starget) {
6199 sas_target_priv_data =
6200 raid_device->starget->hostdata;
6201 sas_target_priv_data->deleted = 1;
6202 scsi_remove_target(&raid_device->starget->dev);
6203 }
6204 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
6205 "(0x%016llx)\n", ioc->name, raid_device->handle,
6206 (unsigned long long) raid_device->wwid);
6207 _scsih_raid_device_remove(ioc, raid_device);
6208 }
6209
6014 /* free ports attached to the sas_host */ 6210 /* free ports attached to the sas_host */
6015 retry_again: 6211 retry_again:
6016 list_for_each_entry(mpt2sas_port, 6212 list_for_each_entry(mpt2sas_port,
@@ -6373,6 +6569,13 @@ static struct pci_driver scsih_driver = {
6373#endif 6569#endif
6374}; 6570};
6375 6571
6572/* raid transport support */
6573static struct raid_function_template mpt2sas_raid_functions = {
6574 .cookie = &scsih_driver_template,
6575 .is_raid = _scsih_is_raid,
6576 .get_resync = _scsih_get_resync,
6577 .get_state = _scsih_get_state,
6578};
6376 6579
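Of the three callbacks wired up here, only _scsih_is_raid() also appears in this diff's qcmd/io_done hunks. A plausible sketch of the get_resync side, assuming it publishes the PercentComplete value that the IR operation status event handler above caches in raid_device->percent_complete, using the stock raid_set_resync() accessor from <linux/raid_class.h> (the by-target lookup helper here is hypothetical):

/* Hedged sketch of a get_resync callback; the lookup helper is
 * hypothetical, raid_set_resync() is the standard raid_class accessor. */
static void
_scsih_get_resync_sketch(struct device *dev)
{
	struct scsi_target *starget = to_scsi_target(dev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(dev_to_shost(&starget->dev));
	struct _raid_device *raid_device;
	unsigned long flags;
	int percent_complete = 0;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
	    starget->channel);	/* hypothetical lookup by target id/channel */
	if (raid_device)
		percent_complete = raid_device->percent_complete;
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
}

Once raid_class_attach() succeeds in _scsih_init(), the raid class exposes these values as sysfs attributes on the volume's target device, which is what the "raid transport support" comments refer to.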
6377/** 6580/**
6378 * _scsih_init - main entry point for this driver. 6581 * _scsih_init - main entry point for this driver.
@@ -6392,6 +6595,12 @@ _scsih_init(void)
6392 sas_attach_transport(&mpt2sas_transport_functions); 6595 sas_attach_transport(&mpt2sas_transport_functions);
6393 if (!mpt2sas_transport_template) 6596 if (!mpt2sas_transport_template)
6394 return -ENODEV; 6597 return -ENODEV;
6598 /* raid transport support */
6599 mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions);
6600 if (!mpt2sas_raid_template) {
6601 sas_release_transport(mpt2sas_transport_template);
6602 return -ENODEV;
6603 }
6395 6604
6396 mpt2sas_base_initialize_callback_handler(); 6605 mpt2sas_base_initialize_callback_handler();
6397 6606
@@ -6426,8 +6635,11 @@ _scsih_init(void)
6426 mpt2sas_ctl_init(); 6635 mpt2sas_ctl_init();
6427 6636
6428 error = pci_register_driver(&scsih_driver); 6637 error = pci_register_driver(&scsih_driver);
6429 if (error) 6638 if (error) {
6639 /* raid transport support */
6640 raid_class_release(mpt2sas_raid_template);
6430 sas_release_transport(mpt2sas_transport_template); 6641 sas_release_transport(mpt2sas_transport_template);
6642 }
6431 6643
6432 return error; 6644 return error;
6433} 6645}
@@ -6445,7 +6657,8 @@ _scsih_exit(void)
6445 6657
6446 pci_unregister_driver(&scsih_driver); 6658 pci_unregister_driver(&scsih_driver);
6447 6659
6448 sas_release_transport(mpt2sas_transport_template); 6660 mpt2sas_ctl_exit();
6661
6449 mpt2sas_base_release_callback_handler(scsi_io_cb_idx); 6662 mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
6450 mpt2sas_base_release_callback_handler(tm_cb_idx); 6663 mpt2sas_base_release_callback_handler(tm_cb_idx);
6451 mpt2sas_base_release_callback_handler(base_cb_idx); 6664 mpt2sas_base_release_callback_handler(base_cb_idx);
@@ -6457,7 +6670,10 @@ _scsih_exit(void)
6457 mpt2sas_base_release_callback_handler(tm_tr_cb_idx); 6670 mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
6458 mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx); 6671 mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
6459 6672
6460 mpt2sas_ctl_exit(); 6673 /* raid transport support */
6674 raid_class_release(mpt2sas_raid_template);
6675 sas_release_transport(mpt2sas_transport_template);
6676
6461} 6677}
6462 6678
6463module_init(_scsih_init); 6679module_init(_scsih_init);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 3a82872bad44..789f9ee7f001 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -855,6 +855,17 @@ rphy_to_ioc(struct sas_rphy *rphy)
855 return shost_priv(shost); 855 return shost_priv(shost);
856} 856}
857 857
858static struct _sas_phy *
859_transport_find_local_phy(struct MPT2SAS_ADAPTER *ioc, struct sas_phy *phy)
860{
861 int i;
862
863 for (i = 0; i < ioc->sas_hba.num_phys; i++)
864 if (ioc->sas_hba.phy[i].phy == phy)
865 return(&ioc->sas_hba.phy[i]);
866 return NULL;
867}
868
858/** 869/**
859 * _transport_get_linkerrors - 870 * _transport_get_linkerrors -
860 * @phy: The sas phy object 871 * @phy: The sas phy object
@@ -870,14 +881,8 @@ _transport_get_linkerrors(struct sas_phy *phy)
870 struct _sas_phy *mpt2sas_phy; 881 struct _sas_phy *mpt2sas_phy;
871 Mpi2ConfigReply_t mpi_reply; 882 Mpi2ConfigReply_t mpi_reply;
872 Mpi2SasPhyPage1_t phy_pg1; 883 Mpi2SasPhyPage1_t phy_pg1;
873 int i;
874 884
875 for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys && 885 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
876 !mpt2sas_phy; i++) {
877 if (ioc->sas_hba.phy[i].phy != phy)
878 continue;
879 mpt2sas_phy = &ioc->sas_hba.phy[i];
880 }
881 886
882 if (!mpt2sas_phy) /* this phy not on sas_host */ 887 if (!mpt2sas_phy) /* this phy not on sas_host */
883 return -EINVAL; 888 return -EINVAL;
@@ -971,14 +976,8 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
971 struct _sas_phy *mpt2sas_phy; 976 struct _sas_phy *mpt2sas_phy;
972 Mpi2SasIoUnitControlReply_t mpi_reply; 977 Mpi2SasIoUnitControlReply_t mpi_reply;
973 Mpi2SasIoUnitControlRequest_t mpi_request; 978 Mpi2SasIoUnitControlRequest_t mpi_request;
974 int i;
975 979
976 for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys && 980 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
977 !mpt2sas_phy; i++) {
978 if (ioc->sas_hba.phy[i].phy != phy)
979 continue;
980 mpt2sas_phy = &ioc->sas_hba.phy[i];
981 }
982 981
983 if (!mpt2sas_phy) /* this phy not on sas_host */ 982 if (!mpt2sas_phy) /* this phy not on sas_host */
984 return -EINVAL; 983 return -EINVAL;
@@ -1006,6 +1005,173 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
1006} 1005}
1007 1006
1008/** 1007/**
1008 * _transport_phy_enable - enable/disable phys
1009 * @phy: The sas phy object
1010 * @enable: enable phy when true
1011 *
 1012 * Only supports sas_host direct-attached phys.
1013 * Returns 0 for success, non-zero for failure.
1014 */
1015static int
1016_transport_phy_enable(struct sas_phy *phy, int enable)
1017{
1018 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1019 struct _sas_phy *mpt2sas_phy;
1020 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1021 Mpi2ConfigReply_t mpi_reply;
1022 u16 ioc_status;
1023 u16 sz;
1024 int rc = 0;
1025
1026 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
1027
1028 if (!mpt2sas_phy) /* this phy not on sas_host */
1029 return -EINVAL;
1030
1031 /* sas_iounit page 1 */
1032 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
1033 sizeof(Mpi2SasIOUnit1PhyData_t));
1034 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
1035 if (!sas_iounit_pg1) {
1036 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1037 ioc->name, __FILE__, __LINE__, __func__);
1038 rc = -ENOMEM;
1039 goto out;
1040 }
1041 if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
1042 sas_iounit_pg1, sz))) {
1043 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1044 ioc->name, __FILE__, __LINE__, __func__);
1045 rc = -ENXIO;
1046 goto out;
1047 }
1048 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1049 MPI2_IOCSTATUS_MASK;
1050 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1051 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1052 ioc->name, __FILE__, __LINE__, __func__);
1053 rc = -EIO;
1054 goto out;
1055 }
1056
1057 if (enable)
1058 sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
1059 &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
1060 else
1061 sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
1062 |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
1063
1064 mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
1065
1066 out:
1067 kfree(sas_iounit_pg1);
1068 return rc;
1069}
1070
1071/**
1072 * _transport_phy_speed - set phy min/max link rates
1073 * @phy: The sas phy object
1074 * @rates: rates defined in sas_phy_linkrates
1075 *
 1076 * Only supports sas_host direct-attached phys.
1077 * Returns 0 for success, non-zero for failure.
1078 */
1079static int
1080_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
1081{
1082 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1083 struct _sas_phy *mpt2sas_phy;
1084 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1085 Mpi2SasPhyPage0_t phy_pg0;
1086 Mpi2ConfigReply_t mpi_reply;
1087 u16 ioc_status;
1088 u16 sz;
1089 int i;
1090 int rc = 0;
1091
1092 mpt2sas_phy = _transport_find_local_phy(ioc, phy);
1093
1094 if (!mpt2sas_phy) /* this phy not on sas_host */
1095 return -EINVAL;
1096
1097 if (!rates->minimum_linkrate)
1098 rates->minimum_linkrate = phy->minimum_linkrate;
1099 else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
1100 rates->minimum_linkrate = phy->minimum_linkrate_hw;
1101
1102 if (!rates->maximum_linkrate)
1103 rates->maximum_linkrate = phy->maximum_linkrate;
1104 else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
1105 rates->maximum_linkrate = phy->maximum_linkrate_hw;
1106
1107 /* sas_iounit page 1 */
1108 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
1109 sizeof(Mpi2SasIOUnit1PhyData_t));
1110 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
1111 if (!sas_iounit_pg1) {
1112 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1113 ioc->name, __FILE__, __LINE__, __func__);
1114 rc = -ENOMEM;
1115 goto out;
1116 }
1117 if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
1118 sas_iounit_pg1, sz))) {
1119 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1120 ioc->name, __FILE__, __LINE__, __func__);
1121 rc = -ENXIO;
1122 goto out;
1123 }
1124 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1125 MPI2_IOCSTATUS_MASK;
1126 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1127 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1128 ioc->name, __FILE__, __LINE__, __func__);
1129 rc = -EIO;
1130 goto out;
1131 }
1132
1133 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
1134 if (mpt2sas_phy->phy_id != i) {
1135 sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
1136 (ioc->sas_hba.phy[i].phy->minimum_linkrate +
1137 (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
1138 } else {
1139 sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
1140 (rates->minimum_linkrate +
1141 (rates->maximum_linkrate << 4));
1142 }
1143 }
1144
1145 if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
1146 sz)) {
1147 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1148 ioc->name, __FILE__, __LINE__, __func__);
1149 rc = -ENXIO;
1150 goto out;
1151 }
1152
1153 /* link reset */
1154 _transport_phy_reset(phy, 0);
1155
1156 /* read phy page 0, then update the rates in the sas transport phy */
1157 if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
1158 mpt2sas_phy->phy_id)) {
1159 phy->minimum_linkrate = _transport_convert_phy_link_rate(
1160 phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
1161 phy->maximum_linkrate = _transport_convert_phy_link_rate(
1162 phy_pg0.ProgrammedLinkRate >> 4);
1163 phy->negotiated_linkrate = _transport_convert_phy_link_rate(
1164 phy_pg0.NegotiatedLinkRate &
1165 MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
1166 }
1167
1168 out:
1169 kfree(sas_iounit_pg1);
1170 return rc;
1171}
1172
1173
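The MaxMinLinkRate byte manipulated in the loop above packs the programmed minimum link rate into the low nibble and the maximum into the high nibble, which is all the (min + (max << 4)) expression does. A small self-contained sketch of that encoding:

#include <stdint.h>

/* Pack/unpack the MaxMinLinkRate byte used in SAS IO unit page 1:
 * low nibble = programmed minimum rate, high nibble = maximum rate. */
static inline uint8_t pack_max_min_link_rate(uint8_t min_rate, uint8_t max_rate)
{
	return (min_rate & 0x0F) | (uint8_t)(max_rate << 4);
}

static inline uint8_t unpack_min_link_rate(uint8_t v)
{
	return v & 0x0F;
}

static inline uint8_t unpack_max_link_rate(uint8_t v)
{
	return v >> 4;
}

The same nibble convention shows up again right after the link reset, where ProgrammedLinkRate is split with MPI2_SAS_PRATE_MIN_RATE_MASK and a >> 4 shift before being written back into the sas transport phy.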
1174/**
1009 * _transport_smp_handler - transport portal for smp passthru 1175 * _transport_smp_handler - transport portal for smp passthru
1010 * @shost: shost object 1176 * @shost: shost object
1011 * @rphy: sas transport rphy object 1177 * @rphy: sas transport rphy object
@@ -1207,6 +1373,8 @@ struct sas_function_template mpt2sas_transport_functions = {
1207 .get_enclosure_identifier = _transport_get_enclosure_identifier, 1373 .get_enclosure_identifier = _transport_get_enclosure_identifier,
1208 .get_bay_identifier = _transport_get_bay_identifier, 1374 .get_bay_identifier = _transport_get_bay_identifier,
1209 .phy_reset = _transport_phy_reset, 1375 .phy_reset = _transport_phy_reset,
1376 .phy_enable = _transport_phy_enable,
1377 .set_phy_speed = _transport_phy_speed,
1210 .smp_handler = _transport_smp_handler, 1378 .smp_handler = _transport_smp_handler,
1211}; 1379};
1212 1380
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c2f1032496cb..f80c1da8f6ca 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -654,7 +654,7 @@ static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
654 } 654 }
655 chip = &pm8001_chips[ent->driver_data]; 655 chip = &pm8001_chips[ent->driver_data];
656 SHOST_TO_SAS_HA(shost) = 656 SHOST_TO_SAS_HA(shost) =
657 kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); 657 kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
658 if (!SHOST_TO_SAS_HA(shost)) { 658 if (!SHOST_TO_SAS_HA(shost)) {
659 rc = -ENOMEM; 659 rc = -ENOMEM;
660 goto err_out_free_host; 660 goto err_out_free_host;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8371d917a9a2..49ac4148493b 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1640,8 +1640,10 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1640 uint16_t mb[MAILBOX_REGISTER_COUNT], i; 1640 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1641 int err; 1641 int err;
1642 1642
1643 spin_unlock_irq(ha->host->host_lock);
1643 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname, 1644 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
1644 &ha->pdev->dev); 1645 &ha->pdev->dev);
1646 spin_lock_irq(ha->host->host_lock);
1645 if (err) { 1647 if (err) {
1646 printk(KERN_ERR "Failed to load image \"%s\" err %d\n", 1648 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1647 ql1280_board_tbl[ha->devnum].fwname, err); 1649 ql1280_board_tbl[ha->devnum].fwname, err);
@@ -1699,8 +1701,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1699 return -ENOMEM; 1701 return -ENOMEM;
1700#endif 1702#endif
1701 1703
1704 spin_unlock_irq(ha->host->host_lock);
1702 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname, 1705 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
1703 &ha->pdev->dev); 1706 &ha->pdev->dev);
1707 spin_lock_irq(ha->host->host_lock);
1704 if (err) { 1708 if (err) {
1705 printk(KERN_ERR "Failed to load image \"%s\" err %d\n", 1709 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1706 ql1280_board_tbl[ha->devnum].fwname, err); 1710 ql1280_board_tbl[ha->devnum].fwname, err);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3a9f5b288aee..90d1e062ec4f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -11,7 +11,9 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12 12
13static int qla24xx_vport_disable(struct fc_vport *, bool); 13static int qla24xx_vport_disable(struct fc_vport *, bool);
14 14static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
15int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
16static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
15/* SYSFS attributes --------------------------------------------------------- */ 17/* SYSFS attributes --------------------------------------------------------- */
16 18
17static ssize_t 19static ssize_t
@@ -1168,6 +1170,28 @@ qla2x00_total_isp_aborts_show(struct device *dev,
1168} 1170}
1169 1171
1170static ssize_t 1172static ssize_t
1173qla24xx_84xx_fw_version_show(struct device *dev,
1174 struct device_attribute *attr, char *buf)
1175{
1176 int rval = QLA_SUCCESS;
1177 uint16_t status[2] = {0, 0};
1178 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1179 struct qla_hw_data *ha = vha->hw;
1180
1181 if (IS_QLA84XX(ha) && ha->cs84xx) {
 1182 if (ha->cs84xx->op_fw_version == 0)
 1183 rval = qla84xx_verify_chip(vha, status);
1185
1186 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1187 return snprintf(buf, PAGE_SIZE, "%u\n",
1188 (uint32_t)ha->cs84xx->op_fw_version);
1189 }
1190
1191 return snprintf(buf, PAGE_SIZE, "\n");
1192}
1193
1194static ssize_t
1171qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, 1195qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1172 char *buf) 1196 char *buf)
1173{ 1197{
@@ -1281,6 +1305,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1281 qla2x00_optrom_fcode_version_show, NULL); 1305 qla2x00_optrom_fcode_version_show, NULL);
1282static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 1306static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1283 NULL); 1307 NULL);
1308static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1309 NULL);
1284static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, 1310static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1285 NULL); 1311 NULL);
1286static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); 1312static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@@ -1310,6 +1336,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
1310 &dev_attr_optrom_efi_version, 1336 &dev_attr_optrom_efi_version,
1311 &dev_attr_optrom_fcode_version, 1337 &dev_attr_optrom_fcode_version,
1312 &dev_attr_optrom_fw_version, 1338 &dev_attr_optrom_fw_version,
1339 &dev_attr_84xx_fw_version,
1313 &dev_attr_total_isp_aborts, 1340 &dev_attr_total_isp_aborts,
1314 &dev_attr_mpi_version, 1341 &dev_attr_mpi_version,
1315 &dev_attr_phy_version, 1342 &dev_attr_phy_version,
@@ -1504,8 +1531,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1504 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1531 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1505 fcport->loop_id, fcport->d_id.b.domain, 1532 fcport->loop_id, fcport->d_id.b.domain,
1506 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1533 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1507
1508 qla2x00_abort_fcport_cmds(fcport);
1509} 1534}
1510 1535
1511static int 1536static int
@@ -1795,6 +1820,581 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1795 return 0; 1820 return 0;
1796} 1821}
1797 1822
1823/* BSG support for ELS/CT pass through */
 1824static inline srb_t *
1825qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1826{
1827 srb_t *sp;
1828 struct qla_hw_data *ha = vha->hw;
1829 struct srb_bsg_ctx *ctx;
1830
1831 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1832 if (!sp)
1833 goto done;
1834 ctx = kzalloc(size, GFP_KERNEL);
 1835 if (!ctx) {
 1836 mempool_free(sp, ha->srb_mempool);
 1837 sp = NULL;
 1838 goto done;
 1839 }
1839
1840 memset(sp, 0, sizeof(*sp));
1841 sp->fcport = fcport;
1842 sp->ctx = ctx;
1843done:
1844 return sp;
1845}
1846
1847static int
1848qla2x00_process_els(struct fc_bsg_job *bsg_job)
1849{
1850 struct fc_rport *rport;
1851 fc_port_t *fcport;
1852 struct Scsi_Host *host;
1853 scsi_qla_host_t *vha;
1854 struct qla_hw_data *ha;
1855 srb_t *sp;
1856 const char *type;
1857 int req_sg_cnt, rsp_sg_cnt;
1858 int rval = (DRIVER_ERROR << 16);
1859 uint16_t nextlid = 0;
1860 struct srb_bsg *els;
1861
1862 /* Multiple SG's are not supported for ELS requests */
1863 if (bsg_job->request_payload.sg_cnt > 1 ||
1864 bsg_job->reply_payload.sg_cnt > 1) {
1865 DEBUG2(printk(KERN_INFO
1866 "multiple SG's are not supported for ELS requests"
1867 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1868 bsg_job->request_payload.sg_cnt,
1869 bsg_job->reply_payload.sg_cnt));
1870 rval = -EPERM;
1871 goto done;
1872 }
1873
1874 /* ELS request for rport */
1875 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1876 rport = bsg_job->rport;
1877 fcport = *(fc_port_t **) rport->dd_data;
1878 host = rport_to_shost(rport);
1879 vha = shost_priv(host);
1880 ha = vha->hw;
1881 type = "FC_BSG_RPT_ELS";
1882
 1883 /* make sure the rport is logged in;
 1884 * if not, perform fabric login
 1885 */
1886 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1887 DEBUG2(qla_printk(KERN_WARNING, ha,
1888 "failed to login port %06X for ELS passthru\n",
1889 fcport->d_id.b24));
1890 rval = -EIO;
1891 goto done;
1892 }
1893 } else {
1894 host = bsg_job->shost;
1895 vha = shost_priv(host);
1896 ha = vha->hw;
1897 type = "FC_BSG_HST_ELS_NOLOGIN";
1898
1899 /* Allocate a dummy fcport structure, since functions
1900 * preparing the IOCB and mailbox command retrieves port
1901 * specific information from fcport structure. For Host based
1902 * ELS commands there will be no fcport structure allocated
1903 */
1904 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1905 if (!fcport) {
1906 rval = -ENOMEM;
1907 goto done;
1908 }
1909
1910 /* Initialize all required fields of fcport */
1911 fcport->vha = vha;
1912 fcport->vp_idx = vha->vp_idx;
1913 fcport->d_id.b.al_pa =
1914 bsg_job->request->rqst_data.h_els.port_id[0];
1915 fcport->d_id.b.area =
1916 bsg_job->request->rqst_data.h_els.port_id[1];
1917 fcport->d_id.b.domain =
1918 bsg_job->request->rqst_data.h_els.port_id[2];
1919 fcport->loop_id =
1920 (fcport->d_id.b.al_pa == 0xFD) ?
1921 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1922 }
1923
1924 if (!vha->flags.online) {
1925 DEBUG2(qla_printk(KERN_WARNING, ha,
1926 "host not online\n"));
1927 rval = -EIO;
1928 goto done;
1929 }
1930
1931 req_sg_cnt =
1932 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1933 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1934 if (!req_sg_cnt) {
1935 rval = -ENOMEM;
1936 goto done_free_fcport;
1937 }
1938 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1939 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 1940 if (!rsp_sg_cnt) {
 1941 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 1942 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 1943 rval = -ENOMEM;
 1944 goto done_free_fcport;
 1945 }
1944
 1945 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
 1946 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 1947 DEBUG2(printk(KERN_INFO
 1948 "dma mapping resulted in different sg counts "
 1949 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
 1950 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1952 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1953 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1954 rval = -EAGAIN;
1955 goto done_unmap_sg;
1956 }
1957
1958 /* Alloc SRB structure */
1959 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1960 if (!sp) {
1961 rval = -ENOMEM;
1962 goto done_unmap_sg;
1963 }
1964
1965 els = sp->ctx;
1966 els->ctx.type =
1967 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1968 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1969 els->bsg_job = bsg_job;
1970
1971 DEBUG2(qla_printk(KERN_INFO, ha,
1972 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1973 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1974 bsg_job->request->rqst_data.h_els.command_code,
1975 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1976 fcport->d_id.b.al_pa));
1977
1978 rval = qla2x00_start_sp(sp);
1979 if (rval != QLA_SUCCESS) {
1980 kfree(sp->ctx);
1981 mempool_free(sp, ha->srb_mempool);
1982 rval = -EIO;
1983 goto done_unmap_sg;
1984 }
1985 return rval;
1986
1987done_unmap_sg:
1988 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1989 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1990 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1991 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1992 goto done_free_fcport;
1993
1994done_free_fcport:
1995 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
1996 kfree(fcport);
1997done:
1998 return rval;
1999}
2000
2001static int
2002qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2003{
2004 srb_t *sp;
2005 struct Scsi_Host *host = bsg_job->shost;
2006 scsi_qla_host_t *vha = shost_priv(host);
2007 struct qla_hw_data *ha = vha->hw;
2008 int rval = (DRIVER_ERROR << 16);
2009 int req_sg_cnt, rsp_sg_cnt;
2010 uint16_t loop_id;
2011 struct fc_port *fcport;
2012 char *type = "FC_BSG_HST_CT";
2013 struct srb_bsg *ct;
2014
2015 /* pass through is supported only for ISP 4Gb or higher */
2016 if (!IS_FWI2_CAPABLE(ha)) {
2017 DEBUG2(qla_printk(KERN_INFO, ha,
2018 "scsi(%ld):Firmware is not capable to support FC "
2019 "CT pass thru\n", vha->host_no));
2020 rval = -EPERM;
2021 goto done;
2022 }
2023
2024 req_sg_cnt =
2025 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2026 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2027 if (!req_sg_cnt) {
2028 rval = -ENOMEM;
2029 goto done;
2030 }
2031
2032 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2033 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 2034 if (!rsp_sg_cnt) {
 2035 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 2036 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 2037 rval = -ENOMEM;
 2038 goto done;
 2039 }
2038
 2040 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
 2041 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 2042 DEBUG2(qla_printk(KERN_WARNING, ha,
 2043 "dma mapping resulted in different sg counts "
 2044 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
 2045 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2046 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2047 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2048 rval = -EAGAIN;
2049 goto done_unmap_sg;
2050 }
2051
2052 if (!vha->flags.online) {
2053 DEBUG2(qla_printk(KERN_WARNING, ha,
2054 "host not online\n"));
2055 rval = -EIO;
2056 goto done_unmap_sg;
2057 }
2058
2059 loop_id =
2060 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2061 >> 24;
2062 switch (loop_id) {
2063 case 0xFC:
2064 loop_id = cpu_to_le16(NPH_SNS);
2065 break;
2066 case 0xFA:
2067 loop_id = vha->mgmt_svr_loop_id;
2068 break;
2069 default:
2070 DEBUG2(qla_printk(KERN_INFO, ha,
2071 "Unknown loop id: %x\n", loop_id));
2072 rval = -EINVAL;
2073 goto done_unmap_sg;
2074 }
2075
2076 /* Allocate a dummy fcport structure, since functions preparing the
2077 * IOCB and mailbox command retrieves port specific information
2078 * from fcport structure. For Host based ELS commands there will be
2079 * no fcport structure allocated
2080 */
2081 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 2082 if (!fcport) {
2084 rval = -ENOMEM;
2085 goto done_unmap_sg;
2086 }
2087
2088 /* Initialize all required fields of fcport */
2089 fcport->vha = vha;
2090 fcport->vp_idx = vha->vp_idx;
2091 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2092 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2093 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2094 fcport->loop_id = loop_id;
2095
2096 /* Alloc SRB structure */
2097 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2098 if (!sp) {
2099 rval = -ENOMEM;
2100 goto done_free_fcport;
2101 }
2102
2103 ct = sp->ctx;
2104 ct->ctx.type = SRB_CT_CMD;
2105 ct->bsg_job = bsg_job;
2106
2107 DEBUG2(qla_printk(KERN_INFO, ha,
2108 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2109 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2110 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2111 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2112 fcport->d_id.b.al_pa));
2113
2114 rval = qla2x00_start_sp(sp);
2115 if (rval != QLA_SUCCESS) {
2116 kfree(sp->ctx);
2117 mempool_free(sp, ha->srb_mempool);
2118 rval = -EIO;
2119 goto done_free_fcport;
2120 }
2121 return rval;
2122
2123done_free_fcport:
2124 kfree(fcport);
2125done_unmap_sg:
2126 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2127 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2128 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2129 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2130done:
2131 return rval;
2132}
2133
2134static int
2135qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2136{
2137 struct Scsi_Host *host = bsg_job->shost;
2138 scsi_qla_host_t *vha = shost_priv(host);
2139 struct qla_hw_data *ha = vha->hw;
2140 int rval;
 2141 uint8_t command_sent = 0;
 2142 uint32_t vendor_cmd;
 2143 char *type = "FC_BSG_HST_VENDOR";
2144 struct msg_echo_lb elreq;
2145 uint16_t response[MAILBOX_REGISTER_COUNT];
2146 uint8_t* fw_sts_ptr;
2147 uint8_t *req_data;
2148 dma_addr_t req_data_dma;
2149 uint32_t req_data_len;
2150 uint8_t *rsp_data;
2151 dma_addr_t rsp_data_dma;
2152 uint32_t rsp_data_len;
2153
2154 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2155 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2156 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2157 rval = -EBUSY;
2158 goto done;
2159 }
2160
2161 if (!vha->flags.online) {
2162 DEBUG2(qla_printk(KERN_WARNING, ha,
2163 "host not online\n"));
2164 rval = -EIO;
2165 goto done;
2166 }
2167
2168 elreq.req_sg_cnt =
2169 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2170 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2171 if (!elreq.req_sg_cnt) {
2172 rval = -ENOMEM;
2173 goto done;
2174 }
2175 elreq.rsp_sg_cnt =
2176 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2177 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 2178 if (!elreq.rsp_sg_cnt) {
 2179 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 2180 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 2181 rval = -ENOMEM;
 2182 goto done;
 2183 }
2182
 2184 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
 2185 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 2186 DEBUG2(printk(KERN_INFO
 2187 "dma mapping resulted in different sg counts "
 2188 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
 2189 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2190 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2191 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2192 rval = -EAGAIN;
2193 goto done_unmap_sg;
2194 }
2195 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
 2196 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
 2197 &req_data_dma, GFP_KERNEL);
 2198 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
 2199 &rsp_data_dma, GFP_KERNEL);
 2200 if (!req_data || !rsp_data) {
 2201 rval = -ENOMEM;
 2202 goto done_unmap_sg;
 2203 }
2201
2202 /* Copy the request buffer in req_data now */
2203 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2204 bsg_job->request_payload.sg_cnt, req_data,
2205 req_data_len);
2206
2207 elreq.send_dma = req_data_dma;
2208 elreq.rcv_dma = rsp_data_dma;
2209 elreq.transfer_size = req_data_len;
2210
2211 /* Vendor cmd : loopback or ECHO diagnostic
2212 * Options:
2213 * Loopback : Either internal or external loopback
2214 * ECHO: ECHO ELS or Vendor specific FC4 link data
2215 */
2216 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2217 elreq.options =
2218 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2219 + 1);
2220
2221 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2222 case QL_VND_LOOPBACK:
2223 if (ha->current_topology != ISP_CFG_F) {
2224 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2225
2226 DEBUG2(qla_printk(KERN_INFO, ha,
2227 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2228 vha->host_no, type, vendor_cmd, elreq.options));
2229
2230 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2231 rval = qla2x00_loopback_test(vha, &elreq, response);
2232 if (IS_QLA81XX(ha)) {
2233 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2234 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2235 "ISP\n", __func__, vha->host_no));
2236 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2237 qla2xxx_wake_dpc(vha);
2238 }
2239 }
2240 } else {
2241 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2242 DEBUG2(qla_printk(KERN_INFO, ha,
2243 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2244 vha->host_no, type, vendor_cmd, elreq.options));
2245
2246 command_sent = INT_DEF_LB_ECHO_CMD;
2247 rval = qla2x00_echo_test(vha, &elreq, response);
2248 }
2249 break;
2250 case QLA84_RESET:
2251 if (!IS_QLA84XX(vha->hw)) {
2252 rval = -EINVAL;
2253 DEBUG16(printk(
2254 "%s(%ld): 8xxx exiting.\n",
2255 __func__, vha->host_no));
 2256 goto done_unmap_sg;
2257 }
2258 rval = qla84xx_reset(vha, &elreq, bsg_job);
2259 break;
2260 case QLA84_MGMT_CMD:
2261 if (!IS_QLA84XX(vha->hw)) {
2262 rval = -EINVAL;
2263 DEBUG16(printk(
2264 "%s(%ld): 8xxx exiting.\n",
2265 __func__, vha->host_no));
 2266 goto done_unmap_sg;
2267 }
2268 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2269 break;
2270 default:
2271 rval = -ENOSYS;
2272 }
2273
2274 if (rval != QLA_SUCCESS) {
2275 DEBUG2(qla_printk(KERN_WARNING, ha,
2276 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2277 rval = 0;
2278 bsg_job->reply->result = (DID_ERROR << 16);
2279 bsg_job->reply->reply_payload_rcv_len = 0;
2280 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
 2281 memcpy(fw_sts_ptr, response, sizeof(response));
2282 fw_sts_ptr += sizeof(response);
2283 *fw_sts_ptr = command_sent;
2284 } else {
2285 DEBUG2(qla_printk(KERN_WARNING, ha,
2286 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2287 rval = bsg_job->reply->result = 0;
2288 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2289 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2290 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2291 memcpy(fw_sts_ptr, response, sizeof(response));
2292 fw_sts_ptr += sizeof(response);
2293 *fw_sts_ptr = command_sent;
2294 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2295 bsg_job->reply_payload.sg_cnt, rsp_data,
2296 rsp_data_len);
2297 }
2298 bsg_job->job_done(bsg_job);
2299
2300done_unmap_sg:
2301
 2302 if (req_data)
 2303 dma_free_coherent(&ha->pdev->dev, req_data_len,
 2304 req_data, req_data_dma);
 2305 if (rsp_data)
 2306 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
 2307 rsp_data, rsp_data_dma);
2305 dma_unmap_sg(&ha->pdev->dev,
2306 bsg_job->request_payload.sg_list,
2307 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2308 dma_unmap_sg(&ha->pdev->dev,
2309 bsg_job->reply_payload.sg_list,
2310 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2311
2312done:
2313 return rval;
2314}
2315
2316static int
2317qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2318{
2319 int ret = -EINVAL;
2320
2321 switch (bsg_job->request->msgcode) {
2322 case FC_BSG_RPT_ELS:
2323 case FC_BSG_HST_ELS_NOLOGIN:
2324 ret = qla2x00_process_els(bsg_job);
2325 break;
2326 case FC_BSG_HST_CT:
2327 ret = qla2x00_process_ct(bsg_job);
2328 break;
2329 case FC_BSG_HST_VENDOR:
2330 ret = qla2x00_process_vendor_specific(bsg_job);
2331 break;
2332 case FC_BSG_HST_ADD_RPORT:
2333 case FC_BSG_HST_DEL_RPORT:
2334 case FC_BSG_RPT_CT:
2335 default:
2336 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2337 break;
2338 }
2339 return ret;
2340}
2341
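For context, these handlers are reached from user space through the FC transport's bsg node using the sg_io_v4 request format. A hedged sketch of a vendor-command submission follows; the /dev/bsg path and the vendor word values are illustrative, QL_VND_LOOPBACK comes from the qla_def.h hunk below, and struct fc_bsg_request/fc_bsg_reply are assumed to be copied locally from the kernel's scsi_bsg_fc.h, which is not an exported uapi header in this era:

/* Hedged user-space sketch: submit QL_VND_LOOPBACK through the FC bsg
 * node. vendor_cmd is a flexible array, so the request buffer must be
 * over-allocated to carry the vendor words the driver reads. */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4 */
#include "scsi_bsg_fc.h"	/* fc_bsg_request/fc_bsg_reply, copied from the kernel tree */

static int send_loopback(const char *bsg_path, void *buf, uint32_t len)
{
	size_t rq_len = sizeof(struct fc_bsg_request) + 2 * sizeof(uint32_t);
	struct fc_bsg_request *rq = calloc(1, rq_len);
	struct fc_bsg_reply rp;
	struct sg_io_v4 io;
	int fd, ret;

	if (!rq)
		return -1;
	fd = open(bsg_path, O_RDWR);	/* e.g. "/dev/bsg/fc_host5" (illustrative) */
	if (fd < 0) {
		free(rq);
		return -1;
	}

	rq->msgcode = FC_BSG_HST_VENDOR;
	rq->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_LOOPBACK;
	rq->rqst_data.h_vendor.vendor_cmd[1] = 0;	/* read back as elreq.options */

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)rq;
	io.request_len = rq_len;
	io.dout_xferp = (uintptr_t)buf;		/* pattern sent out... */
	io.dout_xfer_len = len;
	io.din_xferp = (uintptr_t)buf;		/* ...and looped back */
	io.din_xfer_len = len;
	io.response = (uintptr_t)&rp;
	io.max_response_len = sizeof(rp);

	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	free(rq);
	return ret;
}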
2342static int
2343qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2344{
2345 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2346 struct qla_hw_data *ha = vha->hw;
2347 srb_t *sp;
2348 int cnt, que;
2349 unsigned long flags;
2350 struct req_que *req;
2351 struct srb_bsg *sp_bsg;
2352
2353 /* find the bsg job from the active list of commands */
2354 spin_lock_irqsave(&ha->hardware_lock, flags);
2355 for (que = 0; que < ha->max_req_queues; que++) {
2356 req = ha->req_q_map[que];
2357 if (!req)
2358 continue;
2359
 2360 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 2361 sp = req->outstanding_cmds[cnt];
 2362
 2363 if (sp) {
 2364 sp_bsg = (struct srb_bsg *)sp->ctx;
 2365
 2366 if ((sp_bsg->ctx.type == SRB_CT_CMD ||
 2367 sp_bsg->ctx.type == SRB_ELS_CMD_RPT ||
 2368 sp_bsg->ctx.type == SRB_ELS_CMD_HST) &&
 2369 sp_bsg->bsg_job == bsg_job) {
2370 if (ha->isp_ops->abort_command(sp)) {
2371 DEBUG2(qla_printk(KERN_INFO, ha,
2372 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2373 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2374 } else {
2375 DEBUG2(qla_printk(KERN_INFO, ha,
2376 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2377 bsg_job->req->errors = bsg_job->reply->result = 0;
2378 }
2379 goto done;
2380 }
2381 }
2382 }
2383 }
2384 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2385 DEBUG2(qla_printk(KERN_INFO, ha,
2386 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2387 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2388 return 0;
2389
2390done:
 2391 spin_unlock_irqrestore(&ha->hardware_lock, flags);
 2392 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
 2393 kfree(sp->fcport);
 2394 kfree(sp->ctx);
 2395 mempool_free(sp, ha->srb_mempool);
 2396 return 0;
2396}
2397
1798struct fc_function_template qla2xxx_transport_functions = { 2398struct fc_function_template qla2xxx_transport_functions = {
1799 2399
1800 .show_host_node_name = 1, 2400 .show_host_node_name = 1,
@@ -1838,6 +2438,8 @@ struct fc_function_template qla2xxx_transport_functions = {
1838 .vport_create = qla24xx_vport_create, 2438 .vport_create = qla24xx_vport_create,
1839 .vport_disable = qla24xx_vport_disable, 2439 .vport_disable = qla24xx_vport_disable,
1840 .vport_delete = qla24xx_vport_delete, 2440 .vport_delete = qla24xx_vport_delete,
2441 .bsg_request = qla24xx_bsg_request,
2442 .bsg_timeout = qla24xx_bsg_timeout,
1841}; 2443};
1842 2444
1843struct fc_function_template qla2xxx_transport_vport_functions = { 2445struct fc_function_template qla2xxx_transport_vport_functions = {
@@ -1878,6 +2480,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1878 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 2480 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1879 .terminate_rport_io = qla2x00_terminate_rport_io, 2481 .terminate_rport_io = qla2x00_terminate_rport_io,
1880 .get_fc_host_stats = qla2x00_get_fc_host_stats, 2482 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2483 .bsg_request = qla24xx_bsg_request,
2484 .bsg_timeout = qla24xx_bsg_timeout,
1881}; 2485};
1882 2486
1883void 2487void
@@ -1906,3 +2510,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1906 speed = FC_PORTSPEED_1GBIT; 2510 speed = FC_PORTSPEED_1GBIT;
1907 fc_host_supported_speeds(vha->host) = speed; 2511 fc_host_supported_speeds(vha->host) = speed;
1908} 2512}
2513static int
2514qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2515{
2516 int ret = 0;
2517 int cmd;
2518 uint16_t cmd_status;
2519
2520 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2521
2522 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2523 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2524 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2525 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2526 &cmd_status);
2527 return ret;
2528}
2529
2530static int
2531qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2532{
2533 struct access_chip_84xx *mn;
2534 dma_addr_t mn_dma, mgmt_dma;
2535 void *mgmt_b = NULL;
2536 int ret = 0;
2537 int rsp_hdr_len, len = 0;
2538 struct qla84_msg_mgmt *ql84_mgmt;
2539
 2540 ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
 2541 if (!ql84_mgmt)
 2542 return -ENOMEM;
2541 ql84_mgmt->cmd =
2542 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2543 ql84_mgmt->mgmtp.u.mem.start_addr =
2544 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2545 ql84_mgmt->len =
2546 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2547 ql84_mgmt->mgmtp.u.config.id =
2548 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2549 ql84_mgmt->mgmtp.u.config.param0 =
2550 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2551 ql84_mgmt->mgmtp.u.config.param1 =
2552 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2553 ql84_mgmt->mgmtp.u.info.type =
2554 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2555 ql84_mgmt->mgmtp.u.info.context =
2556 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2557
2558 rsp_hdr_len = bsg_job->request_payload.payload_len;
2559
2560 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
 2561 if (mn == NULL) {
 2562 DEBUG2(printk(KERN_ERR "%s(%lu): dma alloc for fw buffer "
 2563 "failed\n", __func__, ha->host_no));
 2564 vfree(ql84_mgmt);
 2565 return -ENOMEM;
 2566 }
2566
2567 memset(mn, 0, sizeof (struct access_chip_84xx));
2568
2569 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2570 mn->entry_count = 1;
2571
2572 switch (ql84_mgmt->cmd) {
2573 case QLA84_MGMT_READ_MEM:
2574 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2575 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2576 break;
2577 case QLA84_MGMT_WRITE_MEM:
2578 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2579 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2580 break;
2581 case QLA84_MGMT_CHNG_CONFIG:
2582 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2583 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2584 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2585 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2586 break;
2587 case QLA84_MGMT_GET_INFO:
2588 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2589 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2590 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2591 break;
2592 default:
2593 ret = -EIO;
2594 goto exit_mgmt0;
2595 }
2596
 2597 len = ql84_mgmt->len;
 2598 if (len && ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
 2599 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2600 &mgmt_dma, GFP_KERNEL);
2601 if (mgmt_b == NULL) {
 2602 DEBUG2(printk(KERN_ERR "%s(%lu): dma alloc mgmt_b "
 2603 "failed\n", __func__, ha->host_no));
2604 ret = -ENOMEM;
2605 goto exit_mgmt0;
2606 }
2607 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2608 mn->dseg_count = cpu_to_le16(1);
2609 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2610 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2611 mn->dseg_length = cpu_to_le32(len);
2612
2613 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2614 memcpy(mgmt_b, ql84_mgmt->payload, len);
2615 }
2616 }
2617
 2618 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
 2619 if (ret != QLA_SUCCESS)
 2620 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
 2621 __func__, ha->host_no));
2627
2628 if (mgmt_b)
2629 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2630
2631exit_mgmt0:
 2632 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
 2633 vfree(ql84_mgmt);
 2634 return ret;
2634}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1263d9796e89..afa95614aaf8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -31,6 +31,7 @@
31#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
32#include <scsi/scsi_cmnd.h> 32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_bsg_fc.h>
34 35
35#define QLA2XXX_DRIVER_NAME "qla2xxx" 36#define QLA2XXX_DRIVER_NAME "qla2xxx"
36 37
@@ -228,6 +229,27 @@ struct srb_logio {
228 uint16_t flags; 229 uint16_t flags;
229}; 230};
230 231
232struct srb_bsg_ctx {
233#define SRB_ELS_CMD_RPT 3
234#define SRB_ELS_CMD_HST 4
235#define SRB_CT_CMD 5
236 uint16_t type;
237};
238
239struct srb_bsg {
240 struct srb_bsg_ctx ctx;
241 struct fc_bsg_job *bsg_job;
242};
243
244struct msg_echo_lb {
245 dma_addr_t send_dma;
246 dma_addr_t rcv_dma;
247 uint16_t req_sg_cnt;
248 uint16_t rsp_sg_cnt;
249 uint16_t options;
250 uint32_t transfer_size;
251};
252
231/* 253/*
232 * ISP I/O Register Set structure definitions. 254 * ISP I/O Register Set structure definitions.
233 */ 255 */
@@ -522,6 +544,8 @@ typedef struct {
522#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ 544#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
523#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ 545#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
524 546
547/* ISP mailbox loopback echo diagnostic error code */
548#define MBS_LB_RESET 0x17
525/* 549/*
526 * Firmware options 1, 2, 3. 550 * Firmware options 1, 2, 3.
527 */ 551 */
@@ -2230,6 +2254,13 @@ struct req_que {
2230 int max_q_depth; 2254 int max_q_depth;
2231}; 2255};
2232 2256
2257/* Place holder for FW buffer parameters */
2258struct qlfc_fw {
2259 void *fw_buf;
2260 dma_addr_t fw_dma;
2261 uint32_t len;
2262};
2263
2233/* 2264/*
2234 * Qlogic host adapter specific data structure. 2265 * Qlogic host adapter specific data structure.
2235*/ 2266*/
@@ -2594,6 +2625,7 @@ struct qla_hw_data {
2594 struct qla_statistics qla_stats; 2625 struct qla_statistics qla_stats;
2595 struct isp_operations *isp_ops; 2626 struct isp_operations *isp_ops;
2596 struct workqueue_struct *wq; 2627 struct workqueue_struct *wq;
2628 struct qlfc_fw fw_buf;
2597}; 2629};
2598 2630
2599/* 2631/*
@@ -2766,4 +2798,127 @@ typedef struct scsi_qla_host {
2766 2798
2767#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 2799#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
2768 2800
2801/*
2802 * BSG Vendor specific commands
2803 */
2804
2805#define QL_VND_LOOPBACK 0x01
2806#define QLA84_RESET 0x02
2807#define QLA84_UPDATE_FW 0x03
2808#define QLA84_MGMT_CMD 0x04
2809
2810/* BSG definitions for interpreting CommandSent field */
2811#define INT_DEF_LB_LOOPBACK_CMD 0
2812#define INT_DEF_LB_ECHO_CMD 1
2813
2814/* BSG Vendor specific definitions */
2815typedef struct _A84_RESET {
2816 uint16_t Flags;
2817 uint16_t Reserved;
2818#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
2819} __attribute__((packed)) A84_RESET, *PA84_RESET;
2820
2821#define A84_ISSUE_WRITE_TYPE_CMD 0
2822#define A84_ISSUE_READ_TYPE_CMD 1
2823#define A84_CLEANUP_CMD 2
2824#define A84_ISSUE_RESET_OP_FW 3
2825#define A84_ISSUE_RESET_DIAG_FW 4
2826#define A84_ISSUE_UPDATE_OPFW_CMD 5
2827#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
2828
2829struct qla84_mgmt_param {
2830 union {
2831 struct {
2832 uint32_t start_addr;
2833 } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
2834 struct {
2835 uint32_t id;
2836#define QLA84_MGMT_CONFIG_ID_UIF 1
2837#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
2838#define QLA84_MGMT_CONFIG_ID_PAUSE 3
2839#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
2840
2841 uint32_t param0;
2842 uint32_t param1;
2843 } config; /* for QLA84_MGMT_CHNG_CONFIG */
2844
2845 struct {
2846 uint32_t type;
2847#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
2848#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
2849#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
2850#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
2851#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
2852#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
2853#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
2854
2855 uint32_t context;
2856/*
2857* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
2858*/
2859#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
2860#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
2861#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
2862#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
2863#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
2864#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
2865#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
2866#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
2867#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
2868#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
2869
2870/*
2871* context definitions for QLA84_MGMT_INFO_PORT_STAT
2872*/
2873#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
2874#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
2875#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
2876#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
2877#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
2878#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
2879
2880
2881/*
2882* context definitions for QLA84_MGMT_INFO_LIF_STAT
2883*/
2884#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
2885#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
2886#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
2887#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
2888#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
2889
2890 } info; /* for QLA84_MGMT_GET_INFO */
2891 } u;
2892};
2893
2894struct qla84_msg_mgmt {
2895 uint16_t cmd;
2896#define QLA84_MGMT_READ_MEM 0x00
2897#define QLA84_MGMT_WRITE_MEM 0x01
2898#define QLA84_MGMT_CHNG_CONFIG 0x02
2899#define QLA84_MGMT_GET_INFO 0x03
2900 uint16_t rsrvd;
2901 struct qla84_mgmt_param mgmtp;/* parameters for cmd */
2902 uint32_t len; /* bytes in payload following this struct */
2903 uint8_t payload[0]; /* payload for cmd */
2904};
2905
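The qla84xx_mgmt_cmd() handler above reads its arguments positionally out of the vendor_cmd words (a u16 at word 2, then words 3..9). A hedged sketch of packing those words on the caller side for a READ_MEM request, assuming the same little-endian layout the handler decodes; submission itself works as in the earlier sg_io_v4 sketch:

/* Hedged sketch: vendor_cmd word layout consumed by qla84xx_mgmt_cmd().
 * Word 2 is read as a u16 (the A84 management opcode); parameter words
 * not used by the chosen opcode are simply left zero. */
#include <stdint.h>

static void pack_qla84_read_mem(uint32_t vendor_cmd[10],
				uint32_t start_addr, uint32_t len)
{
	vendor_cmd[0] = QLA84_MGMT_CMD;		/* dispatched in qla2x00_process_vendor_specific() */
	vendor_cmd[1] = 0;			/* read back as elreq.options */
	vendor_cmd[2] = QLA84_MGMT_READ_MEM;	/* ql84_mgmt->cmd (u16) */
	vendor_cmd[3] = start_addr;		/* mgmtp.u.mem.start_addr */
	vendor_cmd[4] = len;			/* ql84_mgmt->len */
	vendor_cmd[5] = 0;			/* mgmtp.u.config.id */
	vendor_cmd[6] = 0;			/* mgmtp.u.config.param0 */
	vendor_cmd[7] = 0;			/* mgmtp.u.config.param1 */
	vendor_cmd[8] = 0;			/* mgmtp.u.info.type */
	vendor_cmd[9] = 0;			/* mgmtp.u.info.context */
}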
2906struct msg_update_fw {
2907 /*
2908 * diag_fw = 0 operational fw
2909 * otherwise diagnostic fw
2910 * offset, len, fw_len are present to overcome the current limitation
2911 * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
2912 * specifies the byte "offset" where it fits in the fw buffer. The
2913 * number of bytes in each chunk is specified in "len". "fw_len"
2914 * is the total size of fw. The first chunk should start at offset = 0.
2915 * When offset+len == fw_len, the fw is written to the HBA.
2916 */
2917 uint32_t diag_fw;
2918 uint32_t offset;/* start offset */
2919 uint32_t len; /* num bytes in cur xfer */
2920 uint32_t fw_len; /* size of fw in bytes */
2921 uint8_t fw_bytes[0];
2922};
2923
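The comment inside msg_update_fw describes a simple chunking protocol for working around the 128KB transfer ceiling. A hedged sketch of the sender side; send_update_fw_chunk() is a hypothetical stand-in for whatever BSG submission path carries QLA84_UPDATE_FW, and the chunk size is taken from the comment:

/* Hedged sketch of the chunked firmware download described above: each
 * chunk carries its byte offset, and the HBA commits the image once
 * offset + len == fw_len. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define FW_XFER_CHUNK	(128 * 1024)	/* per-transfer ceiling from the comment */

int send_update_fw_chunk(const struct msg_update_fw *msg, size_t msg_len);

static int update_fw(const uint8_t *image, uint32_t fw_len, uint32_t diag_fw)
{
	uint32_t offset;
	int ret = 0;

	for (offset = 0; offset < fw_len && ret == 0; offset += FW_XFER_CHUNK) {
		uint32_t len = fw_len - offset < FW_XFER_CHUNK ?
				fw_len - offset : FW_XFER_CHUNK;
		struct msg_update_fw *msg = malloc(sizeof(*msg) + len);

		if (!msg)
			return -1;
		msg->diag_fw = diag_fw;	/* 0 selects the operational fw */
		msg->offset = offset;
		msg->len = len;
		msg->fw_len = fw_len;
		memcpy(msg->fw_bytes, image + offset, len);
		ret = send_update_fw_chunk(msg, sizeof(*msg) + len);
		free(msg);
	}
	return ret;
}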
2769#endif 2924#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 66a8da5d7d08..cebf4f1bb7d9 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -627,6 +627,39 @@ struct els_entry_24xx {
627 uint32_t rx_len; /* Data segment 1 length. */ 627 uint32_t rx_len; /* Data segment 1 length. */
628}; 628};
629 629
630struct els_sts_entry_24xx {
631 uint8_t entry_type; /* Entry type. */
632 uint8_t entry_count; /* Entry count. */
633 uint8_t sys_define; /* System Defined. */
634 uint8_t entry_status; /* Entry Status. */
635
636 uint32_t handle; /* System handle. */
637
638 uint16_t comp_status;
639
640 uint16_t nport_handle; /* N_PORT handle. */
641
642 uint16_t reserved_1;
643
644 uint8_t vp_index;
645 uint8_t sof_type;
646
647 uint32_t rx_xchg_address; /* Receive exchange address. */
648 uint16_t reserved_2;
649
650 uint8_t opcode;
651 uint8_t reserved_3;
652
653 uint8_t port_id[3];
654 uint8_t reserved_4;
655
656 uint16_t reserved_5;
657
658 uint16_t control_flags; /* Control flags. */
659 uint32_t total_byte_count;
660 uint32_t error_subcode_1;
661 uint32_t error_subcode_2;
662};
630/* 663/*
631 * ISP queue - Mailbox Command entry structure definition. 664 * ISP queue - Mailbox Command entry structure definition.
632 */ 665 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8bc6f53691e9..3a89bc514e2b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -60,6 +60,8 @@ extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
60extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, 60extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
61 uint16_t *); 61 uint16_t *);
62 62
63extern fc_port_t *
64qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
63/* 65/*
64 * Global Data in qla_os.c source file. 66 * Global Data in qla_os.c source file.
65 */ 67 */
@@ -76,6 +78,7 @@ extern int ql2xiidmaenable;
76extern int ql2xmaxqueues; 78extern int ql2xmaxqueues;
77extern int ql2xmultique_tag; 79extern int ql2xmultique_tag;
78extern int ql2xfwloadbin; 80extern int ql2xfwloadbin;
81extern int ql2xetsenable;
79 82
80extern int qla2x00_loop_reset(scsi_qla_host_t *); 83extern int qla2x00_loop_reset(scsi_qla_host_t *);
81extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 84extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -94,7 +97,6 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
94 97
95extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); 98extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
96 99
97extern void qla2x00_abort_fcport_cmds(fc_port_t *);
98extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, 100extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
99 struct qla_hw_data *); 101 struct qla_hw_data *);
100extern void qla2x00_free_host(struct scsi_qla_host *); 102extern void qla2x00_free_host(struct scsi_qla_host *);
@@ -154,6 +156,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
154int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 156int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
155 uint16_t, uint16_t, uint8_t); 157 uint16_t, uint16_t, uint8_t);
156extern int qla2x00_start_sp(srb_t *); 158extern int qla2x00_start_sp(srb_t *);
159extern void qla2x00_ctx_sp_free(srb_t *);
157 160
158/* 161/*
159 * Global Function Prototypes in qla_mbx.c source file. 162 * Global Function Prototypes in qla_mbx.c source file.
@@ -426,6 +429,8 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
426extern void qla2x00_init_host_attr(scsi_qla_host_t *); 429extern void qla2x00_init_host_attr(scsi_qla_host_t *);
427extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); 430extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
428extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); 431extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
432extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
433extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
429 434
430/* 435/*
431 * Global Function Prototypes in qla_dfs.c source file. 436 * Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f8e8495b743..a67b2bafb882 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -62,7 +62,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
62 ctx->free(sp); 62 ctx->free(sp);
63} 63}
64 64
65static void 65void
66qla2x00_ctx_sp_free(srb_t *sp) 66qla2x00_ctx_sp_free(srb_t *sp)
67{ 67{
68 struct srb_ctx *ctx = sp->ctx; 68 struct srb_ctx *ctx = sp->ctx;
@@ -338,6 +338,16 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
338 rval = qla2x00_init_rings(vha); 338 rval = qla2x00_init_rings(vha);
339 ha->flags.chip_reset_done = 1; 339 ha->flags.chip_reset_done = 1;
340 340
341 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
342 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
343 rval = qla84xx_init_chip(vha);
344 if (rval != QLA_SUCCESS) {
345 qla_printk(KERN_ERR, ha,
346 "Unable to initialize ISP84XX.\n");
347 qla84xx_put_chip(vha);
348 }
349 }
350
341 return (rval); 351 return (rval);
342} 352}
343 353
@@ -2216,7 +2226,7 @@ qla2x00_rport_del(void *data)
2216 * 2226 *
2217 * Returns a pointer to the allocated fcport, or NULL, if none available. 2227 * Returns a pointer to the allocated fcport, or NULL, if none available.
2218 */ 2228 */
2219static fc_port_t * 2229fc_port_t *
2220qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 2230qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2221{ 2231{
2222 fc_port_t *fcport; 2232 fc_port_t *fcport;
@@ -2900,8 +2910,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2900 if (qla2x00_is_reserved_id(vha, loop_id)) 2910 if (qla2x00_is_reserved_id(vha, loop_id))
2901 continue; 2911 continue;
2902 2912
2903 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha)) 2913 if (atomic_read(&vha->loop_down_timer) ||
2914 LOOP_TRANSITION(vha)) {
2915 atomic_set(&vha->loop_down_timer, 0);
2916 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2917 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2904 break; 2918 break;
2919 }
2905 2920
2906 if (swl != NULL) { 2921 if (swl != NULL) {
2907 if (last_dev) { 2922 if (last_dev) {
@@ -4877,6 +4892,15 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4877} 4892}
4878 4893
4879void 4894void
4880qla81xx_update_fw_options(scsi_qla_host_t *ha) 4895qla81xx_update_fw_options(scsi_qla_host_t *vha)
4881{ 4896{
4897 struct qla_hw_data *ha = vha->hw;
4898
4899 if (!ql2xetsenable)
4900 return;
4901
4902 /* Enable ETS Burst. */
4903 memset(ha->fw_options, 0, sizeof(ha->fw_options));
4904 ha->fw_options[2] |= BIT_9;
4905 qla2x00_set_fw_options(vha, ha->fw_options);
4882} 4906}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5ccac0bef76..8299a9891bfe 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1025,6 +1025,119 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1025 /* Implicit: mbx->mbx10 = 0. */ 1025 /* Implicit: mbx->mbx10 = 0. */
1026} 1026}
1027 1027
1028static void
1029qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1030{
1031 struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
1032
1033 els_iocb->entry_type = ELS_IOCB_TYPE;
1034 els_iocb->entry_count = 1;
1035 els_iocb->sys_define = 0;
1036 els_iocb->entry_status = 0;
1037 els_iocb->handle = sp->handle;
1038 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1039 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1040 els_iocb->vp_index = sp->fcport->vp_idx;
1041 els_iocb->sof_type = EST_SOFI3;
1042 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1043
1044 els_iocb->opcode = (((struct srb_bsg *)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
1045 bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
1046 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1047 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1048 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1049 els_iocb->control_flags = 0;
1050 els_iocb->rx_byte_count =
1051 cpu_to_le32(bsg_job->reply_payload.payload_len);
1052 els_iocb->tx_byte_count =
1053 cpu_to_le32(bsg_job->request_payload.payload_len);
1054
1055 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1056 (bsg_job->request_payload.sg_list)));
1057 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1058 (bsg_job->request_payload.sg_list)));
1059 els_iocb->tx_len = cpu_to_le32(sg_dma_len
1060 (bsg_job->request_payload.sg_list));
1061
1062 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1063 (bsg_job->reply_payload.sg_list)));
1064 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1065 (bsg_job->reply_payload.sg_list)));
1066 els_iocb->rx_len = cpu_to_le32(sg_dma_len
1067 (bsg_job->reply_payload.sg_list));
1068}
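
The address programming above follows the driver-wide pattern of splitting a 64-bit DMA address into two little-endian 32-bit words. A minimal sketch of that split, with lsd()/msd() as local stand-ins for the driver's LSD()/MSD() macros (assumed to take the low and high halves of the address):

#include <linux/types.h>

/* Stand-ins for LSD()/MSD(): low and high 32 bits of a 64-bit DMA address. */
static inline u32 lsd(u64 addr)
{
	return (u32)(addr & 0xffffffffULL);
}

static inline u32 msd(u64 addr)
{
	return (u32)(addr >> 32);
}

/* A descriptor's two-word address field is then filled as:
 *   desc->addr[0] = cpu_to_le32(lsd(dma));
 *   desc->addr[1] = cpu_to_le32(msd(dma));
 */
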
1069
1070static void
1071qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1072{
1073 uint16_t avail_dsds;
1074 uint32_t *cur_dsd;
1075 struct scatterlist *sg;
1076 int index;
1077 uint16_t tot_dsds;
1078 scsi_qla_host_t *vha = sp->fcport->vha;
1079 struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
1080 int loop_iteration = 0;
1081 int cont_iocb_prsnt = 0;
1082 int entry_count = 1;
1083
1084 ct_iocb->entry_type = CT_IOCB_TYPE;
1085 ct_iocb->entry_status = 0;
1086 ct_iocb->sys_define = 0;
1087 ct_iocb->handle = sp->handle;
1088
1089 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1090 ct_iocb->vp_index = sp->fcport->vp_idx;
1091 ct_iocb->comp_status = __constant_cpu_to_le16(0);
1092
1093 ct_iocb->cmd_dsd_count =
1094 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1095 ct_iocb->timeout = 0;
1096 ct_iocb->rsp_dsd_count =
1097 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1098 ct_iocb->rsp_byte_count =
1099 cpu_to_le32(bsg_job->reply_payload.payload_len);
1100 ct_iocb->cmd_byte_count =
1101 cpu_to_le32(bsg_job->request_payload.payload_len);
1102 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1103 (bsg_job->request_payload.sg_list)));
1104 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1105 (bsg_job->request_payload.sg_list)));
1106 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1107 (bsg_job->request_payload.sg_list));
1108
1109 avail_dsds = 1;
1110 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1111 index = 0;
1112 tot_dsds = bsg_job->reply_payload.sg_cnt;
1113
1114 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1115 dma_addr_t sle_dma;
1116 cont_a64_entry_t *cont_pkt;
1117
1118 /* Allocate additional continuation packets? */
1119 if (avail_dsds == 0) {
1120 /*
1121 * Five DSDs are available in the Cont.
1122 * Type 1 IOCB.
1123 */
1124 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1125 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1126 avail_dsds = 5;
1127 cont_iocb_prsnt = 1;
1128 entry_count++;
1129 }
1130
1131 sle_dma = sg_dma_address(sg);
1132 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1133 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1134 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1135 loop_iteration++;
1136 avail_dsds--;
1137 }
1138 ct_iocb->entry_count = entry_count;
1139}
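
The avail_dsds bookkeeping above fixes how many IOCBs the command consumes: the CT IOCB itself carries one reply DSD (starting at dseg_1) and each Cont. Type 1 packet carries five more. A closed-form sketch of the same arithmetic (helper name is illustrative, not driver API):

#include <linux/kernel.h>

/* Equivalent of the entry_count computed by the loop above:
 * 1 DSD fits in the CT IOCB, then 5 per continuation packet. */
static unsigned int ct_iocb_entry_count(unsigned int rsp_sg_cnt)
{
	if (rsp_sg_cnt <= 1)
		return 1;
	/* DSDs left after the one in the CT IOCB, rounded up to
	 * whole Cont. Type 1 IOCBs */
	return 1 + DIV_ROUND_UP(rsp_sg_cnt - 1, 5);
}

/* e.g. 1 sg -> 1 IOCB, 6 sgs -> 2 IOCBs, 7 sgs -> 3 IOCBs */
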
1140
1028int 1141int
1029qla2x00_start_sp(srb_t *sp) 1142qla2x00_start_sp(srb_t *sp)
1030{ 1143{
@@ -1052,6 +1165,13 @@ qla2x00_start_sp(srb_t *sp)
1052 qla24xx_logout_iocb(sp, pkt): 1165 qla24xx_logout_iocb(sp, pkt):
1053 qla2x00_logout_iocb(sp, pkt); 1166 qla2x00_logout_iocb(sp, pkt);
1054 break; 1167 break;
1168 case SRB_ELS_CMD_RPT:
1169 case SRB_ELS_CMD_HST:
1170 qla24xx_els_iocb(sp, pkt);
1171 break;
1172 case SRB_CT_CMD:
1173 qla24xx_ct_iocb(sp, pkt);
1174 break;
1055 default: 1175 default:
1056 break; 1176 break;
1057 } 1177 }
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6fc63b98818c..ab90329ff2e4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <scsi/scsi_tcq.h> 10#include <scsi/scsi_tcq.h>
11#include <scsi/scsi_bsg_fc.h>
11 12
12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 13static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, 14static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -881,7 +882,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
881 index); 882 index);
882 return NULL; 883 return NULL;
883 } 884 }
885
884 req->outstanding_cmds[index] = NULL; 886 req->outstanding_cmds[index] = NULL;
887
885done: 888done:
886 return sp; 889 return sp;
887} 890}
@@ -982,6 +985,100 @@ done_post_logio_done_work:
982} 985}
983 986
984static void 987static void
988qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
989 struct sts_entry_24xx *pkt, int iocb_type)
990{
991 const char func[] = "ELS_CT_IOCB";
992 const char *type;
993 struct qla_hw_data *ha = vha->hw;
994 srb_t *sp;
995 struct srb_bsg *sp_bsg;
996 struct fc_bsg_job *bsg_job;
997 uint16_t comp_status;
998 uint32_t fw_status[3];
999 uint8_t *fw_sts_ptr;
1000
1001 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1002 if (!sp)
1003 return;
1004 sp_bsg = (struct srb_bsg *)sp->ctx;
1005 bsg_job = sp_bsg->bsg_job;
1006
1007 type = NULL;
1008 switch (sp_bsg->ctx.type) {
1009 case SRB_ELS_CMD_RPT:
1010 case SRB_ELS_CMD_HST:
1011 type = "els";
1012 break;
1013 case SRB_CT_CMD:
1014 type = "ct pass-through";
1015 break;
1016 default:
1017 qla_printk(KERN_WARNING, ha,
1018 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1019 sp_bsg->ctx.type);
1020 return;
1021 }
1022
1023 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1024 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
1025 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
1026
1027 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1028 * fc payload to the caller
1029 */
1030 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1031 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1032
1033 if (comp_status != CS_COMPLETE) {
1034 if (comp_status == CS_DATA_UNDERRUN) {
1035 bsg_job->reply->result = DID_OK << 16;
1036 bsg_job->reply->reply_payload_rcv_len =
1037 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1038
1039 DEBUG2(qla_printk(KERN_WARNING, ha,
1040 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status=0x%x "
1041 "error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x.\n",
1042 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
1043 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count)));
1044 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1045 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1046 }
1047 else {
1048 DEBUG2(qla_printk(KERN_WARNING, ha,
1049 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status=0x%x "
1050 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1051 vha->host_no, sp->handle, type, comp_status,
1052 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1),
1053 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2)));
1054 bsg_job->reply->result = DID_ERROR << 16;
1055 bsg_job->reply->reply_payload_rcv_len = 0;
1056 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1057 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1058 }
1059 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1060 }
1061 else {
1062 bsg_job->reply->result = DID_OK << 16;
1063 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1064 bsg_job->reply_len = 0;
1065 }
1066
1067 dma_unmap_sg(&ha->pdev->dev,
1068 bsg_job->request_payload.sg_list,
1069 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1070 dma_unmap_sg(&ha->pdev->dev,
1071 bsg_job->reply_payload.sg_list,
1072 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1073 if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
1074 (sp_bsg->ctx.type == SRB_CT_CMD))
1075 kfree(sp->fcport);
1076 kfree(sp->ctx);
1077 mempool_free(sp, ha->srb_mempool);
1078 bsg_job->job_done(bsg_job);
1079}
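
The dma_unmap_sg() calls above are the completion-side half of a mapping established at submit time. A hedged sketch of that submit-side pairing (function name is illustrative; the actual mapping happens in the driver's bsg request path with the same device, scatterlists, counts and directions):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <scsi/scsi_bsg_fc.h>

static int example_map_bsg_payloads(struct device *dev,
				    struct fc_bsg_job *bsg_job)
{
	if (!dma_map_sg(dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE))
		return -ENOMEM;

	if (!dma_map_sg(dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE)) {
		/* undo the request-side mapping on failure */
		dma_unmap_sg(dev, bsg_job->request_payload.sg_list,
			     bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}
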
1080
1081static void
985qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1082qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
986 struct logio_entry_24xx *logio) 1083 struct logio_entry_24xx *logio)
987{ 1084{
@@ -1749,6 +1846,13 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1749 qla24xx_logio_entry(vha, rsp->req, 1846 qla24xx_logio_entry(vha, rsp->req,
1750 (struct logio_entry_24xx *)pkt); 1847 (struct logio_entry_24xx *)pkt);
1751 break; 1848 break;
1849 case CT_IOCB_TYPE:
1850 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1851 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
1852 break;
1853 case ELS_IOCB_TYPE:
1854 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
1855 break;
1752 default: 1856 default:
1753 /* Type Not Supported. */ 1857 /* Type Not Supported. */
1754 DEBUG4(printk(KERN_WARNING 1858 DEBUG4(printk(KERN_WARNING
@@ -2049,7 +2153,6 @@ qla24xx_msix_default(int irq, void *dev_id)
2049 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2153 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2050 complete(&ha->mbx_intr_comp); 2154 complete(&ha->mbx_intr_comp);
2051 } 2155 }
2052
2053 return IRQ_HANDLED; 2156 return IRQ_HANDLED;
2054} 2157}
2055 2158
@@ -2255,10 +2358,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
2255 2358
2256 if (ha->flags.msix_enabled) 2359 if (ha->flags.msix_enabled)
2257 qla24xx_disable_msix(ha); 2360 qla24xx_disable_msix(ha);
2258 else if (ha->flags.inta_enabled) { 2361 else if (ha->flags.msi_enabled) {
2259 free_irq(ha->pdev->irq, rsp); 2362 free_irq(ha->pdev->irq, rsp);
2260 pci_disable_msi(ha->pdev); 2363 pci_disable_msi(ha->pdev);
2261 } 2364 } else
2365 free_irq(ha->pdev->irq, rsp);
2262} 2366}
2263 2367
2264 2368
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 056e4d4505f3..6e53bdbb1da8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3636,6 +3636,157 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3636} 3636}
3637 3637
3638int 3638int
3639qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
3640{
3641 int rval;
3642 mbx_cmd_t mc;
3643 mbx_cmd_t *mcp = &mc;
3644 uint32_t iter_cnt = 0x1;
3645
3646 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
3647
3648 memset(mcp->mb, 0, sizeof(mcp->mb));
3649 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
3650 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
3651
3652 /* transfer count */
3653 mcp->mb[10] = LSW(mreq->transfer_size);
3654 mcp->mb[11] = MSW(mreq->transfer_size);
3655
3656 /* send data address */
3657 mcp->mb[14] = LSW(mreq->send_dma);
3658 mcp->mb[15] = MSW(mreq->send_dma);
3659 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3660 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3661
3662 /* receive data address */
3663 mcp->mb[16] = LSW(mreq->rcv_dma);
3664 mcp->mb[17] = MSW(mreq->rcv_dma);
3665 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3666 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3667
3668 /* Iteration count */
3669 mcp->mb[18] = LSW(iter_cnt);
3670 mcp->mb[19] = MSW(iter_cnt);
3671
3672 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3673 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3674 if (IS_QLA81XX(vha->hw))
3675 mcp->out_mb |= MBX_2;
3676 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3677
3678 mcp->buf_size = mreq->transfer_size;
3679 mcp->tov = MBX_TOV_SECONDS;
3680 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3681
3682 rval = qla2x00_mailbox_command(vha, mcp);
3683
3684 if (rval != QLA_SUCCESS) {
3685 DEBUG2(printk(KERN_WARNING
3686 "(%ld): failed=%x mb[0]=0x%x "
3687 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x.\n", vha->host_no, rval,
3688 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3689 } else {
3690 DEBUG2(printk(KERN_WARNING
3691 "scsi(%ld): done.\n", vha->host_no));
3692 }
3693
3694 /* Copy mailbox information */
3695 memcpy(mresp, mcp->mb, 64);
3696 mresp[3] = mcp->mb[18];
3697 mresp[4] = mcp->mb[19];
3698 return rval;
3699}
3700
3701int
3702qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
3703{
3704 int rval;
3705 mbx_cmd_t mc;
3706 mbx_cmd_t *mcp = &mc;
3707 struct qla_hw_data *ha = vha->hw;
3708
3709 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
3710
3711 memset(mcp->mb, 0, sizeof(mcp->mb));
3712 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
3713 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
3714 if (IS_QLA81XX(ha))
3715 mcp->mb[1] |= BIT_15;
3716 mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
3717 mcp->mb[16] = LSW(mreq->rcv_dma);
3718 mcp->mb[17] = MSW(mreq->rcv_dma);
3719 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3720 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3721
3722 mcp->mb[10] = LSW(mreq->transfer_size);
3723
3724 mcp->mb[14] = LSW(mreq->send_dma);
3725 mcp->mb[15] = MSW(mreq->send_dma);
3726 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3727 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3728
3729 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3730 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3731 if (IS_QLA81XX(ha))
3732 mcp->out_mb |= MBX_2;
3733
3734 mcp->in_mb = MBX_0;
3735 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
3736 mcp->in_mb |= MBX_1;
3737 if (IS_QLA81XX(ha))
3738 mcp->in_mb |= MBX_3;
3739
3740 mcp->tov = MBX_TOV_SECONDS;
3741 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3742 mcp->buf_size = mreq->transfer_size;
3743
3744 rval = qla2x00_mailbox_command(vha, mcp);
3745
3746 if (rval != QLA_SUCCESS) {
3747 DEBUG2(printk(KERN_WARNING
3748 "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
3749 vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
3750 } else {
3751 DEBUG2(printk(KERN_WARNING
3752 "scsi(%ld): done.\n", vha->host_no));
3753 }
3754
3755 /* Copy mailbox information */
3756 memcpy(mresp, mcp->mb, 32);
3757 return rval;
3758}
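
A hedged sketch of how a caller might drive the echo diagnostic above (the msg_echo_lb field names follow their uses in qla2x00_echo_test(); the DMA buffers would come from the bsg request path, and everything driver-private is assumed from qla_def.h):

static int example_run_echo(scsi_qla_host_t *vha, dma_addr_t send_dma,
			    dma_addr_t rcv_dma, uint32_t len)
{
	struct msg_echo_lb elreq;
	uint16_t response[32];	/* 64 bytes, also enough for the loopback copy */

	memset(&elreq, 0, sizeof(elreq));
	elreq.send_dma = send_dma;
	elreq.rcv_dma = rcv_dma;
	elreq.transfer_size = len;
	elreq.options = 0;	/* BIT_6 (64-bit addressing) is OR'd in by the test */

	return qla2x00_echo_test(vha, &elreq, response);
}
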
3759int
3760qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
3761 uint16_t *cmd_status)
3762{
3763 int rval;
3764 mbx_cmd_t mc;
3765 mbx_cmd_t *mcp = &mc;
3766
3767 DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
3768 ha->host_no, enable_diagnostic));
3769
3770 mcp->mb[0] = MBC_ISP84XX_RESET;
3771 mcp->mb[1] = enable_diagnostic;
3772 mcp->out_mb = MBX_1|MBX_0;
3773 mcp->in_mb = MBX_1|MBX_0;
3774 mcp->tov = MBX_TOV_SECONDS;
3775 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3776 rval = qla2x00_mailbox_command(ha, mcp);
3777
3778 /* Return mailbox statuses. */
3779 *cmd_status = mcp->mb[0];
3780 if (rval != QLA_SUCCESS)
3781 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
3782 rval));
3783 else
3784 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
3785
3786 return rval;
3787}
3788
3789int
3639qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 3790qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3640{ 3791{
3641 int rval; 3792 int rval;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8529eb1f3cd4..46720b23028f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -107,6 +107,12 @@ MODULE_PARM_DESC(ql2xfwloadbin,
107 " 1 -- load firmware from flash.\n" 107 " 1 -- load firmware from flash.\n"
108 " 0 -- use default semantics.\n"); 108 " 0 -- use default semantics.\n");
109 109
110int ql2xetsenable;
111module_param(ql2xetsenable, int, S_IRUGO);
112MODULE_PARM_DESC(ql2xetsenable,
113 "Enables firmware ETS burst. "
114 "Default is 0 - skip ETS enablement.");
115
110/* 116/*
111 * SCSI host template entry points 117 * SCSI host template entry points
112 */ 118 */
@@ -682,44 +688,6 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
682 return (return_status); 688 return (return_status);
683} 689}
684 690
685void
686qla2x00_abort_fcport_cmds(fc_port_t *fcport)
687{
688 int cnt;
689 unsigned long flags;
690 srb_t *sp;
691 scsi_qla_host_t *vha = fcport->vha;
692 struct qla_hw_data *ha = vha->hw;
693 struct req_que *req;
694
695 spin_lock_irqsave(&ha->hardware_lock, flags);
696 req = vha->req;
697 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
698 sp = req->outstanding_cmds[cnt];
699 if (!sp)
700 continue;
701 if (sp->fcport != fcport)
702 continue;
703 if (sp->ctx)
704 continue;
705
706 spin_unlock_irqrestore(&ha->hardware_lock, flags);
707 if (ha->isp_ops->abort_command(sp)) {
708 DEBUG2(qla_printk(KERN_WARNING, ha,
709 "Abort failed -- %lx\n",
710 sp->cmd->serial_number));
711 } else {
712 if (qla2x00_eh_wait_on_command(sp->cmd) !=
713 QLA_SUCCESS)
714 DEBUG2(qla_printk(KERN_WARNING, ha,
715 "Abort failed while waiting -- %lx\n",
716 sp->cmd->serial_number));
717 }
718 spin_lock_irqsave(&ha->hardware_lock, flags);
719 }
720 spin_unlock_irqrestore(&ha->hardware_lock, flags);
721}
722
723/************************************************************************** 691/**************************************************************************
724* qla2xxx_eh_abort 692* qla2xxx_eh_abort
725* 693*
@@ -1095,6 +1063,20 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1095 struct fc_port *fcport; 1063 struct fc_port *fcport;
1096 struct qla_hw_data *ha = vha->hw; 1064 struct qla_hw_data *ha = vha->hw;
1097 1065
1066 if (ha->flags.enable_target_reset) {
1067 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1068 if (fcport->port_type != FCT_TARGET)
1069 continue;
1070
1071 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1072 if (ret != QLA_SUCCESS) {
1073 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1074 "target_reset=%d d_id=%x.\n", __func__,
1075 vha->host_no, ret, fcport->d_id.b24));
1076 }
1077 }
1078 }
1079
1098 if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) { 1080 if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
1099 ret = qla2x00_full_login_lip(vha); 1081 ret = qla2x00_full_login_lip(vha);
1100 if (ret != QLA_SUCCESS) { 1082 if (ret != QLA_SUCCESS) {
@@ -1117,19 +1099,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1117 qla2x00_wait_for_loop_ready(vha); 1099 qla2x00_wait_for_loop_ready(vha);
1118 } 1100 }
1119 1101
1120 if (ha->flags.enable_target_reset) {
1121 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1122 if (fcport->port_type != FCT_TARGET)
1123 continue;
1124
1125 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1126 if (ret != QLA_SUCCESS) {
1127 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1128 "target_reset=%d d_id=%x.\n", __func__,
1129 vha->host_no, ret, fcport->d_id.b24));
1130 }
1131 }
1132 }
1133 /* Issue marker command only when we are going to start the I/O */ 1102 /* Issue marker command only when we are going to start the I/O */
1134 vha->marker_needed = 1; 1103 vha->marker_needed = 1;
1135 1104
@@ -1160,8 +1129,19 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1160 qla2x00_sp_compl(ha, sp); 1129 qla2x00_sp_compl(ha, sp);
1161 } else { 1130 } else {
1162 ctx = sp->ctx; 1131 ctx = sp->ctx;
1163 del_timer_sync(&ctx->timer); 1132 if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
1164 ctx->free(sp); 1133 del_timer_sync(&ctx->timer);
1134 ctx->free(sp);
1135 } else {
1136 struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
1137 if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
1138 kfree(sp->fcport);
1139 sp_bsg->bsg_job->req->errors = 0;
1140 sp_bsg->bsg_job->reply->result = res;
1141 sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
1142 kfree(sp->ctx);
1143 mempool_free(sp, ha->srb_mempool);
1144 }
1165 } 1145 }
1166 } 1146 }
1167 } 1147 }
@@ -1258,7 +1238,7 @@ qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1258 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth); 1238 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1259 break; 1239 break;
1260 default: 1240 default:
1261 return EOPNOTSUPP; 1241 return -EOPNOTSUPP;
1262 } 1242 }
1263 1243
1264 return sdev->queue_depth; 1244 return sdev->queue_depth;
@@ -1818,7 +1798,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1818 /* Set EEH reset type to fundamental if required by hba */ 1798 /* Set EEH reset type to fundamental if required by hba */
1819 if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { 1799 if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1820 pdev->needs_freset = 1; 1800 pdev->needs_freset = 1;
1821 pci_save_state(pdev);
1822 } 1801 }
1823 1802
1824 /* Configure PCI I/O space */ 1803 /* Configure PCI I/O space */
@@ -1970,11 +1949,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1970 host->max_channel = MAX_BUSES - 1; 1949 host->max_channel = MAX_BUSES - 1;
1971 host->max_lun = MAX_LUNS; 1950 host->max_lun = MAX_LUNS;
1972 host->transportt = qla2xxx_transport_template; 1951 host->transportt = qla2xxx_transport_template;
1952 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
1973 1953
1974 /* Set up the irqs */ 1954 /* Set up the irqs */
1975 ret = qla2x00_request_irqs(ha, rsp); 1955 ret = qla2x00_request_irqs(ha, rsp);
1976 if (ret) 1956 if (ret)
1977 goto probe_init_failed; 1957 goto probe_init_failed;
1958
1959 pci_save_state(pdev);
1960
1978 /* Alloc arrays of request and response ring ptrs */ 1961 /* Alloc arrays of request and response ring ptrs */
1979que_init: 1962que_init:
1980 if (!qla2x00_alloc_queues(ha)) { 1963 if (!qla2x00_alloc_queues(ha)) {
@@ -2176,6 +2159,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
2176 kfree(ha); 2159 kfree(ha);
2177 ha = NULL; 2160 ha = NULL;
2178 2161
2162 pci_disable_pcie_error_reporting(pdev);
2163
2179 pci_disable_device(pdev); 2164 pci_disable_device(pdev);
2180 pci_set_drvdata(pdev, NULL); 2165 pci_set_drvdata(pdev, NULL);
2181} 2166}
@@ -3310,6 +3295,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3310 return PCI_ERS_RESULT_CAN_RECOVER; 3295 return PCI_ERS_RESULT_CAN_RECOVER;
3311 case pci_channel_io_frozen: 3296 case pci_channel_io_frozen:
3312 ha->flags.eeh_busy = 1; 3297 ha->flags.eeh_busy = 1;
3298 qla2x00_free_irqs(vha);
3313 pci_disable_device(pdev); 3299 pci_disable_device(pdev);
3314 return PCI_ERS_RESULT_NEED_RESET; 3300 return PCI_ERS_RESULT_NEED_RESET;
3315 case pci_channel_io_perm_failure: 3301 case pci_channel_io_perm_failure:
@@ -3363,10 +3349,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3363 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 3349 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
3364 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 3350 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3365 struct qla_hw_data *ha = base_vha->hw; 3351 struct qla_hw_data *ha = base_vha->hw;
3366 int rc; 3352 struct rsp_que *rsp;
3353 int rc, retries = 10;
3367 3354
3368 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); 3355 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3369 3356
3357 /* Workaround: the qla2xxx driver, which accesses hardware early in
3358 * recovery, needs the error state to be pci_channel_io_normal.
3359 * Otherwise mailbox commands time out.
3360 */
3361 pdev->error_state = pci_channel_io_normal;
3362
3363 pci_restore_state(pdev);
3364
3365 /* pci_restore_state() clears the device's saved_state flag, so
3366 * save the state again to set the flag for any later restore.
3367 */
3368 pci_save_state(pdev);
3369
3370 if (ha->mem_only) 3370 if (ha->mem_only)
3371 rc = pci_enable_device_mem(pdev); 3371 rc = pci_enable_device_mem(pdev);
3372 else 3372 else
@@ -3378,27 +3378,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3378 return ret; 3378 return ret;
3379 } 3379 }
3380 3380
3381 rsp = ha->rsp_q_map[0];
3382 if (qla2x00_request_irqs(ha, rsp))
3383 return ret;
3384
3381 if (ha->isp_ops->pci_config(base_vha)) 3385 if (ha->isp_ops->pci_config(base_vha))
3382 return ret; 3386 return ret;
3383 3387
3384#ifdef QL_DEBUG_LEVEL_17 3388 while (ha->flags.mbox_busy && retries--)
3385 { 3389 msleep(1000);
3386 uint8_t b;
3387 uint32_t i;
3388 3390
3389 printk("slot_reset_1: ");
3390 for (i = 0; i < 256; i++) {
3391 pci_read_config_byte(ha->pdev, i, &b);
3392 printk("%s%02x", (i%16) ? " " : "\n", b);
3393 }
3394 printk("\n");
3395 }
3396#endif
3397 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3391 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3398 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) 3392 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
3399 ret = PCI_ERS_RESULT_RECOVERED; 3393 ret = PCI_ERS_RESULT_RECOVERED;
3400 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3394 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3401 3395
3396 pci_cleanup_aer_uncorrect_error_status(pdev);
3397
3402 DEBUG17(qla_printk(KERN_WARNING, ha, 3398 DEBUG17(qla_printk(KERN_WARNING, ha,
3403 "slot_reset-return:ret=%x\n", ret)); 3399 "slot_reset-return:ret=%x\n", ret));
3404 3400
@@ -3422,8 +3418,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
3422 } 3418 }
3423 3419
3424 ha->flags.eeh_busy = 0; 3420 ha->flags.eeh_busy = 0;
3425
3426 pci_cleanup_aer_uncorrect_error_status(pdev);
3427} 3421}
3428 3422
3429static struct pci_error_handlers qla2xxx_err_handler = { 3423static struct pci_error_handlers qla2xxx_err_handler = {
@@ -3536,4 +3530,3 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
3536MODULE_FIRMWARE(FW_FILE_ISP2322); 3530MODULE_FIRMWARE(FW_FILE_ISP2322);
3537MODULE_FIRMWARE(FW_FILE_ISP24XX); 3531MODULE_FIRMWARE(FW_FILE_ISP24XX);
3538MODULE_FIRMWARE(FW_FILE_ISP25XX); 3532MODULE_FIRMWARE(FW_FILE_ISP25XX);
3539MODULE_FIRMWARE(FW_FILE_ISP81XX);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ed36279a33c1..8d2fc2fa7a6b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k10" 10#define QLA2XXX_VERSION "8.03.02-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 1 14#define QLA_DRIVER_PATCH_VER 2
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index af8c3233e8ae..92329a461c68 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -844,10 +844,10 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
844 DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, 844 DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
845 __func__)); 845 __func__));
846 if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) 846 if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
847 return (QLA_ERROR); 847 return QLA_ERROR;
848 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { 848 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
849 ql4xxx_unlock_flash(ha); 849 ql4xxx_unlock_flash(ha);
850 return (QLA_ERROR); 850 return QLA_ERROR;
851 } 851 }
852 852
853 /* Get EEPRom Parameters from NVRAM and validate */ 853 /* Get EEPRom Parameters from NVRAM and validate */
@@ -858,20 +858,18 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
858 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); 858 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
859 spin_unlock_irqrestore(&ha->hardware_lock, flags); 859 spin_unlock_irqrestore(&ha->hardware_lock, flags);
860 } else { 860 } else {
861 /*
862 * QLogic adapters should always have a valid NVRAM.
863 * If not valid, do not load.
864 */
865 dev_warn(&ha->pdev->dev, 861 dev_warn(&ha->pdev->dev,
866 "scsi%ld: %s: EEProm checksum invalid. " 862 "scsi%ld: %s: EEProm checksum invalid. "
867 "Please update your EEPROM\n", ha->host_no, 863 "Please update your EEPROM\n", ha->host_no,
868 __func__); 864 __func__);
869 865
870 /* set defaults */ 866 /* Attempt to set defaults */
871 if (is_qla4010(ha)) 867 if (is_qla4010(ha))
872 extHwConfig.Asuint32_t = 0x1912; 868 extHwConfig.Asuint32_t = 0x1912;
873 else if (is_qla4022(ha) | is_qla4032(ha)) 869 else if (is_qla4022(ha) | is_qla4032(ha))
874 extHwConfig.Asuint32_t = 0x0023; 870 extHwConfig.Asuint32_t = 0x0023;
871 else
872 return QLA_ERROR;
875 } 873 }
876 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", 874 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
877 ha->host_no, __func__, extHwConfig.Asuint32_t)); 875 ha->host_no, __func__, extHwConfig.Asuint32_t));
@@ -884,7 +882,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
884 ql4xxx_unlock_nvram(ha); 882 ql4xxx_unlock_nvram(ha);
885 ql4xxx_unlock_flash(ha); 883 ql4xxx_unlock_flash(ha);
886 884
887 return (QLA_SUCCESS); 885 return QLA_SUCCESS;
888} 886}
889 887
890static void qla4x00_pci_config(struct scsi_qla_host *ha) 888static void qla4x00_pci_config(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 8e5c169b03fb..bd88349b8526 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -149,6 +149,7 @@ static struct {
149 { RAID_LEVEL_0, "raid0" }, 149 { RAID_LEVEL_0, "raid0" },
150 { RAID_LEVEL_1, "raid1" }, 150 { RAID_LEVEL_1, "raid1" },
151 { RAID_LEVEL_10, "raid10" }, 151 { RAID_LEVEL_10, "raid10" },
152 { RAID_LEVEL_1E, "raid1e" },
152 { RAID_LEVEL_3, "raid3" }, 153 { RAID_LEVEL_3, "raid3" },
153 { RAID_LEVEL_4, "raid4" }, 154 { RAID_LEVEL_4, "raid4" },
154 { RAID_LEVEL_5, "raid5" }, 155 { RAID_LEVEL_5, "raid5" },
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a60da5555577..513661f45e5f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1026,55 +1026,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
1026 * responsible for calling kfree() on this pointer when it is no longer 1026 * responsible for calling kfree() on this pointer when it is no longer
1027 * needed. If we cannot retrieve the VPD page this routine returns %NULL. 1027 * needed. If we cannot retrieve the VPD page this routine returns %NULL.
1028 */ 1028 */
1029unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page) 1029int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
1030 int buf_len)
1030{ 1031{
1031 int i, result; 1032 int i, result;
1032 unsigned int len;
1033 const unsigned int init_vpd_len = 255;
1034 unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
1035
1036 if (!buf)
1037 return NULL;
1038 1033
1039 /* Ask for all the pages supported by this device */ 1034 /* Ask for all the pages supported by this device */
1040 result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len); 1035 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
1041 if (result) 1036 if (result)
1042 goto fail; 1037 goto fail;
1043 1038
1044 /* If the user actually wanted this page, we can skip the rest */ 1039 /* If the user actually wanted this page, we can skip the rest */
1045 if (page == 0) 1040 if (page == 0)
1046 return buf; 1041 return -EINVAL;
1047 1042
1048 for (i = 0; i < buf[3]; i++) 1043 for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
1049 if (buf[i + 4] == page) 1044 if (buf[i + 4] == page)
1050 goto found; 1045 goto found;
1046
1047 if (i < buf[3] && i > buf_len)
1048 /* ran off the end of the buffer, give us benefit of doubt */
1049 goto found;
1051 /* The device claims it doesn't support the requested page */ 1050 /* The device claims it doesn't support the requested page */
1052 goto fail; 1051 goto fail;
1053 1052
1054 found: 1053 found:
1055 result = scsi_vpd_inquiry(sdev, buf, page, 255); 1054 result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
1056 if (result) 1055 if (result)
1057 goto fail; 1056 goto fail;
1058 1057
1059 /* 1058 return 0;
1060 * Some pages are longer than 255 bytes. The actual length of
1061 * the page is returned in the header.
1062 */
1063 len = ((buf[2] << 8) | buf[3]) + 4;
1064 if (len <= init_vpd_len)
1065 return buf;
1066
1067 kfree(buf);
1068 buf = kmalloc(len, GFP_KERNEL);
1069 result = scsi_vpd_inquiry(sdev, buf, page, len);
1070 if (result)
1071 goto fail;
1072
1073 return buf;
1074 1059
1075 fail: 1060 fail:
1076 kfree(buf); 1061 return -EINVAL;
1077 return NULL;
1078} 1062}
1079EXPORT_SYMBOL_GPL(scsi_get_vpd_page); 1063EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
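
With the rework above the caller owns the buffer and checks an int return instead of testing for NULL. A short usage sketch (hypothetical helper, mirroring the sd.c and ses.c call sites updated later in this patch; buf[4] for page 0xb1 is the rotation-rate field read by sd.c below):

#include <linux/slab.h>
#include <scsi/scsi_device.h>
#include <asm/unaligned.h>

static void example_read_vpd_b1(struct scsi_device *sdev)
{
	const int vpd_len = 64;
	unsigned char *buf = kmalloc(vpd_len, GFP_KERNEL);

	if (!buf)
		return;

	/* 0xb1: Block Device Characteristics VPD page; 0 means success */
	if (!scsi_get_vpd_page(sdev, 0xb1, buf, vpd_len))
		printk(KERN_INFO "rotation rate: %u\n",
		       get_unaligned_be16(&buf[4]));

	kfree(buf);
}
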
1080 1064
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c6642423cc67..56977097de9f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 * we already took a copy of the original into rq->errors which 773 * we already took a copy of the original into rq->errors which
774 * is what gets returned to the user 774 * is what gets returned to the user
775 */ 775 */
776 if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) { 776 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
777 if (!(req->cmd_flags & REQ_QUIET)) 777 /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
778 * print since caller wants ATA registers. Only occurs on
779 * SCSI ATA PASS_THROUGH commands when CK_COND=1
780 */
781 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
782 ;
783 else if (!(req->cmd_flags & REQ_QUIET))
778 scsi_print_sense("", cmd); 784 scsi_print_sense("", cmd);
779 result = 0; 785 result = 0;
780 /* BLOCK_PC may have set error */ 786 /* BLOCK_PC may have set error */
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index 998cb5be6833..6266a5d73d0f 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -5,7 +5,7 @@
5#define SAS_PHY_ATTRS 17 5#define SAS_PHY_ATTRS 17
6#define SAS_PORT_ATTRS 1 6#define SAS_PORT_ATTRS 1
7#define SAS_RPORT_ATTRS 7 7#define SAS_RPORT_ATTRS 7
8#define SAS_END_DEV_ATTRS 3 8#define SAS_END_DEV_ATTRS 5
9#define SAS_EXPANDER_ATTRS 7 9#define SAS_EXPANDER_ATTRS 7
10 10
11struct sas_internal { 11struct sas_internal {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 012f73a96880..f697229ae5a9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1339,8 +1339,10 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1339 sdev = scsi_alloc_sdev(starget, 0, NULL); 1339 sdev = scsi_alloc_sdev(starget, 0, NULL);
1340 if (!sdev) 1340 if (!sdev)
1341 return 0; 1341 return 0;
1342 if (scsi_device_get(sdev)) 1342 if (scsi_device_get(sdev)) {
1343 __scsi_remove_device(sdev);
1343 return 0; 1344 return 0;
1345 }
1344 } 1346 }
1345 1347
1346 sprintf(devname, "host %d channel %d id %d", 1348 sprintf(devname, "host %d channel %d id %d",
@@ -1907,10 +1909,9 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1907 goto out; 1909 goto out;
1908 1910
1909 sdev = scsi_alloc_sdev(starget, 0, NULL); 1911 sdev = scsi_alloc_sdev(starget, 0, NULL);
1910 if (sdev) { 1912 if (sdev)
1911 sdev->sdev_gendev.parent = get_device(&starget->dev);
1912 sdev->borken = 0; 1913 sdev->borken = 0;
1913 } else 1914 else
1914 scsi_target_reap(starget); 1915 scsi_target_reap(starget);
1915 put_device(&starget->dev); 1916 put_device(&starget->dev);
1916 out: 1917 out:
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 5a065055e68a..a4936c4e2f46 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -878,7 +878,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
878 struct request_queue *rq = sdev->request_queue; 878 struct request_queue *rq = sdev->request_queue;
879 struct scsi_target *starget = sdev->sdev_target; 879 struct scsi_target *starget = sdev->sdev_target;
880 880
881 if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0) 881 error = scsi_device_set_state(sdev, SDEV_RUNNING);
882 if (error)
882 return error; 883 return error;
883 884
884 error = scsi_target_add(starget); 885 error = scsi_target_add(starget);
@@ -889,13 +890,13 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
889 error = device_add(&sdev->sdev_gendev); 890 error = device_add(&sdev->sdev_gendev);
890 if (error) { 891 if (error) {
891 printk(KERN_INFO "error 1\n"); 892 printk(KERN_INFO "error 1\n");
892 goto out_remove; 893 return error;
893 } 894 }
894 error = device_add(&sdev->sdev_dev); 895 error = device_add(&sdev->sdev_dev);
895 if (error) { 896 if (error) {
896 printk(KERN_INFO "error 2\n"); 897 printk(KERN_INFO "error 2\n");
897 device_del(&sdev->sdev_gendev); 898 device_del(&sdev->sdev_gendev);
898 goto out_remove; 899 return error;
899 } 900 }
900 transport_add_device(&sdev->sdev_gendev); 901 transport_add_device(&sdev->sdev_gendev);
901 sdev->is_visible = 1; 902 sdev->is_visible = 1;
@@ -910,14 +911,14 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
910 else 911 else
911 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); 912 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
912 if (error) 913 if (error)
913 goto out_remove; 914 return error;
914 915
915 if (sdev->host->hostt->change_queue_type) 916 if (sdev->host->hostt->change_queue_type)
916 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); 917 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
917 else 918 else
918 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); 919 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
919 if (error) 920 if (error)
920 goto out_remove; 921 return error;
921 922
922 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); 923 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
923 924
@@ -933,16 +934,11 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
933 error = device_create_file(&sdev->sdev_gendev, 934 error = device_create_file(&sdev->sdev_gendev,
934 sdev->host->hostt->sdev_attrs[i]); 935 sdev->host->hostt->sdev_attrs[i]);
935 if (error) 936 if (error)
936 goto out_remove; 937 return error;
937 } 938 }
938 } 939 }
939 940
940 return 0;
941
942 out_remove:
943 __scsi_remove_device(sdev);
944 return error; 941 return error;
945
946} 942}
947 943
948void __scsi_remove_device(struct scsi_device *sdev) 944void __scsi_remove_device(struct scsi_device *sdev)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 653f22a8deb9..79660ee3e211 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -475,7 +475,8 @@ MODULE_PARM_DESC(dev_loss_tmo,
475 "Maximum number of seconds that the FC transport should" 475 "Maximum number of seconds that the FC transport should"
476 " insulate the loss of a remote port. Once this value is" 476 " insulate the loss of a remote port. Once this value is"
477 " exceeded, the scsi target is removed. Value should be" 477 " exceeded, the scsi target is removed. Value should be"
478 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); 478 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
479 " fast_io_fail_tmo is not set.");
479 480
480/* 481/*
481 * Netlink Infrastructure 482 * Netlink Infrastructure
@@ -842,9 +843,17 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
842 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) 843 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
843 return -EBUSY; 844 return -EBUSY;
844 val = simple_strtoul(buf, &cp, 0); 845 val = simple_strtoul(buf, &cp, 0);
845 if ((*cp && (*cp != '\n')) || 846 if ((*cp && (*cp != '\n')) || (val < 0))
846 (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
847 return -EINVAL; 847 return -EINVAL;
848
849 /*
850 * If fast_io_fail is off we have to cap
851 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
852 */
853 if (rport->fast_io_fail_tmo == -1 &&
854 val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
855 return -EINVAL;
856
848 i->f->set_rport_dev_loss_tmo(rport, val); 857 i->f->set_rport_dev_loss_tmo(rport, val);
849 return count; 858 return count;
850} 859}
@@ -925,9 +934,16 @@ store_fc_rport_fast_io_fail_tmo(struct device *dev,
925 rport->fast_io_fail_tmo = -1; 934 rport->fast_io_fail_tmo = -1;
926 else { 935 else {
927 val = simple_strtoul(buf, &cp, 0); 936 val = simple_strtoul(buf, &cp, 0);
928 if ((*cp && (*cp != '\n')) || 937 if ((*cp && (*cp != '\n')) || (val < 0))
929 (val < 0) || (val >= rport->dev_loss_tmo))
930 return -EINVAL; 938 return -EINVAL;
939 /*
940 * Cap fast_io_fail by dev_loss_tmo or
941 * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
942 */
943 if ((val >= rport->dev_loss_tmo) ||
944 (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
945 return -EINVAL;
946
931 rport->fast_io_fail_tmo = val; 947 rport->fast_io_fail_tmo = val;
932 } 948 }
933 return count; 949 return count;
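
Taken together, the two store handlers above enforce a single rule. A sketch of it as one predicate (the helper is illustrative, not transport-class API): fast_io_fail_tmo must stay below dev_loss_tmo and within SCSI_DEVICE_BLOCK_MAX_TIMEOUT, and dev_loss_tmo may exceed that cap only once fast_io_fail_tmo is set.

#include <linux/types.h>

static bool example_fc_tmos_valid(int fast_io_fail_tmo,
				  unsigned long dev_loss_tmo)
{
	/* fast_io_fail disabled: dev_loss_tmo carries the block cap */
	if (fast_io_fail_tmo == -1)
		return dev_loss_tmo <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT;

	/* fast_io_fail set: it must stay under both limits, and
	 * dev_loss_tmo is then free to exceed the block cap */
	return fast_io_fail_tmo >= 0 &&
	       fast_io_fail_tmo <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT &&
	       (unsigned long)fast_io_fail_tmo < dev_loss_tmo;
}
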
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f27e52d963d3..927e99cb7225 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -155,6 +155,17 @@ static struct {
155sas_bitfield_name_search(linkspeed, sas_linkspeed_names) 155sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
156sas_bitfield_name_set(linkspeed, sas_linkspeed_names) 156sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
157 157
158static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev)
159{
160 struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target);
161 struct sas_end_device *rdev;
162
163 BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
164
165 rdev = rphy_to_end_device(rphy);
166 return rdev;
167}
168
158static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, 169static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
159 struct sas_rphy *rphy) 170 struct sas_rphy *rphy)
160{ 171{
@@ -358,6 +369,85 @@ void sas_remove_host(struct Scsi_Host *shost)
358} 369}
359EXPORT_SYMBOL(sas_remove_host); 370EXPORT_SYMBOL(sas_remove_host);
360 371
372/**
373 * sas_tlr_supported - check the TLR bit in VPD page 0x90
374 * @sdev: scsi device struct
375 *
376 * Check whether Transport Layer Retries are supported.
377 * If VPD page 0x90 is present, TLR is supported.
378 *
379 */
380unsigned int
381sas_tlr_supported(struct scsi_device *sdev)
382{
383 const int vpd_len = 32;
384 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
385 char *buffer = kzalloc(vpd_len, GFP_KERNEL);
386 int ret = 0;
387
388 if (!buffer || scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len))
389 goto out;
390
391 /*
392 * Magic numbers: the VPD Protocol page (0x90)
393 * has a 4 byte header and then one entry per device port
394 * the TLR bit is at offset 8 on each port entry
395 * if we take the first port, that's at total offset 12
396 */
397 ret = buffer[12] & 0x01;
398
399 out:
400 kfree(buffer);
401 rdev->tlr_supported = ret;
402 return ret;
403
404}
405EXPORT_SYMBOL_GPL(sas_tlr_supported);
406
407/**
408 * sas_disable_tlr - disable Transport Layer Retries
409 * @sdev: scsi device struct
410 *
411 * Setting the tlr_enabled flag to 0.
412 *
413 */
414void
415sas_disable_tlr(struct scsi_device *sdev)
416{
417 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
418
419 rdev->tlr_enabled = 0;
420}
421EXPORT_SYMBOL_GPL(sas_disable_tlr);
422
423/**
424 * sas_enable_tlr - enable Transport Layer Retries
425 * @sdev: scsi device struct
426 *
427 * Setting the tlr_enabled flag to 1 if TLR is supported.
428 *
429 */
430void sas_enable_tlr(struct scsi_device *sdev)
431{
432 unsigned int tlr_supported;
433 tlr_supported = sas_tlr_supported(sdev);
434
435 if (tlr_supported) {
436 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
437
438 rdev->tlr_enabled = 1;
439 }
440
441 return;
442}
443EXPORT_SYMBOL_GPL(sas_enable_tlr);
444
445unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
446{
447 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
448 return rdev->tlr_enabled;
449}
450EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
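
A hedged sketch of how a SAS LLD might consume these exports from its slave_configure callback (the flow is illustrative only; the function and the per-command gating are assumptions, not an in-tree consumer):

static int example_slave_configure(struct scsi_device *sdev)
{
	/* Probe VPD page 0x90 once and latch the result in the rphy */
	if (sas_tlr_supported(sdev))
		sas_enable_tlr(sdev);

	return 0;
}

/* Later, per command, the LLD would gate its TLR bits on:
 *   if (sas_is_tlr_enabled(sdev)) { ... }
 */
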
361 451
362/* 452/*
363 * SAS Phy attributes 453 * SAS Phy attributes
@@ -1146,15 +1236,10 @@ sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
1146int sas_read_port_mode_page(struct scsi_device *sdev) 1236int sas_read_port_mode_page(struct scsi_device *sdev)
1147{ 1237{
1148 char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; 1238 char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
1149 struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target); 1239 struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
1150 struct sas_end_device *rdev;
1151 struct scsi_mode_data mode_data; 1240 struct scsi_mode_data mode_data;
1152 int res, error; 1241 int res, error;
1153 1242
1154 BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
1155
1156 rdev = rphy_to_end_device(rphy);
1157
1158 if (!buffer) 1243 if (!buffer)
1159 return -ENOMEM; 1244 return -ENOMEM;
1160 1245
@@ -1207,6 +1292,10 @@ sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout,
1207 "%d\n", int); 1292 "%d\n", int);
1208sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout, 1293sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout,
1209 "%d\n", int); 1294 "%d\n", int);
1295sas_end_dev_simple_attr(tlr_supported, tlr_supported,
1296 "%d\n", int);
1297sas_end_dev_simple_attr(tlr_enabled, tlr_enabled,
1298 "%d\n", int);
1210 1299
1211static DECLARE_TRANSPORT_CLASS(sas_expander_class, 1300static DECLARE_TRANSPORT_CLASS(sas_expander_class,
1212 "sas_expander", NULL, NULL, NULL); 1301 "sas_expander", NULL, NULL, NULL);
@@ -1733,6 +1822,8 @@ sas_attach_transport(struct sas_function_template *ft)
1733 SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning); 1822 SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning);
1734 SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout); 1823 SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout);
1735 SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout); 1824 SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout);
1825 SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported);
1826 SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled);
1736 i->end_dev_attrs[count] = NULL; 1827 i->end_dev_attrs[count] = NULL;
1737 1828
1738 count = 0; 1829 count = 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 255da53e5a01..1dd4d8407694 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1196,19 +1196,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1196 SCpnt->result = 0; 1196 SCpnt->result = 0;
1197 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1197 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1198 break; 1198 break;
1199 case ABORTED_COMMAND: 1199 case ABORTED_COMMAND: /* DIF: Target detected corruption */
1200 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */ 1200 case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
1201 scsi_print_result(SCpnt); 1201 if (sshdr.asc == 0x10)
1202 scsi_print_sense("sd", SCpnt);
1203 good_bytes = sd_completed_bytes(SCpnt); 1202 good_bytes = sd_completed_bytes(SCpnt);
1204 }
1205 break;
1206 case ILLEGAL_REQUEST:
1207 if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
1208 scsi_print_result(SCpnt);
1209 scsi_print_sense("sd", SCpnt);
1210 good_bytes = sd_completed_bytes(SCpnt);
1211 }
1212 break; 1203 break;
1213 default: 1204 default:
1214 break; 1205 break;
@@ -1218,8 +1209,19 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1218 sd_dif_complete(SCpnt, good_bytes); 1209 sd_dif_complete(SCpnt, good_bytes);
1219 1210
1220 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type) 1211 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1221 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) 1212 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
1213
1214 /* We have to print a failed command here as the
1215 * extended CDB gets freed before scsi_io_completion()
1216 * is called.
1217 */
1218 if (result)
1219 scsi_print_command(SCpnt);
1220
1222 mempool_free(SCpnt->cmnd, sd_cdb_pool); 1221 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1222 SCpnt->cmnd = NULL;
1223 SCpnt->cmd_len = 0;
1224 }
1223 1225
1224 return good_bytes; 1226 return good_bytes;
1225} 1227}
@@ -1946,13 +1948,13 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1946{ 1948{
1947 struct request_queue *q = sdkp->disk->queue; 1949 struct request_queue *q = sdkp->disk->queue;
1948 unsigned int sector_sz = sdkp->device->sector_size; 1950 unsigned int sector_sz = sdkp->device->sector_size;
1949 char *buffer; 1951 const int vpd_len = 32;
1952 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
1950 1953
1951 /* Block Limits VPD */ 1954 if (!buffer ||
1952 buffer = scsi_get_vpd_page(sdkp->device, 0xb0); 1955 /* Block Limits VPD */
1953 1956 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
1954 if (buffer == NULL) 1957 goto out;
1955 return;
1956 1958
1957 blk_queue_io_min(sdkp->disk->queue, 1959 blk_queue_io_min(sdkp->disk->queue,
1958 get_unaligned_be16(&buffer[6]) * sector_sz); 1960 get_unaligned_be16(&buffer[6]) * sector_sz);
@@ -1984,6 +1986,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1984 get_unaligned_be32(&buffer[32]) & ~(1 << 31); 1986 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
1985 } 1987 }
1986 1988
1989 out:
1987 kfree(buffer); 1990 kfree(buffer);
1988} 1991}
1989 1992
@@ -1993,20 +1996,23 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1993 */ 1996 */
1994static void sd_read_block_characteristics(struct scsi_disk *sdkp) 1997static void sd_read_block_characteristics(struct scsi_disk *sdkp)
1995{ 1998{
1996 char *buffer; 1999 unsigned char *buffer;
1997 u16 rot; 2000 u16 rot;
2001 const int vpd_len = 32;
1998 2002
1999 /* Block Device Characteristics VPD */ 2003 buffer = kmalloc(vpd_len, GFP_KERNEL);
2000 buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
2001 2004
2002 if (buffer == NULL) 2005 if (!buffer ||
2003 return; 2006 /* Block Device Characteristics VPD */
2007 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
2008 goto out;
2004 2009
2005 rot = get_unaligned_be16(&buffer[4]); 2010 rot = get_unaligned_be16(&buffer[4]);
2006 2011
2007 if (rot == 1) 2012 if (rot == 1)
2008 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); 2013 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
2009 2014
2015 out:
2010 kfree(buffer); 2016 kfree(buffer);
2011} 2017}
2012 2018
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 55b034b72708..1d7a8780e00c 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -448,13 +448,17 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
448 .addr = 0, 448 .addr = 0,
449 }; 449 };
450 450
451 buf = scsi_get_vpd_page(sdev, 0x83); 451 buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
452 if (!buf) 452 if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
453 return; 453 goto free;
454 454
455 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); 455 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
456 456
457 vpd_len = ((buf[2] << 8) | buf[3]) + 4; 457 vpd_len = ((buf[2] << 8) | buf[3]) + 4;
458 kfree(buf);
459 buf = kmalloc(vpd_len, GFP_KERNEL);
460 if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
461 goto free;
458 462
459 desc = buf + 4; 463 desc = buf + 4;
460 while (desc < buf + vpd_len) { 464 while (desc < buf + vpd_len) {
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 54023d41fd15..26e8e0e6b8dd 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1070,7 +1070,7 @@ static int option_setup(char *str) {
1070 char *cur = str; 1070 char *cur = str;
1071 int i = 1; 1071 int i = 1;
1072 1072
1073 while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) { 1073 while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
1074 ints[i++] = simple_strtoul(cur, NULL, 0); 1074 ints[i++] = simple_strtoul(cur, NULL, 0);
1075 1075
1076 if ((cur = strchr(cur, ',')) != NULL) cur++; 1076 if ((cur = strchr(cur, ',')) != NULL) cur++;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index d2604c813a20..e4ac5829b637 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1069,7 +1069,8 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
1069 free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); 1069 free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
1070} 1070}
1071 1071
1072static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq) 1072static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
1073 unsigned int *irq)
1073{ 1074{
1074 struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; 1075 struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
1075 int ret; 1076 int ret;